From xeioex at nginx.com Sat Oct 1 04:11:59 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Sat, 01 Oct 2022 04:11:59 +0000 Subject: [njs] Fixed parsing of environ variables when duplicate keys are present. Message-ID: details: https://hg.nginx.org/njs/rev/d6a15aa909cd branches: changeset: 1972:d6a15aa909cd user: Dmitry Volyntsev date: Fri Sep 30 17:35:52 2022 -0700 description: Fixed parsing of environ variables when duplicate keys are present. This closes #581 issue on Github. diffstat: src/njs_builtin.c | 22 +++++++++++++++++++--- 1 files changed, 19 insertions(+), 3 deletions(-) diffs (39 lines): diff -r b61a7a4f286e -r d6a15aa909cd src/njs_builtin.c --- a/src/njs_builtin.c Thu Sep 29 16:32:52 2022 -0700 +++ b/src/njs_builtin.c Fri Sep 30 17:35:52 2022 -0700 @@ -1795,7 +1795,7 @@ njs_env_hash_init(njs_vm_t *vm, njs_lvlh char **ep; u_char *val, *entry; njs_int_t ret; - njs_object_prop_t *prop; + njs_object_prop_t *prop, *prev; njs_lvlhsh_query_t lhq; lhq.replace = 0; @@ -1836,8 +1836,24 @@ njs_env_hash_init(njs_vm_t *vm, njs_lvlh ret = njs_lvlhsh_insert(hash, &lhq); if (njs_slow_path(ret != NJS_OK)) { - njs_internal_error(vm, "lvlhsh insert failed"); - return NJS_ERROR; + if (ret == NJS_ERROR) { + njs_internal_error(vm, "lvlhsh insert failed"); + return NJS_ERROR; + } + + /* ret == NJS_DECLINED: entry already exists */ + + /* + * Always using the first element among the duplicates + * and ignoring the rest. + */ + + prev = lhq.value; + + if (!njs_values_same(&prop->value, &prev->value)) { + njs_vm_warn(vm, "environment variable \"%V\" has more than one" + " value\n", &lhq.key); + } } } From xeioex at nginx.com Sat Oct 1 04:12:01 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Sat, 01 Oct 2022 04:12:01 +0000 Subject: [njs] Improved population of process.env object. Message-ID: details: https://hg.nginx.org/njs/rev/8517d9593c30 branches: changeset: 1973:8517d9593c30 user: Dmitry Volyntsev date: Thu Sep 29 18:48:09 2022 -0700 description: Improved population of process.env object. 1) Keys are always casted to upper case. 2) Keys and values are converted to safe Unicode strings. 
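To illustrate why the keys are upper-cased per Unicode code point rather than per byte, here is a minimal standalone C sketch (illustrative only, not njs code): byte-wise toupper() leaves the multibyte UTF-8 sequence for 'ü' untouched, which is why the patch below decodes the name and upper-cases it with njs_utf8_upper_case().

#include <ctype.h>
#include <stdio.h>

int main(void)
{
    /* "müll" in UTF-8: 'ü' is the two-byte sequence 0xC3 0xBC */
    unsigned char   name[] = "m\xC3\xBCll";
    unsigned char  *p;

    for (p = name; *p != '\0'; p++) {
        /* only ASCII bytes are affected; this prints "MüLL" */
        *p = (unsigned char) toupper(*p);
    }

    printf("%s\n", name);

    return 0;
}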
diffstat: src/njs_builtin.c | 27 +++++++++++++++++++++------ src/njs_unix.h | 2 ++ src/test/njs_unit_test.c | 8 ++++++++ 3 files changed, 31 insertions(+), 6 deletions(-) diffs (99 lines): diff -r d6a15aa909cd -r 8517d9593c30 src/njs_builtin.c --- a/src/njs_builtin.c Fri Sep 30 17:35:52 2022 -0700 +++ b/src/njs_builtin.c Thu Sep 29 18:48:09 2022 -0700 @@ -107,9 +107,6 @@ static const njs_object_type_init_t *con }; -extern char **environ; - - njs_inline njs_int_t njs_object_hash_init(njs_vm_t *vm, njs_lvlhsh_t *hash, const njs_object_init_t *init) @@ -1793,9 +1790,13 @@ static njs_int_t njs_env_hash_init(njs_vm_t *vm, njs_lvlhsh_t *hash, char **environment) { char **ep; - u_char *val, *entry; + u_char *dst; + ssize_t length; + uint32_t cp; njs_int_t ret; + const u_char *val, *entry, *s, *end; njs_object_prop_t *prop, *prev; + njs_string_prop_t string; njs_lvlhsh_query_t lhq; lhq.replace = 0; @@ -1818,14 +1819,28 @@ njs_env_hash_init(njs_vm_t *vm, njs_lvlh continue; } - ret = njs_string_set(vm, &prop->name, entry, val - entry); + ret = njs_string_create(vm, &prop->name, (char *) entry, val - entry); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } + (void) njs_string_prop(&string, &prop->name); + + length = string.length; + s = string.start; + end = s + string.size; + dst = (u_char *) s; + + while (length != 0) { + cp = njs_utf8_upper_case(&s, end); + dst = njs_utf8_encode(dst, cp); + length--; + } + val++; - ret = njs_string_set(vm, &prop->value, val, njs_strlen(val)); + ret = njs_string_create(vm, &prop->value, (char *) val, + njs_strlen(val)); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } diff -r d6a15aa909cd -r 8517d9593c30 src/njs_unix.h --- a/src/njs_unix.h Fri Sep 30 17:35:52 2022 -0700 +++ b/src/njs_unix.h Thu Sep 29 18:48:09 2022 -0700 @@ -47,6 +47,8 @@ #include +extern char **environ; + #if defined(PATH_MAX) #define NJS_MAX_PATH PATH_MAX #else diff -r d6a15aa909cd -r 8517d9593c30 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Fri Sep 30 17:35:52 2022 -0700 +++ b/src/test/njs_unit_test.c Thu Sep 29 18:48:09 2022 -0700 @@ -13877,6 +13877,9 @@ static njs_unit_test_t njs_test[] = { njs_str("Object.values(process)"), njs_str("") }, + { njs_str("Object.keys(process.env).sort()"), + njs_str("DUP,TZ") }, + { njs_str("Object.values()"), njs_str("TypeError: cannot convert undefined argument to object") }, @@ -24160,9 +24163,14 @@ main(int argc, char **argv) return (ret == NJS_DONE) ? EXIT_SUCCESS: EXIT_FAILURE; } + environ = NULL; + (void) putenv((char *) "TZ=UTC"); tzset(); + (void) putenv((char *) "DUP=bar"); + (void) putenv((char *) "dup=foo"); + njs_mm_denormals(1); njs_memzero(&stat, sizeof(njs_stat_t)); From xeioex at nginx.com Sat Oct 1 04:12:03 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Sat, 01 Oct 2022 04:12:03 +0000 Subject: [njs] Parser: simplified njs_parser_string_create() using existing API. Message-ID: details: https://hg.nginx.org/njs/rev/80b59b59b7d8 branches: changeset: 1974:80b59b59b7d8 user: Dmitry Volyntsev date: Fri Sep 30 17:40:57 2022 -0700 description: Parser: simplified njs_parser_string_create() using existing API. 
diffstat: src/njs_parser.c | 39 +++++++++++++-------------------------- 1 files changed, 13 insertions(+), 26 deletions(-) diffs (49 lines): diff -r 8517d9593c30 -r 80b59b59b7d8 src/njs_parser.c --- a/src/njs_parser.c Thu Sep 29 18:48:09 2022 -0700 +++ b/src/njs_parser.c Fri Sep 30 17:40:57 2022 -0700 @@ -8307,32 +8307,19 @@ njs_int_t njs_parser_string_create(njs_vm_t *vm, njs_lexer_token_t *token, njs_value_t *value) { - u_char *dst; - size_t size, length; - njs_str_t *src; - const u_char *p, *end; - njs_unicode_decode_t ctx; - - src = &token->text; - - njs_utf8_decode_init(&ctx); - - length = njs_utf8_stream_length(&ctx, src->start, src->length, 1, 0, &size); - - dst = njs_string_alloc(vm, value, size, length); - if (njs_slow_path(dst == NULL)) { - return NJS_ERROR; - } - - p = src->start; - end = src->start + src->length; - - njs_utf8_decode_init(&ctx); - - (void) njs_utf8_stream_encode(&ctx, p, end, dst, 1, 0); - - if (length > NJS_STRING_MAP_STRIDE && size != length) { - njs_string_offset_map_init(value->long_string.data->start, size); + size_t length; + njs_str_t dst; + + length = njs_decode_utf8_length(&token->text, &dst.length); + dst.start = njs_string_alloc(vm, value, dst.length, length); + if (njs_slow_path(dst.start == NULL)) { + return NJS_ERROR; + } + + njs_decode_utf8(&dst, &token->text); + + if (length > NJS_STRING_MAP_STRIDE && dst.length != length) { + njs_string_offset_map_init(value->long_string.data->start, dst.length); } return NJS_OK; From mdounin at mdounin.ru Sat Oct 1 08:58:20 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 1 Oct 2022 11:58:20 +0300 Subject: [PATCH 11 of 11] SSL: automatic rotation of session ticket keys In-Reply-To: <12290DCD-4B09-4DA0-8057-16172C5F5D28@nginx.com> References: <5c26fe5f6ab0bf4c0d18.1661482878@vm-bsd.mdounin.ru> <384C1C36-43CA-490B-9559-DD77DE6346E6@nginx.com> <12290DCD-4B09-4DA0-8057-16172C5F5D28@nginx.com> Message-ID: Hello! On Thu, Sep 29, 2022 at 08:00:03PM +0400, Sergey Kandaurov wrote: > > On 28 Sep 2022, at 22:37, Maxim Dounin wrote: > > > > On Mon, Sep 26, 2022 at 02:17:18PM +0400, Sergey Kandaurov wrote: [...] > >> And by the way, while reviewing this patch, I noticed that > >> OpenSSL doesn't allow a client to gracefully renew TLSv1.2 session > >> when the client receives a new session ticket in resumed sessions. > >> In practice, it is visible when client resumes a not yet expired > >> session encrypted with not a fresh ticket key (after rotation), > >> which results in sending a new session ticket. > >> See ssl_update_cache() for the !s->hit condition. > >> In the opposite, BoringSSL always allows to renew TLSv1.2 sessions. > > > > You mean on the client side? Yes, it looks like > > ngx_ssl_new_client_session() won't be called for such a new > > session ticket, and updated ticket will be never saved. This > > might need to be worked around. > > Yes, I mean the client side. > > > > > This should be safe with the key rotation logic introduced in this > > patch though, given that the previous key is preserved till the > > last ticket encrypted with it is expected to expire. > > > > One of the possible solutions might be to avoid re-encryption of > > tickets with the new key, as the old key is anyway expected to be > > available till the session expires. > > I don't think it's worth the effort. If I got you right, and > as far as I understand, re-encrypting the ticket essentially > means sending a fresh session (renewal). Well, not really. Re-encryption of a ticket does not imply session renewal. 
Further, doing so implies security risk: if we renew a session during re-encryption, this makes it possible to create essentially infinite sessions. And, for example, if a session used a client certificate, this effectively means that this certificate will never expire and cannot be revoked. With TLSv1.2, OpenSSL follows this logic: session expiration time is set when a session is created, and ticket re-encryption only re-encrypts the session, but doesn't change session expiration. As such, any certificate validation which happens during session creation needs to be redone once session timeout expires - and this makes it possible to implement certificate revocation. On the other hand, as implemented for TLSv1.3 at least in OpenSSL it seems not to be the case. Every ticket sent to the client actually creates a new session with _updated_ expiration time. As such, it is possible to create a session authenticated with a client certificate, and use this session indefinitely, even after the certificate will expire and/or will be revoked. This seems to be a security issue in OpenSSL. BoringSSL seems to behave similarly with TLSv1.3, that is, it updates session expiration time, making it possible to use an authenticated session for much longer than session timeout configured. But BoringSSL also has session's auth_timeout, which prevents indefinite use of the session. The auth_timeout value is hardcoded to 7 days (SSL_DEFAULT_SESSION_AUTH_TIMEOUT), and does not seem to be adjustable (only with SSL_SESSION_set_timeout(), which is documented to be a function for writing tests). I would rather say it is also a security issue in BoringSSL, though it's slightly better than in OpenSSL due to the 7 days limit. > Avoid doing that will > result in eventual session expiration and a full SSL handshake. > Please correct me if I'm wrong. It is expected to result in a full SSL handshake when session timeout expires, see above. With TLSv1.2, re-encryption is expected to switch tickets to the new key faster, without waiting for the full session timeout, so one can throw away the old key faster (at a cost of not being able to decrypt some valid tickets which weren't re-encrypted though). Avoiding re-encryption might save some useless work on re-encryption itself which might not be really needed due to the client-side issue you've mentioned. It's at most a minor optimization though. It is more complicated with TLSv1.3 though, both due to the issue outlined above and the fact that tickets are often used just once, notably with Chrome (see https://trac.nginx.org/nginx/ticket/1892 for details). [...] -- Maxim Dounin http://mdounin.ru/ From yar at nginx.com Mon Oct 3 20:17:26 2022 From: yar at nginx.com (=?utf-8?q?Yaroslav_Zhuravlev?=) Date: Mon, 03 Oct 2022 21:17:26 +0100 Subject: [PATCH] Documented automatic rotation of TLS session ticket keys Message-ID: <547c4be44f0db08923b7.1664828246@ORK-ML-00007151> xml/en/docs/http/ngx_http_ssl_module.xml | 5 ++++- xml/ru/docs/http/ngx_http_ssl_module.xml | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... 
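Regarding the ticket re-encryption mechanics discussed above: with OpenSSL's session ticket callback the server decides, per resumption, whether a ticket decrypted with an older key should be re-issued under the current key. A minimal sketch of that contract follows (illustrative only; the two-key store, key sizes and digests are assumptions, not nginx's actual ngx_ssl_ticket_key_callback()):

#include <string.h>

#include <openssl/hmac.h>
#include <openssl/rand.h>
#include <openssl/ssl.h>

/* hypothetical two-key store: keys[0] is current, keys[1] is previous */

typedef struct {
    unsigned char  name[16];
    unsigned char  aes_key[16];
    unsigned char  hmac_key[32];
} ticket_key_t;

static ticket_key_t  keys[2];

static int
ticket_key_cb(SSL *s, unsigned char key_name[16], unsigned char *iv,
    EVP_CIPHER_CTX *ectx, HMAC_CTX *hctx, int enc)
{
    size_t  i;

    if (enc) {
        /* new ticket: always encrypt with the current key */

        if (RAND_bytes(iv, 16) != 1) {
            return -1;
        }

        memcpy(key_name, keys[0].name, 16);
        EVP_EncryptInit_ex(ectx, EVP_aes_128_cbc(), NULL, keys[0].aes_key, iv);
        HMAC_Init_ex(hctx, keys[0].hmac_key, 32, EVP_sha256(), NULL);

        return 1;
    }

    /* resumption: look up the key the ticket was encrypted with */

    for (i = 0; i < 2; i++) {
        if (memcmp(key_name, keys[i].name, 16) != 0) {
            continue;
        }

        HMAC_Init_ex(hctx, keys[i].hmac_key, 32, EVP_sha256(), NULL);
        EVP_DecryptInit_ex(ectx, EVP_aes_128_cbc(), NULL, keys[i].aes_key, iv);

        /*
         * Returning 2 instead of 1 asks the library to send a fresh
         * ticket encrypted with the current key; with TLSv1.2 this
         * re-encrypts the session without extending its lifetime.
         */

        return (i == 0) ? 1 : 2;
    }

    return 0;    /* unknown key name: fall back to a full handshake */
}

/* installed with SSL_CTX_set_tlsext_ticket_key_cb(ctx, ticket_key_cb) */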
Name: nginx.org.patch Type: text/x-patch Size: 1882 bytes Desc: not available URL: From yar at nginx.com Mon Oct 3 20:21:52 2022 From: yar at nginx.com (=?utf-8?q?Yaroslav_Zhuravlev?=) Date: Mon, 03 Oct 2022 21:21:52 +0100 Subject: [PATCH] Documented behaviour of a single server in upstream with keepalive Message-ID: xml/en/docs/http/ngx_http_fastcgi_module.xml | 6 ++++-- xml/en/docs/http/ngx_http_grpc_module.xml | 6 ++++-- xml/en/docs/http/ngx_http_proxy_module.xml | 8 +++++--- xml/en/docs/http/ngx_http_upstream_module.xml | 7 ++++++- 4 files changed, 19 insertions(+), 8 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx.org.patch Type: text/x-patch Size: 3945 bytes Desc: not available URL: From pluknet at nginx.com Tue Oct 4 14:22:39 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 4 Oct 2022 18:22:39 +0400 Subject: [PATCH] Documented automatic rotation of TLS session ticket keys In-Reply-To: <547c4be44f0db08923b7.1664828246@ORK-ML-00007151> References: <547c4be44f0db08923b7.1664828246@ORK-ML-00007151> Message-ID: <42481660-0678-462A-A683-452BC044440C@nginx.com> > On 4 Oct 2022, at 00:17, Yaroslav Zhuravlev wrote: > > xml/en/docs/http/ngx_http_ssl_module.xml | 5 ++++- > xml/ru/docs/http/ngx_http_ssl_module.xml | 5 ++++- > 2 files changed, 8 insertions(+), 2 deletions(-) > > > # HG changeset patch > # User Yaroslav Zhuravlev > # Date 1664828002 -3600 > # Mon Oct 03 21:13:22 2022 +0100 > # Node ID 547c4be44f0db08923b7dd33bca262d009219a3a > # Parent 9708787aafc70744296baceb2aa0092401a4ef34 > Documented automatic rotation of TLS session ticket keys. > > diff --git a/xml/en/docs/http/ngx_http_ssl_module.xml b/xml/en/docs/http/ngx_http_ssl_module.xml > --- a/xml/en/docs/http/ngx_http_ssl_module.xml > +++ b/xml/en/docs/http/ngx_http_ssl_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_ssl_module.html" > lang="en" > - rev="58"> > + rev="59"> > >
> > @@ -690,6 +690,9 @@ > about 4000 sessions. > Each shared cache should have an arbitrary name. > A cache with the same name can be used in several virtual servers. > +In shared cache, This part looks redundant, as it's already dedicated to shared cache. > +TLS session ticket keys > +are automatically generated, stored, and periodically rotated. - missed - need to clarify relationship with ssl_session_ticket_key, e.g.: Additionally, TLS session ticket keys are automatically generated, stored, and periodically rotated unless explicitly configured using the directive (1.23.2). [..] -- Sergey Kandaurov From e.grebenshchikov at f5.com Tue Oct 4 14:35:21 2022 From: e.grebenshchikov at f5.com (=?iso-8859-1?q?Eugene_Grebenschikov?=) Date: Tue, 04 Oct 2022 07:35:21 -0700 Subject: [PATCH] Tests: http resolver with ipv4/ipv6 parameters Message-ID: <238226015a212a0786cf.1664894121@DHNVMN3.localdomain> Patch subject is complete summary. -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-tests.patch Type: text/x-patch Size: 4975 bytes Desc: not available URL: From pluknet at nginx.com Tue Oct 4 14:51:21 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 4 Oct 2022 18:51:21 +0400 Subject: [PATCH] Documented behaviour of a single server in upstream with keepalive In-Reply-To: References: Message-ID: <7F5A69D6-41DE-4C88-BAA8-43128C66DC4D@nginx.com> > On 4 Oct 2022, at 00:21, Yaroslav Zhuravlev wrote: > > xml/en/docs/http/ngx_http_fastcgi_module.xml | 6 ++++-- > xml/en/docs/http/ngx_http_grpc_module.xml | 6 ++++-- > xml/en/docs/http/ngx_http_proxy_module.xml | 8 +++++--- > xml/en/docs/http/ngx_http_upstream_module.xml | 7 ++++++- > 4 files changed, 19 insertions(+), 8 deletions(-) > > > # HG changeset patch > # User Yaroslav Zhuravlev > # Date 1663861151 -3600 > # Thu Sep 22 16:39:11 2022 +0100 > # Node ID aa3505dc76f13086703543cb079a13e48c57386e > # Parent 9708787aafc70744296baceb2aa0092401a4ef34 > Documented behaviour of a single server in upstream with keepalive. Translation needed. > > diff --git a/xml/en/docs/http/ngx_http_fastcgi_module.xml b/xml/en/docs/http/ngx_http_fastcgi_module.xml > --- a/xml/en/docs/http/ngx_http_fastcgi_module.xml > +++ b/xml/en/docs/http/ngx_http_fastcgi_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_fastcgi_module.html" > lang="en" > - rev="53"> > + rev="54"> > >
> > @@ -1071,7 +1071,9 @@ > > > error > -an error occurred while establishing a connection with the > +an error occurred while establishing > +or reusing > +a connection with the > server, passing a request to it, or reading the response header; > > timeout > diff --git a/xml/en/docs/http/ngx_http_grpc_module.xml b/xml/en/docs/http/ngx_http_grpc_module.xml > --- a/xml/en/docs/http/ngx_http_grpc_module.xml > +++ b/xml/en/docs/http/ngx_http_grpc_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_grpc_module.html" > lang="en" > - rev="8"> > + rev="9"> > >
> > @@ -215,7 +215,9 @@ > > > error > -an error occurred while establishing a connection with the > +an error occurred while establishing > +or reusing > +a connection with the > server, passing a request to it, or reading the response header; > > timeout > diff --git a/xml/en/docs/http/ngx_http_proxy_module.xml b/xml/en/docs/http/ngx_http_proxy_module.xml > --- a/xml/en/docs/http/ngx_http_proxy_module.xml > +++ b/xml/en/docs/http/ngx_http_proxy_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_proxy_module.html" > lang="en" > - rev="75"> > + rev="76"> > >
> > @@ -1288,8 +1288,10 @@ > > > error > -an error occurred while establishing a connection with the > -server, passing a request to it, or reading the response header; > +an error occurred while establishing > +or reusing > +a connection with the > +server, passing a request to it, reading the response header; change in this line is superfluous > > timeout > a timeout has occurred while establishing a connection with the > diff --git a/xml/en/docs/http/ngx_http_upstream_module.xml b/xml/en/docs/http/ngx_http_upstream_module.xml > --- a/xml/en/docs/http/ngx_http_upstream_module.xml > +++ b/xml/en/docs/http/ngx_http_upstream_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_upstream_module.html" > lang="en" > - rev="88"> > + rev="89"> > >
> > @@ -351,6 +351,11 @@ > If there is only a single server in a group, max_fails, > fail_timeout and slow_start parameters > are ignored, and such a server will never be considered unavailable. > +If an error occurred while trying to reuse a > +keepalive connection > +with a single server, and the request is allowed to be passed to the > +next server > +on error, such server will be selected again. > > > Overall, it's a good attempt to describe a specific behaviour seen in the single server / keepalive / next upstream configuration. It pops up when nginx sends a request in a cached keepalive connection that enters the process of shutdown by backend. -- Sergey Kandaurov From pluknet at nginx.com Wed Oct 5 09:41:10 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 5 Oct 2022 13:41:10 +0400 Subject: [PATCH] Tests: http resolver with ipv4/ipv6 parameters In-Reply-To: <238226015a212a0786cf.1664894121@DHNVMN3.localdomain> <238226015a212a0786cf.1664894121@DHNVMN3.localdomain> References: <238226015a212a0786cf.1664894121@DHNVMN3.localdomain> <238226015a212a0786cf.1664894121@DHNVMN3.localdomain> Message-ID: <20221005094110.hfqflmq6et7srpgp@Y9MQ9X2QVV> On Tue, Oct 04, 2022 at 07:35:21AM -0700, Eugene Grebenschikov via nginx-devel wrote: > # HG changeset patch > # User Eugene Grebenschikov > # Date 1664868942 25200 > # Tue Oct 04 00:35:42 2022 -0700 > # Node ID 238226015a212a0786cf720a214bc0eec60c122b > # Parent c2c188c914880df9a04706482b809329aef29467 > Tests: http resolver with ipv4/ipv6 parameters. > > diff -r c2c188c91488 -r 238226015a21 http_resolver_ipv4.t > --- /dev/null Thu Jan 01 00:00:00 1970 +0000 > +++ b/http_resolver_ipv4.t Tue Oct 04 00:35:42 2022 -0700 > @@ -0,0 +1,208 @@ > +#!/usr/bin/perl > + > +# (C) Sergey Kandaurov > +# (C) Eugene Grebenschikov > +# (C) Nginx, Inc. > + > +# Tests for http resolver with ipv4/ipv6 parameters. > + > +############################################################################### > + > +use warnings; > +use strict; > + > +use Test::More; > + > +BEGIN { use FindBin; chdir($FindBin::Bin); } > + > +use lib 'lib'; > +use Test::Nginx; > + > +############################################################################### > + > +select STDERR; $| = 1; > +select STDOUT; $| = 1; > + > +my $t = Test::Nginx->new()->has(qw/http proxy rewrite/); > + > +$t->write_file_expand('nginx.conf', <<'EOF'); > + > +%%TEST_GLOBALS%% > + > +daemon off; > + > +events { > +} > + > +http { > + %%TEST_GLOBALS_HTTP%% > + > + server { > + listen 127.0.0.1:8080; > + server_name localhost; > + > + location / { > + proxy_pass http://$arg_h:%%PORT_8081%%/; > + resolver 127.0.0.1:%%PORT_8980_UDP%% ipv4=on ipv6=on; > + } > + > + location /ipv4 { > + proxy_pass http://$arg_h:%%PORT_8081%%/; > + resolver 127.0.0.1:%%PORT_8980_UDP%% ipv4=on ipv6=off; > + } > + > + location /ipv6 { > + proxy_pass http://$arg_h:%%PORT_8081%%/; > + resolver 127.0.0.1:%%PORT_8980_UDP%% ipv4=off ipv6=on; > + } > + } > + > + server { > + listen 127.0.0.1:8081; > + server_name localhost; > + > + location / { > + return 200 "ipv4"; > + } > + } > + > + server { > + listen [::1]:%%PORT_8081%%; > + server_name localhost; > + > + location / { > + return 200 "ipv6"; > + } > + } > +} > + > +EOF > + > +$t->try_run('no resolver ipv4')->plan(3); > + > +$t->run_daemon(\&dns_daemon, port(8980), $t); > +$t->waitforfile($t->testdir . '/' . 
port(8980)); > + > +############################################################################### > + > +like(many('/', 10), qr/ipv4: \d+, ipv6: \d+/, 'ipv4 ipv6'); > +is(many('/ipv4', 10), 'ipv4: 10', 'ipv4 only'); > +is(many('/ipv6', 10), 'ipv6: 10', 'ipv6 only'); > + > +############################################################################### > + > +sub many { > + my ($uri, $count) = @_; > + my %hits; > + > + for (1 .. $count) { > + if (http_get("$uri?h=example.com") =~ /(ipv(4|6))/) {; > + $hits{$1} = 0 unless defined $hits{$1}; > + $hits{$1}++; > + } > + } > + > + return join ', ', map { $_ . ": " . $hits{$_} } sort keys %hits; > +} > + > +############################################################################### > + > +sub reply_handler { > + my ($recv_data, $port, $state) = @_; state isn't used, otherwise looks good > + > + my (@name, @rdata); > + > + use constant NOERROR => 0; > + > + use constant A => 1; > + use constant AAAA => 28; > + > + use constant IN => 1; > + > + # default values > + > + my ($hdr, $rcode, $ttl) = (0x8180, NOERROR, 3600); > + > + # decode name > + > + my ($len, $offset) = (undef, 12); > + while (1) { > + $len = unpack("\@$offset C", $recv_data); > + last if $len == 0; > + $offset++; > + push @name, unpack("\@$offset A$len", $recv_data); > + $offset += $len; > + } > + > + $offset -= 1; > + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); > + > + my $name = join('.', @name); > + if ($name eq 'example.com') { > + if ($type == A) { > + push @rdata, rd_addr($ttl, '127.0.0.1'); > + } > + if ($type == AAAA) { > + push @rdata, rd_addr6($ttl, "::1"); > + } > + } > + > + $len = @name; > + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, > + 0, 0, @name, $type, $class) . join('', @rdata); > +} > + > +sub rd_addr { > + my ($ttl, $addr) = @_; > + > + my $code = 'split(/\./, $addr)'; > + > + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); > +} > + > +sub expand_ip6 { > + my ($addr) = @_; > + > + substr ($addr, index($addr, "::"), 2) = > + join "0", map { ":" } (0 .. 8 - (split /:/, $addr) + 1); > + map { hex "0" x (4 - length $_) . "$_" } split /:/, $addr; > +} > + > +sub rd_addr6 { > + my ($ttl, $addr) = @_; > + > + pack 'n3N nn8', 0xc00c, AAAA, IN, $ttl, 16, expand_ip6($addr); > +} > + > +sub dns_daemon { > + my ($port, $t) = @_; > + > + my ($data, $recv_data); > + my $socket = IO::Socket::INET->new( > + LocalAddr => '127.0.0.1', > + LocalPort => $port, > + Proto => 'udp', > + ) > + or die "Can't create listening socket: $!\n"; > + > + # track number of relevant queries > + > + my %state = ( > + cnamecnt => 0, > + twocnt => 0, > + manycnt => 0, > + ); > + > + # signal we are ready > + > + open my $fh, '>', $t->testdir() . '/' . $port; > + close $fh; > + > + while (1) { > + $socket->recv($recv_data, 65536); > + $data = reply_handler($recv_data, $port, \%state); > + $socket->send($data); > + } > +} > + > +############################################################################### From pluknet at nginx.com Wed Oct 5 12:46:30 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 5 Oct 2022 16:46:30 +0400 Subject: [nginx-tests] Tests: upstream certificates specified as an empty string. 
In-Reply-To: References: Message-ID: <9EC76BAC-8E0D-428D-9AD7-CD4A4D01644D@nginx.com> > On 29 Sep 2022, at 04:02, Eugene Grebenschikov via nginx-devel wrote: > > changeset: 1797:c2c188c91488 > tag: tip > user: Eugene Grebenshchikov > date: Wed Sep 28 16:29:50 2022 -0700 > summary: Tests: upstream certificates specified as an empty string. > Please avoid sending patches in this inappropriate format. Please avoid using MS user agent, it is known to produce badly formatted emails, such as this one. Thank you. Make sure to check how to submit the resulting changeset: http://nginx.org/en/docs/contributing_changes.html > diff -r e1fd234baac0 -r c2c188c91488 grpc_ssl.t > --- a/grpc_ssl.t Tue Sep 27 16:11:56 2022 -0700 > +++ b/grpc_ssl.t Wed Sep 28 16:29:50 2022 -0700 I don't see the reason to touch every single module. The logic to process certificates resides in the common, protocol-independent code. > @@ -29,7 +29,7 @@ > $t->{_configure_args} =~ /OpenSSL ([\d\.]+)/; > plan(skip_all => 'OpenSSL too old') unless defined $1 and $1 ge '1.0.2'; > > -$t->write_file_expand('nginx.conf', <<'EOF')->plan(38); > +$t->write_file_expand('nginx.conf', <<'EOF')->plan(39); > > %%TEST_GLOBALS%% > > @@ -46,6 +46,8 @@ > keepalive 1; > } > > + grpc_ssl_session_reuse off; > + You may need to justify why it is needed on this configuration level, but see below. > server { > listen 127.0.0.1:8081 http2 ssl; > server_name localhost; > @@ -61,6 +63,7 @@ > location / { > grpc_pass 127.0.0.1:8082; > add_header X-Connection $connection; > + add_header X-Verify $ssl_client_verify; > } > } > > @@ -89,6 +92,13 @@ > } > } > > + location /nocert { > + grpc_pass grpcs://127.0.0.1:8081; > + > + grpc_ssl_certificate ""; > + grpc_ssl_certificate_key ""; > + } > + This will break on the stable branch. > location /KeepAlive { > grpc_pass grpcs://u; > } > @@ -232,6 +242,14 @@ > ($frame) = grep { $_->{type} eq "HEADERS" } @$frames; > is($frame->{headers}{'x-connection'}, $c, 'keepalive - connection reuse'); > > +# no client certificate > + > +$f->{http_start}('/nocert'); > +$f->{data}('Hello'); > +$frames = $f->{http_end}(); > +($frame) = grep { $_->{type} eq "HEADERS" } @$frames; > +is($frame->{headers}{'x-verify'}, 'NONE', 'request - no client certificate'); > + > ############################################################################### > > sub grpc { > diff -r e1fd234baac0 -r c2c188c91488 proxy_ssl_certificate.t > --- a/proxy_ssl_certificate.t Tue Sep 27 16:11:56 2022 -0700 > +++ b/proxy_ssl_certificate.t Wed Sep 28 16:29:50 2022 -0700 > @@ -24,7 +24,7 @@ > select STDOUT; $| = 1; > > my $t = Test::Nginx->new()->has(qw/http http_ssl proxy/) > - ->has_daemon('openssl')->plan(5); > + ->has_daemon('openssl')->plan(6); > > $t->write_file_expand('nginx.conf', <<'EOF'); > > @@ -62,6 +62,12 @@ > proxy_ssl_certificate_key 3.example.com.key; > proxy_ssl_password_file password; > } > + > + location /nocert { > + proxy_pass https://127.0.0.1:8082/; > + proxy_ssl_certificate ""; > + proxy_ssl_certificate_key ""; > + } A practical test would be to state that such configuration cancels the effect of the proxy_ssl_certificate inherited from the previous configuration level, such as described in the commit log. Overall, the below should be enough: # HG changeset patch # User Sergey Kandaurov # Date 1664973459 -14400 # Wed Oct 05 16:37:39 2022 +0400 # Node ID 3cad4f7697e995054d8976b543b7b340c09584fa # Parent 88a098b00534ccd403c0704589a94e232f29029f Tests: proxy_ssl_certificate inheritance test. 
diff --git a/proxy_ssl_certificate_empty.t b/proxy_ssl_certificate_empty.t new file mode 100644 --- /dev/null +++ b/proxy_ssl_certificate_empty.t @@ -0,0 +1,108 @@ +#!/usr/bin/perl + +# (C) Sergey Kandaurov +# (C) Nginx, Inc. + +# Tests for http proxy module with proxy certificate to ssl backend. +# The proxy_ssl_certificate directive with an empty line cancels inheritance. + +############################################################################### + +use warnings; +use strict; + +use Test::More; + +BEGIN { use FindBin; chdir($FindBin::Bin); } + +use lib 'lib'; +use Test::Nginx; + +############################################################################### + +select STDERR; $| = 1; +select STDOUT; $| = 1; + +my $t = Test::Nginx->new()->has(qw/http http_ssl proxy/) + ->has_daemon('openssl'); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +http { + %%TEST_GLOBALS_HTTP%% + + server { + listen 127.0.0.1:8080; + server_name localhost; + + proxy_ssl_session_reuse off; + + proxy_ssl_certificate 1.example.com.crt; + proxy_ssl_certificate_key 1.example.com.key; + + location /verify { + proxy_pass https://127.0.0.1:8081/; + } + + location /cancel { + proxy_pass https://127.0.0.1:8081/; + proxy_ssl_certificate ""; + proxy_ssl_certificate_key ""; + } + } + + server { + listen 127.0.0.1:8081 ssl; + server_name localhost; + + ssl_certificate 2.example.com.crt; + ssl_certificate_key 2.example.com.key; + + ssl_verify_client optional; + ssl_client_certificate 1.example.com.crt; + + location / { + add_header X-Verify $ssl_client_verify; + } + } +} + +EOF + +$t->write_file('openssl.conf', <testdir(); + +foreach my $name ('1.example.com', '2.example.com') { + system('openssl req -x509 -new ' + . "-config $d/openssl.conf -subj /CN=$name/ " + . "-out $d/$name.crt -keyout $d/$name.key " + . ">>$d/openssl.out 2>&1") == 0 + or die "Can't create certificate for $name: $!\n"; +} + +sleep 1 if $^O eq 'MSWin32'; + +$t->write_file('index.html', ''); + +$t->try_run('no empty upstream certificate')->plan(2); + +############################################################################### + +like(http_get('/verify'), qr/X-Verify: SUCCESS/ms, 'verify certificate'); +like(http_get('/cancel'), qr/X-Verify: NONE/ms, 'cancel certificate'); + +############################################################################### -- Sergey Kandaurov From e.grebenshchikov at f5.com Thu Oct 6 06:39:58 2022 From: e.grebenshchikov at f5.com (=?iso-8859-1?q?Eugene_Grebenschikov?=) Date: Wed, 05 Oct 2022 23:39:58 -0700 Subject: [PATCH] Tests: http resolver with ipv4/ipv6 parameters Message-ID: <595dee133b0a3681e955.1665038398@DHNVMN3.localdomain> # HG changeset patch # User Eugene Grebenschikov # Date 1665038181 25200 # Wed Oct 05 23:36:21 2022 -0700 # Node ID 595dee133b0a3681e95567368191c559ad89b3a1 # Parent fac6ad94e062ee30356338c943843c4b34d5f532 Tests: http resolver with ipv4/ipv6 parameters. diff -r fac6ad94e062 -r 595dee133b0a http_resolver_ipv4.t --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/http_resolver_ipv4.t Wed Oct 05 23:36:21 2022 -0700 @@ -0,0 +1,208 @@ +#!/usr/bin/perl + +# (C) Sergey Kandaurov +# (C) Eugene Grebenschikov +# (C) Nginx, Inc. + +# Tests for http resolver with ipv4/ipv6 parameters. 
+ +############################################################################### + +use warnings; +use strict; + +use Test::More; + +BEGIN { use FindBin; chdir($FindBin::Bin); } + +use lib 'lib'; +use Test::Nginx; + +############################################################################### + +select STDERR; $| = 1; +select STDOUT; $| = 1; + +my $t = Test::Nginx->new()->has(qw/http proxy rewrite/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +http { + %%TEST_GLOBALS_HTTP%% + + server { + listen 127.0.0.1:8080; + server_name localhost; + + location / { + proxy_pass http://$arg_h:%%PORT_8081%%/; + resolver 127.0.0.1:%%PORT_8980_UDP%% ipv4=on ipv6=on; + } + + location /ipv4 { + proxy_pass http://$arg_h:%%PORT_8081%%/; + resolver 127.0.0.1:%%PORT_8980_UDP%% ipv4=on ipv6=off; + } + + location /ipv6 { + proxy_pass http://$arg_h:%%PORT_8081%%/; + resolver 127.0.0.1:%%PORT_8980_UDP%% ipv4=off ipv6=on; + } + } + + server { + listen 127.0.0.1:8081; + server_name localhost; + + location / { + return 200 "ipv4"; + } + } + + server { + listen [::1]:%%PORT_8081%%; + server_name localhost; + + location / { + return 200 "ipv6"; + } + } +} + +EOF + +$t->try_run('no resolver ipv4')->plan(3); + +$t->run_daemon(\&dns_daemon, port(8980), $t); +$t->waitforfile($t->testdir . '/' . port(8980)); + +############################################################################### + +like(many('/', 10), qr/ipv4: \d+, ipv6: \d+/, 'ipv4 ipv6'); +is(many('/ipv4', 10), 'ipv4: 10', 'ipv4 only'); +is(many('/ipv6', 10), 'ipv6: 10', 'ipv6 only'); + +############################################################################### + +sub many { + my ($uri, $count) = @_; + my %hits; + + for (1 .. $count) { + if (http_get("$uri?h=example.com") =~ /(ipv(4|6))/) {; + $hits{$1} = 0 unless defined $hits{$1}; + $hits{$1}++; + } + } + + return join ', ', map { $_ . ": " . $hits{$_} } sort keys %hits; +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $port) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + + use constant A => 1; + use constant AAAA => 28; + + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl) = (0x8180, NOERROR, 3600); + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + + my $name = join('.', @name); + if ($name eq 'example.com') { + if ($type == A) { + push @rdata, rd_addr($ttl, '127.0.0.1'); + } + if ($type == AAAA) { + push @rdata, rd_addr6($ttl, "::1"); + } + } + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . join('', @rdata); +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub expand_ip6 { + my ($addr) = @_; + + substr ($addr, index($addr, "::"), 2) = + join "0", map { ":" } (0 .. 8 - (split /:/, $addr) + 1); + map { hex "0" x (4 - length $_) . 
"$_" } split /:/, $addr; +} + +sub rd_addr6 { + my ($ttl, $addr) = @_; + + pack 'n3N nn8', 0xc00c, AAAA, IN, $ttl, 16, expand_ip6($addr); +} + +sub dns_daemon { + my ($port, $t) = @_; + + my ($data, $recv_data); + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => $port, + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + # track number of relevant queries + + my %state = ( + cnamecnt => 0, + twocnt => 0, + manycnt => 0, + ); + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . $port; + close $fh; + + while (1) { + $socket->recv($recv_data, 65536); + $data = reply_handler($recv_data, $port, \%state); + $socket->send($data); + } +} + +############################################################################### From e.grebenshchikov at f5.com Thu Oct 6 19:03:23 2022 From: e.grebenshchikov at f5.com (=?iso-8859-1?q?Eugene_Grebenschikov?=) Date: Thu, 06 Oct 2022 12:03:23 -0700 Subject: [PATCH] Tests: http resolver with ipv4/ipv6 parameters In-Reply-To: <595dee133b0a3681e955.1665038398@DHNVMN3.localdomain> References: <595dee133b0a3681e955.1665038398@DHNVMN3.localdomain> Message-ID: <061aa601e6a33ed9e867.1665083003@DHNVMN3.localdomain> # HG changeset patch # User Eugene Grebenschikov # Date 1665082566 25200 # Thu Oct 06 11:56:06 2022 -0700 # Node ID 061aa601e6a33ed9e8671fbd3c2a150c27c1d9a6 # Parent fac6ad94e062ee30356338c943843c4b34d5f532 Tests: http resolver with ipv4/ipv6 parameters. diff -r fac6ad94e062 -r 061aa601e6a3 http_resolver_ipv4.t --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/http_resolver_ipv4.t Thu Oct 06 11:56:06 2022 -0700 @@ -0,0 +1,200 @@ +#!/usr/bin/perl + +# (C) Sergey Kandaurov +# (C) Eugene Grebenschikov +# (C) Nginx, Inc. + +# Tests for http resolver with ipv4/ipv6 parameters. + +############################################################################### + +use warnings; +use strict; + +use Test::More; + +BEGIN { use FindBin; chdir($FindBin::Bin); } + +use lib 'lib'; +use Test::Nginx; + +############################################################################### + +select STDERR; $| = 1; +select STDOUT; $| = 1; + +my $t = Test::Nginx->new()->has(qw/http proxy rewrite/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +http { + %%TEST_GLOBALS_HTTP%% + + server { + listen 127.0.0.1:8080; + server_name localhost; + + location / { + proxy_pass http://$arg_h:%%PORT_8081%%/; + resolver 127.0.0.1:%%PORT_8980_UDP%% ipv4=on ipv6=on; + } + + location /ipv4 { + proxy_pass http://$arg_h:%%PORT_8081%%/; + resolver 127.0.0.1:%%PORT_8980_UDP%% ipv4=on ipv6=off; + } + + location /ipv6 { + proxy_pass http://$arg_h:%%PORT_8081%%/; + resolver 127.0.0.1:%%PORT_8980_UDP%% ipv4=off ipv6=on; + } + } + + server { + listen 127.0.0.1:8081; + server_name localhost; + + location / { + return 200 "ipv4"; + } + } + + server { + listen [::1]:%%PORT_8081%%; + server_name localhost; + + location / { + return 200 "ipv6"; + } + } +} + +EOF + +$t->try_run('no resolver ipv4')->plan(3); + +$t->run_daemon(\&dns_daemon, port(8980), $t); +$t->waitforfile($t->testdir . '/' . port(8980)); + +############################################################################### + +like(many('/', 10), qr/ipv4: \d+, ipv6: \d+/, 'ipv4 ipv6'); +is(many('/ipv4', 10), 'ipv4: 10', 'ipv4 only'); +is(many('/ipv6', 10), 'ipv6: 10', 'ipv6 only'); + +############################################################################### + +sub many { + my ($uri, $count) = @_; + my %hits; + + for (1 .. 
$count) { + if (http_get("$uri?h=example.com") =~ /(ipv(4|6))/) {; + $hits{$1} = 0 unless defined $hits{$1}; + $hits{$1}++; + } + } + + return join ', ', map { $_ . ": " . $hits{$_} } sort keys %hits; +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $port) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + + use constant A => 1; + use constant AAAA => 28; + + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl) = (0x8180, NOERROR, 3600); + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + + my $name = join('.', @name); + if ($name eq 'example.com') { + if ($type == A) { + push @rdata, rd_addr($ttl, '127.0.0.1'); + } + if ($type == AAAA) { + push @rdata, rd_addr6($ttl, "::1"); + } + } + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . join('', @rdata); +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub expand_ip6 { + my ($addr) = @_; + + substr ($addr, index($addr, "::"), 2) = + join "0", map { ":" } (0 .. 8 - (split /:/, $addr) + 1); + map { hex "0" x (4 - length $_) . "$_" } split /:/, $addr; +} + +sub rd_addr6 { + my ($ttl, $addr) = @_; + + pack 'n3N nn8', 0xc00c, AAAA, IN, $ttl, 16, expand_ip6($addr); +} + +sub dns_daemon { + my ($port, $t) = @_; + + my ($data, $recv_data); + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => $port, + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . $port; + close $fh; + + while (1) { + $socket->recv($recv_data, 65536); + $data = reply_handler($recv_data, $port); + $socket->send($data); + } +} + +############################################################################### From pluknet at nginx.com Thu Oct 6 22:50:13 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 07 Oct 2022 02:50:13 +0400 Subject: [PATCH 0 of 4] quic libressl support Message-ID: Various patches to make QUIC work with LibreSSL 3.6.0 that features experimental support for the BoringSSL QUIC API. From pluknet at nginx.com Thu Oct 6 22:50:14 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 07 Oct 2022 02:50:14 +0400 Subject: [PATCH 1 of 4] QUIC: using native TLSv1.3 cipher suite constants In-Reply-To: References: Message-ID: <7cfc0bc3fb0484d26778.1665096614@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1665096319 -14400 # Fri Oct 07 02:45:19 2022 +0400 # Branch quic # Node ID 7cfc0bc3fb0484d26778b01dc60c88e8a38fb5d3 # Parent 28fc35b71d7566d5a7e04968c70291a239f05b6f QUIC: using native TLSv1.3 cipher suite constants. BoringSSL aligned with OpenSSL on TLS1_3_CK_* macros, while LibreSSL chose OpenSSL naming from the beginning. See here for details: https://boringssl.googlesource.com/boringssl/+/dfddbc4ded This allows to drop our own variants. Compatibility with older libraries that lack this naming is preserved for a while, though. 
Additionally, transition from to SSL_CIPHER_get_id() fixes build error with LibreSSL that doesn't implement SSL_CIPHER_get_protocol_id(). diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -15,9 +15,12 @@ #define NGX_QUIC_AES_128_KEY_LEN 16 -#define NGX_AES_128_GCM_SHA256 0x1301 -#define NGX_AES_256_GCM_SHA384 0x1302 -#define NGX_CHACHA20_POLY1305_SHA256 0x1303 +#ifndef TLS1_3_CK_AES_128_GCM_SHA256 +#define TLS1_3_CK_AES_128_GCM_SHA256 0x03001301 +#define TLS1_3_CK_AES_256_GCM_SHA384 0x03001302 +#define TLS1_3_CK_CHACHA20_POLY1305_SHA256 \ + 0x03001303 +#endif #ifdef OPENSSL_IS_BORINGSSL @@ -90,12 +93,12 @@ ngx_quic_ciphers(ngx_uint_t id, ngx_quic ngx_int_t len; if (level == ssl_encryption_initial) { - id = NGX_AES_128_GCM_SHA256; + id = TLS1_3_CK_AES_128_GCM_SHA256; } switch (id) { - case NGX_AES_128_GCM_SHA256: + case TLS1_3_CK_AES_128_GCM_SHA256: #ifdef OPENSSL_IS_BORINGSSL ciphers->c = EVP_aead_aes_128_gcm(); #else @@ -106,7 +109,7 @@ ngx_quic_ciphers(ngx_uint_t id, ngx_quic len = 16; break; - case NGX_AES_256_GCM_SHA384: + case TLS1_3_CK_AES_256_GCM_SHA384: #ifdef OPENSSL_IS_BORINGSSL ciphers->c = EVP_aead_aes_256_gcm(); #else @@ -117,7 +120,7 @@ ngx_quic_ciphers(ngx_uint_t id, ngx_quic len = 32; break; - case NGX_CHACHA20_POLY1305_SHA256: + case TLS1_3_CK_CHACHA20_POLY1305_SHA256: #ifdef OPENSSL_IS_BORINGSSL ciphers->c = EVP_aead_chacha20_poly1305(); #else @@ -642,7 +645,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ peer_secret = is_write ? &keys->secrets[level].server : &keys->secrets[level].client; - keys->cipher = SSL_CIPHER_get_protocol_id(cipher); + keys->cipher = SSL_CIPHER_get_id(cipher); key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); From pluknet at nginx.com Thu Oct 6 22:50:15 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 07 Oct 2022 02:50:15 +0400 Subject: [PATCH 2 of 4] QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1665096324 -14400 # Fri Oct 07 02:45:24 2022 +0400 # Branch quic # Node ID bca678bec15a7c4cabf256b0fb0931607ea2ae4b # Parent 7cfc0bc3fb0484d26778b01dc60c88e8a38fb5d3 QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL. This function is present in QuicTLS only. After SSL_READ_EARLY_DATA_SUCCESS became visible in LibreSSL together with experimental QUIC API, this required to revise the conditional compilation test to use more narrow macros. 
diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c --- a/src/event/quic/ngx_event_quic_ssl.c +++ b/src/event/quic/ngx_event_quic_ssl.c @@ -557,7 +557,7 @@ ngx_quic_init_connection(ngx_connection_ return NGX_ERROR; } -#ifdef SSL_READ_EARLY_DATA_SUCCESS +#if (!defined LIBRESSL_VERSION_NUMBER && !defined OPENSSL_IS_BORINGSSL) if (SSL_CTX_get_max_early_data(qc->conf->ssl->ctx)) { SSL_set_quic_early_data_enabled(ssl_conn, 1); } From pluknet at nginx.com Thu Oct 6 22:50:16 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 07 Oct 2022 02:50:16 +0400 Subject: [PATCH 3 of 4] QUIC: support for setting QUIC methods with LibreSSL In-Reply-To: References: Message-ID: <69df425adfc0cddb9762.1665096616@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1665096352 -14400 # Fri Oct 07 02:45:52 2022 +0400 # Branch quic # Node ID 69df425adfc0cddb97629c78d771be285cd36b56 # Parent bca678bec15a7c4cabf256b0fb0931607ea2ae4b QUIC: support for setting QUIC methods with LibreSSL. Setting QUIC methods is converted to use C99 designated initializers for simplicity, as LibreSSL 3.6.0 has different SSL_QUIC_METHOD layout. Additionally, stick with set_read_secret/set_write_secret callbacks. LibreSSL prefers set_encryption_secrets over them but has unexpectedly incompatible behaviour regarding passing the secrets in arguments in a separate call, unlike this is documented in old BoringSSL sources. diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c --- a/src/event/quic/ngx_event_quic_ssl.c +++ b/src/event/quic/ngx_event_quic_ssl.c @@ -18,7 +18,7 @@ #define NGX_QUIC_MAX_BUFFERED 65535 -#if BORINGSSL_API_VERSION >= 10 +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); @@ -40,19 +40,19 @@ static ngx_int_t ngx_quic_crypto_input(n static SSL_QUIC_METHOD quic_method = { -#if BORINGSSL_API_VERSION >= 10 - ngx_quic_set_read_secret, - ngx_quic_set_write_secret, +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER + .set_read_secret = ngx_quic_set_read_secret, + .set_write_secret = ngx_quic_set_write_secret, #else - ngx_quic_set_encryption_secrets, + .set_encryption_secrets = ngx_quic_set_encryption_secrets, #endif - ngx_quic_add_handshake_data, - ngx_quic_flush_flight, - ngx_quic_send_alert, + .add_handshake_data = ngx_quic_add_handshake_data, + .flush_flight = ngx_quic_flush_flight, + .send_alert = ngx_quic_send_alert, }; -#if BORINGSSL_API_VERSION >= 10 +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, From pluknet at nginx.com Thu Oct 6 22:50:17 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 07 Oct 2022 02:50:17 +0400 Subject: [PATCH 4 of 4] QUIC: removed compatibility with older BoringSSL API In-Reply-To: References: Message-ID: <4c1e6c90445c1d65a92e.1665096617@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1665096357 -14400 # Fri Oct 07 02:45:57 2022 +0400 # Branch quic # Node ID 4c1e6c90445c1d65a92ef797eb60e49c01c21441 # Parent 69df425adfc0cddb97629c78d771be285cd36b56 QUIC: removed compatibility with older BoringSSL API. 
SSL_CIPHER_get_protocol_id() appeared in BoringSSL somewhere between BORINGSSL_API_VERSION 12 and 13, and started to be used in nginx QUIC methods in a4c05aff8ec0 without macro test, which remained unnoticed. This justifies older BoringSSL API isn't used now and can be dropped. diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c --- a/src/event/quic/ngx_event_quic_ssl.c +++ b/src/event/quic/ngx_event_quic_ssl.c @@ -18,7 +18,7 @@ #define NGX_QUIC_MAX_BUFFERED 65535 -#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER +#if defined BORINGSSL_API_VERSION || defined LIBRESSL_VERSION_NUMBER static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); @@ -40,7 +40,7 @@ static ngx_int_t ngx_quic_crypto_input(n static SSL_QUIC_METHOD quic_method = { -#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER +#if defined BORINGSSL_API_VERSION || defined LIBRESSL_VERSION_NUMBER .set_read_secret = ngx_quic_set_read_secret, .set_write_secret = ngx_quic_set_write_secret, #else @@ -52,7 +52,7 @@ static SSL_QUIC_METHOD quic_method = { }; -#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER +#if defined BORINGSSL_API_VERSION || defined LIBRESSL_VERSION_NUMBER static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, @@ -602,7 +602,7 @@ ngx_quic_init_connection(ngx_connection_ return NGX_ERROR; } -#if BORINGSSL_API_VERSION >= 11 +#ifdef BORINGSSL_API_VERSION if (SSL_set_quic_early_data_context(ssl_conn, p, clen) == 0) { ngx_log_error(NGX_LOG_INFO, c->log, 0, "quic SSL_set_quic_early_data_context() failed"); From v.zhestikov at f5.com Fri Oct 7 01:30:02 2022 From: v.zhestikov at f5.com (Vadim Zhestikov) Date: Fri, 07 Oct 2022 01:30:02 +0000 Subject: [njs] Fixed double declaration detection in modules. Message-ID: details: https://hg.nginx.org/njs/rev/16442fa970ee branches: changeset: 1975:16442fa970ee user: Vadim Zhestikov date: Thu Oct 06 18:28:52 2022 -0700 description: Fixed double declaration detection in modules. 
diffstat: src/njs_variable.c | 15 +++++++++------ src/test/njs_unit_test.c | 4 ++++ 2 files changed, 13 insertions(+), 6 deletions(-) diffs (41 lines): diff -r 80b59b59b7d8 -r 16442fa970ee src/njs_variable.c --- a/src/njs_variable.c Fri Sep 30 17:40:57 2022 -0700 +++ b/src/njs_variable.c Thu Oct 06 18:28:52 2022 -0700 @@ -222,14 +222,17 @@ njs_variable_scope_find(njs_parser_t *pa return root; } - module = parser->vm->options.module || parser->module; + if (scope->parent == NULL) { + module = parser->vm->options.module || parser->module; - if (module) { - if (type == NJS_VARIABLE_FUNCTION - || var->type == NJS_VARIABLE_FUNCTION) - { - goto failed; + if (module) { + if (type == NJS_VARIABLE_FUNCTION + || var->type == NJS_VARIABLE_FUNCTION) + { + goto failed; + } } + } return root; diff -r 80b59b59b7d8 -r 16442fa970ee src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Fri Sep 30 17:40:57 2022 -0700 +++ b/src/test/njs_unit_test.c Thu Oct 06 18:28:52 2022 -0700 @@ -21296,6 +21296,10 @@ static njs_unit_test_t njs_module_test[ { njs_str("{ var f = 1; } function f() {};"), njs_str("SyntaxError: \"f\" has already been declared in 1") }, + + { njs_str("function f(v) {var f = v;}; f(1); f"), + njs_str("[object Function]") }, + }; From alvn at alvn.dk Fri Oct 7 10:52:58 2022 From: alvn at alvn.dk (Anders Nicolaisen) Date: Fri, 7 Oct 2022 12:52:58 +0200 Subject: [nginx] allowing auth_request to proxy TOO_MANY_REQUESTS Message-ID: Having the authentication server handling the 429_TOO_MANY_REQUESTS, allows it to accommodate users with varying limits; e.g. user1 is allowed 3 requests per second, and user2 is allowed 10 r/s. The auth_request location can still specify the cache times of the responses to whatever is wanted, like so: location = /auth { internal; proxy_pass http://localhost:8888/auth; proxy_pass_request_body off; # no need to send the POST body proxy_set_header Content-Length ""; proxy_set_header X-Original-URI $request_uri; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Real-Method $request_method; proxy_cache AUTH; proxy_cache_valid 200 10m; proxy_cache_valid 401 403 1m; proxy_cache_valid 429 1s; proxy_cache_use_stale timeout updating http_500 http_502; proxy_cache_lock on; proxy_cache_key $request_method$request_uri$http_authorization; } # HG changeset patch # User Anders L. V. Nicolaisen # Date 1665061111 0 # Thu Oct 06 12:58:31 2022 +0000 # Node ID 1d2e22110b6abe7476417d925cc39178c6bd7d9e # Parent ba5cf8f73a2d0a3615565bf9545f3d65216a0530 [nginx] allowing auth_request to proxy TOO_MANY_REQUESTS diff -r ba5cf8f73a2d -r 1d2e22110b6a src/http/modules/ngx_http_auth_request_module.c --- a/src/http/modules/ngx_http_auth_request_module.c Thu Sep 08 13:53:49 2022 +0400 +++ b/src/http/modules/ngx_http_auth_request_module.c Thu Oct 06 12:58:31 2022 +0000 @@ -138,6 +138,10 @@ return ctx->status; } + if (ctx->status == NGX_HTTP_TOO_MANY_REQUESTS) { + return ctx->status; + } + if (ctx->status == NGX_HTTP_UNAUTHORIZED) { sr = ctx->subrequest; -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Sat Oct 8 10:44:49 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 8 Oct 2022 13:44:49 +0300 Subject: [nginx] allowing auth_request to proxy TOO_MANY_REQUESTS In-Reply-To: References: Message-ID: Hello! 
On Fri, Oct 07, 2022 at 12:52:58PM +0200, Anders Nicolaisen via nginx-devel wrote: > Having the authentication server handling the 429_TOO_MANY_REQUESTS, > allows it to accommodate users with varying limits; > e.g. user1 is allowed 3 requests per second, and user2 is allowed 10 r/s. The auth_request module is to implement authentication, not request limiting. If you need to implement request limiting, readily available solutions would be: - Use X-Accel-Redirect instead, which is mostly equivalent to auth_request, yet makes it possible to return anything to the client. - Configure an error_page 403 with appropriate handling to return 429 (or whatever) based on additional headers from the auth server. Hope this helps. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Sun Oct 9 04:59:01 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 9 Oct 2022 07:59:01 +0300 Subject: [PATCH 11 of 11] SSL: automatic rotation of session ticket keys In-Reply-To: References: <5c26fe5f6ab0bf4c0d18.1661482878@vm-bsd.mdounin.ru> <384C1C36-43CA-490B-9559-DD77DE6346E6@nginx.com> <12290DCD-4B09-4DA0-8057-16172C5F5D28@nginx.com> Message-ID: Hello! On Sat, Oct 01, 2022 at 11:58:20AM +0300, Maxim Dounin wrote: > On Thu, Sep 29, 2022 at 08:00:03PM +0400, Sergey Kandaurov wrote: > > > > On 28 Sep 2022, at 22:37, Maxim Dounin wrote: > > > > > > On Mon, Sep 26, 2022 at 02:17:18PM +0400, Sergey Kandaurov wrote: > > [...] > > > >> And by the way, while reviewing this patch, I noticed that > > >> OpenSSL doesn't allow a client to gracefully renew TLSv1.2 session > > >> when the client receives a new session ticket in resumed sessions. > > >> In practice, it is visible when client resumes a not yet expired > > >> session encrypted with not a fresh ticket key (after rotation), > > >> which results in sending a new session ticket. > > >> See ssl_update_cache() for the !s->hit condition. > > >> In the opposite, BoringSSL always allows to renew TLSv1.2 sessions. > > > > > > You mean on the client side? Yes, it looks like > > > ngx_ssl_new_client_session() won't be called for such a new > > > session ticket, and updated ticket will be never saved. This > > > might need to be worked around. > > > > Yes, I mean the client side. > > > > > > > > This should be safe with the key rotation logic introduced in this > > > patch though, given that the previous key is preserved till the > > > last ticket encrypted with it is expected to expire. > > > > > > One of the possible solutions might be to avoid re-encryption of > > > tickets with the new key, as the old key is anyway expected to be > > > available till the session expires. > > > > I don't think it's worth the effort. If I got you right, and > > as far as I understand, re-encrypting the ticket essentially > > means sending a fresh session (renewal). > > Well, not really. Re-encryption of a ticket does not imply > session renewal. Further, doing so implies security risk: if we > renew a session during re-encryption, this makes it possible to > create essentially infinite sessions. And, for example, if a > session used a client certificate, this effectively means that > this certificate will never expire and cannot be revoked. > > With TLSv1.2, OpenSSL follows this logic: session expiration time > is set when a session is created, and ticket re-encryption only > re-encrypts the session, but doesn't change session expiration. 
> As such, any certificate validation which happens during session > creation needs to be redone once session timeout expires - and > this makes it possible to implement certificate revocation. > > On the other hand, as implemented for TLSv1.3 at least in OpenSSL > it seems not to be the case. Every ticket sent to the client > actually creates a new session with _updated_ expiration time. As > such, it is possible to create a session authenticated with a client > certificate, and use this session indefinitely, even after the > certificate will expire and/or will be revoked. > > This seems to be a security issue in OpenSSL. > > BoringSSL seems to behave similarly with TLSv1.3, that is, it > updates session expiration time, making it possible to use an > authenticated session for much longer than session timeout > configured. But BoringSSL also has session's auth_timeout, which > prevents indefinite use of the session. The auth_timeout value is > hardcoded to 7 days (SSL_DEFAULT_SESSION_AUTH_TIMEOUT), and does > not seem to be adjustable (only with SSL_SESSION_set_timeout(), > which is documented to be a function for writing tests). > > I would rather say it is also a security issue in BoringSSL, > though it's slightly better than in OpenSSL due to the 7 days > limit. For the record: https://github.com/openssl/openssl/issues/19341 Note that with the automatic ticket key rotation this issue with TLSv1.3 sessions becomes slightly worse in a typical configuration (with ssl_session_cache in shared memory, but without ssl_session_ticket_key explicitly set and/or ssl_session_tickets switched off). Notably, configuration reload is no longer enough to invalidate all tickets, since ticket keys are now preserved in shared memory. For example, consider that a CRL file is updated with new revocations, and nginx configuration is reloaded. New revocations will be loaded by nginx and will appear to work with new sessions, but can be easily bypassed by maintaining a previously established TLSv1.3 session. Previously, it was possible to bypass revocations in such scenario only if ticket keys were explicitly set or if session tickets were switched off and sessions were cached in shared memory. Given that we do not enable TLSv1.3 by default, we probably can ignore this and wait for appropriate fixes from the affected libraries. On the other hand, it might be a good idea to introduce a workaround, especially if we want to enable TLSv1.3 by default in the foreseeable future. The following patch seems to be simple enough and forces session timeouts for TLSv1.3 sessions to be as configured, for both OpenSSL and BoringSSL: # HG changeset patch # User Maxim Dounin # Date 1665286021 -10800 # Sun Oct 09 06:27:01 2022 +0300 # Node ID c0ec4df7ccbb95b7f2c2842f40012082991bed52 # Parent 37a4ac7ba1c5a003ab85f73d77767058af4eae30 SSL: workaround for session timeout handling with TLSv1.3. OpenSSL with TLSv1.3 updates the session creation time on session resumption and keeps the session timeout unmodified, making it possible to maintain the session forever, bypassing client certificate expiration and revocation. To make sure session timeouts are actually used, we now update the session creation time and reduce the session timeout accordingly. BoringSSL with TLSv1.3 ignores configured session timeouts and uses a hardcoded timeout instead, 7 days. So we update session timeout to the configured value as soon as a session is created. 
diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -1086,6 +1086,53 @@ ngx_ssl_info_callback(const ngx_ssl_conn #endif +#ifdef TLS1_3_VERSION + + if ((where & SSL_CB_ACCEPT_LOOP) == SSL_CB_ACCEPT_LOOP + && SSL_version(ssl_conn) == TLS1_3_VERSION) + { + time_t now, time, timeout, conf_timeout; + SSL_SESSION *sess; + + /* + * OpenSSL with TLSv1.3 updates the session creation time on + * session resumption and keeps the session timeout unmodified, + * making it possible to maintain the session forever, bypassing + * client certificate expiration and revocation. To make sure + * session timeouts are actually used, we now update the session + * creation time and reduce the session timeout accordingly. + * + * BoringSSL with TLSv1.3 ignores configured session timeouts + * and uses a hardcoded timeout instead, 7 days. So we update + * session timeout to the configured value as soon as a session + * is created. + */ + + c = ngx_ssl_get_connection((ngx_ssl_conn_t *) ssl_conn); + sess = SSL_get0_session(ssl_conn); + + if (!c->ssl->session_timeout_set && sess) { + c->ssl->session_timeout_set = 1; + + now = ngx_time(); + time = SSL_SESSION_get_time(sess); + timeout = SSL_SESSION_get_timeout(sess); + conf_timeout = SSL_CTX_get_timeout(c->ssl->session_ctx); + + timeout = ngx_min(timeout, conf_timeout); + + if (now - time >= timeout) { + SSL_SESSION_set1_id_context(sess, (unsigned char *) "", 0); + + } else { + SSL_SESSION_set_time(sess, now); + SSL_SESSION_set_timeout(sess, timeout - (now - time)); + } + } + } + +#endif + if ((where & SSL_CB_ACCEPT_LOOP) == SSL_CB_ACCEPT_LOOP) { c = ngx_ssl_get_connection((ngx_ssl_conn_t *) ssl_conn); diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -114,6 +114,7 @@ struct ngx_ssl_connection_s { unsigned no_send_shutdown:1; unsigned shutdown_without_free:1; unsigned handshake_buffer_set:1; + unsigned session_timeout_set:1; unsigned try_early_data:1; unsigned in_early:1; unsigned in_ocsp:1; -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Sun Oct 9 21:54:28 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 10 Oct 2022 00:54:28 +0300 Subject: [PATCH] fix weakness by logging of broken header by incorect proxy protocol (IDS/IPS/LOG-analysis) In-Reply-To: <20220928100230.a77g7sxygrbwei4x@N00W24XTQX> References: <20220928100230.a77g7sxygrbwei4x@N00W24XTQX> Message-ID: Hello! On Wed, Sep 28, 2022 at 02:02:30PM +0400, Roman Arutyunyan wrote: > Hi Sergey, > > On Mon, Sep 26, 2022 at 11:16:05PM +0200, Dipl. Ing. Sergey Brester via nginx-devel wrote: > > > > > > Hi, > > > > below is a patch to fix a weakness by logging of broken header by > > incorrect proxy protocol. > > > > If some service (IDS/IPS) analyzing or monitoring log-file, regularly > > formatted lines may be simply confused with lines written not escaped > > directly from buffer supplied from foreign source. > > Not to mention it may open a certain vector allowing "injection" of user > > input in order to avoid detection of failures or even to simulate > > malicious traffic from legitimate service. 
> > > > How to reproduce: > > > > - enable proxy_protocol for listener and start nginx (here localhost on > > port 80); > > - echo 'set s [socket localhost 80]; puts $s "test\ntest\ntest"; close $s' > > | tclsh > > > > Error-log before fix: > > > > 2022/09/26 19:29:58 [error] 10104#17144: *3 broken header: "test > > test > > test > > " while reading PROXY protocol, client: 127.0.0.1, server: 0.0.0.0:80 > > > > Error-log after fix: > > > > 2022/09/26 22:48:50 [error] 13868#6132: *1 broken header: > > "test→→test→→test→→" while reading PROXY protocol, client: 127.0.0.1, > > server: 0.0.0.0:80 > > > > It is not advisable to log such foreign user input unescaped to the > > formatted log-file: instead of "...\ntest\n..." the attacker can write a > > correctly formatted line simulating a 401-, 403-failure or rate-limit > > overrun, so IDS could block an innocent service or mistakenly ban a > > legitimate user. > > > > The patch proposes simplest escape (LF/CR-char with →, double quote with > > single quote and additionally every char larger or equal than 0x80 to > > avoid possible logging of "broken" utf-8 sequences or unsupported > > surrogates, just as a safest variant for not-valid foreign buffer) > > in-place in the malicious buffer directly (without mem-alloc, etc). > > > > Real life example - > > https://github.com/fail2ban/fail2ban/issues/3303#issuecomment-1148691902 > > Thanks for reporting this. The issue indeed needs to be fixed. Attached is > a patch similar to yours that does this. I don't think we need to do anything > beyond just cutting the first line since there's another similar place in > nginx - ngx_http_log_error_handler(), where exactly that is implemented. > > Whether we need to skip special characters when logging to nginx log is > a topic for a bigger discussion and this will require a much bigger patch. > I suggest that we only limit user data to the first line now. > > [..] > > -- > Roman Arutyunyan > # HG changeset patch > # User Roman Arutyunyan > # Date 1664359213 -14400 > # Wed Sep 28 14:00:13 2022 +0400 > # Node ID 001b2449cfd730fd688a7298458e25113c15a947 > # Parent 615268a957ab930dc4be49fe5f6f88cd7e377f12 > Log only the first line of user input on PROXY protocol v1 error. > > Previously, all received user input was logged. If a multi-line text was > received from client and logged, it could reduce log readability and also make > it harder to parse nginx log by scripts. The change brings to PROXY protocol > the same behavior that exists for HTTP request line in > ngx_http_log_error_handler(). > > diff --git a/src/core/ngx_proxy_protocol.c b/src/core/ngx_proxy_protocol.c > --- a/src/core/ngx_proxy_protocol.c > +++ b/src/core/ngx_proxy_protocol.c > @@ -185,8 +185,14 @@ skip: > > invalid: > > + for (p = buf; p != last; p++) { I would rather prefer "p < last", as in ngx_http_log_error_handler() and in the skip section of this function (and many other places). While there is no real difference, the "p < last" looks more in line with the existing code, and also slightly safer if due to a bug elsewhere "buf" will happen to be larger than "last". > + if (*p == CR || *p == LF) { > + break; > + } > + } > + > ngx_log_error(NGX_LOG_ERR, c->log, 0, > - "broken header: \"%*s\"", (size_t) (last - buf), buf); > + "broken header: \"%*s\"", (size_t) (p - buf), buf); > > return NULL; > } Otherwise looks good.
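For reference, with the suggested "p < last" comparison applied on top of the patch above, the error path would look roughly as follows (a plain sketch of the combined result, not the committed change):

invalid:

    /* log only the first line of the untrusted input, as in
     * ngx_http_log_error_handler() */

    for (p = buf; p < last; p++) {
        if (*p == CR || *p == LF) {
            break;
        }
    }

    ngx_log_error(NGX_LOG_ERR, c->log, 0,
                  "broken header: \"%*s\"", (size_t) (p - buf), buf);

    return NULL;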
-- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Sun Oct 9 23:55:30 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 10 Oct 2022 02:55:30 +0300 Subject: [PATCH] Documented behaviour of a single server in upstream with keepalive In-Reply-To: References: Message-ID: Hello! On Mon, Oct 03, 2022 at 09:21:52PM +0100, Yaroslav Zhuravlev wrote: > # HG changeset patch > # User Yaroslav Zhuravlev > # Date 1663861151 -3600 > # Thu Sep 22 16:39:11 2022 +0100 > # Node ID aa3505dc76f13086703543cb079a13e48c57386e > # Parent 9708787aafc70744296baceb2aa0092401a4ef34 > Documented behaviour of a single server in upstream with keepalive. > > diff --git a/xml/en/docs/http/ngx_http_fastcgi_module.xml b/xml/en/docs/http/ngx_http_fastcgi_module.xml > --- a/xml/en/docs/http/ngx_http_fastcgi_module.xml > +++ b/xml/en/docs/http/ngx_http_fastcgi_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_fastcgi_module.html" > lang="en" > - rev="53"> > + rev="54"> > >
> > @@ -1071,7 +1071,9 @@ > > > error > -an error occurred while establishing a connection with the > +an error occurred while establishing > +or reusing > +a connection with the > server, passing a request to it, or reading the response header; That's bullshit. No errors are reported "while reusing a connection". If there is an already established cached connection, it is simply used. Errors, if any, might happen later, while "passing a request to it". [...] > diff --git a/xml/en/docs/http/ngx_http_upstream_module.xml b/xml/en/docs/http/ngx_http_upstream_module.xml > --- a/xml/en/docs/http/ngx_http_upstream_module.xml > +++ b/xml/en/docs/http/ngx_http_upstream_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_upstream_module.html" > lang="en" > - rev="88"> > + rev="89"> > >
> > @@ -351,6 +351,11 @@ > If there is only a single server in a group, max_fails, > fail_timeout and slow_start parameters > are ignored, and such a server will never be considered unavailable. > +If an error occurred while trying to reuse a > +keepalive connection > +with a single server, and the request is allowed to be passed to the > +next server > +on error, such server will be selected again. > > If an error occurs? The "with a single server" clause looks wrong, we are talking about a group with only a single server here. It probably should be either "with such server" or "the server" (probably "... with such server ... the server will be ..." would be good enough considering the whole sentence). -- Maxim Dounin http://mdounin.ru/ From defan at nginx.com Mon Oct 10 16:33:12 2022 From: defan at nginx.com (=?iso-8859-1?q?Andrei_Belov?=) Date: Mon, 10 Oct 2022 20:33:12 +0400 Subject: [PATCH] Linux packages: key verification command adjusted Message-ID: <6bc630596c063fb7c85a.1665419592@qd724qwqtj.lan> # HG changeset patch # User Andrei Belov # Date 1665416594 -14400 # Mon Oct 10 19:43:14 2022 +0400 # Node ID 6bc630596c063fb7c85a35ff6173e75d3ca1982e # Parent 9708787aafc70744296baceb2aa0092401a4ef34 Linux packages: key verification command adjusted. diff --git a/xml/en/linux_packages.xml b/xml/en/linux_packages.xml --- a/xml/en/linux_packages.xml +++ b/xml/en/linux_packages.xml @@ -7,7 +7,7 @@
+ rev="80">
@@ -250,7 +250,7 @@ curl https://nginx.org/keys/nginx_signin Verify that the downloaded file contains the proper key: -gpg --dry-run --quiet --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg +gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg The output should contain the full fingerprint @@ -322,7 +322,7 @@ curl https://nginx.org/keys/nginx_signin Verify that the downloaded file contains the proper key: -gpg --dry-run --quiet --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg +gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg The output should contain the full fingerprint diff --git a/xml/ru/linux_packages.xml b/xml/ru/linux_packages.xml --- a/xml/ru/linux_packages.xml +++ b/xml/ru/linux_packages.xml @@ -7,7 +7,7 @@
+ rev="80">
@@ -250,7 +250,7 @@ curl https://nginx.org/keys/nginx_signin Проверьте, верный ли ключ был загружен: -gpg --dry-run --quiet --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg +gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg Вывод команды должен содержать полный отпечаток ключа @@ -321,7 +321,7 @@ curl https://nginx.org/keys/nginx_signin Проверьте, верный ли ключ был загружен: -gpg --dry-run --quiet --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg +gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg Вывод команды должен содержать полный отпечаток ключа From thresh at nginx.com Mon Oct 10 16:40:11 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Mon, 10 Oct 2022 20:40:11 +0400 Subject: [PATCH] Linux packages: key verification command adjusted In-Reply-To: <6bc630596c063fb7c85a.1665419592@qd724qwqtj.lan> References: <6bc630596c063fb7c85a.1665419592@qd724qwqtj.lan> Message-ID: <8769280e-d5cc-f658-755f-6b46e17d4a55@nginx.com> Hi, On 10/10/2022 8:33 PM, Andrei Belov wrote: > # HG changeset patch > # User Andrei Belov > # Date 1665416594 -14400 > # Mon Oct 10 19:43:14 2022 +0400 > # Node ID 6bc630596c063fb7c85a35ff6173e75d3ca1982e > # Parent 9708787aafc70744296baceb2aa0092401a4ef34 > Linux packages: key verification command adjusted. > > diff --git a/xml/en/linux_packages.xml b/xml/en/linux_packages.xml > --- a/xml/en/linux_packages.xml > +++ b/xml/en/linux_packages.xml > @@ -7,7 +7,7 @@ >
link="/en/linux_packages.html" > lang="en" > - rev="79"> > + rev="80"> > >
> > @@ -250,7 +250,7 @@ curl https://nginx.org/keys/nginx_signin > > Verify that the downloaded file contains the proper key: > > -gpg --dry-run --quiet --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg > +gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg > > > The output should contain the full fingerprint > @@ -322,7 +322,7 @@ curl https://nginx.org/keys/nginx_signin > > Verify that the downloaded file contains the proper key: > > -gpg --dry-run --quiet --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg > +gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg > > > The output should contain the full fingerprint > diff --git a/xml/ru/linux_packages.xml b/xml/ru/linux_packages.xml > --- a/xml/ru/linux_packages.xml > +++ b/xml/ru/linux_packages.xml > @@ -7,7 +7,7 @@ >
link="/ru/linux_packages.html" > lang="ru" > - rev="79"> > + rev="80"> > >
> > @@ -250,7 +250,7 @@ curl https://nginx.org/keys/nginx_signin > > Проверьте, верный ли ключ был загружен: > > -gpg --dry-run --quiet --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg > +gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg > > > Вывод команды должен содержать полный отпечаток ключа > @@ -321,7 +321,7 @@ curl https://nginx.org/keys/nginx_signin > > Проверьте, верный ли ключ был загружен: > > -gpg --dry-run --quiet --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg > +gpg --dry-run --quiet --no-keyring --import --import-options import-show /usr/share/keyrings/nginx-archive-keyring.gpg > > > Вывод команды должен содержать полный отпечаток ключа The problem might be there for unsupported/EOL distros that ship gnupg1 that lacks this option, but given the fact that we explicitly ask user to install gnupg2, the change looks good to me. From mdounin at mdounin.ru Tue Oct 11 01:20:52 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 11 Oct 2022 04:20:52 +0300 Subject: [PATCH] Core: support for reading PROXY protocol v2 TLVs In-Reply-To: <20220927094125.w7oo4g2quw3yyqfh@N00W24XTQX> References: <4b856f1dff939e4eb9c1.1661961135@arut-laptop> <20220905132318.s27wgtof6wuqde7x@N00W24XTQX> <20220909154658.fpnpndo2opnnzywx@N00W24XTQX> <20220913150304.k2fjjdxgesgzbilu@N00W24XTQX> <20220927094125.w7oo4g2quw3yyqfh@N00W24XTQX> Message-ID: Hello! On Tue, Sep 27, 2022 at 01:41:25PM +0400, Roman Arutyunyan wrote: [...] > # HG changeset patch > # User Roman Arutyunyan > # Date 1664263604 -14400 > # Tue Sep 27 11:26:44 2022 +0400 > # Node ID 38940ff7246574aa19a19c76b072073c34f191be > # Parent ba5cf8f73a2d0a3615565bf9545f3d65216a0530 > PROXY protocol v2 TLV variables. > > The variables have prefix $proxy_protocol_tlv_ and are accessible by name > and by type. Examples are: $proxy_protocol_tlv_0x01, $proxy_protocol_tlv_alpn. 
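As an illustration of the API added below, a module could also fetch a TLV value directly; a minimal sketch, assuming the patch is applied as posted and "c" is the ngx_connection_t of the client connection:

    ngx_str_t  name = ngx_string("alpn");
    ngx_str_t  value;

    switch (ngx_proxy_protocol_get_tlv(c, &name, &value)) {

    case NGX_OK:
        /* "value" points into the TLVs saved in c->proxy_protocol->tlvs */
        break;

    case NGX_DECLINED:
        /* no PROXY protocol header, or this TLV is not present */
        break;

    default: /* NGX_ERROR */
        /* malformed TLVs (or allocation failure) */
        break;
    }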
> > diff --git a/src/core/ngx_proxy_protocol.c b/src/core/ngx_proxy_protocol.c > --- a/src/core/ngx_proxy_protocol.c > +++ b/src/core/ngx_proxy_protocol.c > @@ -15,6 +15,12 @@ > > #define ngx_proxy_protocol_parse_uint16(p) ((p)[0] << 8 | (p)[1]) > > +#define ngx_proxy_protocol_parse_uint32(p) \ > + ( ((uint32_t) (p)[0] << 24) \ > + + ( (p)[1] << 16) \ > + + ( (p)[2] << 8) \ > + + ( (p)[3]) ) > + > > typedef struct { > u_char signature[12]; > @@ -40,6 +46,24 @@ typedef struct { > } ngx_proxy_protocol_inet6_addrs_t; > > > +typedef struct { > + u_char type; > + u_char len[2]; > +} ngx_proxy_protocol_tlv_t; > + > + > +typedef struct { > + u_char client; > + u_char verify[4]; > +} ngx_proxy_protocol_tlv_ssl_t; > + > + > +typedef struct { > + ngx_str_t name; > + ngx_uint_t type; > +} ngx_proxy_protocol_tlv_entry_t; > + > + > static u_char *ngx_proxy_protocol_read_addr(ngx_connection_t *c, u_char *p, > u_char *last, ngx_str_t *addr); > static u_char *ngx_proxy_protocol_read_port(u_char *p, u_char *last, > @@ -48,6 +72,26 @@ static u_char *ngx_proxy_protocol_v2_rea > u_char *last); > > > +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_entries[] = { > + { ngx_string("alpn"), 0x01 }, > + { ngx_string("authority"), 0x02 }, > + { ngx_string("unique_id"), 0x05 }, > + { ngx_string("ssl"), 0x20 }, > + { ngx_string("netns"), 0x30 }, > + { ngx_null_string, 0x00 } > +}; > + > + > +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_ssl_entries[] = { > + { ngx_string("version"), 0x21 }, > + { ngx_string("cn"), 0x22 }, > + { ngx_string("cipher"), 0x23 }, > + { ngx_string("sig_alg"), 0x24 }, > + { ngx_string("key_alg"), 0x25 }, > + { ngx_null_string, 0x00 } > +}; > + > + > u_char * > ngx_proxy_protocol_read(ngx_connection_t *c, u_char *buf, u_char *last) > { > @@ -412,11 +456,145 @@ ngx_proxy_protocol_v2_read(ngx_connectio > &pp->src_addr, pp->src_port, &pp->dst_addr, pp->dst_port); > > if (buf < end) { > - ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, > - "PROXY protocol v2 %z bytes of tlv ignored", end - buf); > + pp->tlvs.data = ngx_pnalloc(c->pool, end - buf); > + if (pp->tlvs.data == NULL) { > + return NULL; > + } > + > + ngx_memcpy(pp->tlvs.data, buf, end - buf); > + pp->tlvs.len = end - buf; > } > > c->proxy_protocol = pp; > > return end; > } > + > + > +ngx_int_t > +ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, ngx_str_t *tlvs, > + ngx_uint_t type, ngx_str_t *value) This probably can be made static and moved after ngx_proxy_protocol_get_tlv(). > +{ > + u_char *p; > + size_t n, len; > + ngx_proxy_protocol_tlv_t *tlv; > + > + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, > + "PROXY protocol v2 lookup tlv:%02xi", type); > + > + p = tlvs->data; > + n = tlvs->len; > + > + while (n) { > + if (n < sizeof(ngx_proxy_protocol_tlv_t)) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); > + return NGX_ERROR; > + } > + > + tlv = (ngx_proxy_protocol_tlv_t *) p; > + len = ngx_proxy_protocol_parse_uint16(tlv->len); > + > + p += sizeof(ngx_proxy_protocol_tlv_t); > + n -= sizeof(ngx_proxy_protocol_tlv_t); > + > + if (n < len) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); > + return NGX_ERROR; > + } > + > + ngx_log_debug2(NGX_LOG_DEBUG_CORE, c->log, 0, > + "PROXY protocol v2 tlv:0x%02xd len:%uz", tlv->type, len); I tend to think this is going to be too chatty on real load with multiple TLVs, and probably should be removed or #if 0'ed. 
> + > + if (tlv->type == type) { > + value->data = p; > + value->len = len; > + return NGX_OK; > + } > + > + p += len; > + n -= len; > + } > + > + return NGX_DECLINED; > +} > + > + > +ngx_int_t > +ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, > + ngx_str_t *value) > +{ > + u_char *p; > + size_t n; > + uint32_t verify; > + ngx_str_t ssl, *tlvs; > + ngx_int_t rc, type; > + ngx_proxy_protocol_tlv_ssl_t *tlv_ssl; > + ngx_proxy_protocol_tlv_entry_t *te; > + > + if (c->proxy_protocol == NULL) { > + return NGX_DECLINED; > + } > + > + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, > + "PROXY protocol v2 get tlv \"%V\"", name); > + > + te = ngx_proxy_protocol_tlv_entries; > + tlvs = &c->proxy_protocol->tlvs; > + > + p = name->data; > + n = name->len; > + > + if (n >= 4 && p[0] == 's' && p[1] == 's' && p[2] == 'l' && p[3] == '_') { > + > + rc = ngx_proxy_protocol_lookup_tlv(c, tlvs, 0x20, &ssl); > + if (rc != NGX_OK) { > + return rc; > + } > + > + if (ssl.len < sizeof(ngx_proxy_protocol_tlv_ssl_t)) { > + return NGX_ERROR; > + } > + > + p += 4; > + n -= 4; > + > + if (n == 6 && ngx_strncmp(p, "verify", 6) == 0) { > + > + tlv_ssl = (ngx_proxy_protocol_tlv_ssl_t *) ssl.data; > + verify = ngx_proxy_protocol_parse_uint32(tlv_ssl->verify); > + > + value->data = ngx_pnalloc(c->pool, NGX_INT32_LEN); > + if (value->data == NULL) { > + return NGX_ERROR; > + } > + > + value->len = ngx_sprintf(value->data, "%uD", verify) > + - value->data; > + return NGX_OK; > + } > + > + ssl.data += sizeof(ngx_proxy_protocol_tlv_ssl_t); > + ssl.len -= sizeof(ngx_proxy_protocol_tlv_ssl_t); > + > + te = ngx_proxy_protocol_tlv_ssl_entries; > + tlvs = &ssl; > + } > + > + if (n >= 2 && p[0] == '0' && p[1] == 'x') { > + > + type = ngx_hextoi(p + 2, n - 2); > + if (type == NGX_ERROR) { > + return NGX_ERROR; This probably needs some error message. > + } > + > + return ngx_proxy_protocol_lookup_tlv(c, tlvs, type, value); > + } > + > + for ( /* void */ ; te->type; te++) { > + if (te->name.len == n && ngx_strncmp(te->name.data, p, n) == 0) { > + return ngx_proxy_protocol_lookup_tlv(c, tlvs, te->type, value); > + } > + } > + > + return NGX_DECLINED; Invalid/unknown names will silently result in empty variables. I tend to think this is going to be a problem, especially if we'll introduce additional names at some point. Some error instead might be a good idea. 
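For illustration only (not part of the posted patch), the suggested error for unrecognized names could take roughly this shape in place of the final NGX_DECLINED:

    /* hypothetical: complain about unknown variable names instead of
     * silently evaluating to an empty value */

    ngx_log_error(NGX_LOG_ERR, c->log, 0,
                  "unknown PROXY protocol TLV \"%V\"", name);

    return NGX_ERROR;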
> +} > diff --git a/src/core/ngx_proxy_protocol.h b/src/core/ngx_proxy_protocol.h > --- a/src/core/ngx_proxy_protocol.h > +++ b/src/core/ngx_proxy_protocol.h > @@ -21,6 +21,7 @@ struct ngx_proxy_protocol_s { > ngx_str_t dst_addr; > in_port_t src_port; > in_port_t dst_port; > + ngx_str_t tlvs; > }; > > > @@ -28,6 +29,10 @@ u_char *ngx_proxy_protocol_read(ngx_conn > u_char *last); > u_char *ngx_proxy_protocol_write(ngx_connection_t *c, u_char *buf, > u_char *last); > +ngx_int_t ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, ngx_str_t *tlvs, > + ngx_uint_t type, ngx_str_t *value); > +ngx_int_t ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, > + ngx_str_t *value); > > > #endif /* _NGX_PROXY_PROTOCOL_H_INCLUDED_ */ > diff --git a/src/http/ngx_http_variables.c b/src/http/ngx_http_variables.c > --- a/src/http/ngx_http_variables.c > +++ b/src/http/ngx_http_variables.c > @@ -61,6 +61,8 @@ static ngx_int_t ngx_http_variable_proxy > ngx_http_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_http_variable_proxy_protocol_port(ngx_http_request_t *r, > ngx_http_variable_value_t *v, uintptr_t data); > +static ngx_int_t ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, > + ngx_http_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_http_variable_server_addr(ngx_http_request_t *r, > ngx_http_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_http_variable_server_port(ngx_http_request_t *r, > @@ -214,6 +216,10 @@ static ngx_http_variable_t ngx_http_cor > ngx_http_variable_proxy_protocol_port, > offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, > > + { ngx_string("proxy_protocol_tlv_"), NULL, > + ngx_http_variable_proxy_protocol_tlv, > + 0, NGX_HTTP_VAR_PREFIX, 0 }, > + > { ngx_string("server_addr"), NULL, ngx_http_variable_server_addr, 0, 0, 0 }, > > { ngx_string("server_port"), NULL, ngx_http_variable_server_port, 0, 0, 0 }, > @@ -1387,6 +1393,39 @@ ngx_http_variable_proxy_protocol_port(ng > > > static ngx_int_t > +ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, > + ngx_http_variable_value_t *v, uintptr_t data) > +{ > + ngx_str_t *name = (ngx_str_t *) data; > + > + ngx_int_t rc; > + ngx_str_t tlv, value; > + > + tlv.len = name->len - (sizeof("proxy_protocol_tlv_") - 1); > + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; > + > + rc = ngx_proxy_protocol_get_tlv(r->connection, &tlv, &value); > + > + if (rc == NGX_ERROR) { > + return NGX_ERROR; > + } > + > + if (rc == NGX_DECLINED) { > + v->not_found = 1; > + return NGX_OK; > + } > + > + v->len = value.len; > + v->valid = 1; > + v->no_cacheable = 0; > + v->not_found = 0; > + v->data = value.data; > + > + return NGX_OK; > +} > + > + > +static ngx_int_t > ngx_http_variable_server_addr(ngx_http_request_t *r, > ngx_http_variable_value_t *v, uintptr_t data) > { > diff --git a/src/stream/ngx_stream_variables.c b/src/stream/ngx_stream_variables.c > --- a/src/stream/ngx_stream_variables.c > +++ b/src/stream/ngx_stream_variables.c > @@ -23,6 +23,8 @@ static ngx_int_t ngx_stream_variable_pro > ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_stream_variable_proxy_protocol_port( > ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > +static ngx_int_t ngx_stream_variable_proxy_protocol_tlv( > + ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_stream_variable_server_addr(ngx_stream_session_t *s, > ngx_stream_variable_value_t *v, uintptr_t data); > static ngx_int_t 
ngx_stream_variable_server_port(ngx_stream_session_t *s, > @@ -79,6 +81,10 @@ static ngx_stream_variable_t ngx_stream > ngx_stream_variable_proxy_protocol_port, > offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, > > + { ngx_string("proxy_protocol_tlv_"), NULL, > + ngx_stream_variable_proxy_protocol_tlv, > + 0, NGX_STREAM_VAR_PREFIX, 0 }, > + > { ngx_string("server_addr"), NULL, > ngx_stream_variable_server_addr, 0, 0, 0 }, > > @@ -622,6 +628,39 @@ ngx_stream_variable_proxy_protocol_port( > > > static ngx_int_t > +ngx_stream_variable_proxy_protocol_tlv(ngx_stream_session_t *s, > + ngx_stream_variable_value_t *v, uintptr_t data) > +{ > + ngx_str_t *name = (ngx_str_t *) data; > + > + ngx_int_t rc; > + ngx_str_t tlv, value; > + > + tlv.len = name->len - (sizeof("proxy_protocol_tlv_") - 1); > + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; > + > + rc = ngx_proxy_protocol_get_tlv(s->connection, &tlv, &value); > + > + if (rc == NGX_ERROR) { > + return NGX_ERROR; > + } > + > + if (rc == NGX_DECLINED) { > + v->not_found = 1; > + return NGX_OK; > + } > + > + v->len = value.len; > + v->valid = 1; > + v->no_cacheable = 0; > + v->not_found = 0; > + v->data = value.data; > + > + return NGX_OK; > +} > + > + > +static ngx_int_t > ngx_stream_variable_server_addr(ngx_stream_session_t *s, > ngx_stream_variable_value_t *v, uintptr_t data) > { Otherwise looks good. > # HG changeset patch > # User Roman Arutyunyan > # Date 1664263876 -14400 > # Tue Sep 27 11:31:16 2022 +0400 > # Node ID 615268a957ab930dc4be49fe5f6f88cd7e377f12 > # Parent 38940ff7246574aa19a19c76b072073c34f191be > Added type cast to ngx_proxy_protocol_parse_uint16(). > > The cast is added to make ngx_proxy_protocol_parse_uint16() similar to > ngx_proxy_protocol_parse_uint32(). > > diff --git a/src/core/ngx_proxy_protocol.c b/src/core/ngx_proxy_protocol.c > --- a/src/core/ngx_proxy_protocol.c > +++ b/src/core/ngx_proxy_protocol.c > @@ -13,7 +13,9 @@ > #define NGX_PROXY_PROTOCOL_AF_INET6 2 > > > -#define ngx_proxy_protocol_parse_uint16(p) ((p)[0] << 8 | (p)[1]) > +#define ngx_proxy_protocol_parse_uint16(p) \ > + ( ((uint16_t) (p)[0] << 8) \ > + + ( (p)[1]) ) > > #define ngx_proxy_protocol_parse_uint32(p) \ > ( ((uint32_t) (p)[0] << 24) \ Looks good. > # HG changeset patch > # User Roman Arutyunyan > # Date 1664200613 -14400 > # Mon Sep 26 17:56:53 2022 +0400 > # Node ID ff87ed4999b49433a9abecaaf2e574cbfa502961 > # Parent 95ba1e704b7b29c39447135e18ed003ecd305924 > Tests: client PROXY protocol v2 TLV variables. > > diff --git a/proxy_protocol2.t b/proxy_protocol2.t > --- a/proxy_protocol2.t > +++ b/proxy_protocol2.t > @@ -24,7 +24,7 @@ select STDOUT; $| = 1; > > my $t = Test::Nginx->new()->has(qw/http access realip/); > > -$t->write_file_expand('nginx.conf', <<'EOF')->plan(23); > +$t->write_file_expand('nginx.conf', <<'EOF')->plan(26); > > %%TEST_GLOBALS%% > > @@ -45,6 +45,8 @@ http { > set_real_ip_from 127.0.0.1/32; > add_header X-IP $remote_addr!$remote_port; > add_header X-PP $proxy_protocol_addr!$proxy_protocol_port; > + add_header X-TL $proxy_protocol_tlv_0x3-$proxy_protocol_tlv_0x0000ae-$proxy_protocol_tlv_0x0f; > + add_header X-NT $proxy_protocol_tlv_unique_id-$proxy_protocol_tlv_ssl_cn-$proxy_protocol_tlv_ssl_0x22-$proxy_protocol_tlv_ssl_verify; > > location /pp { > real_ip_header proxy_protocol; > @@ -76,7 +78,11 @@ my $p = pack("N3C", 0x0D0A0D0A, 0x000D0A > my $tcp4 = $p . pack("CnN2n2", 0x11, 12, 0xc0000201, 0xc0000202, 123, 5678); > my $tcp6 = $p . 
pack("CnNx8NNx8Nn2", 0x21, 36, > 0x20010db8, 0x00000001, 0x20010db8, 0x00000002, 123, 5678); > -my $tlv = $p . pack("CnN2n2x9", 0x11, 21, 0xc0000201, 0xc0000202, 123, 5678); > +my $tlv = $p . pack("CnN2n2N3", 0x11, 24, 0xc0000201, 0xc0000202, 123, 5678, > + 0x03000141, 0xAE000531, 0x32333435); > +my $tlv2 = $p . pack("CnN2n2N7", 0x11, 40, 0xc0000201, 0xc0000202, 123, 5678, > + 0x05000555, 0x4E495151, > + 0x20001100, 0xdeadbeef, 0x22000966, 0x6f6f2e62, 0x61727272); > my $unk1 = $p . pack("Cxx", 0x01); > my $unk2 = $p . pack("CnC4", 0x41, 4, 1, 2, 3, 4); > my $r; > @@ -97,6 +103,11 @@ unlike($r, qr/X-IP: (2001:DB8::1|[^!]+!1 > like($r, qr/SEE-THIS/, 'tlv request'); > like($r, qr/X-PP: 192.0.2.1!123\x0d/, 'tlv proxy'); > unlike($r, qr/X-IP: (192.0.2.1|[^!]+!123\x0d)/, 'tlv client'); > +like($r, qr/X-TL: A-12345-\x0d/, 'tlv raw variables'); > +like($r, qr/X-NT: ---\x0d/, 'tlv missing variables'); > + > +$r = pp_get('/t1', $tlv2); > +like($r, qr/X-NT: UNIQQ-foo.barrr-foo.barrr-3735928559\x0d/, 'tlv named variables'); > > $r = pp_get('/t1', $unk1); > like($r, qr/SEE-THIS/, 'unknown request 1'); This is going to fail without the patch and needs either try_run() or a separate test file with try_run(). A separate test file might be better, since it'll preserve relevant test coverage for stable branch. -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Tue Oct 11 02:35:14 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 11 Oct 2022 02:35:14 +0000 Subject: [njs] Refactored bound function calls according to the spec. Message-ID: details: https://hg.nginx.org/njs/rev/e4297a78844e branches: changeset: 1976:e4297a78844e user: Dmitry Volyntsev date: Mon Oct 10 18:45:09 2022 -0700 description: Refactored bound function calls according to the spec. This fixes #533, #546, #579 issues on Github. 
diffstat: src/njs_disassembler.c | 14 +--- src/njs_error.c | 9 +- src/njs_extern.c | 1 - src/njs_function.c | 171 ++++++++++++++++++++-------------------------- src/njs_function.h | 5 +- src/njs_generator.c | 10 +- src/njs_promise.c | 1 - src/njs_value.h | 6 +- src/njs_vm.c | 1 - src/njs_vmcode.c | 63 +++++++++++----- src/njs_vmcode.h | 9 +-- src/test/njs_unit_test.c | 18 +++- test/js/async_bind.t.js | 13 +++ 13 files changed, 161 insertions(+), 160 deletions(-) diffs (735 lines): diff -r 16442fa970ee -r e4297a78844e src/njs_disassembler.c --- a/src/njs_disassembler.c Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_disassembler.c Mon Oct 10 18:45:09 2022 -0700 @@ -17,6 +17,8 @@ typedef struct { static njs_code_name_t code_names[] = { + { NJS_VMCODE_PUT_ARG, sizeof(njs_vmcode_1addr_t), + njs_str("PUT ARG ") }, { NJS_VMCODE_OBJECT, sizeof(njs_vmcode_object_t), njs_str("OBJECT ") }, { NJS_VMCODE_FUNCTION, sizeof(njs_vmcode_function_t), @@ -204,7 +206,6 @@ njs_disassemble(u_char *start, u_char *e njs_vmcode_import_t *import; njs_vmcode_finally_t *finally; njs_vmcode_try_end_t *try_end; - njs_vmcode_move_arg_t *move_arg; njs_vmcode_try_start_t *try_start; njs_vmcode_operation_t operation; njs_vmcode_cond_jump_t *cond_jump; @@ -513,17 +514,6 @@ njs_disassemble(u_char *start, u_char *e continue; } - if (operation == NJS_VMCODE_MOVE_ARG) { - move_arg = (njs_vmcode_move_arg_t *) p; - - njs_printf("%5uD | %05uz MOVE ARGUMENT %uD %04Xz\n", - line, p - start, move_arg->dst, (size_t) move_arg->src); - - p += sizeof(njs_vmcode_move_arg_t); - - continue; - } - code_name = code_names; n = njs_nitems(code_names); diff -r 16442fa970ee -r e4297a78844e src/njs_error.c --- a/src/njs_error.c Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_error.c Mon Oct 10 18:45:09 2022 -0700 @@ -1292,6 +1292,11 @@ njs_add_backtrace_entry(njs_vm_t *vm, nj function = native_frame->function; + if (function != NULL && function->bound != NULL) { + /* Skip. 
*/ + return NJS_OK; + } + be = njs_arr_add(stack); if (njs_slow_path(be == NULL)) { return NJS_ERROR; @@ -1301,10 +1306,6 @@ njs_add_backtrace_entry(njs_vm_t *vm, nj be->file = njs_str_value(""); if (function != NULL && function->native) { - while (function->bound != NULL) { - function = function->u.bound_target; - } - ret = njs_builtin_match_native_function(vm, function, &be->name); if (ret == NJS_OK) { return NJS_OK; diff -r 16442fa970ee -r e4297a78844e src/njs_extern.c --- a/src/njs_extern.c Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_extern.c Mon Oct 10 18:45:09 2022 -0700 @@ -77,7 +77,6 @@ njs_external_add(njs_vm_t *vm, njs_arr_t function->object.type = NJS_FUNCTION; function->object.shared = 1; function->object.extensible = 1; - function->args_offset = 1; function->native = 1; function->u.native = external->u.method.native; function->magic8 = external->u.method.magic8; diff -r 16442fa970ee -r e4297a78844e src/njs_function.c --- a/src/njs_function.c Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_function.c Mon Oct 10 18:45:09 2022 -0700 @@ -30,7 +30,6 @@ njs_function_alloc(njs_vm_t *vm, njs_fun */ function->ctor = lambda->ctor; - function->args_offset = 1; function->u.lambda = lambda; if (function->ctor) { @@ -77,7 +76,6 @@ njs_vm_function_alloc(njs_vm_t *vm, njs_ } function->native = 1; - function->args_offset = 1; function->u.native = native; return function; @@ -376,12 +374,10 @@ njs_function_native_frame(njs_vm_t *vm, njs_bool_t ctor) { size_t size; - njs_uint_t n; - njs_value_t *value, *bound; + njs_value_t *value; njs_native_frame_t *frame; - size = NJS_NATIVE_FRAME_SIZE - + (function->args_offset + nargs) * sizeof(njs_value_t); + size = NJS_NATIVE_FRAME_SIZE + (1 /* this */ + nargs) * sizeof(njs_value_t); frame = njs_function_frame_alloc(vm, size); if (njs_slow_path(frame == NULL)) { @@ -389,31 +385,16 @@ njs_function_native_frame(njs_vm_t *vm, } frame->function = function; - frame->nargs = function->args_offset + nargs; + frame->nargs = nargs; frame->ctor = ctor; frame->native = 1; frame->pc = NULL; value = (njs_value_t *) ((u_char *) frame + NJS_NATIVE_FRAME_SIZE); - frame->arguments = value; - frame->arguments_offset = value + function->args_offset; - - bound = function->bound; - - if (bound == NULL) { - /* GC: njs_retain(this); */ - *value++ = *this; + njs_value_assign(value++, this++); - } else { - n = function->args_offset; - - do { - /* GC: njs_retain(bound); */ - *value++ = *bound++; - n--; - } while (n != 0); - } + frame->arguments = value; if (args != NULL) { memcpy(value, args, nargs * sizeof(njs_value_t)); @@ -430,37 +411,15 @@ njs_function_lambda_frame(njs_vm_t *vm, { size_t n, frame_size; uint32_t args_count, value_count, value_size; - njs_value_t *value, *bound, **new; + njs_value_t *value, **new; njs_frame_t *frame; - njs_function_t *target; njs_native_frame_t *native_frame; njs_function_lambda_t *lambda; - bound = function->bound; - - if (njs_fast_path(bound == NULL)) { - lambda = function->u.lambda; - target = function; - - } else { - target = function->u.bound_target; - - if (njs_slow_path(target->bound != NULL)) { + lambda = function->u.lambda; - /* - * FIXME: bound functions should call target function with - * bound "this" and bound args. 
- */ - - njs_internal_error(vm, "chain of bound function are not supported"); - return NJS_ERROR; - } - - lambda = target->u.lambda; - } - - args_count = function->args_offset + njs_max(nargs, lambda->nargs); - value_count = args_count + njs_max(args_count, lambda->nlocal); + args_count = njs_max(nargs, lambda->nargs); + value_count = args_count + lambda->nlocal; value_size = value_count * sizeof(njs_value_t *); @@ -485,9 +444,8 @@ njs_function_lambda_frame(njs_vm_t *vm, } native_frame->arguments = value; - native_frame->arguments_offset = value + (function->args_offset - 1); native_frame->local = new + args_count; - native_frame->function = target; + native_frame->function = function; native_frame->nargs = nargs; native_frame->ctor = ctor; native_frame->native = 0; @@ -502,28 +460,11 @@ njs_function_lambda_frame(njs_vm_t *vm, njs_set_object(native_frame->local[0], &vm->global_object); } - if (bound != NULL) { - n = function->args_offset; - native_frame->nargs += n - 1; - - if (!ctor) { - *native_frame->local[0] = *bound; - } - - bound++; - n--; - - while (n != 0) { - *value++ = *bound++; - n--; - }; - } - /* Copy arguments. */ if (args != NULL) { while (nargs != 0) { - *value++ = *args++; + njs_value_assign(value++, args++); nargs--; } } @@ -624,7 +565,7 @@ njs_function_lambda_call(njs_vm_t *vm, v lambda = function->u.lambda; args = vm->top_frame->arguments; - local = vm->top_frame->local + function->args_offset; + local = vm->top_frame->local + 1 /* this */; /* Move all arguments. */ @@ -702,7 +643,7 @@ njs_int_t njs_function_native_call(njs_vm_t *vm) { njs_int_t ret; - njs_function_t *function, *target; + njs_function_t *function; njs_native_frame_t *native, *previous; njs_function_native_t call; @@ -723,21 +664,10 @@ njs_function_native_call(njs_vm_t *vm) } #endif - if (njs_fast_path(function->bound == NULL)) { - call = function->u.native; - - } else { - target = function->u.bound_target; + call = function->u.native; - if (njs_slow_path(target->bound != NULL)) { - njs_internal_error(vm, "chain of bound function are not supported"); - return NJS_ERROR; - } - - call = target->u.native; - } - - ret = call(vm, native->arguments, native->nargs, function->magic8); + ret = call(vm, &native->arguments[-1], 1 /* this */ + native->nargs, + function->magic8); #ifdef NJS_DEBUG_OPCODE if (vm->options.opcode_debug) { @@ -833,14 +763,13 @@ njs_function_frame_save(njs_vm_t *vm, nj function = active->function; lambda = function->u.lambda; - args_count = function->args_offset + njs_max(native->nargs, lambda->nargs); - value_count = args_count + njs_max(args_count, lambda->nlocal); + args_count = njs_max(native->nargs, lambda->nargs); + value_count = args_count + lambda->nlocal; new = (njs_value_t **) ((u_char *) native + NJS_FRAME_SIZE); value = (njs_value_t *) (new + value_count); native->arguments = value; - native->arguments_offset = value + (function->args_offset - 1); native->local = new + njs_function_frame_args_count(active); native->pc = pc; @@ -848,14 +777,14 @@ njs_function_frame_save(njs_vm_t *vm, nj p = native->arguments; while (start < end) { - *p = *start++; + njs_value_assign(p, start++); *new++ = p++; } /* Move all arguments. 
*/ p = native->arguments; - local = native->local + function->args_offset; + local = native->local + 1 /* this */; for (n = 0; n < function->args_count; n++) { if (!njs_is_valid(p)) { @@ -1461,11 +1390,54 @@ activate: static njs_int_t +njs_function_bound_call(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t unused) +{ + u_char *p; + njs_int_t ret; + size_t args_count; + njs_value_t *arguments; + njs_function_t *function, *bound; + + function = vm->top_frame->function; + bound = function->context; + + njs_assert(bound != NULL); + + args_count = 1 /* this */ + function->bound_args; + + if (nargs == 1) { + return njs_function_apply(vm, bound, function->bound, args_count, + &vm->retval); + } + + arguments = njs_mp_alloc(vm->mem_pool, + (args_count + nargs - 1) * sizeof(njs_value_t)); + if (njs_slow_path(arguments == NULL)) { + njs_memory_error(vm); + return NJS_ERROR; + } + + p = njs_cpymem(arguments, function->bound, + args_count * sizeof(njs_value_t)); + memcpy(p, &args[1], (nargs - 1) * sizeof(njs_value_t)); + + ret = njs_function_apply(vm, bound, arguments, args_count + nargs - 1, + &vm->retval); + + njs_mp_free(vm->mem_pool, arguments); + + return ret; +} + + +static njs_int_t njs_function_prototype_bind(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { size_t size; njs_int_t ret; + njs_uint_t bound_args; njs_value_t *values, name; njs_function_t *function; @@ -1481,6 +1453,8 @@ njs_function_prototype_bind(njs_vm_t *vm } *function = *njs_function(&args[0]); + function->native = 1; + function->u.native = njs_function_bound_call; njs_lvlhsh_init(&function->object.hash); @@ -1490,7 +1464,7 @@ njs_function_prototype_bind(njs_vm_t *vm function->object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_FUNCTION].object; function->object.shared = 0; - function->u.bound_target = njs_function(&args[0]); + function->context = njs_function(&args[0]); ret = njs_value_property(vm, &args[0], njs_value_arg(&njs_string_name), &name); @@ -1509,21 +1483,23 @@ njs_function_prototype_bind(njs_vm_t *vm if (nargs == 1) { args = njs_value_arg(&njs_value_undefined); + bound_args = 0; } else { - nargs--; args++; + bound_args = nargs - 2; } - if (nargs > function->args_count) { + if (bound_args > function->args_count) { function->args_count = 0; } else { - function->args_count -= nargs - 1; + function->args_count -= bound_args; } - function->args_offset = nargs; - size = nargs * sizeof(njs_value_t); + function->bound_args = bound_args; + + size = (1 /* this */ + bound_args) * sizeof(njs_value_t); values = njs_mp_alloc(vm->mem_pool, size); if (njs_slow_path(values == NULL)) { @@ -1700,7 +1676,6 @@ const njs_object_type_init_t njs_functi .constructor_props = &njs_function_constructor_init, .prototype_props = &njs_function_prototype_init, .prototype_value = { .function = { .native = 1, - .args_offset = 1, .u.native = njs_prototype_function, .object = { .type = NJS_FUNCTION } } }, }; diff -r 16442fa970ee -r e4297a78844e src/njs_function.h --- a/src/njs_function.h Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_function.h Mon Oct 10 18:45:09 2022 -0700 @@ -47,9 +47,9 @@ struct njs_native_frame_s { njs_function_t *function; njs_native_frame_t *previous; + /* Points to the first arg after 'this'. */ njs_value_t *arguments; njs_object_t *arguments_object; - njs_value_t *arguments_offset; njs_value_t **local; uint32_t size; @@ -57,7 +57,10 @@ struct njs_native_frame_s { njs_value_t *retval; + /* Number of allocated args on the frame. */ uint32_t nargs; + /* Number of already put args. 
*/ + uint32_t put_args; uint8_t native; /* 1 bit */ /* Function is called as constructor with "new" keyword. */ diff -r 16442fa970ee -r e4297a78844e src/njs_generator.c --- a/src/njs_generator.c Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_generator.c Mon Oct 10 18:45:09 2022 -0700 @@ -4382,22 +4382,20 @@ njs_generate_move_arguments(njs_vm_t *vm njs_parser_node_t *node) { njs_jump_off_t func_offset; - njs_vmcode_move_arg_t *move_arg; + njs_vmcode_1addr_t *put_arg; njs_vmcode_function_frame_t *func; if (node == NULL) { return njs_generator_stack_pop(vm, generator, generator->context); } - njs_generate_code(generator, njs_vmcode_move_arg_t, move_arg, - NJS_VMCODE_MOVE_ARG, 0, node); - move_arg->src = node->left->index; + njs_generate_code(generator, njs_vmcode_1addr_t, put_arg, + NJS_VMCODE_PUT_ARG, 0, node); + put_arg->index = node->left->index; func_offset = *((njs_jump_off_t *) generator->context); func = njs_code_ptr(generator, njs_vmcode_function_frame_t, func_offset); - move_arg->dst = (njs_uint_t) func->nargs; - func->nargs++; if (node->right == NULL) { diff -r 16442fa970ee -r e4297a78844e src/njs_promise.c --- a/src/njs_promise.c Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_promise.c Mon Oct 10 18:45:09 2022 -0700 @@ -255,7 +255,6 @@ njs_promise_create_function(njs_vm_t *vm function->object.shared_hash = vm->shared->arrow_instance_hash; function->object.type = NJS_FUNCTION; function->object.extensible = 1; - function->args_offset = 1; function->native = 1; function->context = context; diff -r 16442fa970ee -r e4297a78844e src/njs_value.h --- a/src/njs_value.h Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_value.h Mon Oct 10 18:45:09 2022 -0700 @@ -250,7 +250,8 @@ struct njs_typed_array_s { struct njs_function_s { njs_object_t object; - uint8_t args_offset; + /* Number of bound args excluding 'this'. */ + uint8_t bound_args; uint8_t args_count:4; @@ -265,11 +266,11 @@ struct njs_function_s { union { njs_function_lambda_t *lambda; njs_function_native_t native; - njs_function_t *bound_target; } u; void *context; + /* Bound args including 'this'. */ njs_value_t *bound; }; @@ -428,7 +429,6 @@ typedef struct { .magic8 = _magic, \ .args_count = _args_count, \ .ctor = _ctor, \ - .args_offset = 1, \ .u.native = _function, \ .object = { .type = NJS_FUNCTION, \ .shared = 1, \ diff -r 16442fa970ee -r e4297a78844e src/njs_vm.c --- a/src/njs_vm.c Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_vm.c Mon Oct 10 18:45:09 2022 -0700 @@ -300,7 +300,6 @@ njs_vm_compile_module(njs_vm_t *vm, njs_ lambda->declarations = (arr != NULL) ? arr->start : NULL; lambda->ndeclarations = (arr != NULL) ? 
arr->items : 0; - module->function.args_offset = 1; module->function.u.lambda = lambda; return module; diff -r 16442fa970ee -r e4297a78844e src/njs_vmcode.c --- a/src/njs_vmcode.c Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_vmcode.c Mon Oct 10 18:45:09 2022 -0700 @@ -98,6 +98,7 @@ njs_vmcode_interpreter(njs_vm_t *vm, u_c njs_value_t numeric1, numeric2, primitive1, primitive2; njs_frame_t *frame; njs_jump_off_t ret; + njs_vmcode_1addr_t *put_arg; njs_vmcode_await_t *await; njs_native_frame_t *previous, *native; njs_property_next_t *next; @@ -105,7 +106,6 @@ njs_vmcode_interpreter(njs_vm_t *vm, u_c njs_vmcode_finally_t *finally; njs_vmcode_generic_t *vmcode; njs_vmcode_variable_t *var; - njs_vmcode_move_arg_t *move_arg; njs_vmcode_prop_get_t *get; njs_vmcode_prop_set_t *set; njs_vmcode_operation_t op; @@ -657,18 +657,16 @@ next: } else { switch (op) { - case NJS_VMCODE_MOVE_ARG: - move_arg = (njs_vmcode_move_arg_t *) pc; + case NJS_VMCODE_PUT_ARG: + put_arg = (njs_vmcode_1addr_t *) pc; native = vm->top_frame; - hint = move_arg->dst; - - value1 = &native->arguments_offset[hint]; - njs_vmcode_operand(vm, move_arg->src, value2); - - *value1 = *value2; - - ret = sizeof(njs_vmcode_move_arg_t); + value1 = &native->arguments[native->put_args++]; + njs_vmcode_operand(vm, put_arg->index, value2); + + njs_value_assign(value1, value2); + + ret = sizeof(njs_vmcode_1addr_t); break; case NJS_VMCODE_STOP: @@ -1290,7 +1288,6 @@ njs_vmcode_template_literal(njs_vm_t *vm static const njs_function_t concat = { .native = 1, - .args_offset = 1, .u.native = njs_string_prototype_concat }; @@ -1584,7 +1581,7 @@ njs_vmcode_instance_of(njs_vm_t *vm, njs function = njs_function(constructor); if (function->bound != NULL) { - function = function->u.bound_target; + function = function->context; njs_set_function(&bound, function); constructor = &bound; } @@ -1849,33 +1846,57 @@ static njs_jump_off_t njs_function_frame_create(njs_vm_t *vm, njs_value_t *value, const njs_value_t *this, uintptr_t nargs, njs_bool_t ctor) { - njs_value_t val; + njs_int_t ret; + njs_value_t new_target, *args; njs_object_t *object; - njs_function_t *function; + njs_function_t *function, *target; if (njs_fast_path(njs_is_function(value))) { function = njs_function(value); + target = function; + args = NULL; if (ctor) { - if (!function->ctor) { + if (function->bound != NULL) { + target = function->context; + nargs += function->bound_args; + + args = njs_mp_alloc(vm->mem_pool, nargs * sizeof(njs_value_t)); + if (njs_slow_path(args == NULL)) { + njs_memory_error(vm); + return NJS_ERROR; + } + + memcpy(args, &function->bound[1], + function->bound_args * sizeof(njs_value_t)); + } + + if (!target->ctor) { njs_type_error(vm, "%s is not a constructor", njs_type_string(value->type)); return NJS_ERROR; } - if (!function->native) { + if (!target->native) { object = njs_function_new_object(vm, value); if (njs_slow_path(object == NULL)) { return NJS_ERROR; } - njs_set_object(&val, object); - this = &val; + njs_set_object(&new_target, object); + this = &new_target; } } - return njs_function_frame(vm, function, this, NULL, nargs, ctor); + ret = njs_function_frame(vm, target, this, args, nargs, ctor); + + if (args != NULL) { + vm->top_frame->put_args = function->bound_args; + njs_mp_free(vm->mem_pool, args); + } + + return ret; } njs_type_error(vm, "%s is not a function", njs_type_string(value->type)); @@ -1902,7 +1923,7 @@ njs_function_new_object(njs_vm_t *vm, nj function = njs_function(constructor); if (function->bound != NULL) { - function = 
function->u.bound_target; + function = function->context; njs_set_function(&bound, function); constructor = &bound; } diff -r 16442fa970ee -r e4297a78844e src/njs_vmcode.h --- a/src/njs_vmcode.h Thu Oct 06 18:28:52 2022 -0700 +++ b/src/njs_vmcode.h Mon Oct 10 18:45:09 2022 -0700 @@ -31,7 +31,7 @@ typedef uint8_t enum { - NJS_VMCODE_MOVE_ARG = 0, + NJS_VMCODE_PUT_ARG = 0, NJS_VMCODE_STOP, NJS_VMCODE_JUMP, NJS_VMCODE_PROPERTY_SET, @@ -411,13 +411,6 @@ typedef struct { typedef struct { njs_vmcode_t code; - njs_index_t src; - njs_uint_t dst; -} njs_vmcode_move_arg_t; - - -typedef struct { - njs_vmcode_t code; njs_value_t *function; njs_index_t retval; } njs_vmcode_function_copy_t; diff -r 16442fa970ee -r e4297a78844e src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Thu Oct 06 18:28:52 2022 -0700 +++ b/src/test/njs_unit_test.c Mon Oct 10 18:45:09 2022 -0700 @@ -7818,12 +7818,22 @@ static njs_unit_test_t njs_test[] = { njs_str("var bArray = Array.bind(null, 10); new bArray(16)"), njs_str("10,16") }, -#if 0 /* FIXME: refactor Bound calls (9.4.1.1[[Call]]). */ { njs_str("function f(x,y) {return {args:arguments,length:arguments.length}};" - "var bf = f.bind({}, 'a'); var bbf = bf.bind({},'b'); var o = bbf('c');"), - "[o.args[0], o.args[2], o.length]" + "var bf = f.bind({}, 'a'); var bbf = bf.bind({},'b'); var o = bbf('c');" + "[o.args[0], o.args[2], o.length]"), njs_str("a,c,3") }, -#endif + + { njs_str("var f = function (a, b) {return [this, a, b]};" + "var b1 = f.bind('THIS', 'x');" + "var b2 = b1.bind('WAKA', 'y');" + "njs.dump([f(2,3), b1(3), b2()])"), + njs_str("[[undefined,2,3],['THIS','x',3],['THIS','x','y']]") }, + + { njs_str("var f = Math.max;" + "var b1 = f.bind('THIS', 4);" + "var b2 = b1.bind('WAKA', 5);" + "njs.dump([f(2,3), b1(3), b2()])"), + njs_str("[3,4,5]") }, { njs_str("var s = { toString: function() { return '123' } };" "var a = 'abc'; a.concat('абв', s)"), diff -r 16442fa970ee -r e4297a78844e test/js/async_bind.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_bind.t.js Mon Oct 10 18:45:09 2022 -0700 @@ -0,0 +1,13 @@ +/*--- +includes: [compareArray.js] +flags: [async] +---*/ + +async function f(a1, a2, a3) { + var v = await a1; + return [a1, a2, a3]; +} + +f.bind(null,1,2)('a') +.then(v => assert.compareArray(v, [1, 2, 'a'])) +.then($DONE, $DONE); From xeioex at nginx.com Tue Oct 11 02:35:16 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 11 Oct 2022 02:35:16 +0000 Subject: [njs] Enabled successful tests which were skipped previously. Message-ID: details: https://hg.nginx.org/njs/rev/0b87c0309b37 branches: changeset: 1977:0b87c0309b37 user: Dmitry Volyntsev date: Mon Oct 10 19:01:56 2022 -0700 description: Enabled successful tests which were skipped previously. 
diffstat: src/test/njs_unit_test.c | 14 +++----------- 1 files changed, 3 insertions(+), 11 deletions(-) diffs (50 lines): diff -r e4297a78844e -r 0b87c0309b37 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Mon Oct 10 18:45:09 2022 -0700 +++ b/src/test/njs_unit_test.c Mon Oct 10 19:01:56 2022 -0700 @@ -203,10 +203,8 @@ static njs_unit_test_t njs_test[] = { njs_str("var func = function f() {let f = null; return f;}; func()"), njs_str("null") }, -#if 0 /* TODO */ { njs_str("var a; Object.getOwnPropertyDescriptor(this, 'a').value"), njs_str("undefined") }, -#endif { njs_str("f() = 1"), njs_str("ReferenceError: Invalid left-hand side in assignment in 1") }, @@ -1928,10 +1926,8 @@ static njs_unit_test_t njs_test[] = { njs_str("var Infinity"), njs_str("undefined") }, -#if 0 /* ES5FIX */ { njs_str("Infinity = 1"), - njs_str("TypeError: Cannot assign to read-only property "Infinity" of object") }, -#endif + njs_str("TypeError: Cannot assign to read-only property \"Infinity\" of object") }, /**/ @@ -1962,10 +1958,8 @@ static njs_unit_test_t njs_test[] = { njs_str("var NaN"), njs_str("undefined") }, -#if 0 /* ES5FIX */ { njs_str("NaN = 1"), - njs_str("TypeError: Cannot assign to read-only property "NaN" of object") }, -#endif + njs_str("TypeError: Cannot assign to read-only property \"NaN\" of object") }, /**/ @@ -3567,10 +3561,8 @@ static njs_unit_test_t njs_test[] = { njs_str("null = 1"), njs_str("ReferenceError: Invalid left-hand side in assignment in 1") }, -#if 0 /* ES5FIX */ { njs_str("undefined = 1"), - njs_str("TypeError: Cannot assign to read-only property "undefined" of object") }, -#endif + njs_str("TypeError: Cannot assign to read-only property \"undefined\" of object") }, { njs_str("null++"), njs_str("ReferenceError: Invalid left-hand side in postfix operation in 1") }, From pluknet at nginx.com Tue Oct 11 10:35:49 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Tue, 11 Oct 2022 14:35:49 +0400 Subject: [PATCH 0 of 4] quic libressl support #2 In-Reply-To: References: Message-ID: I decided to rip SSL_set_quic_use_legacy_codepoint() call as well after some pondering, as it seems useless now. Updated patches remove using of the macro BORINGSSL_API_VERSION, replaced with OPENSSL_IS_BORINGSSL where still need to distinguish building with BoringSSL, which seems consistent wrt rest of quic code. From pluknet at nginx.com Tue Oct 11 10:35:50 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Tue, 11 Oct 2022 14:35:50 +0400 Subject: [PATCH 1 of 4] QUIC: using native TLSv1.3 cipher suite constants In-Reply-To: References: Message-ID: <82b03006a7bd93c3b5c9.1665484550@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1665442920 -14400 # Tue Oct 11 03:02:00 2022 +0400 # Branch quic # Node ID 82b03006a7bd93c3b5c962a3afac89e0639b0c12 # Parent 28fc35b71d7566d5a7e04968c70291a239f05b6f QUIC: using native TLSv1.3 cipher suite constants. After BoringSSL aligned[1] with OpenSSL on TLS1_3_CK_* macros, and LibreSSL uses OpenSSL naming, our own variants can be dropped now. Compatibility is preserved with libraries that lack these macros. Additionally, transition to SSL_CIPHER_get_id() fixes build error with LibreSSL that doesn't implement SSL_CIPHER_get_protocol_id(). 
[1] https://boringssl.googlesource.com/boringssl/+/dfddbc4ded diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -15,9 +15,12 @@ #define NGX_QUIC_AES_128_KEY_LEN 16 -#define NGX_AES_128_GCM_SHA256 0x1301 -#define NGX_AES_256_GCM_SHA384 0x1302 -#define NGX_CHACHA20_POLY1305_SHA256 0x1303 +#ifndef TLS1_3_CK_AES_128_GCM_SHA256 +#define TLS1_3_CK_AES_128_GCM_SHA256 0x03001301 +#define TLS1_3_CK_AES_256_GCM_SHA384 0x03001302 +#define TLS1_3_CK_CHACHA20_POLY1305_SHA256 \ + 0x03001303 +#endif #ifdef OPENSSL_IS_BORINGSSL @@ -90,12 +93,12 @@ ngx_quic_ciphers(ngx_uint_t id, ngx_quic ngx_int_t len; if (level == ssl_encryption_initial) { - id = NGX_AES_128_GCM_SHA256; + id = TLS1_3_CK_AES_128_GCM_SHA256; } switch (id) { - case NGX_AES_128_GCM_SHA256: + case TLS1_3_CK_AES_128_GCM_SHA256: #ifdef OPENSSL_IS_BORINGSSL ciphers->c = EVP_aead_aes_128_gcm(); #else @@ -106,7 +109,7 @@ ngx_quic_ciphers(ngx_uint_t id, ngx_quic len = 16; break; - case NGX_AES_256_GCM_SHA384: + case TLS1_3_CK_AES_256_GCM_SHA384: #ifdef OPENSSL_IS_BORINGSSL ciphers->c = EVP_aead_aes_256_gcm(); #else @@ -117,7 +120,7 @@ ngx_quic_ciphers(ngx_uint_t id, ngx_quic len = 32; break; - case NGX_CHACHA20_POLY1305_SHA256: + case TLS1_3_CK_CHACHA20_POLY1305_SHA256: #ifdef OPENSSL_IS_BORINGSSL ciphers->c = EVP_aead_chacha20_poly1305(); #else @@ -642,7 +645,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ peer_secret = is_write ? &keys->secrets[level].server : &keys->secrets[level].client; - keys->cipher = SSL_CIPHER_get_protocol_id(cipher); + keys->cipher = SSL_CIPHER_get_id(cipher); key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); From pluknet at nginx.com Tue Oct 11 10:35:51 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Tue, 11 Oct 2022 14:35:51 +0400 Subject: [PATCH 2 of 4] QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1665442922 -14400 # Tue Oct 11 03:02:02 2022 +0400 # Branch quic # Node ID caced81ce0a9cb218ae8cdd6176c12e0614acee9 # Parent 82b03006a7bd93c3b5c962a3afac89e0639b0c12 QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL. This function is present in QuicTLS only. After SSL_READ_EARLY_DATA_SUCCESS became visible in LibreSSL together with experimental QUIC API, this required to revise the conditional compilation test to use more narrow macros. diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c --- a/src/event/quic/ngx_event_quic_ssl.c +++ b/src/event/quic/ngx_event_quic_ssl.c @@ -557,7 +557,7 @@ ngx_quic_init_connection(ngx_connection_ return NGX_ERROR; } -#ifdef SSL_READ_EARLY_DATA_SUCCESS +#if (!defined LIBRESSL_VERSION_NUMBER && !defined OPENSSL_IS_BORINGSSL) if (SSL_CTX_get_max_early_data(qc->conf->ssl->ctx)) { SSL_set_quic_early_data_enabled(ssl_conn, 1); } From pluknet at nginx.com Tue Oct 11 10:35:52 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Tue, 11 Oct 2022 14:35:52 +0400 Subject: [PATCH 3 of 4] QUIC: support for setting QUIC methods with LibreSSL In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1665484414 -14400 # Tue Oct 11 14:33:34 2022 +0400 # Branch quic # Node ID c0165ddcb1c6981f8e5230081f03a277f62d20c3 # Parent caced81ce0a9cb218ae8cdd6176c12e0614acee9 QUIC: support for setting QUIC methods with LibreSSL. 
Setting QUIC methods is converted to use C99 designated initializers for simplicity, as LibreSSL 3.6.0 has different SSL_QUIC_METHOD layout. Additionally, it's stick with set_read_secret/set_write_secret callbacks. LibreSSL prefers set_encryption_secrets over them but has unexpectedly incompatible behaviour expressed in passing read and write secrets split in separate calls, unlike this is documented in old BoringSSL sources. diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c --- a/src/event/quic/ngx_event_quic_ssl.c +++ b/src/event/quic/ngx_event_quic_ssl.c @@ -18,7 +18,7 @@ #define NGX_QUIC_MAX_BUFFERED 65535 -#if BORINGSSL_API_VERSION >= 10 +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); @@ -40,19 +40,19 @@ static ngx_int_t ngx_quic_crypto_input(n static SSL_QUIC_METHOD quic_method = { -#if BORINGSSL_API_VERSION >= 10 - ngx_quic_set_read_secret, - ngx_quic_set_write_secret, +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER + .set_read_secret = ngx_quic_set_read_secret, + .set_write_secret = ngx_quic_set_write_secret, #else - ngx_quic_set_encryption_secrets, + .set_encryption_secrets = ngx_quic_set_encryption_secrets, #endif - ngx_quic_add_handshake_data, - ngx_quic_flush_flight, - ngx_quic_send_alert, + .add_handshake_data = ngx_quic_add_handshake_data, + .flush_flight = ngx_quic_flush_flight, + .send_alert = ngx_quic_send_alert, }; -#if BORINGSSL_API_VERSION >= 10 +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, From pluknet at nginx.com Tue Oct 11 10:35:53 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Tue, 11 Oct 2022 14:35:53 +0400 Subject: [PATCH 4 of 4] QUIC: removed compatibility with older BoringSSL API In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1665484416 -14400 # Tue Oct 11 14:33:36 2022 +0400 # Branch quic # Node ID a75c44ea9902d86a9e88262c3634e34d86374ae4 # Parent c0165ddcb1c6981f8e5230081f03a277f62d20c3 QUIC: removed compatibility with older BoringSSL API. SSL_CIPHER_get_protocol_id() appeared in BoringSSL somewhere between BORINGSSL_API_VERSION 12 and 13 for compatibility with OpenSSL 1.1.1. It was adopted without a proper macro test, which remained unnoticed. This justifies that such old BoringSSL API isn't widely used and its support can be dropped. While here, removed SSL_set_quic_use_legacy_codepoint() that became useless after the default was flipped in BoringSSL over a year ago. 
diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c --- a/src/event/quic/ngx_event_quic_ssl.c +++ b/src/event/quic/ngx_event_quic_ssl.c @@ -18,7 +18,7 @@ #define NGX_QUIC_MAX_BUFFERED 65535 -#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER +#if defined OPENSSL_IS_BORINGSSL || defined LIBRESSL_VERSION_NUMBER static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); @@ -40,7 +40,7 @@ static ngx_int_t ngx_quic_crypto_input(n static SSL_QUIC_METHOD quic_method = { -#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER +#if defined OPENSSL_IS_BORINGSSL || defined LIBRESSL_VERSION_NUMBER .set_read_secret = ngx_quic_set_read_secret, .set_write_secret = ngx_quic_set_write_secret, #else @@ -52,7 +52,7 @@ static SSL_QUIC_METHOD quic_method = { }; -#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER +#if defined OPENSSL_IS_BORINGSSL || defined LIBRESSL_VERSION_NUMBER static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, @@ -563,10 +563,6 @@ ngx_quic_init_connection(ngx_connection_ } #endif -#if (BORINGSSL_API_VERSION >= 13 && BORINGSSL_API_VERSION < 15) - SSL_set_quic_use_legacy_codepoint(ssl_conn, 0); -#endif - qsock = ngx_quic_get_socket(c); dcid.data = qsock->sid.id; @@ -602,7 +598,7 @@ ngx_quic_init_connection(ngx_connection_ return NGX_ERROR; } -#if BORINGSSL_API_VERSION >= 11 +#ifdef OPENSSL_IS_BORINGSSL if (SSL_set_quic_early_data_context(ssl_conn, p, clen) == 0) { ngx_log_error(NGX_LOG_INFO, c->log, 0, "quic SSL_set_quic_early_data_context() failed"); From alvn at alvn.dk Tue Oct 11 11:04:36 2022 From: alvn at alvn.dk (alvn at alvn.dk) Date: Tue, 11 Oct 2022 13:04:36 +0200 (CEST) Subject: [nginx] allowing auth_request to proxy TOO_MANY_REQUESTS In-Reply-To: CAJhCB5SfypWO7G4LnbJ283vT8kCDNWuTSX3B3p1JBPR=tKZLRg@mail.gmail.com Message-ID: <142666848.1.1665486276619@alvn> Hi! Thank you for the answer! I have tried your suggestion, but it seems to not quite fit my use case. Does your suggestion not eliminate the authentication server entirely for any upstream servers? My preferred use case would be to have auth_request intercept all calls, and only relay the accepted ones. Something like this: ------------ server { auth_request /auth; location /v1/endpoint { proxy_pass http://localhost:7777/v1; } location /v2/endpoint { proxy_pass http://localhost:6666/v2; } location = /auth { internal; proxy_pass http://localhost:8888/authentication; [..] } } ----------- With the authentication server responding with X-Accel-Redirect, it still gets interpreted by auth_request and 429 can never be sent directly to the user. From arut at nginx.com Tue Oct 11 13:01:11 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 11 Oct 2022 17:01:11 +0400 Subject: [PATCH] Core: support for reading PROXY protocol v2 TLVs In-Reply-To: References: <4b856f1dff939e4eb9c1.1661961135@arut-laptop> <20220905132318.s27wgtof6wuqde7x@N00W24XTQX> <20220909154658.fpnpndo2opnnzywx@N00W24XTQX> <20220913150304.k2fjjdxgesgzbilu@N00W24XTQX> <20220927094125.w7oo4g2quw3yyqfh@N00W24XTQX> Message-ID: <20221011130111.oiljq55eydpp3dh6@N00W24XTQX> Hi, On Tue, Oct 11, 2022 at 04:20:52AM +0300, Maxim Dounin wrote: > Hello! > > On Tue, Sep 27, 2022 at 01:41:25PM +0400, Roman Arutyunyan wrote: > > [...] 
> > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1664263604 -14400 > > # Tue Sep 27 11:26:44 2022 +0400 > > # Node ID 38940ff7246574aa19a19c76b072073c34f191be > > # Parent ba5cf8f73a2d0a3615565bf9545f3d65216a0530 > > PROXY protocol v2 TLV variables. > > > > The variables have prefix $proxy_protocol_tlv_ and are accessible by name > > and by type. Examples are: $proxy_protocol_tlv_0x01, $proxy_protocol_tlv_alpn. > > > > diff --git a/src/core/ngx_proxy_protocol.c b/src/core/ngx_proxy_protocol.c > > --- a/src/core/ngx_proxy_protocol.c > > +++ b/src/core/ngx_proxy_protocol.c > > @@ -15,6 +15,12 @@ > > > > #define ngx_proxy_protocol_parse_uint16(p) ((p)[0] << 8 | (p)[1]) > > > > +#define ngx_proxy_protocol_parse_uint32(p) \ > > + ( ((uint32_t) (p)[0] << 24) \ > > + + ( (p)[1] << 16) \ > > + + ( (p)[2] << 8) \ > > + + ( (p)[3]) ) > > + > > > > typedef struct { > > u_char signature[12]; > > @@ -40,6 +46,24 @@ typedef struct { > > } ngx_proxy_protocol_inet6_addrs_t; > > > > > > +typedef struct { > > + u_char type; > > + u_char len[2]; > > +} ngx_proxy_protocol_tlv_t; > > + > > + > > +typedef struct { > > + u_char client; > > + u_char verify[4]; > > +} ngx_proxy_protocol_tlv_ssl_t; > > + > > + > > +typedef struct { > > + ngx_str_t name; > > + ngx_uint_t type; > > +} ngx_proxy_protocol_tlv_entry_t; > > + > > + > > static u_char *ngx_proxy_protocol_read_addr(ngx_connection_t *c, u_char *p, > > u_char *last, ngx_str_t *addr); > > static u_char *ngx_proxy_protocol_read_port(u_char *p, u_char *last, > > @@ -48,6 +72,26 @@ static u_char *ngx_proxy_protocol_v2_rea > > u_char *last); > > > > > > +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_entries[] = { > > + { ngx_string("alpn"), 0x01 }, > > + { ngx_string("authority"), 0x02 }, > > + { ngx_string("unique_id"), 0x05 }, > > + { ngx_string("ssl"), 0x20 }, > > + { ngx_string("netns"), 0x30 }, > > + { ngx_null_string, 0x00 } > > +}; > > + > > + > > +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_ssl_entries[] = { > > + { ngx_string("version"), 0x21 }, > > + { ngx_string("cn"), 0x22 }, > > + { ngx_string("cipher"), 0x23 }, > > + { ngx_string("sig_alg"), 0x24 }, > > + { ngx_string("key_alg"), 0x25 }, > > + { ngx_null_string, 0x00 } > > +}; > > + > > + > > u_char * > > ngx_proxy_protocol_read(ngx_connection_t *c, u_char *buf, u_char *last) > > { > > @@ -412,11 +456,145 @@ ngx_proxy_protocol_v2_read(ngx_connectio > > &pp->src_addr, pp->src_port, &pp->dst_addr, pp->dst_port); > > > > if (buf < end) { > > - ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, > > - "PROXY protocol v2 %z bytes of tlv ignored", end - buf); > > + pp->tlvs.data = ngx_pnalloc(c->pool, end - buf); > > + if (pp->tlvs.data == NULL) { > > + return NULL; > > + } > > + > > + ngx_memcpy(pp->tlvs.data, buf, end - buf); > > + pp->tlvs.len = end - buf; > > } > > > > c->proxy_protocol = pp; > > > > return end; > > } > > + > > + > > +ngx_int_t > > +ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, ngx_str_t *tlvs, > > + ngx_uint_t type, ngx_str_t *value) > > This probably can be made static and moved after > ngx_proxy_protocol_get_tlv(). OK. In fact, I kept this function public for vendor-specific TLV variables which can be added by third-party modules. However, ngx_proxy_protocol_get_tlv() seems to be enough despite extra tlv type parsing. 
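For example (just a sketch, not something from the patch), a third-party module could still fetch a vendor-specific TLV, say type 0xEA, through the public function by passing the hex name:

    ngx_str_t  name = ngx_string("0xea");
    ngx_str_t  value;

    if (ngx_proxy_protocol_get_tlv(c, &name, &value) == NGX_OK) {
        /* value.data / value.len point to the raw TLV payload */
    }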
> > +{ > > + u_char *p; > > + size_t n, len; > > + ngx_proxy_protocol_tlv_t *tlv; > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, > > + "PROXY protocol v2 lookup tlv:%02xi", type); > > + > > + p = tlvs->data; > > + n = tlvs->len; > > + > > + while (n) { > > + if (n < sizeof(ngx_proxy_protocol_tlv_t)) { > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); > > + return NGX_ERROR; > > + } > > + > > + tlv = (ngx_proxy_protocol_tlv_t *) p; > > + len = ngx_proxy_protocol_parse_uint16(tlv->len); > > + > > + p += sizeof(ngx_proxy_protocol_tlv_t); > > + n -= sizeof(ngx_proxy_protocol_tlv_t); > > + > > + if (n < len) { > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); > > + return NGX_ERROR; > > + } > > + > > + ngx_log_debug2(NGX_LOG_DEBUG_CORE, c->log, 0, > > + "PROXY protocol v2 tlv:0x%02xd len:%uz", tlv->type, len); > > I tend to think this is going to be too chatty on real load with > multiple TLVs, and probably should be removed or #if 0'ed. OK, removed. > > + > > + if (tlv->type == type) { > > + value->data = p; > > + value->len = len; > > + return NGX_OK; > > + } > > + > > + p += len; > > + n -= len; > > + } > > + > > + return NGX_DECLINED; > > +} > > + > > + > > +ngx_int_t > > +ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, > > + ngx_str_t *value) > > +{ > > + u_char *p; > > + size_t n; > > + uint32_t verify; > > + ngx_str_t ssl, *tlvs; > > + ngx_int_t rc, type; > > + ngx_proxy_protocol_tlv_ssl_t *tlv_ssl; > > + ngx_proxy_protocol_tlv_entry_t *te; > > + > > + if (c->proxy_protocol == NULL) { > > + return NGX_DECLINED; > > + } > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, > > + "PROXY protocol v2 get tlv \"%V\"", name); > > + > > + te = ngx_proxy_protocol_tlv_entries; > > + tlvs = &c->proxy_protocol->tlvs; > > + > > + p = name->data; > > + n = name->len; > > + > > + if (n >= 4 && p[0] == 's' && p[1] == 's' && p[2] == 'l' && p[3] == '_') { > > + > > + rc = ngx_proxy_protocol_lookup_tlv(c, tlvs, 0x20, &ssl); > > + if (rc != NGX_OK) { > > + return rc; > > + } > > + > > + if (ssl.len < sizeof(ngx_proxy_protocol_tlv_ssl_t)) { > > + return NGX_ERROR; > > + } > > + > > + p += 4; > > + n -= 4; > > + > > + if (n == 6 && ngx_strncmp(p, "verify", 6) == 0) { > > + > > + tlv_ssl = (ngx_proxy_protocol_tlv_ssl_t *) ssl.data; > > + verify = ngx_proxy_protocol_parse_uint32(tlv_ssl->verify); > > + > > + value->data = ngx_pnalloc(c->pool, NGX_INT32_LEN); > > + if (value->data == NULL) { > > + return NGX_ERROR; > > + } > > + > > + value->len = ngx_sprintf(value->data, "%uD", verify) > > + - value->data; > > + return NGX_OK; > > + } > > + > > + ssl.data += sizeof(ngx_proxy_protocol_tlv_ssl_t); > > + ssl.len -= sizeof(ngx_proxy_protocol_tlv_ssl_t); > > + > > + te = ngx_proxy_protocol_tlv_ssl_entries; > > + tlvs = &ssl; > > + } > > + > > + if (n >= 2 && p[0] == '0' && p[1] == 'x') { > > + > > + type = ngx_hextoi(p + 2, n - 2); > > + if (type == NGX_ERROR) { > > + return NGX_ERROR; > > This probably needs some error message. OK, added. > > + } > > + > > + return ngx_proxy_protocol_lookup_tlv(c, tlvs, type, value); > > + } > > + > > + for ( /* void */ ; te->type; te++) { > > + if (te->name.len == n && ngx_strncmp(te->name.data, p, n) == 0) { > > + return ngx_proxy_protocol_lookup_tlv(c, tlvs, te->type, value); > > + } > > + } > > + > > + return NGX_DECLINED; > > Invalid/unknown names will silently result in empty variables. 
I > tend to think this is going to be a problem, especially if we'll > introduce additional names at some point. Some error instead > might be a good idea. And here. > > +} > > diff --git a/src/core/ngx_proxy_protocol.h b/src/core/ngx_proxy_protocol.h > > --- a/src/core/ngx_proxy_protocol.h > > +++ b/src/core/ngx_proxy_protocol.h > > @@ -21,6 +21,7 @@ struct ngx_proxy_protocol_s { > > ngx_str_t dst_addr; > > in_port_t src_port; > > in_port_t dst_port; > > + ngx_str_t tlvs; > > }; > > > > > > @@ -28,6 +29,10 @@ u_char *ngx_proxy_protocol_read(ngx_conn > > u_char *last); > > u_char *ngx_proxy_protocol_write(ngx_connection_t *c, u_char *buf, > > u_char *last); > > +ngx_int_t ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, ngx_str_t *tlvs, > > + ngx_uint_t type, ngx_str_t *value); > > +ngx_int_t ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, > > + ngx_str_t *value); > > > > > > #endif /* _NGX_PROXY_PROTOCOL_H_INCLUDED_ */ > > diff --git a/src/http/ngx_http_variables.c b/src/http/ngx_http_variables.c > > --- a/src/http/ngx_http_variables.c > > +++ b/src/http/ngx_http_variables.c > > @@ -61,6 +61,8 @@ static ngx_int_t ngx_http_variable_proxy > > ngx_http_variable_value_t *v, uintptr_t data); > > static ngx_int_t ngx_http_variable_proxy_protocol_port(ngx_http_request_t *r, > > ngx_http_variable_value_t *v, uintptr_t data); > > +static ngx_int_t ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, > > + ngx_http_variable_value_t *v, uintptr_t data); > > static ngx_int_t ngx_http_variable_server_addr(ngx_http_request_t *r, > > ngx_http_variable_value_t *v, uintptr_t data); > > static ngx_int_t ngx_http_variable_server_port(ngx_http_request_t *r, > > @@ -214,6 +216,10 @@ static ngx_http_variable_t ngx_http_cor > > ngx_http_variable_proxy_protocol_port, > > offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, > > > > + { ngx_string("proxy_protocol_tlv_"), NULL, > > + ngx_http_variable_proxy_protocol_tlv, > > + 0, NGX_HTTP_VAR_PREFIX, 0 }, > > + > > { ngx_string("server_addr"), NULL, ngx_http_variable_server_addr, 0, 0, 0 }, > > > > { ngx_string("server_port"), NULL, ngx_http_variable_server_port, 0, 0, 0 }, > > @@ -1387,6 +1393,39 @@ ngx_http_variable_proxy_protocol_port(ng > > > > > > static ngx_int_t > > +ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, > > + ngx_http_variable_value_t *v, uintptr_t data) > > +{ > > + ngx_str_t *name = (ngx_str_t *) data; > > + > > + ngx_int_t rc; > > + ngx_str_t tlv, value; > > + > > + tlv.len = name->len - (sizeof("proxy_protocol_tlv_") - 1); > > + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; > > + > > + rc = ngx_proxy_protocol_get_tlv(r->connection, &tlv, &value); > > + > > + if (rc == NGX_ERROR) { > > + return NGX_ERROR; > > + } > > + > > + if (rc == NGX_DECLINED) { > > + v->not_found = 1; > > + return NGX_OK; > > + } > > + > > + v->len = value.len; > > + v->valid = 1; > > + v->no_cacheable = 0; > > + v->not_found = 0; > > + v->data = value.data; > > + > > + return NGX_OK; > > +} > > + > > + > > +static ngx_int_t > > ngx_http_variable_server_addr(ngx_http_request_t *r, > > ngx_http_variable_value_t *v, uintptr_t data) > > { > > diff --git a/src/stream/ngx_stream_variables.c b/src/stream/ngx_stream_variables.c > > --- a/src/stream/ngx_stream_variables.c > > +++ b/src/stream/ngx_stream_variables.c > > @@ -23,6 +23,8 @@ static ngx_int_t ngx_stream_variable_pro > > ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > > static ngx_int_t ngx_stream_variable_proxy_protocol_port( > > 
ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > > +static ngx_int_t ngx_stream_variable_proxy_protocol_tlv( > > + ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > > static ngx_int_t ngx_stream_variable_server_addr(ngx_stream_session_t *s, > > ngx_stream_variable_value_t *v, uintptr_t data); > > static ngx_int_t ngx_stream_variable_server_port(ngx_stream_session_t *s, > > @@ -79,6 +81,10 @@ static ngx_stream_variable_t ngx_stream > > ngx_stream_variable_proxy_protocol_port, > > offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, > > > > + { ngx_string("proxy_protocol_tlv_"), NULL, > > + ngx_stream_variable_proxy_protocol_tlv, > > + 0, NGX_STREAM_VAR_PREFIX, 0 }, > > + > > { ngx_string("server_addr"), NULL, > > ngx_stream_variable_server_addr, 0, 0, 0 }, > > > > @@ -622,6 +628,39 @@ ngx_stream_variable_proxy_protocol_port( > > > > > > static ngx_int_t > > +ngx_stream_variable_proxy_protocol_tlv(ngx_stream_session_t *s, > > + ngx_stream_variable_value_t *v, uintptr_t data) > > +{ > > + ngx_str_t *name = (ngx_str_t *) data; > > + > > + ngx_int_t rc; > > + ngx_str_t tlv, value; > > + > > + tlv.len = name->len - (sizeof("proxy_protocol_tlv_") - 1); > > + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; > > + > > + rc = ngx_proxy_protocol_get_tlv(s->connection, &tlv, &value); > > + > > + if (rc == NGX_ERROR) { > > + return NGX_ERROR; > > + } > > + > > + if (rc == NGX_DECLINED) { > > + v->not_found = 1; > > + return NGX_OK; > > + } > > + > > + v->len = value.len; > > + v->valid = 1; > > + v->no_cacheable = 0; > > + v->not_found = 0; > > + v->data = value.data; > > + > > + return NGX_OK; > > +} > > + > > + > > +static ngx_int_t > > ngx_stream_variable_server_addr(ngx_stream_session_t *s, > > ngx_stream_variable_value_t *v, uintptr_t data) > > { > > Otherwise looks good. [..] -- Roman -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1664263604 -14400 # Tue Sep 27 11:26:44 2022 +0400 # Node ID 2774f8d59b108635752f9f2dbe3a5394a3650b85 # Parent ba5cf8f73a2d0a3615565bf9545f3d65216a0530 PROXY protocol v2 TLV variables. The variables have prefix $proxy_protocol_tlv_ and are accessible by name and by type. Examples are: $proxy_protocol_tlv_0x01, $proxy_protocol_tlv_alpn. 
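For illustration only (this is not part of the diff below), the new variables are used like any other nginx variables, e.g.:

    server {
        listen 8080 proxy_protocol;

        add_header X-ALPN      $proxy_protocol_tlv_alpn;
        add_header X-Unique-ID $proxy_protocol_tlv_unique_id;
        add_header X-TLV-05    $proxy_protocol_tlv_0x05;
    }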
diff --git a/src/core/ngx_proxy_protocol.c b/src/core/ngx_proxy_protocol.c --- a/src/core/ngx_proxy_protocol.c +++ b/src/core/ngx_proxy_protocol.c @@ -15,6 +15,12 @@ #define ngx_proxy_protocol_parse_uint16(p) ((p)[0] << 8 | (p)[1]) +#define ngx_proxy_protocol_parse_uint32(p) \ + ( ((uint32_t) (p)[0] << 24) \ + + ( (p)[1] << 16) \ + + ( (p)[2] << 8) \ + + ( (p)[3]) ) + typedef struct { u_char signature[12]; @@ -40,12 +46,52 @@ typedef struct { } ngx_proxy_protocol_inet6_addrs_t; +typedef struct { + u_char type; + u_char len[2]; +} ngx_proxy_protocol_tlv_t; + + +typedef struct { + u_char client; + u_char verify[4]; +} ngx_proxy_protocol_tlv_ssl_t; + + +typedef struct { + ngx_str_t name; + ngx_uint_t type; +} ngx_proxy_protocol_tlv_entry_t; + + static u_char *ngx_proxy_protocol_read_addr(ngx_connection_t *c, u_char *p, u_char *last, ngx_str_t *addr); static u_char *ngx_proxy_protocol_read_port(u_char *p, u_char *last, in_port_t *port, u_char sep); static u_char *ngx_proxy_protocol_v2_read(ngx_connection_t *c, u_char *buf, u_char *last); +static ngx_int_t ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, + ngx_str_t *tlvs, ngx_uint_t type, ngx_str_t *value); + + +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_entries[] = { + { ngx_string("alpn"), 0x01 }, + { ngx_string("authority"), 0x02 }, + { ngx_string("unique_id"), 0x05 }, + { ngx_string("ssl"), 0x20 }, + { ngx_string("netns"), 0x30 }, + { ngx_null_string, 0x00 } +}; + + +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_ssl_entries[] = { + { ngx_string("version"), 0x21 }, + { ngx_string("cn"), 0x22 }, + { ngx_string("cipher"), 0x23 }, + { ngx_string("sig_alg"), 0x24 }, + { ngx_string("key_alg"), 0x25 }, + { ngx_null_string, 0x00 } +}; u_char * @@ -412,11 +458,147 @@ ngx_proxy_protocol_v2_read(ngx_connectio &pp->src_addr, pp->src_port, &pp->dst_addr, pp->dst_port); if (buf < end) { - ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, - "PROXY protocol v2 %z bytes of tlv ignored", end - buf); + pp->tlvs.data = ngx_pnalloc(c->pool, end - buf); + if (pp->tlvs.data == NULL) { + return NULL; + } + + ngx_memcpy(pp->tlvs.data, buf, end - buf); + pp->tlvs.len = end - buf; } c->proxy_protocol = pp; return end; } + + +ngx_int_t +ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, + ngx_str_t *value) +{ + u_char *p; + size_t n; + uint32_t verify; + ngx_str_t ssl, *tlvs; + ngx_int_t rc, type; + ngx_proxy_protocol_tlv_ssl_t *tlv_ssl; + ngx_proxy_protocol_tlv_entry_t *te; + + if (c->proxy_protocol == NULL) { + return NGX_DECLINED; + } + + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, + "PROXY protocol v2 get tlv \"%V\"", name); + + te = ngx_proxy_protocol_tlv_entries; + tlvs = &c->proxy_protocol->tlvs; + + p = name->data; + n = name->len; + + if (n >= 4 && p[0] == 's' && p[1] == 's' && p[2] == 'l' && p[3] == '_') { + + rc = ngx_proxy_protocol_lookup_tlv(c, tlvs, 0x20, &ssl); + if (rc != NGX_OK) { + return rc; + } + + if (ssl.len < sizeof(ngx_proxy_protocol_tlv_ssl_t)) { + return NGX_ERROR; + } + + p += 4; + n -= 4; + + if (n == 6 && ngx_strncmp(p, "verify", 6) == 0) { + + tlv_ssl = (ngx_proxy_protocol_tlv_ssl_t *) ssl.data; + verify = ngx_proxy_protocol_parse_uint32(tlv_ssl->verify); + + value->data = ngx_pnalloc(c->pool, NGX_INT32_LEN); + if (value->data == NULL) { + return NGX_ERROR; + } + + value->len = ngx_sprintf(value->data, "%uD", verify) + - value->data; + return NGX_OK; + } + + ssl.data += sizeof(ngx_proxy_protocol_tlv_ssl_t); + ssl.len -= sizeof(ngx_proxy_protocol_tlv_ssl_t); + + te = 
ngx_proxy_protocol_tlv_ssl_entries; + tlvs = &ssl; + } + + if (n >= 2 && p[0] == '0' && p[1] == 'x') { + + type = ngx_hextoi(p + 2, n - 2); + if (type == NGX_ERROR) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, + "invalid PROXY protocol TLV \"%V\"", name); + return NGX_ERROR; + } + + return ngx_proxy_protocol_lookup_tlv(c, tlvs, type, value); + } + + for ( /* void */ ; te->type; te++) { + if (te->name.len == n && ngx_strncmp(te->name.data, p, n) == 0) { + return ngx_proxy_protocol_lookup_tlv(c, tlvs, te->type, value); + } + } + + ngx_log_error(NGX_LOG_ERR, c->log, 0, + "PROXY protocol TLV \"%V\" not found", name); + + return NGX_DECLINED; +} + + +static ngx_int_t +ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, ngx_str_t *tlvs, + ngx_uint_t type, ngx_str_t *value) +{ + u_char *p; + size_t n, len; + ngx_proxy_protocol_tlv_t *tlv; + + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, + "PROXY protocol v2 lookup tlv:%02xi", type); + + p = tlvs->data; + n = tlvs->len; + + while (n) { + if (n < sizeof(ngx_proxy_protocol_tlv_t)) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); + return NGX_ERROR; + } + + tlv = (ngx_proxy_protocol_tlv_t *) p; + len = ngx_proxy_protocol_parse_uint16(tlv->len); + + p += sizeof(ngx_proxy_protocol_tlv_t); + n -= sizeof(ngx_proxy_protocol_tlv_t); + + if (n < len) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); + return NGX_ERROR; + } + + if (tlv->type == type) { + value->data = p; + value->len = len; + return NGX_OK; + } + + p += len; + n -= len; + } + + return NGX_DECLINED; +} diff --git a/src/core/ngx_proxy_protocol.h b/src/core/ngx_proxy_protocol.h --- a/src/core/ngx_proxy_protocol.h +++ b/src/core/ngx_proxy_protocol.h @@ -21,6 +21,7 @@ struct ngx_proxy_protocol_s { ngx_str_t dst_addr; in_port_t src_port; in_port_t dst_port; + ngx_str_t tlvs; }; @@ -28,6 +29,8 @@ u_char *ngx_proxy_protocol_read(ngx_conn u_char *last); u_char *ngx_proxy_protocol_write(ngx_connection_t *c, u_char *buf, u_char *last); +ngx_int_t ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, + ngx_str_t *value); #endif /* _NGX_PROXY_PROTOCOL_H_INCLUDED_ */ diff --git a/src/http/ngx_http_variables.c b/src/http/ngx_http_variables.c --- a/src/http/ngx_http_variables.c +++ b/src/http/ngx_http_variables.c @@ -61,6 +61,8 @@ static ngx_int_t ngx_http_variable_proxy ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_proxy_protocol_port(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_server_addr(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_server_port(ngx_http_request_t *r, @@ -214,6 +216,10 @@ static ngx_http_variable_t ngx_http_cor ngx_http_variable_proxy_protocol_port, offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, + { ngx_string("proxy_protocol_tlv_"), NULL, + ngx_http_variable_proxy_protocol_tlv, + 0, NGX_HTTP_VAR_PREFIX, 0 }, + { ngx_string("server_addr"), NULL, ngx_http_variable_server_addr, 0, 0, 0 }, { ngx_string("server_port"), NULL, ngx_http_variable_server_port, 0, 0, 0 }, @@ -1387,6 +1393,39 @@ ngx_http_variable_proxy_protocol_port(ng static ngx_int_t +ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data) +{ + ngx_str_t *name = (ngx_str_t *) data; + + ngx_int_t rc; + ngx_str_t tlv, value; + + tlv.len = name->len 
- (sizeof("proxy_protocol_tlv_") - 1); + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; + + rc = ngx_proxy_protocol_get_tlv(r->connection, &tlv, &value); + + if (rc == NGX_ERROR) { + return NGX_ERROR; + } + + if (rc == NGX_DECLINED) { + v->not_found = 1; + return NGX_OK; + } + + v->len = value.len; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = value.data; + + return NGX_OK; +} + + +static ngx_int_t ngx_http_variable_server_addr(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data) { diff --git a/src/stream/ngx_stream_variables.c b/src/stream/ngx_stream_variables.c --- a/src/stream/ngx_stream_variables.c +++ b/src/stream/ngx_stream_variables.c @@ -23,6 +23,8 @@ static ngx_int_t ngx_stream_variable_pro ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_variable_proxy_protocol_port( ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_stream_variable_proxy_protocol_tlv( + ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_variable_server_addr(ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_variable_server_port(ngx_stream_session_t *s, @@ -79,6 +81,10 @@ static ngx_stream_variable_t ngx_stream ngx_stream_variable_proxy_protocol_port, offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, + { ngx_string("proxy_protocol_tlv_"), NULL, + ngx_stream_variable_proxy_protocol_tlv, + 0, NGX_STREAM_VAR_PREFIX, 0 }, + { ngx_string("server_addr"), NULL, ngx_stream_variable_server_addr, 0, 0, 0 }, @@ -622,6 +628,39 @@ ngx_stream_variable_proxy_protocol_port( static ngx_int_t +ngx_stream_variable_proxy_protocol_tlv(ngx_stream_session_t *s, + ngx_stream_variable_value_t *v, uintptr_t data) +{ + ngx_str_t *name = (ngx_str_t *) data; + + ngx_int_t rc; + ngx_str_t tlv, value; + + tlv.len = name->len - (sizeof("proxy_protocol_tlv_") - 1); + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; + + rc = ngx_proxy_protocol_get_tlv(s->connection, &tlv, &value); + + if (rc == NGX_ERROR) { + return NGX_ERROR; + } + + if (rc == NGX_DECLINED) { + v->not_found = 1; + return NGX_OK; + } + + v->len = value.len; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = value.data; + + return NGX_OK; +} + + +static ngx_int_t ngx_stream_variable_server_addr(ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data) { From arut at nginx.com Tue Oct 11 13:09:59 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 11 Oct 2022 13:09:59 +0000 Subject: [nginx] Log only the first line of user input on PROXY protocol v1 error. Message-ID: details: https://hg.nginx.org/nginx/rev/017fd847f4f7 branches: changeset: 8071:017fd847f4f7 user: Roman Arutyunyan date: Mon Oct 10 13:57:31 2022 +0400 description: Log only the first line of user input on PROXY protocol v1 error. Previously, all received user input was logged. If a multi-line text was received from client and logged, it could reduce log readability and also make it harder to parse nginx log by scripts. The change brings to PROXY protocol the same behavior that exists for HTTP request line in ngx_http_log_error_handler(). 
diffstat: src/core/ngx_proxy_protocol.c | 8 +++++++- 1 files changed, 7 insertions(+), 1 deletions(-) diffs (19 lines): diff -r ba5cf8f73a2d -r 017fd847f4f7 src/core/ngx_proxy_protocol.c --- a/src/core/ngx_proxy_protocol.c Thu Sep 08 13:53:49 2022 +0400 +++ b/src/core/ngx_proxy_protocol.c Mon Oct 10 13:57:31 2022 +0400 @@ -139,8 +139,14 @@ skip: invalid: + for (p = buf; p < last; p++) { + if (*p == CR || *p == LF) { + break; + } + } + ngx_log_error(NGX_LOG_ERR, c->log, 0, - "broken header: \"%*s\"", (size_t) (last - buf), buf); + "broken header: \"%*s\"", (size_t) (p - buf), buf); return NULL; } From mdounin at mdounin.ru Tue Oct 11 13:21:59 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 11 Oct 2022 16:21:59 +0300 Subject: [nginx] allowing auth_request to proxy TOO_MANY_REQUESTS In-Reply-To: <142666848.1.1665486276619@alvn> References: <142666848.1.1665486276619@alvn> Message-ID: Hello! On Tue, Oct 11, 2022 at 01:04:36PM +0200, Anders Nicolaisen via nginx-devel wrote: > I have tried your suggestion, but it seems to not quite fit my use case. > > Does your suggestion not eliminate the authentication server entirely > for any upstream servers? > > My preferred use case would be to have auth_request intercept all calls, > and only relay the accepted ones. > > Something like this: > ------------ > server { > auth_request /auth; > > location /v1/endpoint { > proxy_pass http://localhost:7777/v1; > } > > location /v2/endpoint { > proxy_pass http://localhost:6666/v2; > } > > location = /auth { > internal; > proxy_pass http://localhost:8888/authentication; > [..] > } > } > ----------- > > With the authentication server responding with X-Accel-Redirect, it still gets > interpreted by auth_request and 429 can never be sent directly to the user. The X-Accel-Redirect approach replaces auth_request entirely. Instead, you pass all requests to the upstream server, and this upstream server decides whether to return an error to the user, or to X-Accel-Redirect the request to an internal location which returns the actual response. E.g., server { listen 8080; location / { proxy_pass http://127.0.0.1:8081; } location @protected { proxy_pass ...; } } server { listen 8081; # an example X-Accel-Redirect server # which rejects requests with 'foo' argument set to a true # value if ($arg_foo) { return 429; } add_header X-Accel-Redirect @protected; return 204; } Hope this helps. -- Maxim Dounin http://mdounin.ru/ From yar at nginx.com Tue Oct 11 14:50:40 2022 From: yar at nginx.com (Yaroslav Zhuravlev) Date: Tue, 11 Oct 2022 15:50:40 +0100 Subject: [PATCH] Documented behaviour of a single server in upstream with keepalive In-Reply-To: References: Message-ID: > On 10 Oct 2022, at 00:55, Maxim Dounin wrote: > > Hello! > > On Mon, Oct 03, 2022 at 09:21:52PM +0100, Yaroslav Zhuravlev wrote: > >> # HG changeset patch >> # User Yaroslav Zhuravlev >> # Date 1663861151 -3600 >> # Thu Sep 22 16:39:11 2022 +0100 >> # Node ID aa3505dc76f13086703543cb079a13e48c57386e >> # Parent 9708787aafc70744296baceb2aa0092401a4ef34 >> Documented behaviour of a single server in upstream with keepalive. >> >> diff --git a/xml/en/docs/http/ngx_http_fastcgi_module.xml b/xml/en/docs/http/ngx_http_fastcgi_module.xml >> --- a/xml/en/docs/http/ngx_http_fastcgi_module.xml >> +++ b/xml/en/docs/http/ngx_http_fastcgi_module.xml >> @@ -10,7 +10,7 @@ >> > link="/en/docs/http/ngx_http_fastcgi_module.html" >> lang="en" >> - rev="53"> >> + rev="54"> >> >>
>> >> @@ -1071,7 +1071,9 @@ >> >> >> error >> -an error occurred while establishing a connection with the >> +an error occurred while establishing >> +or reusing >> +a connection with the >> server, passing a request to it, or reading the response header; > > That's bullshit. No errors are reported "while reusing a > connection". If there is an already established cached > connection, it is simply used. Errors, if any, might happen > later, while "passing a request to it". > > [...] Thank you, these parts were removed from the patch. > >> diff --git a/xml/en/docs/http/ngx_http_upstream_module.xml b/xml/en/docs/http/ngx_http_upstream_module.xml >> --- a/xml/en/docs/http/ngx_http_upstream_module.xml >> +++ b/xml/en/docs/http/ngx_http_upstream_module.xml >> @@ -10,7 +10,7 @@ >> > link="/en/docs/http/ngx_http_upstream_module.html" >> lang="en" >> - rev="88"> >> + rev="89"> >> >>
>> >> @@ -351,6 +351,11 @@ >> If there is only a single server in a group, max_fails, >> fail_timeout and slow_start parameters >> are ignored, and such a server will never be considered unavailable. >> +If an error occurred while trying to reuse a >> +keepalive connection >> +with a single server, and the request is allowed to be passed to the >> +next server >> +on error, such server will be selected again. >> >> > > If an error occurs? > > The "with a single server" clause looks wrong, we are talking > about a group with only a single server here. It probably should > be either "with such server" or "the server" (probably "... with > such server ... the server will be ..." would be good enough > considering the whole sentence). Thanks, updated: # HG changeset patch # User Yaroslav Zhuravlev # Date 1663861151 -3600 # Thu Sep 22 16:39:11 2022 +0100 # Node ID 3b878f0c18cc277bfccb6095afd2cc7dc0cdec0f # Parent 9708787aafc70744296baceb2aa0092401a4ef34 Documented behaviour of a single server in upstream with keepalive. diff --git a/xml/en/docs/http/ngx_http_upstream_module.xml b/xml/en/docs/http/ngx_http_upstream_module.xml --- a/xml/en/docs/http/ngx_http_upstream_module.xml +++ b/xml/en/docs/http/ngx_http_upstream_module.xml @@ -10,7 +10,7 @@ + rev="89">
@@ -350,7 +350,13 @@ If there is only a single server in a group, max_fails, fail_timeout and slow_start parameters -are ignored, and such a server will never be considered unavailable. +are ignored, and such server will never be considered unavailable. +If an error occurs +while passing a request through +a keepalive connection to such server +and the request is allowed to be passed to the +next +server on error, the server will be selected again. diff --git a/xml/ru/docs/http/ngx_http_upstream_module.xml b/xml/ru/docs/http/ngx_http_upstream_module.xml --- a/xml/ru/docs/http/ngx_http_upstream_module.xml +++ b/xml/ru/docs/http/ngx_http_upstream_module.xml @@ -10,7 +10,7 @@ + rev="89">
@@ -355,6 +355,12 @@ Если в группе только один сервер, параметры max_fails, fail_timeout и slow_start игнорируются и такой сервер никогда не будет считаться недоступным. +Если при отправке запроса по +постоянному соединению к такому серверу +происходит ошибка +и разрешена передача запроса +следующему +серверу в случае ошибки, то сервер будет выбран снова. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org From mdounin at mdounin.ru Tue Oct 11 20:53:56 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 11 Oct 2022 23:53:56 +0300 Subject: [PATCH] Core: support for reading PROXY protocol v2 TLVs In-Reply-To: <20221011130111.oiljq55eydpp3dh6@N00W24XTQX> References: <20220905132318.s27wgtof6wuqde7x@N00W24XTQX> <20220909154658.fpnpndo2opnnzywx@N00W24XTQX> <20220913150304.k2fjjdxgesgzbilu@N00W24XTQX> <20220927094125.w7oo4g2quw3yyqfh@N00W24XTQX> <20221011130111.oiljq55eydpp3dh6@N00W24XTQX> Message-ID: Hello! On Tue, Oct 11, 2022 at 05:01:11PM +0400, Roman Arutyunyan wrote: [...] > # HG changeset patch > # User Roman Arutyunyan > # Date 1664263604 -14400 > # Tue Sep 27 11:26:44 2022 +0400 > # Node ID 2774f8d59b108635752f9f2dbe3a5394a3650b85 > # Parent ba5cf8f73a2d0a3615565bf9545f3d65216a0530 > PROXY protocol v2 TLV variables. > > The variables have prefix $proxy_protocol_tlv_ and are accessible by name > and by type. Examples are: $proxy_protocol_tlv_0x01, $proxy_protocol_tlv_alpn. > > diff --git a/src/core/ngx_proxy_protocol.c b/src/core/ngx_proxy_protocol.c > --- a/src/core/ngx_proxy_protocol.c > +++ b/src/core/ngx_proxy_protocol.c > @@ -15,6 +15,12 @@ > > #define ngx_proxy_protocol_parse_uint16(p) ((p)[0] << 8 | (p)[1]) > > +#define ngx_proxy_protocol_parse_uint32(p) \ > + ( ((uint32_t) (p)[0] << 24) \ > + + ( (p)[1] << 16) \ > + + ( (p)[2] << 8) \ > + + ( (p)[3]) ) > + > > typedef struct { > u_char signature[12]; > @@ -40,12 +46,52 @@ typedef struct { > } ngx_proxy_protocol_inet6_addrs_t; > > > +typedef struct { > + u_char type; > + u_char len[2]; > +} ngx_proxy_protocol_tlv_t; > + > + > +typedef struct { > + u_char client; > + u_char verify[4]; > +} ngx_proxy_protocol_tlv_ssl_t; > + > + > +typedef struct { > + ngx_str_t name; > + ngx_uint_t type; > +} ngx_proxy_protocol_tlv_entry_t; > + > + > static u_char *ngx_proxy_protocol_read_addr(ngx_connection_t *c, u_char *p, > u_char *last, ngx_str_t *addr); > static u_char *ngx_proxy_protocol_read_port(u_char *p, u_char *last, > in_port_t *port, u_char sep); > static u_char *ngx_proxy_protocol_v2_read(ngx_connection_t *c, u_char *buf, > u_char *last); > +static ngx_int_t ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, > + ngx_str_t *tlvs, ngx_uint_t type, ngx_str_t *value); > + > + > +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_entries[] = { > + { ngx_string("alpn"), 0x01 }, > + { ngx_string("authority"), 0x02 }, > + { ngx_string("unique_id"), 0x05 }, > + { ngx_string("ssl"), 0x20 }, > + { ngx_string("netns"), 0x30 }, > + { ngx_null_string, 0x00 } > +}; > + > + > +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_ssl_entries[] = { > + { ngx_string("version"), 0x21 }, > + { ngx_string("cn"), 0x22 }, > + { ngx_string("cipher"), 0x23 }, > + { ngx_string("sig_alg"), 0x24 }, > + { ngx_string("key_alg"), 0x25 }, > + { ngx_null_string, 0x00 } > +}; > > > u_char * > @@ -412,11 +458,147 @@ ngx_proxy_protocol_v2_read(ngx_connectio > &pp->src_addr, pp->src_port, 
&pp->dst_addr, pp->dst_port); > > if (buf < end) { > - ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, > - "PROXY protocol v2 %z bytes of tlv ignored", end - buf); > + pp->tlvs.data = ngx_pnalloc(c->pool, end - buf); > + if (pp->tlvs.data == NULL) { > + return NULL; > + } > + > + ngx_memcpy(pp->tlvs.data, buf, end - buf); > + pp->tlvs.len = end - buf; > } > > c->proxy_protocol = pp; > > return end; > } > + > + > +ngx_int_t > +ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, > + ngx_str_t *value) > +{ > + u_char *p; > + size_t n; > + uint32_t verify; > + ngx_str_t ssl, *tlvs; > + ngx_int_t rc, type; > + ngx_proxy_protocol_tlv_ssl_t *tlv_ssl; > + ngx_proxy_protocol_tlv_entry_t *te; > + > + if (c->proxy_protocol == NULL) { > + return NGX_DECLINED; > + } > + > + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, > + "PROXY protocol v2 get tlv \"%V\"", name); > + > + te = ngx_proxy_protocol_tlv_entries; > + tlvs = &c->proxy_protocol->tlvs; > + > + p = name->data; > + n = name->len; > + > + if (n >= 4 && p[0] == 's' && p[1] == 's' && p[2] == 'l' && p[3] == '_') { > + > + rc = ngx_proxy_protocol_lookup_tlv(c, tlvs, 0x20, &ssl); > + if (rc != NGX_OK) { > + return rc; > + } > + > + if (ssl.len < sizeof(ngx_proxy_protocol_tlv_ssl_t)) { > + return NGX_ERROR; > + } > + > + p += 4; > + n -= 4; > + > + if (n == 6 && ngx_strncmp(p, "verify", 6) == 0) { > + > + tlv_ssl = (ngx_proxy_protocol_tlv_ssl_t *) ssl.data; > + verify = ngx_proxy_protocol_parse_uint32(tlv_ssl->verify); > + > + value->data = ngx_pnalloc(c->pool, NGX_INT32_LEN); > + if (value->data == NULL) { > + return NGX_ERROR; > + } > + > + value->len = ngx_sprintf(value->data, "%uD", verify) > + - value->data; > + return NGX_OK; > + } > + > + ssl.data += sizeof(ngx_proxy_protocol_tlv_ssl_t); > + ssl.len -= sizeof(ngx_proxy_protocol_tlv_ssl_t); > + > + te = ngx_proxy_protocol_tlv_ssl_entries; > + tlvs = &ssl; > + } > + > + if (n >= 2 && p[0] == '0' && p[1] == 'x') { > + > + type = ngx_hextoi(p + 2, n - 2); > + if (type == NGX_ERROR) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, > + "invalid PROXY protocol TLV \"%V\"", name); > + return NGX_ERROR; > + } > + > + return ngx_proxy_protocol_lookup_tlv(c, tlvs, type, value); > + } > + > + for ( /* void */ ; te->type; te++) { > + if (te->name.len == n && ngx_strncmp(te->name.data, p, n) == 0) { > + return ngx_proxy_protocol_lookup_tlv(c, tlvs, te->type, value); > + } > + } > + > + ngx_log_error(NGX_LOG_ERR, c->log, 0, > + "PROXY protocol TLV \"%V\" not found", name); "not found" suggests it's something known/valid yet not present in the TLVs provided by the client. "unknown PROXY protocol TLV ..."? 
> + > + return NGX_DECLINED; > +} > + > + > +static ngx_int_t > +ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, ngx_str_t *tlvs, > + ngx_uint_t type, ngx_str_t *value) > +{ > + u_char *p; > + size_t n, len; > + ngx_proxy_protocol_tlv_t *tlv; > + > + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, > + "PROXY protocol v2 lookup tlv:%02xi", type); > + > + p = tlvs->data; > + n = tlvs->len; > + > + while (n) { > + if (n < sizeof(ngx_proxy_protocol_tlv_t)) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); > + return NGX_ERROR; > + } > + > + tlv = (ngx_proxy_protocol_tlv_t *) p; > + len = ngx_proxy_protocol_parse_uint16(tlv->len); > + > + p += sizeof(ngx_proxy_protocol_tlv_t); > + n -= sizeof(ngx_proxy_protocol_tlv_t); > + > + if (n < len) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); > + return NGX_ERROR; > + } > + > + if (tlv->type == type) { > + value->data = p; > + value->len = len; > + return NGX_OK; > + } > + > + p += len; > + n -= len; > + } > + > + return NGX_DECLINED; > +} > diff --git a/src/core/ngx_proxy_protocol.h b/src/core/ngx_proxy_protocol.h > --- a/src/core/ngx_proxy_protocol.h > +++ b/src/core/ngx_proxy_protocol.h > @@ -21,6 +21,7 @@ struct ngx_proxy_protocol_s { > ngx_str_t dst_addr; > in_port_t src_port; > in_port_t dst_port; > + ngx_str_t tlvs; > }; > > > @@ -28,6 +29,8 @@ u_char *ngx_proxy_protocol_read(ngx_conn > u_char *last); > u_char *ngx_proxy_protocol_write(ngx_connection_t *c, u_char *buf, > u_char *last); > +ngx_int_t ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, > + ngx_str_t *value); > > > #endif /* _NGX_PROXY_PROTOCOL_H_INCLUDED_ */ > diff --git a/src/http/ngx_http_variables.c b/src/http/ngx_http_variables.c > --- a/src/http/ngx_http_variables.c > +++ b/src/http/ngx_http_variables.c > @@ -61,6 +61,8 @@ static ngx_int_t ngx_http_variable_proxy > ngx_http_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_http_variable_proxy_protocol_port(ngx_http_request_t *r, > ngx_http_variable_value_t *v, uintptr_t data); > +static ngx_int_t ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, > + ngx_http_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_http_variable_server_addr(ngx_http_request_t *r, > ngx_http_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_http_variable_server_port(ngx_http_request_t *r, > @@ -214,6 +216,10 @@ static ngx_http_variable_t ngx_http_cor > ngx_http_variable_proxy_protocol_port, > offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, > > + { ngx_string("proxy_protocol_tlv_"), NULL, > + ngx_http_variable_proxy_protocol_tlv, > + 0, NGX_HTTP_VAR_PREFIX, 0 }, > + > { ngx_string("server_addr"), NULL, ngx_http_variable_server_addr, 0, 0, 0 }, > > { ngx_string("server_port"), NULL, ngx_http_variable_server_port, 0, 0, 0 }, > @@ -1387,6 +1393,39 @@ ngx_http_variable_proxy_protocol_port(ng > > > static ngx_int_t > +ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, > + ngx_http_variable_value_t *v, uintptr_t data) > +{ > + ngx_str_t *name = (ngx_str_t *) data; > + > + ngx_int_t rc; > + ngx_str_t tlv, value; > + > + tlv.len = name->len - (sizeof("proxy_protocol_tlv_") - 1); > + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; > + > + rc = ngx_proxy_protocol_get_tlv(r->connection, &tlv, &value); > + > + if (rc == NGX_ERROR) { > + return NGX_ERROR; > + } > + > + if (rc == NGX_DECLINED) { > + v->not_found = 1; > + return NGX_OK; > + } > + > + v->len = value.len; > + v->valid = 1; > + v->no_cacheable = 0; > + v->not_found = 0; > + 
v->data = value.data; > + > + return NGX_OK; > +} > + > + > +static ngx_int_t > ngx_http_variable_server_addr(ngx_http_request_t *r, > ngx_http_variable_value_t *v, uintptr_t data) > { > diff --git a/src/stream/ngx_stream_variables.c b/src/stream/ngx_stream_variables.c > --- a/src/stream/ngx_stream_variables.c > +++ b/src/stream/ngx_stream_variables.c > @@ -23,6 +23,8 @@ static ngx_int_t ngx_stream_variable_pro > ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_stream_variable_proxy_protocol_port( > ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > +static ngx_int_t ngx_stream_variable_proxy_protocol_tlv( > + ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_stream_variable_server_addr(ngx_stream_session_t *s, > ngx_stream_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_stream_variable_server_port(ngx_stream_session_t *s, > @@ -79,6 +81,10 @@ static ngx_stream_variable_t ngx_stream > ngx_stream_variable_proxy_protocol_port, > offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, > > + { ngx_string("proxy_protocol_tlv_"), NULL, > + ngx_stream_variable_proxy_protocol_tlv, > + 0, NGX_STREAM_VAR_PREFIX, 0 }, > + > { ngx_string("server_addr"), NULL, > ngx_stream_variable_server_addr, 0, 0, 0 }, > > @@ -622,6 +628,39 @@ ngx_stream_variable_proxy_protocol_port( > > > static ngx_int_t > +ngx_stream_variable_proxy_protocol_tlv(ngx_stream_session_t *s, > + ngx_stream_variable_value_t *v, uintptr_t data) > +{ > + ngx_str_t *name = (ngx_str_t *) data; > + > + ngx_int_t rc; > + ngx_str_t tlv, value; > + > + tlv.len = name->len - (sizeof("proxy_protocol_tlv_") - 1); > + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; > + > + rc = ngx_proxy_protocol_get_tlv(s->connection, &tlv, &value); > + > + if (rc == NGX_ERROR) { > + return NGX_ERROR; > + } > + > + if (rc == NGX_DECLINED) { > + v->not_found = 1; > + return NGX_OK; > + } > + > + v->len = value.len; > + v->valid = 1; > + v->no_cacheable = 0; > + v->not_found = 0; > + v->data = value.data; > + > + return NGX_OK; > +} > + > + > +static ngx_int_t > ngx_stream_variable_server_addr(ngx_stream_session_t *s, > ngx_stream_variable_value_t *v, uintptr_t data) > { Otherwise looks good. -- Maxim Dounin http://mdounin.ru/ From alvn at alvn.dk Wed Oct 12 09:04:50 2022 From: alvn at alvn.dk (Anders Nicolaisen) Date: Wed, 12 Oct 2022 11:04:50 +0200 Subject: [nginx] allowing auth_request to proxy TOO_MANY_REQUESTS In-Reply-To: References: <142666848.1.1665486276619@alvn> Message-ID: Thanks! This does make sense, and one might be able to create a somewhat working example using this. However, this seems to introduce a couple of drawbacks, and kind of breaks the semantics of the 'auth_request'. Let me illustrate: First of all, having auth_request in the server context guards against any newly added locations that might yet be missing rules handled by the authentication server. So, whenever a new location is added, the authentication server needs to be updated as well before any requests can be redirected. This will most often actually be a good thing in an environment with a lot of rules and multiple developers. Second, if multiple developers are editing the configurations, they are not required to remember the 'internal' in order to bar these from outsiders, as this would be automatically imposed via auth_request. 
It seems to be more in line with the current semantics of auth_request, and also by far cleaner code/configurations, by having auth_request be able to relay this one more status code. P.S.: I tried to test your suggestion with this simple conf: ----- server { location / { proxy_pass http://localhost:8888/auth; } location @content { proxy_pass http://localhost:8888/; } } ---- And got this error: === 2022/10/12 08:51:09 [emerg] 1451#1451: "proxy_pass" cannot have URI part in location given by regular expression, or inside named location, or inside "if" statement, or inside "limit_except" block === I'm guessing I just did something wrong, but the error message seems to tell me that it is not possible to do it this way. -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Wed Oct 12 13:43:15 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 12 Oct 2022 16:43:15 +0300 Subject: [nginx] allowing auth_request to proxy TOO_MANY_REQUESTS In-Reply-To: References: <142666848.1.1665486276619@alvn> Message-ID: Hello! On Wed, Oct 12, 2022 at 11:04:50AM +0200, Anders Nicolaisen via nginx-devel wrote: > Thanks! > This does make sense, and one might be able to create a somewhat working > example using this. > > However, this seems to introduce a couple of drawbacks, and kind of > breaks the semantics of the 'auth_request'. > > Let me illustrate: > > First of all, having auth_request in the server context guards against > any newly added locations that might yet be missing rules handled > by the authentication server. > So, whenever a new location is added, the authentication server needs > to be updated as well before any requests can be redirected. > This will most often actually be a good thing in an environment with > a lot of rules and multiple developers. > > Second, if multiple developers are editing the configurations, they are > not required to remember the 'internal' in order to bar these from > outsiders, > as this would be automatically imposed via auth_request. > > It seems to be more in line with the current semantics of auth_request, > and also by far cleaner code/configurations, by having auth_request be > able to relay this one more status code. Sure, details of the X-Accel-Redirect semantics is different from the one provided by auth_request. If you prefer auth_request semantics, you can do the same with auth_request and appropriate handling of the 403 errors, for example (assuming the auth backend cannot be modified and returns 429): server { listen 8080; location / { auth_request /auth; error_page 403 = /error; proxy_pass ... } location = /auth { error_page 429 = /limit; proxy_intercept_errors on; proxy_pass http://127.0.0.1:8081; } location = /limit { set $limit 1; return 403; } location = /error { if ($limit) { return 429; } return 403; } } server { listen 8081; # an example X-Accel-Redirect server # which rejects requests with 'foo' header set to a true # value if ($http_foo) { return 429; } return 204; } The general issue with "having auth_request be able to relay this one more status code" as I see it is that it's not just one status code. For example, request limiting in nginx by default uses 503 status code, and it is not clear why 429 handling should be different. Further, there is the Retry-After header field, which is optional, though may appear in both 429 and 503 responses. Further, there are other temporary conditions which might be considered, such as 413 (with Retry-After) or 502/504 errors. 
Trying to extend auth_reqest to handle unrelated response codes is going to result in a lot of additional logic there, which is not needed in most configurations and will complicate things. And this is something I would prefer to avoid, especially given that the desired handling can be easily implemented simply by writing an appropriate configuration. > P.S.: > I tried to test your suggestion with this simple conf: > ----- > server { > > location / { > proxy_pass http://localhost:8888/auth; > } > location @content { > proxy_pass http://localhost:8888/; > } > } > ---- > > And got this error: > > === > 2022/10/12 08:51:09 [emerg] 1451#1451: "proxy_pass" cannot have URI part in > location given by regular expression, or inside named location, or inside > "if" statement, or inside "limit_except" block > === > > I'm guessing I just did something wrong, but the error message seems to > tell me that it is > not possible to do it this way. In named locations there are no location prefix to replace with the URI part specified in proxy_pass, so you should use proxy_pass without URI part, that is, "proxy_pass http://localhost:8888;", note no "/" at the end. See here for details: http://nginx.org/r/proxy_pass Hope this helps. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Wed Oct 12 13:57:07 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 12 Oct 2022 17:57:07 +0400 Subject: [PATCH 11 of 11] SSL: automatic rotation of session ticket keys In-Reply-To: References: <5c26fe5f6ab0bf4c0d18.1661482878@vm-bsd.mdounin.ru> <384C1C36-43CA-490B-9559-DD77DE6346E6@nginx.com> <12290DCD-4B09-4DA0-8057-16172C5F5D28@nginx.com> Message-ID: <3F412EF0-B85C-4501-97B4-0492E41EED70@nginx.com> > On 9 Oct 2022, at 08:59, Maxim Dounin wrote: > > Hello! > > On Sat, Oct 01, 2022 at 11:58:20AM +0300, Maxim Dounin wrote: > >> On Thu, Sep 29, 2022 at 08:00:03PM +0400, Sergey Kandaurov wrote: >> >>>> On 28 Sep 2022, at 22:37, Maxim Dounin wrote: >>>> >>>> On Mon, Sep 26, 2022 at 02:17:18PM +0400, Sergey Kandaurov wrote: >> >> [...] >> >>>>> And by the way, while reviewing this patch, I noticed that >>>>> OpenSSL doesn't allow a client to gracefully renew TLSv1.2 session >>>>> when the client receives a new session ticket in resumed sessions. >>>>> In practice, it is visible when client resumes a not yet expired >>>>> session encrypted with not a fresh ticket key (after rotation), >>>>> which results in sending a new session ticket. >>>>> See ssl_update_cache() for the !s->hit condition. >>>>> In the opposite, BoringSSL always allows to renew TLSv1.2 sessions. >>>> >>>> You mean on the client side? Yes, it looks like >>>> ngx_ssl_new_client_session() won't be called for such a new >>>> session ticket, and updated ticket will be never saved. This >>>> might need to be worked around. >>> >>> Yes, I mean the client side. >>> >>>> >>>> This should be safe with the key rotation logic introduced in this >>>> patch though, given that the previous key is preserved till the >>>> last ticket encrypted with it is expected to expire. >>>> >>>> One of the possible solutions might be to avoid re-encryption of >>>> tickets with the new key, as the old key is anyway expected to be >>>> available till the session expires. >>> >>> I don't think it's worth the effort. If I got you right, and >>> as far as I understand, re-encrypting the ticket essentially >>> means sending a fresh session (renewal). >> >> Well, not really. Re-encryption of a ticket does not imply >> session renewal. 
Further, doing so implies security risk: if we >> renew a session during re-encryption, this makes it possible to >> create essentially infinite sessions. And, for example, if a >> session used a client certificate, this effectively means that >> this certificate will never expire and cannot be revoked. >> >> With TLSv1.2, OpenSSL follows this logic: session expiration time >> is set when a session is created, and ticket re-encryption only >> re-encrypts the session, but doesn't change session expiration. >> As such, any certificate validation which happens during session >> creation needs to be redone once session timeout expires - and >> this makes it possible to implement certificate revocation. >> >> On the other hand, as implemented for TLSv1.3 at least in OpenSSL >> it seems not to be the case. Every ticket sent to the client >> actually creates a new session with _updated_ expiration time. As >> such, it is possible to create a session authenticated with a client >> certificate, and use this session indefinitely, even after the >> certificate will expire and/or will be revoked. >> >> This seems to be a security issue in OpenSSL. >> >> BoringSSL seems to behave similarly with TLSv1.3, that is, it >> updates session expiration time, making it possible to use an >> authenticated session for much longer than session timeout >> configured. But BoringSSL also has session's auth_timeout, which >> prevents indefinite use of the session. The auth_timeout value is >> hardcoded to 7 days (SSL_DEFAULT_SESSION_AUTH_TIMEOUT), and does >> not seem to be adjustable (only with SSL_SESSION_set_timeout(), >> which is documented to be a function for writing tests). >> >> I would rather say it is also a security issue in BoringSSL, >> though it's slightly better than in OpenSSL due to the 7 days >> limit. > > For the record: > > https://github.com/openssl/openssl/issues/19341 > > Note that with the automatic ticket key rotation this issue with > TLSv1.3 sessions becomes slightly worse in a typical configuration > (with ssl_session_cache in shared memory, but without > ssl_session_ticket_key explicitly set and/or ssl_session_tickets > switched off). Notably, configuration reload is no longer enough > to invalidate all tickets, since ticket keys are now preserved in > shared memory. > > For example, consider that a CRL file is updated with new > revocations, and nginx configuration is reloaded. New revocations > will be loaded by nginx and will appear to work with new sessions, > but can be easily bypassed by maintaining a previously established > TLSv1.3 session. Previously, it was possible to bypass > revocations in such scenario only if ticket keys were explicitly > set or if session tickets were switched off and sessions were > cached in shared memory. > > Given that we do not enable TLSv1.3 by default, we probably can > ignore this and wait for appropriate fixes from the affected > libraries. On the other hand, it might be a good idea to > introduce a workaround, especially if we want to enable TLSv1.3 by > default in the foreseeable future. > > The following patch seems to be simple enough and forces session > timeouts for TLSv1.3 sessions to be as configured, for both > OpenSSL and BoringSSL: Although somewhat tricky, I like the approach. Nitpicking comments inline. 
> > # HG changeset patch > # User Maxim Dounin > # Date 1665286021 -10800 > # Sun Oct 09 06:27:01 2022 +0300 > # Node ID c0ec4df7ccbb95b7f2c2842f40012082991bed52 > # Parent 37a4ac7ba1c5a003ab85f73d77767058af4eae30 > SSL: workaround for session timeout handling with TLSv1.3. > > OpenSSL with TLSv1.3 updates the session creation time on session > resumption and keeps the session timeout unmodified, making it possible > to maintain the session forever, bypassing client certificate expiration > and revocation. To make sure session timeouts are actually used, we > now update the session creation time and reduce the session timeout > accordingly. > > BoringSSL with TLSv1.3 ignores configured session timeouts and uses a > hardcoded timeout instead, 7 days. So we update session timeout to > the configured value as soon as a session is created. > > diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c > --- a/src/event/ngx_event_openssl.c > +++ b/src/event/ngx_event_openssl.c > @@ -1086,6 +1086,53 @@ ngx_ssl_info_callback(const ngx_ssl_conn > > #endif > > +#ifdef TLS1_3_VERSION > + > + if ((where & SSL_CB_ACCEPT_LOOP) == SSL_CB_ACCEPT_LOOP > + && SSL_version(ssl_conn) == TLS1_3_VERSION) > + { > + time_t now, time, timeout, conf_timeout; > + SSL_SESSION *sess; Not sure if this should use ngx_ssl_session_t. But, given the macro is intended for external consumption, SSL_SESSION should be fine. > + > + /* > + * OpenSSL with TLSv1.3 updates the session creation time on > + * session resumption and keeps the session timeout unmodified, > + * making it possible to maintain the session forever, bypassing > + * client certificate expiration and revocation. To make sure > + * session timeouts are actually used, we now update the session > + * creation time and reduce the session timeout accordingly. > + * > + * BoringSSL with TLSv1.3 ignores configured session timeouts > + * and uses a hardcoded timeout instead, 7 days. So we update > + * session timeout to the configured value as soon as a session > + * is created. > + */ > + > + c = ngx_ssl_get_connection((ngx_ssl_conn_t *) ssl_conn); > + sess = SSL_get0_session(ssl_conn); > + > + if (!c->ssl->session_timeout_set && sess) { > + c->ssl->session_timeout_set = 1; > + > + now = ngx_time(); > + time = SSL_SESSION_get_time(sess); > + timeout = SSL_SESSION_get_timeout(sess); > + conf_timeout = SSL_CTX_get_timeout(c->ssl->session_ctx); > + > + timeout = ngx_min(timeout, conf_timeout); > + > + if (now - time >= timeout) { > + SSL_SESSION_set1_id_context(sess, (unsigned char *) "", 0); Why not u_char? If this is to strictly follow the declaration, without typedef's, then I wonder why const is omitted in casting 2nd parameter. Anyway, this passes compilation on known CI platforms. For the record, BoringSSL moved long ago to uint8_t in its headers, so is incompatible with CHAR_BIT > 8 (though, POSIX mandates 8). I've been pondering if it's worth to call SSL_SESSION_set1_id_context() in order to (obfuscatedly) cancel the going to expire sessions. At least, OpenSSL handles this on theirself for sessions that've already expired ("now > time + timeout"), rejecting such session, so the only viable condition seems to be is when the session is going to be expired ("now == time + timeout"). 
For the record, invalidating session context (or even session removal) this way doesn't prevent from reusing the session in this connection, since the info callback is called too late, after the ticket has been successfully decrypted (with a check for timeout and session context) and session restored. Such session will be rejected only the next time. It may have sense though to call it still to handle the going to expire sessions that pass the session timeout check on server, see sess_timedout() in OpenSSL sources, it has slightly different condition to reject sessions. In this case the control goes to the "if (now - time >= timeout) {" condition, where we need to take the action, as otherwise OpenSSL will update the session creation time, such that the session will continue to be resumable for another "timeout" seconds. There should be no difference between invalidating context and setting intentionally old values for session time and timeout, both should work to make it stop from being resumable. So, condition could be collapsed to update session time and timeout values in both cases. But zero session timeout doesn't seem to pass i2d_SSL_SESSION checks in OpenSSL while constructing new session ticket, so invalidating session context looks like the only solution. > + > + } else { > + SSL_SESSION_set_time(sess, now); > + SSL_SESSION_set_timeout(sess, timeout - (now - time)); > + } > + } > + } > + > +#endif > + > if ((where & SSL_CB_ACCEPT_LOOP) == SSL_CB_ACCEPT_LOOP) { > c = ngx_ssl_get_connection((ngx_ssl_conn_t *) ssl_conn); > > diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h > --- a/src/event/ngx_event_openssl.h > +++ b/src/event/ngx_event_openssl.h > @@ -114,6 +114,7 @@ struct ngx_ssl_connection_s { > unsigned no_send_shutdown:1; > unsigned shutdown_without_free:1; > unsigned handshake_buffer_set:1; > + unsigned session_timeout_set:1; > unsigned try_early_data:1; > unsigned in_early:1; > unsigned in_ocsp:1; > Looks good. -- Sergey Kandaurov From pluknet at nginx.com Wed Oct 12 14:32:29 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 12 Oct 2022 18:32:29 +0400 Subject: [PATCH] Tests: http resolver with ipv4/ipv6 parameters In-Reply-To: <061aa601e6a33ed9e867.1665083003@DHNVMN3.localdomain> References: <595dee133b0a3681e955.1665038398@DHNVMN3.localdomain> <061aa601e6a33ed9e867.1665083003@DHNVMN3.localdomain> Message-ID: > On 6 Oct 2022, at 23:03, Eugene Grebenschikov via nginx-devel wrote: > > # HG changeset patch > # User Eugene Grebenschikov > # Date 1665082566 25200 > # Thu Oct 06 11:56:06 2022 -0700 > # Node ID 061aa601e6a33ed9e8671fbd3c2a150c27c1d9a6 > # Parent fac6ad94e062ee30356338c943843c4b34d5f532 > Tests: http resolver with ipv4/ipv6 parameters. > Looks good. -- Sergey Kandaurov From arut at nginx.com Wed Oct 12 14:36:58 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 12 Oct 2022 14:36:58 +0000 Subject: [nginx] PROXY protocol v2 TLV variables. Message-ID: details: https://hg.nginx.org/nginx/rev/cca4c8a715de branches: changeset: 8072:cca4c8a715de user: Roman Arutyunyan date: Wed Oct 12 16:58:16 2022 +0400 description: PROXY protocol v2 TLV variables. The variables have prefix $proxy_protocol_tlv_ and are accessible by name and by type. Examples are: $proxy_protocol_tlv_0x01, $proxy_protocol_tlv_alpn. 
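As an illustration only (this configuration is not part of the changeset): on a listener with the "proxy_protocol" parameter, the new variables can be used like any other nginx variables, either by the registered TLV name or by its raw numeric type. A minimal sketch:

    log_format pp '$remote_addr alpn=$proxy_protocol_tlv_alpn '
                  'uid=$proxy_protocol_tlv_0x05';

    server {
        listen 8080 proxy_protocol;
        access_log logs/pp.log pp;

        location / {
            return 204;
        }
    }

Here $proxy_protocol_tlv_alpn reads the ALPN TLV (type 0x01) by name, while $proxy_protocol_tlv_0x05 reads the unique_id TLV by type; a TLV that is absent from the PROXY protocol header leaves the variable in the "not found" state.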
diffstat: src/core/ngx_proxy_protocol.c | 186 +++++++++++++++++++++++++++++++++++++- src/core/ngx_proxy_protocol.h | 3 + src/http/ngx_http_variables.c | 39 +++++++ src/stream/ngx_stream_variables.c | 39 +++++++ 4 files changed, 265 insertions(+), 2 deletions(-) diffs (365 lines): diff -r 017fd847f4f7 -r cca4c8a715de src/core/ngx_proxy_protocol.c --- a/src/core/ngx_proxy_protocol.c Mon Oct 10 13:57:31 2022 +0400 +++ b/src/core/ngx_proxy_protocol.c Wed Oct 12 16:58:16 2022 +0400 @@ -15,6 +15,12 @@ #define ngx_proxy_protocol_parse_uint16(p) ((p)[0] << 8 | (p)[1]) +#define ngx_proxy_protocol_parse_uint32(p) \ + ( ((uint32_t) (p)[0] << 24) \ + + ( (p)[1] << 16) \ + + ( (p)[2] << 8) \ + + ( (p)[3]) ) + typedef struct { u_char signature[12]; @@ -40,12 +46,52 @@ typedef struct { } ngx_proxy_protocol_inet6_addrs_t; +typedef struct { + u_char type; + u_char len[2]; +} ngx_proxy_protocol_tlv_t; + + +typedef struct { + u_char client; + u_char verify[4]; +} ngx_proxy_protocol_tlv_ssl_t; + + +typedef struct { + ngx_str_t name; + ngx_uint_t type; +} ngx_proxy_protocol_tlv_entry_t; + + static u_char *ngx_proxy_protocol_read_addr(ngx_connection_t *c, u_char *p, u_char *last, ngx_str_t *addr); static u_char *ngx_proxy_protocol_read_port(u_char *p, u_char *last, in_port_t *port, u_char sep); static u_char *ngx_proxy_protocol_v2_read(ngx_connection_t *c, u_char *buf, u_char *last); +static ngx_int_t ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, + ngx_str_t *tlvs, ngx_uint_t type, ngx_str_t *value); + + +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_entries[] = { + { ngx_string("alpn"), 0x01 }, + { ngx_string("authority"), 0x02 }, + { ngx_string("unique_id"), 0x05 }, + { ngx_string("ssl"), 0x20 }, + { ngx_string("netns"), 0x30 }, + { ngx_null_string, 0x00 } +}; + + +static ngx_proxy_protocol_tlv_entry_t ngx_proxy_protocol_tlv_ssl_entries[] = { + { ngx_string("version"), 0x21 }, + { ngx_string("cn"), 0x22 }, + { ngx_string("cipher"), 0x23 }, + { ngx_string("sig_alg"), 0x24 }, + { ngx_string("key_alg"), 0x25 }, + { ngx_null_string, 0x00 } +}; u_char * @@ -418,11 +464,147 @@ ngx_proxy_protocol_v2_read(ngx_connectio &pp->src_addr, pp->src_port, &pp->dst_addr, pp->dst_port); if (buf < end) { - ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, - "PROXY protocol v2 %z bytes of tlv ignored", end - buf); + pp->tlvs.data = ngx_pnalloc(c->pool, end - buf); + if (pp->tlvs.data == NULL) { + return NULL; + } + + ngx_memcpy(pp->tlvs.data, buf, end - buf); + pp->tlvs.len = end - buf; } c->proxy_protocol = pp; return end; } + + +ngx_int_t +ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, + ngx_str_t *value) +{ + u_char *p; + size_t n; + uint32_t verify; + ngx_str_t ssl, *tlvs; + ngx_int_t rc, type; + ngx_proxy_protocol_tlv_ssl_t *tlv_ssl; + ngx_proxy_protocol_tlv_entry_t *te; + + if (c->proxy_protocol == NULL) { + return NGX_DECLINED; + } + + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, + "PROXY protocol v2 get tlv \"%V\"", name); + + te = ngx_proxy_protocol_tlv_entries; + tlvs = &c->proxy_protocol->tlvs; + + p = name->data; + n = name->len; + + if (n >= 4 && p[0] == 's' && p[1] == 's' && p[2] == 'l' && p[3] == '_') { + + rc = ngx_proxy_protocol_lookup_tlv(c, tlvs, 0x20, &ssl); + if (rc != NGX_OK) { + return rc; + } + + if (ssl.len < sizeof(ngx_proxy_protocol_tlv_ssl_t)) { + return NGX_ERROR; + } + + p += 4; + n -= 4; + + if (n == 6 && ngx_strncmp(p, "verify", 6) == 0) { + + tlv_ssl = (ngx_proxy_protocol_tlv_ssl_t *) ssl.data; + verify = ngx_proxy_protocol_parse_uint32(tlv_ssl->verify); + + 
value->data = ngx_pnalloc(c->pool, NGX_INT32_LEN); + if (value->data == NULL) { + return NGX_ERROR; + } + + value->len = ngx_sprintf(value->data, "%uD", verify) + - value->data; + return NGX_OK; + } + + ssl.data += sizeof(ngx_proxy_protocol_tlv_ssl_t); + ssl.len -= sizeof(ngx_proxy_protocol_tlv_ssl_t); + + te = ngx_proxy_protocol_tlv_ssl_entries; + tlvs = &ssl; + } + + if (n >= 2 && p[0] == '0' && p[1] == 'x') { + + type = ngx_hextoi(p + 2, n - 2); + if (type == NGX_ERROR) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, + "invalid PROXY protocol TLV \"%V\"", name); + return NGX_ERROR; + } + + return ngx_proxy_protocol_lookup_tlv(c, tlvs, type, value); + } + + for ( /* void */ ; te->type; te++) { + if (te->name.len == n && ngx_strncmp(te->name.data, p, n) == 0) { + return ngx_proxy_protocol_lookup_tlv(c, tlvs, te->type, value); + } + } + + ngx_log_error(NGX_LOG_ERR, c->log, 0, + "unknown PROXY protocol TLV \"%V\"", name); + + return NGX_DECLINED; +} + + +static ngx_int_t +ngx_proxy_protocol_lookup_tlv(ngx_connection_t *c, ngx_str_t *tlvs, + ngx_uint_t type, ngx_str_t *value) +{ + u_char *p; + size_t n, len; + ngx_proxy_protocol_tlv_t *tlv; + + ngx_log_debug1(NGX_LOG_DEBUG_CORE, c->log, 0, + "PROXY protocol v2 lookup tlv:%02xi", type); + + p = tlvs->data; + n = tlvs->len; + + while (n) { + if (n < sizeof(ngx_proxy_protocol_tlv_t)) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); + return NGX_ERROR; + } + + tlv = (ngx_proxy_protocol_tlv_t *) p; + len = ngx_proxy_protocol_parse_uint16(tlv->len); + + p += sizeof(ngx_proxy_protocol_tlv_t); + n -= sizeof(ngx_proxy_protocol_tlv_t); + + if (n < len) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, "broken PROXY protocol TLV"); + return NGX_ERROR; + } + + if (tlv->type == type) { + value->data = p; + value->len = len; + return NGX_OK; + } + + p += len; + n -= len; + } + + return NGX_DECLINED; +} diff -r 017fd847f4f7 -r cca4c8a715de src/core/ngx_proxy_protocol.h --- a/src/core/ngx_proxy_protocol.h Mon Oct 10 13:57:31 2022 +0400 +++ b/src/core/ngx_proxy_protocol.h Wed Oct 12 16:58:16 2022 +0400 @@ -21,6 +21,7 @@ struct ngx_proxy_protocol_s { ngx_str_t dst_addr; in_port_t src_port; in_port_t dst_port; + ngx_str_t tlvs; }; @@ -28,6 +29,8 @@ u_char *ngx_proxy_protocol_read(ngx_conn u_char *last); u_char *ngx_proxy_protocol_write(ngx_connection_t *c, u_char *buf, u_char *last); +ngx_int_t ngx_proxy_protocol_get_tlv(ngx_connection_t *c, ngx_str_t *name, + ngx_str_t *value); #endif /* _NGX_PROXY_PROTOCOL_H_INCLUDED_ */ diff -r 017fd847f4f7 -r cca4c8a715de src/http/ngx_http_variables.c --- a/src/http/ngx_http_variables.c Mon Oct 10 13:57:31 2022 +0400 +++ b/src/http/ngx_http_variables.c Wed Oct 12 16:58:16 2022 +0400 @@ -61,6 +61,8 @@ static ngx_int_t ngx_http_variable_proxy ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_proxy_protocol_port(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_server_addr(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_server_port(ngx_http_request_t *r, @@ -214,6 +216,10 @@ static ngx_http_variable_t ngx_http_cor ngx_http_variable_proxy_protocol_port, offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, + { ngx_string("proxy_protocol_tlv_"), NULL, + ngx_http_variable_proxy_protocol_tlv, + 0, NGX_HTTP_VAR_PREFIX, 0 }, + { ngx_string("server_addr"), NULL, 
ngx_http_variable_server_addr, 0, 0, 0 }, { ngx_string("server_port"), NULL, ngx_http_variable_server_port, 0, 0, 0 }, @@ -1387,6 +1393,39 @@ ngx_http_variable_proxy_protocol_port(ng static ngx_int_t +ngx_http_variable_proxy_protocol_tlv(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data) +{ + ngx_str_t *name = (ngx_str_t *) data; + + ngx_int_t rc; + ngx_str_t tlv, value; + + tlv.len = name->len - (sizeof("proxy_protocol_tlv_") - 1); + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; + + rc = ngx_proxy_protocol_get_tlv(r->connection, &tlv, &value); + + if (rc == NGX_ERROR) { + return NGX_ERROR; + } + + if (rc == NGX_DECLINED) { + v->not_found = 1; + return NGX_OK; + } + + v->len = value.len; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = value.data; + + return NGX_OK; +} + + +static ngx_int_t ngx_http_variable_server_addr(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data) { diff -r 017fd847f4f7 -r cca4c8a715de src/stream/ngx_stream_variables.c --- a/src/stream/ngx_stream_variables.c Mon Oct 10 13:57:31 2022 +0400 +++ b/src/stream/ngx_stream_variables.c Wed Oct 12 16:58:16 2022 +0400 @@ -23,6 +23,8 @@ static ngx_int_t ngx_stream_variable_pro ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_variable_proxy_protocol_port( ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_stream_variable_proxy_protocol_tlv( + ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_variable_server_addr(ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_variable_server_port(ngx_stream_session_t *s, @@ -79,6 +81,10 @@ static ngx_stream_variable_t ngx_stream ngx_stream_variable_proxy_protocol_port, offsetof(ngx_proxy_protocol_t, dst_port), 0, 0 }, + { ngx_string("proxy_protocol_tlv_"), NULL, + ngx_stream_variable_proxy_protocol_tlv, + 0, NGX_STREAM_VAR_PREFIX, 0 }, + { ngx_string("server_addr"), NULL, ngx_stream_variable_server_addr, 0, 0, 0 }, @@ -622,6 +628,39 @@ ngx_stream_variable_proxy_protocol_port( static ngx_int_t +ngx_stream_variable_proxy_protocol_tlv(ngx_stream_session_t *s, + ngx_stream_variable_value_t *v, uintptr_t data) +{ + ngx_str_t *name = (ngx_str_t *) data; + + ngx_int_t rc; + ngx_str_t tlv, value; + + tlv.len = name->len - (sizeof("proxy_protocol_tlv_") - 1); + tlv.data = name->data + sizeof("proxy_protocol_tlv_") - 1; + + rc = ngx_proxy_protocol_get_tlv(s->connection, &tlv, &value); + + if (rc == NGX_ERROR) { + return NGX_ERROR; + } + + if (rc == NGX_DECLINED) { + v->not_found = 1; + return NGX_OK; + } + + v->len = value.len; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = value.data; + + return NGX_OK; +} + + +static ngx_int_t ngx_stream_variable_server_addr(ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data) { From arut at nginx.com Wed Oct 12 14:37:01 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 12 Oct 2022 14:37:01 +0000 Subject: [nginx] Added type cast to ngx_proxy_protocol_parse_uint16(). Message-ID: details: https://hg.nginx.org/nginx/rev/aa663cc2a77d branches: changeset: 8073:aa663cc2a77d user: Roman Arutyunyan date: Tue Sep 27 11:31:16 2022 +0400 description: Added type cast to ngx_proxy_protocol_parse_uint16(). The cast is added to make ngx_proxy_protocol_parse_uint16() similar to ngx_proxy_protocol_parse_uint32(). 
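For context, a standalone sketch (not nginx code, and not part of the changeset) of what these macros compute: the PROXY protocol fields are big-endian byte sequences, and the casts keep the shifts in an unsigned type. In the 16-bit macro the cast is only about consistency (no overflow is possible there), while in the 32-bit macro it also prevents (p)[0] << 24 from overflowing a signed int when the first byte is 0x80 or larger; the commit message itself only mentions consistency. The parse_uint16()/parse_uint32() names below are local stand-ins for the nginx macros:

    #include <stdint.h>
    #include <stdio.h>

    #define parse_uint16(p)  ( ((uint16_t) (p)[0] << 8) + ( (p)[1]) )

    #define parse_uint32(p)                                                   \
        ( ((uint32_t) (p)[0] << 24)                                           \
          + (          (p)[1] << 16)                                          \
          + (          (p)[2] << 8)                                           \
          + (          (p)[3]) )

    int
    main(void)
    {
        unsigned char  len[2] = { 0x01, 0x02 };                /* TLV length */
        unsigned char  verify[4] = { 0x80, 0x00, 0x00, 0x01 }; /* pp2 verify */

        printf("%u\n", (unsigned) parse_uint16(len));      /* prints 258 */
        printf("%u\n", (unsigned) parse_uint32(verify));   /* prints 2147483649 */

        return 0;
    }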
diffstat: src/core/ngx_proxy_protocol.c | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diffs (14 lines): diff -r cca4c8a715de -r aa663cc2a77d src/core/ngx_proxy_protocol.c --- a/src/core/ngx_proxy_protocol.c Wed Oct 12 16:58:16 2022 +0400 +++ b/src/core/ngx_proxy_protocol.c Tue Sep 27 11:31:16 2022 +0400 @@ -13,7 +13,9 @@ #define NGX_PROXY_PROTOCOL_AF_INET6 2 -#define ngx_proxy_protocol_parse_uint16(p) ((p)[0] << 8 | (p)[1]) +#define ngx_proxy_protocol_parse_uint16(p) \ + ( ((uint16_t) (p)[0] << 8) \ + + ( (p)[1]) ) #define ngx_proxy_protocol_parse_uint32(p) \ ( ((uint32_t) (p)[0] << 24) \ From mdounin at mdounin.ru Wed Oct 12 18:05:53 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 12 Oct 2022 21:05:53 +0300 Subject: [PATCH 11 of 11] SSL: automatic rotation of session ticket keys In-Reply-To: <3F412EF0-B85C-4501-97B4-0492E41EED70@nginx.com> References: <5c26fe5f6ab0bf4c0d18.1661482878@vm-bsd.mdounin.ru> <384C1C36-43CA-490B-9559-DD77DE6346E6@nginx.com> <12290DCD-4B09-4DA0-8057-16172C5F5D28@nginx.com> <3F412EF0-B85C-4501-97B4-0492E41EED70@nginx.com> Message-ID: Hello! On Wed, Oct 12, 2022 at 05:57:07PM +0400, Sergey Kandaurov wrote: > > > On 9 Oct 2022, at 08:59, Maxim Dounin wrote: > > > > Hello! > > > > On Sat, Oct 01, 2022 at 11:58:20AM +0300, Maxim Dounin wrote: > > > >> On Thu, Sep 29, 2022 at 08:00:03PM +0400, Sergey Kandaurov wrote: > >> > >>>> On 28 Sep 2022, at 22:37, Maxim Dounin wrote: > >>>> > >>>> On Mon, Sep 26, 2022 at 02:17:18PM +0400, Sergey Kandaurov wrote: > >> > >> [...] > >> > >>>>> And by the way, while reviewing this patch, I noticed that > >>>>> OpenSSL doesn't allow a client to gracefully renew TLSv1.2 session > >>>>> when the client receives a new session ticket in resumed sessions. > >>>>> In practice, it is visible when client resumes a not yet expired > >>>>> session encrypted with not a fresh ticket key (after rotation), > >>>>> which results in sending a new session ticket. > >>>>> See ssl_update_cache() for the !s->hit condition. > >>>>> In the opposite, BoringSSL always allows to renew TLSv1.2 sessions. > >>>> > >>>> You mean on the client side? Yes, it looks like > >>>> ngx_ssl_new_client_session() won't be called for such a new > >>>> session ticket, and updated ticket will be never saved. This > >>>> might need to be worked around. > >>> > >>> Yes, I mean the client side. > >>> > >>>> > >>>> This should be safe with the key rotation logic introduced in this > >>>> patch though, given that the previous key is preserved till the > >>>> last ticket encrypted with it is expected to expire. > >>>> > >>>> One of the possible solutions might be to avoid re-encryption of > >>>> tickets with the new key, as the old key is anyway expected to be > >>>> available till the session expires. > >>> > >>> I don't think it's worth the effort. If I got you right, and > >>> as far as I understand, re-encrypting the ticket essentially > >>> means sending a fresh session (renewal). > >> > >> Well, not really. Re-encryption of a ticket does not imply > >> session renewal. Further, doing so implies security risk: if we > >> renew a session during re-encryption, this makes it possible to > >> create essentially infinite sessions. And, for example, if a > >> session used a client certificate, this effectively means that > >> this certificate will never expire and cannot be revoked. 
> >> > >> With TLSv1.2, OpenSSL follows this logic: session expiration time > >> is set when a session is created, and ticket re-encryption only > >> re-encrypts the session, but doesn't change session expiration. > >> As such, any certificate validation which happens during session > >> creation needs to be redone once session timeout expires - and > >> this makes it possible to implement certificate revocation. > >> > >> On the other hand, as implemented for TLSv1.3 at least in OpenSSL > >> it seems not to be the case. Every ticket sent to the client > >> actually creates a new session with _updated_ expiration time. As > >> such, it is possible to create a session authenticated with a client > >> certificate, and use this session indefinitely, even after the > >> certificate will expire and/or will be revoked. > >> > >> This seems to be a security issue in OpenSSL. > >> > >> BoringSSL seems to behave similarly with TLSv1.3, that is, it > >> updates session expiration time, making it possible to use an > >> authenticated session for much longer than session timeout > >> configured. But BoringSSL also has session's auth_timeout, which > >> prevents indefinite use of the session. The auth_timeout value is > >> hardcoded to 7 days (SSL_DEFAULT_SESSION_AUTH_TIMEOUT), and does > >> not seem to be adjustable (only with SSL_SESSION_set_timeout(), > >> which is documented to be a function for writing tests). > >> > >> I would rather say it is also a security issue in BoringSSL, > >> though it's slightly better than in OpenSSL due to the 7 days > >> limit. > > > > For the record: > > > > https://github.com/openssl/openssl/issues/19341 > > > > Note that with the automatic ticket key rotation this issue with > > TLSv1.3 sessions becomes slightly worse in a typical configuration > > (with ssl_session_cache in shared memory, but without > > ssl_session_ticket_key explicitly set and/or ssl_session_tickets > > switched off). Notably, configuration reload is no longer enough > > to invalidate all tickets, since ticket keys are now preserved in > > shared memory. > > > > For example, consider that a CRL file is updated with new > > revocations, and nginx configuration is reloaded. New revocations > > will be loaded by nginx and will appear to work with new sessions, > > but can be easily bypassed by maintaining a previously established > > TLSv1.3 session. Previously, it was possible to bypass > > revocations in such scenario only if ticket keys were explicitly > > set or if session tickets were switched off and sessions were > > cached in shared memory. > > > > Given that we do not enable TLSv1.3 by default, we probably can > > ignore this and wait for appropriate fixes from the affected > > libraries. On the other hand, it might be a good idea to > > introduce a workaround, especially if we want to enable TLSv1.3 by > > default in the foreseeable future. > > > > The following patch seems to be simple enough and forces session > > timeouts for TLSv1.3 sessions to be as configured, for both > > OpenSSL and BoringSSL: > > Although somewhat tricky, I like the approach. > Nitpicking comments inline. > > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1665286021 -10800 > > # Sun Oct 09 06:27:01 2022 +0300 > > # Node ID c0ec4df7ccbb95b7f2c2842f40012082991bed52 > > # Parent 37a4ac7ba1c5a003ab85f73d77767058af4eae30 > > SSL: workaround for session timeout handling with TLSv1.3. 
> > > > OpenSSL with TLSv1.3 updates the session creation time on session > > resumption and keeps the session timeout unmodified, making it possible > > to maintain the session forever, bypassing client certificate expiration > > and revocation. To make sure session timeouts are actually used, we > > now update the session creation time and reduce the session timeout > > accordingly. > > > > BoringSSL with TLSv1.3 ignores configured session timeouts and uses a > > hardcoded timeout instead, 7 days. So we update session timeout to > > the configured value as soon as a session is created. > > > > diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c > > --- a/src/event/ngx_event_openssl.c > > +++ b/src/event/ngx_event_openssl.c > > @@ -1086,6 +1086,53 @@ ngx_ssl_info_callback(const ngx_ssl_conn > > > > #endif > > > > +#ifdef TLS1_3_VERSION > > + > > + if ((where & SSL_CB_ACCEPT_LOOP) == SSL_CB_ACCEPT_LOOP > > + && SSL_version(ssl_conn) == TLS1_3_VERSION) > > + { > > + time_t now, time, timeout, conf_timeout; > > + SSL_SESSION *sess; > > Not sure if this should use ngx_ssl_session_t. But, given the macro > is intended for external consumption, SSL_SESSION should be fine. Yes, as far as I can see we are using SSL_SESSION and not ngx_ssl_session_t for tasks internal to ngx_event_openssl.c, so I've used it here. > > + > > + /* > > + * OpenSSL with TLSv1.3 updates the session creation time on > > + * session resumption and keeps the session timeout unmodified, > > + * making it possible to maintain the session forever, bypassing > > + * client certificate expiration and revocation. To make sure > > + * session timeouts are actually used, we now update the session > > + * creation time and reduce the session timeout accordingly. > > + * > > + * BoringSSL with TLSv1.3 ignores configured session timeouts > > + * and uses a hardcoded timeout instead, 7 days. So we update > > + * session timeout to the configured value as soon as a session > > + * is created. > > + */ > > + > > + c = ngx_ssl_get_connection((ngx_ssl_conn_t *) ssl_conn); > > + sess = SSL_get0_session(ssl_conn); > > + > > + if (!c->ssl->session_timeout_set && sess) { > > + c->ssl->session_timeout_set = 1; > > + > > + now = ngx_time(); > > + time = SSL_SESSION_get_time(sess); > > + timeout = SSL_SESSION_get_timeout(sess); > > + conf_timeout = SSL_CTX_get_timeout(c->ssl->session_ctx); > > + > > + timeout = ngx_min(timeout, conf_timeout); > > + > > + if (now - time >= timeout) { > > + SSL_SESSION_set1_id_context(sess, (unsigned char *) "", 0); > > Why not u_char? If this is to strictly follow the declaration, without > typedef's, then I wonder why const is omitted in casting 2nd parameter. > Anyway, this passes compilation on known CI platforms. > For the record, BoringSSL moved long ago to uint8_t in its headers, > so is incompatible with CHAR_BIT > 8 (though, POSIX mandates 8). Because u_char is an nginx type, while SSL_SESSION_set1_id_context() accepts "unsigned char" arguments. While u_char is equivalent, I preferred the type as accepted by the function. The "const" qualifier is not important here and can (and should) be omitted, as it only specifies that the function argument is not to be modified within the function. > I've been pondering if it's worth to call SSL_SESSION_set1_id_context() > in order to (obfuscatedly) cancel the going to expire sessions. 
> At least, OpenSSL handles this on theirself for sessions that've already > expired ("now > time + timeout"), rejecting such session, so the only > viable condition seems to be is when the session is going to be expired > ("now == time + timeout"). The problem the SSL_SESSION_set1_id_context() solves here is that it is not possible to ensure that the session will expire. Notably: - if you set timeout to 0, this effectively means 3 seconds, due to the default timeout used in d2i_SSL_SESSION(); - if you set session timeout to any negative value, it is set by OpenSSL itself to 0 in ssl_session_calculate_timeout(); - if you set session timeout to a positive value, this means that this session can be reused 1 second later, and hence prolonged for 1 more second (and therefore can be kept active indefinitely). As such, SSL_SESSION_set1_id_context() is called to ensure that in case of "now == time + timeout" the session modified so it won't be reusable anymore. The "<" part is mostly cosmetic. > For the record, invalidating session context (or even session removal) > this way doesn't prevent from reusing the session in this connection, > since the info callback is called too late, after the ticket has been > successfully decrypted (with a check for timeout and session context) > and session restored. Such session will be rejected only the next time. That's exactly the plan. > It may have sense though to call it still to handle the going to expire > sessions that pass the session timeout check on server, see sess_timedout() > in OpenSSL sources, it has slightly different condition to reject sessions. > In this case the control goes to the "if (now - time >= timeout) {" > condition, where we need to take the action, as otherwise OpenSSL > will update the session creation time, such that the session will > continue to be resumable for another "timeout" seconds. > There should be no difference between invalidating context and setting > intentionally old values for session time and timeout, both should > work to make it stop from being resumable. So, condition could be > collapsed to update session time and timeout values in both cases. > But zero session timeout doesn't seem to pass i2d_SSL_SESSION checks > in OpenSSL while constructing new session ticket, so invalidating > session context looks like the only solution. Yes, basically SSL_SESSION_set1_id_context() seems to be the only way to make sure it won't be possible to resume the session. > > + > > + } else { > > + SSL_SESSION_set_time(sess, now); > > + SSL_SESSION_set_timeout(sess, timeout - (now - time)); > > + } > > + } > > + } > > + > > +#endif > > + > > if ((where & SSL_CB_ACCEPT_LOOP) == SSL_CB_ACCEPT_LOOP) { > > c = ngx_ssl_get_connection((ngx_ssl_conn_t *) ssl_conn); > > > > diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h > > --- a/src/event/ngx_event_openssl.h > > +++ b/src/event/ngx_event_openssl.h > > @@ -114,6 +114,7 @@ struct ngx_ssl_connection_s { > > unsigned no_send_shutdown:1; > > unsigned shutdown_without_free:1; > > unsigned handshake_buffer_set:1; > > + unsigned session_timeout_set:1; > > unsigned try_early_data:1; > > unsigned in_early:1; > > unsigned in_ocsp:1; > > > > Looks good. The whole patch series pushed to http://mdounin.ru/hg/nginx. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Thu Oct 13 10:56:56 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:56:56 +0000 Subject: [nginx] SSL: disabled saving tickets to session cache. 
Message-ID: details: https://hg.nginx.org/nginx/rev/026ee23b6774 branches: changeset: 8074:026ee23b6774 user: Maxim Dounin date: Wed Oct 12 20:14:34 2022 +0300 description: SSL: disabled saving tickets to session cache. OpenSSL tries to save TLSv1.3 sessions into session cache even when using tickets for stateless session resumption, "because some applications just want to know about the creation of a session". To avoid trashing session cache with useless data, we do not save such sessions now. diffstat: src/event/ngx_event_openssl.c | 17 +++++++++++++++++ 1 files changed, 17 insertions(+), 0 deletions(-) diffs (27 lines): diff -r aa663cc2a77d -r 026ee23b6774 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Sep 27 11:31:16 2022 +0400 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:34 2022 +0300 @@ -3818,6 +3818,23 @@ ngx_ssl_new_session(ngx_ssl_conn_t *ssl_ ngx_ssl_session_cache_t *cache; u_char buf[NGX_SSL_MAX_SESSION_SIZE]; +#ifdef TLS1_3_VERSION + + /* + * OpenSSL tries to save TLSv1.3 sessions into session cache + * even when using tickets for stateless session resumption, + * "because some applications just want to know about the creation + * of a session"; do not cache such sessions + */ + + if (SSL_version(ssl_conn) == TLS1_3_VERSION + && (SSL_get_options(ssl_conn) & SSL_OP_NO_TICKET) == 0) + { + return 0; + } + +#endif + len = i2d_SSL_SESSION(sess, NULL); /* do not cache too big session */ From pluknet at nginx.com Thu Oct 13 10:56:59 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:56:59 +0000 Subject: [nginx] SSL: reduced logging of session cache failures (ticket #621). Message-ID: details: https://hg.nginx.org/nginx/rev/38c71f9b2293 branches: changeset: 8075:38c71f9b2293 user: Maxim Dounin date: Wed Oct 12 20:14:36 2022 +0300 description: SSL: reduced logging of session cache failures (ticket #621). Session cache allocations might fail as long as the new session is different in size from the one least recently used (and freed when the first allocation fails). In particular, it might not be possible to allocate space for sessions with client certificates, since they are noticeably bigger than normal sessions. To ensure such allocation failures won't clutter logs, logging level changed to "warn", and logging is now limited to at most one warning per second. 
diffstat: src/event/ngx_event_openssl.c | 9 +++++++-- src/event/ngx_event_openssl.h | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diffs (37 lines): diff -r 026ee23b6774 -r 38c71f9b2293 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:34 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:36 2022 +0300 @@ -3770,6 +3770,8 @@ ngx_ssl_session_cache_init(ngx_shm_zone_ ngx_queue_init(&cache->expire_queue); + cache->fail_time = 0; + len = sizeof(" in SSL session shared cache \"\"") + shm_zone->shm.name.len; shpool->log_ctx = ngx_slab_alloc(shpool, len); @@ -3953,8 +3955,11 @@ failed: ngx_shmtx_unlock(&shpool->mutex); - ngx_log_error(NGX_LOG_ALERT, c->log, 0, - "could not allocate new session%s", shpool->log_ctx); + if (cache->fail_time != ngx_time()) { + cache->fail_time = ngx_time(); + ngx_log_error(NGX_LOG_WARN, c->log, 0, + "could not allocate new session%s", shpool->log_ctx); + } return 0; } diff -r 026ee23b6774 -r 38c71f9b2293 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Wed Oct 12 20:14:34 2022 +0300 +++ b/src/event/ngx_event_openssl.h Wed Oct 12 20:14:36 2022 +0300 @@ -150,6 +150,7 @@ typedef struct { ngx_rbtree_t session_rbtree; ngx_rbtree_node_t sentinel; ngx_queue_t expire_queue; + time_t fail_time; } ngx_ssl_session_cache_t; From pluknet at nginx.com Thu Oct 13 10:57:02 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:02 +0000 Subject: [nginx] SSL: updated comment about session sizes. Message-ID: details: https://hg.nginx.org/nginx/rev/fa4b4f38da4a branches: changeset: 8076:fa4b4f38da4a user: Maxim Dounin date: Wed Oct 12 20:14:37 2022 +0300 description: SSL: updated comment about session sizes. Previous numbers are somewhat outdated, typical ASN1 representations of sessions are slightly bigger now. diffstat: src/event/ngx_event_openssl.c | 12 ++++++------ 1 files changed, 6 insertions(+), 6 deletions(-) diffs (26 lines): diff -r 38c71f9b2293 -r fa4b4f38da4a src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:36 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:37 2022 +0300 @@ -3790,16 +3790,16 @@ ngx_ssl_session_cache_init(ngx_shm_zone_ /* * The length of the session id is 16 bytes for SSLv2 sessions and - * between 1 and 32 bytes for SSLv3/TLSv1, typically 32 bytes. - * It seems that the typical length of the external ASN1 representation - * of a session is 118 or 119 bytes for SSLv3/TSLv1. + * between 1 and 32 bytes for SSLv3 and TLS, typically 32 bytes. + * Typical length of the external ASN1 representation of a session + * is about 150 bytes plus SNI server name. * - * Thus on 32-bit platforms we allocate separately an rbtree node, + * On 32-bit platforms we allocate separately an rbtree node, * a session id, and an ASN1 representation, they take accordingly - * 64, 32, and 128 bytes. + * 64, 32, and 256 bytes. * * On 64-bit platforms we allocate separately an rbtree node + session_id, - * and an ASN1 representation, they take accordingly 128 and 128 bytes. + * and an ASN1 representation, they take accordingly 128 and 256 bytes. * * OpenSSL's i2d_SSL_SESSION() and d2i_SSL_SESSION are slow, * so they are outside the code locked by shared pool mutex From pluknet at nginx.com Thu Oct 13 10:57:05 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:05 +0000 Subject: [nginx] SSL: explicit session id length checking. 
Message-ID: details: https://hg.nginx.org/nginx/rev/ec1fa010c3a5 branches: changeset: 8077:ec1fa010c3a5 user: Maxim Dounin date: Wed Oct 12 20:14:39 2022 +0300 description: SSL: explicit session id length checking. Session ids are not expected to be longer than 32 bytes, but this is theoretically possible with TLSv1.3, where session ids are essentially arbitrary and sent as session tickets. Since on 64-bit platforms we use fixed 32-byte buffer for session ids, added an explicit length check to make sure the buffer is large enough. diffstat: src/event/ngx_event_openssl.c | 10 ++++++++-- 1 files changed, 8 insertions(+), 2 deletions(-) diffs (27 lines): diff -r fa4b4f38da4a -r ec1fa010c3a5 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:37 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:39 2022 +0300 @@ -3848,6 +3848,14 @@ ngx_ssl_new_session(ngx_ssl_conn_t *ssl_ p = buf; i2d_SSL_SESSION(sess, &p); + session_id = (u_char *) SSL_SESSION_get_id(sess, &session_id_length); + + /* do not cache sessions with too long session id */ + + if (session_id_length > 32) { + return 0; + } + c = ngx_ssl_get_connection(ssl_conn); ssl_ctx = c->ssl->session_ctx; @@ -3892,8 +3900,6 @@ ngx_ssl_new_session(ngx_ssl_conn_t *ssl_ } } - session_id = (u_char *) SSL_SESSION_get_id(sess, &session_id_length); - #if (NGX_PTR_SIZE == 8) id = sess_id->sess_id; From pluknet at nginx.com Thu Oct 13 10:57:07 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:07 +0000 Subject: [nginx] SSL: single allocation in session cache on 32-bit platforms. Message-ID: details: https://hg.nginx.org/nginx/rev/5244d3b165ff branches: changeset: 8078:5244d3b165ff user: Maxim Dounin date: Wed Oct 12 20:14:40 2022 +0300 description: SSL: single allocation in session cache on 32-bit platforms. Given the present typical SSL session sizes, on 32-bit platforms it is now beneficial to store all data in a single allocation, since rbtree node + session id + ASN1 representation of a session takes 256 bytes of shared memory (36 + 32 + 150 = about 218 bytes plus SNI server name). Storing all data in a single allocation is beneficial for SNI names up to about 40 characters long and makes it possible to store about 4000 sessions in one megabyte (instead of about 3000 sessions now). This also slightly simplifies the code. diffstat: src/event/ngx_event_openssl.c | 71 ++++++++++++++---------------------------- src/event/ngx_event_openssl.h | 8 ++-- 2 files changed, 28 insertions(+), 51 deletions(-) diffs (181 lines): diff -r ec1fa010c3a5 -r 5244d3b165ff src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:39 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:40 2022 +0300 @@ -3794,9 +3794,9 @@ ngx_ssl_session_cache_init(ngx_shm_zone_ * Typical length of the external ASN1 representation of a session * is about 150 bytes plus SNI server name. * - * On 32-bit platforms we allocate separately an rbtree node, - * a session id, and an ASN1 representation, they take accordingly - * 64, 32, and 256 bytes. + * On 32-bit platforms we allocate an rbtree node, a session id, and + * an ASN1 representation in a single allocation, it typically takes + * 256 bytes. * * On 64-bit platforms we allocate separately an rbtree node + session_id, * and an ASN1 representation, they take accordingly 128 and 256 bytes. 
@@ -3809,7 +3809,8 @@ static int ngx_ssl_new_session(ngx_ssl_conn_t *ssl_conn, ngx_ssl_session_t *sess) { int len; - u_char *p, *id, *cached_sess, *session_id; + u_char *p, *session_id; + size_t n; uint32_t hash; SSL_CTX *ssl_ctx; unsigned int session_id_length; @@ -3869,23 +3870,13 @@ ngx_ssl_new_session(ngx_ssl_conn_t *ssl_ /* drop one or two expired sessions */ ngx_ssl_expire_sessions(cache, shpool, 1); - cached_sess = ngx_slab_alloc_locked(shpool, len); - - if (cached_sess == NULL) { - - /* drop the oldest non-expired session and try once more */ - - ngx_ssl_expire_sessions(cache, shpool, 0); - - cached_sess = ngx_slab_alloc_locked(shpool, len); - - if (cached_sess == NULL) { - sess_id = NULL; - goto failed; - } - } - - sess_id = ngx_slab_alloc_locked(shpool, sizeof(ngx_ssl_sess_id_t)); +#if (NGX_PTR_SIZE == 8) + n = sizeof(ngx_ssl_sess_id_t); +#else + n = offsetof(ngx_ssl_sess_id_t, session) + len; +#endif + + sess_id = ngx_slab_alloc_locked(shpool, n); if (sess_id == NULL) { @@ -3893,7 +3884,7 @@ ngx_ssl_new_session(ngx_ssl_conn_t *ssl_ ngx_ssl_expire_sessions(cache, shpool, 0); - sess_id = ngx_slab_alloc_locked(shpool, sizeof(ngx_ssl_sess_id_t)); + sess_id = ngx_slab_alloc_locked(shpool, n); if (sess_id == NULL) { goto failed; @@ -3902,30 +3893,25 @@ ngx_ssl_new_session(ngx_ssl_conn_t *ssl_ #if (NGX_PTR_SIZE == 8) - id = sess_id->sess_id; - -#else - - id = ngx_slab_alloc_locked(shpool, session_id_length); - - if (id == NULL) { + sess_id->session = ngx_slab_alloc_locked(shpool, len); + + if (sess_id->session == NULL) { /* drop the oldest non-expired session and try once more */ ngx_ssl_expire_sessions(cache, shpool, 0); - id = ngx_slab_alloc_locked(shpool, session_id_length); - - if (id == NULL) { + sess_id->session = ngx_slab_alloc_locked(shpool, len); + + if (sess_id->session == NULL) { goto failed; } } #endif - ngx_memcpy(cached_sess, buf, len); - - ngx_memcpy(id, session_id, session_id_length); + ngx_memcpy(sess_id->session, buf, len); + ngx_memcpy(sess_id->id, session_id, session_id_length); hash = ngx_crc32_short(session_id, session_id_length); @@ -3935,9 +3921,7 @@ ngx_ssl_new_session(ngx_ssl_conn_t *ssl_ sess_id->node.key = hash; sess_id->node.data = (u_char) session_id_length; - sess_id->id = id; sess_id->len = len; - sess_id->session = cached_sess; sess_id->expire = ngx_time() + SSL_CTX_get_timeout(ssl_ctx); @@ -3951,10 +3935,6 @@ ngx_ssl_new_session(ngx_ssl_conn_t *ssl_ failed: - if (cached_sess) { - ngx_slab_free_locked(shpool, cached_sess); - } - if (sess_id) { ngx_slab_free_locked(shpool, sess_id); } @@ -4051,9 +4031,8 @@ ngx_ssl_get_cached_session(ngx_ssl_conn_ ngx_rbtree_delete(&cache->session_rbtree, node); +#if (NGX_PTR_SIZE == 8) ngx_slab_free_locked(shpool, sess_id->session); -#if (NGX_PTR_SIZE == 4) - ngx_slab_free_locked(shpool, sess_id->id); #endif ngx_slab_free_locked(shpool, sess_id); @@ -4141,9 +4120,8 @@ ngx_ssl_remove_session(SSL_CTX *ssl, ngx ngx_rbtree_delete(&cache->session_rbtree, node); +#if (NGX_PTR_SIZE == 8) ngx_slab_free_locked(shpool, sess_id->session); -#if (NGX_PTR_SIZE == 4) - ngx_slab_free_locked(shpool, sess_id->id); #endif ngx_slab_free_locked(shpool, sess_id); @@ -4190,9 +4168,8 @@ ngx_ssl_expire_sessions(ngx_ssl_session_ ngx_rbtree_delete(&cache->session_rbtree, &sess_id->node); +#if (NGX_PTR_SIZE == 8) ngx_slab_free_locked(shpool, sess_id->session); -#if (NGX_PTR_SIZE == 4) - ngx_slab_free_locked(shpool, sess_id->id); #endif ngx_slab_free_locked(shpool, sess_id); } diff -r ec1fa010c3a5 -r 5244d3b165ff src/event/ngx_event_openssl.h --- 
a/src/event/ngx_event_openssl.h Wed Oct 12 20:14:39 2022 +0300 +++ b/src/event/ngx_event_openssl.h Wed Oct 12 20:14:40 2022 +0300 @@ -134,14 +134,14 @@ typedef struct ngx_ssl_sess_id_s ngx_ss struct ngx_ssl_sess_id_s { ngx_rbtree_node_t node; - u_char *id; size_t len; - u_char *session; ngx_queue_t queue; time_t expire; + u_char id[32]; #if (NGX_PTR_SIZE == 8) - void *stub; - u_char sess_id[32]; + u_char *session; +#else + u_char session[1]; #endif }; From pluknet at nginx.com Thu Oct 13 10:57:10 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:10 +0000 Subject: [nginx] SSL: explicit clearing of expired sessions. Message-ID: details: https://hg.nginx.org/nginx/rev/f106f4a68faf branches: changeset: 8079:f106f4a68faf user: Maxim Dounin date: Wed Oct 12 20:14:43 2022 +0300 description: SSL: explicit clearing of expired sessions. This reduces lifetime of session keying material in server's memory, and therefore can be beneficial from forward secrecy point of view. diffstat: src/event/ngx_event_openssl.c | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diffs (30 lines): diff -r 5244d3b165ff -r f106f4a68faf src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:40 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:43 2022 +0300 @@ -4031,6 +4031,8 @@ ngx_ssl_get_cached_session(ngx_ssl_conn_ ngx_rbtree_delete(&cache->session_rbtree, node); + ngx_explicit_memzero(sess_id->session, sess_id->len); + #if (NGX_PTR_SIZE == 8) ngx_slab_free_locked(shpool, sess_id->session); #endif @@ -4120,6 +4122,8 @@ ngx_ssl_remove_session(SSL_CTX *ssl, ngx ngx_rbtree_delete(&cache->session_rbtree, node); + ngx_explicit_memzero(sess_id->session, sess_id->len); + #if (NGX_PTR_SIZE == 8) ngx_slab_free_locked(shpool, sess_id->session); #endif @@ -4168,6 +4172,8 @@ ngx_ssl_expire_sessions(ngx_ssl_session_ ngx_rbtree_delete(&cache->session_rbtree, &sess_id->node); + ngx_explicit_memzero(sess_id->session, sess_id->len); + #if (NGX_PTR_SIZE == 8) ngx_slab_free_locked(shpool, sess_id->session); #endif From pluknet at nginx.com Thu Oct 13 10:57:13 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:13 +0000 Subject: [nginx] SSL: style. Message-ID: details: https://hg.nginx.org/nginx/rev/bf02161f291e branches: changeset: 8080:bf02161f291e user: Maxim Dounin date: Wed Oct 12 20:14:45 2022 +0300 description: SSL: style. Runtime OCSP functions separated from configuration ones. 
diffstat: src/event/ngx_event_openssl.h | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diffs (16 lines): diff -r f106f4a68faf -r bf02161f291e src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Wed Oct 12 20:14:43 2022 +0300 +++ b/src/event/ngx_event_openssl.h Wed Oct 12 20:14:45 2022 +0300 @@ -205,10 +205,12 @@ ngx_int_t ngx_ssl_ocsp(ngx_conf_t *cf, n ngx_uint_t depth, ngx_shm_zone_t *shm_zone); ngx_int_t ngx_ssl_ocsp_resolver(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_resolver_t *resolver, ngx_msec_t resolver_timeout); + ngx_int_t ngx_ssl_ocsp_validate(ngx_connection_t *c); ngx_int_t ngx_ssl_ocsp_get_status(ngx_connection_t *c, const char **s); void ngx_ssl_ocsp_cleanup(ngx_connection_t *c); ngx_int_t ngx_ssl_ocsp_cache_init(ngx_shm_zone_t *shm_zone, void *data); + ngx_array_t *ngx_ssl_read_password_file(ngx_conf_t *cf, ngx_str_t *file); ngx_array_t *ngx_ssl_preserve_passwords(ngx_conf_t *cf, ngx_array_t *passwords); From pluknet at nginx.com Thu Oct 13 10:57:16 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:16 +0000 Subject: [nginx] SSL: renamed session ticket key type. Message-ID: details: https://hg.nginx.org/nginx/rev/4eeb53743d25 branches: changeset: 8081:4eeb53743d25 user: Maxim Dounin date: Wed Oct 12 20:14:47 2022 +0300 description: SSL: renamed session ticket key type. The ngx_ssl_session_ticket_key_t is way too long, renamed to ngx_ssl_ticket_key_t to simplify writing code. diffstat: src/event/ngx_event_openssl.c | 40 ++++++++++++++++++++-------------------- src/event/ngx_event_openssl.h | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diffs (82 lines): diff -r bf02161f291e -r 4eeb53743d25 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:45 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:47 2022 +0300 @@ -4229,23 +4229,23 @@ ngx_ssl_session_rbtree_insert_value(ngx_ ngx_int_t ngx_ssl_session_ticket_keys(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_array_t *paths) { - u_char buf[80]; - size_t size; - ssize_t n; - ngx_str_t *path; - ngx_file_t file; - ngx_uint_t i; - ngx_array_t *keys; - ngx_file_info_t fi; - ngx_pool_cleanup_t *cln; - ngx_ssl_session_ticket_key_t *key; + u_char buf[80]; + size_t size; + ssize_t n; + ngx_str_t *path; + ngx_file_t file; + ngx_uint_t i; + ngx_array_t *keys; + ngx_file_info_t fi; + ngx_pool_cleanup_t *cln; + ngx_ssl_ticket_key_t *key; if (paths == NULL) { return NGX_OK; } keys = ngx_array_create(cf->pool, paths->nelts, - sizeof(ngx_ssl_session_ticket_key_t)); + sizeof(ngx_ssl_ticket_key_t)); if (keys == NULL) { return NGX_ERROR; } @@ -4372,14 +4372,14 @@ ngx_ssl_session_ticket_key_callback(ngx_ unsigned char *name, unsigned char *iv, EVP_CIPHER_CTX *ectx, HMAC_CTX *hctx, int enc) { - size_t size; - SSL_CTX *ssl_ctx; - ngx_uint_t i; - ngx_array_t *keys; - ngx_connection_t *c; - ngx_ssl_session_ticket_key_t *key; - const EVP_MD *digest; - const EVP_CIPHER *cipher; + size_t size; + SSL_CTX *ssl_ctx; + ngx_uint_t i; + ngx_array_t *keys; + ngx_connection_t *c; + ngx_ssl_ticket_key_t *key; + const EVP_MD *digest; + const EVP_CIPHER *cipher; c = ngx_ssl_get_connection(ssl_conn); ssl_ctx = c->ssl->session_ctx; @@ -4508,7 +4508,7 @@ ngx_ssl_session_ticket_keys_cleanup(void ngx_array_t *keys = data; ngx_explicit_memzero(keys->elts, - keys->nelts * sizeof(ngx_ssl_session_ticket_key_t)); + keys->nelts * sizeof(ngx_ssl_ticket_key_t)); } #else diff -r bf02161f291e -r 4eeb53743d25 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Wed Oct 12 20:14:45 
2022 +0300 +++ b/src/event/ngx_event_openssl.h Wed Oct 12 20:14:47 2022 +0300 @@ -161,7 +161,7 @@ typedef struct { u_char name[16]; u_char hmac_key[32]; u_char aes_key[32]; -} ngx_ssl_session_ticket_key_t; +} ngx_ssl_ticket_key_t; #endif From pluknet at nginx.com Thu Oct 13 10:57:19 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:19 +0000 Subject: [nginx] SSL: renamed session ticket key functions and data index. Message-ID: details: https://hg.nginx.org/nginx/rev/c71e113b57d8 branches: changeset: 8082:c71e113b57d8 user: Maxim Dounin date: Wed Oct 12 20:14:49 2022 +0300 description: SSL: renamed session ticket key functions and data index. Previously used names are way too long, renamed to simplify writing code. diffstat: src/event/ngx_event_openssl.c | 27 ++++++++++++--------------- src/event/ngx_event_openssl.h | 2 +- 2 files changed, 13 insertions(+), 16 deletions(-) diffs (105 lines): diff -r 4eeb53743d25 -r c71e113b57d8 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:47 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:49 2022 +0300 @@ -71,10 +71,10 @@ static void ngx_ssl_session_rbtree_inser ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel); #ifdef SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB -static int ngx_ssl_session_ticket_key_callback(ngx_ssl_conn_t *ssl_conn, +static int ngx_ssl_ticket_key_callback(ngx_ssl_conn_t *ssl_conn, unsigned char *name, unsigned char *iv, EVP_CIPHER_CTX *ectx, HMAC_CTX *hctx, int enc); -static void ngx_ssl_session_ticket_keys_cleanup(void *data); +static void ngx_ssl_ticket_keys_cleanup(void *data); #endif #ifndef X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT @@ -131,7 +131,7 @@ ngx_module_t ngx_openssl_module = { int ngx_ssl_connection_index; int ngx_ssl_server_conf_index; int ngx_ssl_session_cache_index; -int ngx_ssl_session_ticket_keys_index; +int ngx_ssl_ticket_keys_index; int ngx_ssl_ocsp_index; int ngx_ssl_certificate_index; int ngx_ssl_next_certificate_index; @@ -208,9 +208,9 @@ ngx_ssl_init(ngx_log_t *log) return NGX_ERROR; } - ngx_ssl_session_ticket_keys_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, - NULL, NULL); - if (ngx_ssl_session_ticket_keys_index == -1) { + ngx_ssl_ticket_keys_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, + NULL); + if (ngx_ssl_ticket_keys_index == -1) { ngx_ssl_error(NGX_LOG_ALERT, log, 0, "SSL_CTX_get_ex_new_index() failed"); return NGX_ERROR; @@ -4255,7 +4255,7 @@ ngx_ssl_session_ticket_keys(ngx_conf_t * return NGX_ERROR; } - cln->handler = ngx_ssl_session_ticket_keys_cleanup; + cln->handler = ngx_ssl_ticket_keys_cleanup; cln->data = keys; path = paths->elts; @@ -4333,16 +4333,13 @@ ngx_ssl_session_ticket_keys(ngx_conf_t * ngx_explicit_memzero(&buf, 80); } - if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_session_ticket_keys_index, keys) - == 0) - { + if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_ticket_keys_index, keys) == 0) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "SSL_CTX_set_ex_data() failed"); return NGX_ERROR; } - if (SSL_CTX_set_tlsext_ticket_key_cb(ssl->ctx, - ngx_ssl_session_ticket_key_callback) + if (SSL_CTX_set_tlsext_ticket_key_cb(ssl->ctx, ngx_ssl_ticket_key_callback) == 0) { ngx_log_error(NGX_LOG_WARN, cf->log, 0, @@ -4368,7 +4365,7 @@ failed: static int -ngx_ssl_session_ticket_key_callback(ngx_ssl_conn_t *ssl_conn, +ngx_ssl_ticket_key_callback(ngx_ssl_conn_t *ssl_conn, unsigned char *name, unsigned char *iv, EVP_CIPHER_CTX *ectx, HMAC_CTX *hctx, int enc) { @@ -4390,7 +4387,7 @@ ngx_ssl_session_ticket_key_callback(ngx_ digest = 
EVP_sha256(); #endif - keys = SSL_CTX_get_ex_data(ssl_ctx, ngx_ssl_session_ticket_keys_index); + keys = SSL_CTX_get_ex_data(ssl_ctx, ngx_ssl_ticket_keys_index); if (keys == NULL) { return -1; } @@ -4503,7 +4500,7 @@ ngx_ssl_session_ticket_key_callback(ngx_ static void -ngx_ssl_session_ticket_keys_cleanup(void *data) +ngx_ssl_ticket_keys_cleanup(void *data) { ngx_array_t *keys = data; diff -r 4eeb53743d25 -r c71e113b57d8 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Wed Oct 12 20:14:47 2022 +0300 +++ b/src/event/ngx_event_openssl.h Wed Oct 12 20:14:49 2022 +0300 @@ -317,7 +317,7 @@ void ngx_ssl_cleanup_ctx(void *data); extern int ngx_ssl_connection_index; extern int ngx_ssl_server_conf_index; extern int ngx_ssl_session_cache_index; -extern int ngx_ssl_session_ticket_keys_index; +extern int ngx_ssl_ticket_keys_index; extern int ngx_ssl_ocsp_index; extern int ngx_ssl_certificate_index; extern int ngx_ssl_next_certificate_index; From pluknet at nginx.com Thu Oct 13 10:57:22 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:22 +0000 Subject: [nginx] SSL: shorter debug messages about session tickets. Message-ID: details: https://hg.nginx.org/nginx/rev/e13a271bdd40 branches: changeset: 8083:e13a271bdd40 user: Maxim Dounin date: Wed Oct 12 20:14:51 2022 +0300 description: SSL: shorter debug messages about session tickets. diffstat: src/event/ngx_event_openssl.c | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diffs (30 lines): diff -r c71e113b57d8 -r e13a271bdd40 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:49 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:51 2022 +0300 @@ -4398,7 +4398,7 @@ ngx_ssl_ticket_key_callback(ngx_ssl_conn /* encrypt session ticket */ ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "ssl session ticket encrypt, key: \"%*xs\" (%s session)", + "ssl ticket encrypt, key: \"%*xs\" (%s session)", (size_t) 16, key[0].name, SSL_session_reused(ssl_conn) ? "reused" : "new"); @@ -4445,7 +4445,7 @@ ngx_ssl_ticket_key_callback(ngx_ssl_conn } ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, - "ssl session ticket decrypt, key: \"%*xs\" not found", + "ssl ticket decrypt, key: \"%*xs\" not found", (size_t) 16, name); return 0; @@ -4453,7 +4453,7 @@ ngx_ssl_ticket_key_callback(ngx_ssl_conn found: ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "ssl session ticket decrypt, key: \"%*xs\"%s", + "ssl ticket decrypt, key: \"%*xs\"%s", (size_t) 16, key[i].name, (i == 0) ? " (default)" : ""); if (key[i].size == 48) { From pluknet at nginx.com Thu Oct 13 10:57:27 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:27 +0000 Subject: [nginx] SSL: automatic rotation of session ticket keys. Message-ID: details: https://hg.nginx.org/nginx/rev/0f3d98e4bcc5 branches: changeset: 8084:0f3d98e4bcc5 user: Maxim Dounin date: Wed Oct 12 20:14:53 2022 +0300 description: SSL: automatic rotation of session ticket keys. As long as ssl_session_cache in shared memory is configured, session ticket keys are now automatically generated in shared memory, and rotated periodically. This can be beneficial from forward secrecy point of view, and also avoids increased CPU usage after configuration reloads. 
This also helps BoringSSL to properly resume sessions in configurations with multiple worker processes and no ssl_session_ticket_key directives, as BoringSSL tries to automatically rotate session ticket keys and does this independently in different worker processes, thus breaking session resumption between worker processes. diffstat: src/event/ngx_event_openssl.c | 167 +++++++++++++++++++++++++++++++++++++---- src/event/ngx_event_openssl.h | 23 ++--- 2 files changed, 160 insertions(+), 30 deletions(-) diffs (269 lines): diff -r e13a271bdd40 -r 0f3d98e4bcc5 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:51 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:53 2022 +0300 @@ -74,6 +74,7 @@ static void ngx_ssl_session_rbtree_inser static int ngx_ssl_ticket_key_callback(ngx_ssl_conn_t *ssl_conn, unsigned char *name, unsigned char *iv, EVP_CIPHER_CTX *ectx, HMAC_CTX *hctx, int enc); +static ngx_int_t ngx_ssl_rotate_ticket_keys(SSL_CTX *ssl_ctx, ngx_log_t *log); static void ngx_ssl_ticket_keys_cleanup(void *data); #endif @@ -3770,6 +3771,9 @@ ngx_ssl_session_cache_init(ngx_shm_zone_ ngx_queue_init(&cache->expire_queue); + cache->ticket_keys[0].expire = 0; + cache->ticket_keys[1].expire = 0; + cache->fail_time = 0; len = sizeof(" in SSL session shared cache \"\"") + shm_zone->shm.name.len; @@ -4240,11 +4244,13 @@ ngx_ssl_session_ticket_keys(ngx_conf_t * ngx_pool_cleanup_t *cln; ngx_ssl_ticket_key_t *key; - if (paths == NULL) { + if (paths == NULL + && SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_session_cache_index) == NULL) + { return NGX_OK; } - keys = ngx_array_create(cf->pool, paths->nelts, + keys = ngx_array_create(cf->pool, paths ? paths->nelts : 2, sizeof(ngx_ssl_ticket_key_t)); if (keys == NULL) { return NGX_ERROR; @@ -4258,6 +4264,34 @@ ngx_ssl_session_ticket_keys(ngx_conf_t * cln->handler = ngx_ssl_ticket_keys_cleanup; cln->data = keys; + if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_ticket_keys_index, keys) == 0) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_set_ex_data() failed"); + return NGX_ERROR; + } + + if (SSL_CTX_set_tlsext_ticket_key_cb(ssl->ctx, ngx_ssl_ticket_key_callback) + == 0) + { + ngx_log_error(NGX_LOG_WARN, cf->log, 0, + "nginx was built with Session Tickets support, however, " + "now it is linked dynamically to an OpenSSL library " + "which has no tlsext support, therefore Session Tickets " + "are not available"); + return NGX_OK; + } + + if (paths == NULL) { + + /* placeholder for keys in shared memory */ + + key = ngx_array_push_n(keys, 2); + key[0].shared = 1; + key[1].shared = 1; + + return NGX_OK; + } + path = paths->elts; for (i = 0; i < paths->nelts; i++) { @@ -4312,6 +4346,8 @@ ngx_ssl_session_ticket_keys(ngx_conf_t * goto failed; } + key->shared = 0; + if (size == 48) { key->size = 48; ngx_memcpy(key->name, buf, 16); @@ -4333,22 +4369,6 @@ ngx_ssl_session_ticket_keys(ngx_conf_t * ngx_explicit_memzero(&buf, 80); } - if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_ticket_keys_index, keys) == 0) { - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, - "SSL_CTX_set_ex_data() failed"); - return NGX_ERROR; - } - - if (SSL_CTX_set_tlsext_ticket_key_cb(ssl->ctx, ngx_ssl_ticket_key_callback) - == 0) - { - ngx_log_error(NGX_LOG_WARN, cf->log, 0, - "nginx was built with Session Tickets support, however, " - "now it is linked dynamically to an OpenSSL library " - "which has no tlsext support, therefore Session Tickets " - "are not available"); - } - return NGX_OK; failed: @@ -4381,6 +4401,10 @@ ngx_ssl_ticket_key_callback(ngx_ssl_conn c 
= ngx_ssl_get_connection(ssl_conn); ssl_ctx = c->ssl->session_ctx; + if (ngx_ssl_rotate_ticket_keys(ssl_ctx, c->log) != NGX_OK) { + return -1; + } + #ifdef OPENSSL_NO_SHA256 digest = EVP_sha1(); #else @@ -4499,6 +4523,113 @@ ngx_ssl_ticket_key_callback(ngx_ssl_conn } +static ngx_int_t +ngx_ssl_rotate_ticket_keys(SSL_CTX *ssl_ctx, ngx_log_t *log) +{ + time_t now, expire; + ngx_array_t *keys; + ngx_shm_zone_t *shm_zone; + ngx_slab_pool_t *shpool; + ngx_ssl_ticket_key_t *key; + ngx_ssl_session_cache_t *cache; + u_char buf[80]; + + keys = SSL_CTX_get_ex_data(ssl_ctx, ngx_ssl_ticket_keys_index); + if (keys == NULL) { + return NGX_OK; + } + + key = keys->elts; + + if (!key[0].shared) { + return NGX_OK; + } + + now = ngx_time(); + expire = now + SSL_CTX_get_timeout(ssl_ctx); + + shm_zone = SSL_CTX_get_ex_data(ssl_ctx, ngx_ssl_session_cache_index); + + cache = shm_zone->data; + shpool = (ngx_slab_pool_t *) shm_zone->shm.addr; + + ngx_shmtx_lock(&shpool->mutex); + + key = cache->ticket_keys; + + if (key[0].expire == 0) { + + /* initialize the current key */ + + if (RAND_bytes(buf, 80) != 1) { + ngx_ssl_error(NGX_LOG_ALERT, log, 0, "RAND_bytes() failed"); + ngx_shmtx_unlock(&shpool->mutex); + return NGX_ERROR; + } + + key->shared = 1; + key->expire = expire; + key->size = 80; + ngx_memcpy(key->name, buf, 16); + ngx_memcpy(key->hmac_key, buf + 16, 32); + ngx_memcpy(key->aes_key, buf + 48, 32); + + ngx_explicit_memzero(&buf, 80); + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, log, 0, + "ssl ticket key: \"%*xs\"", + (size_t) 16, key->name); + } + + if (key[1].expire < now) { + + /* + * if the previous key is no longer needed (or not initialized), + * replace it with the current key and generate new current key + */ + + key[1] = key[0]; + + if (RAND_bytes(buf, 80) != 1) { + ngx_ssl_error(NGX_LOG_ALERT, log, 0, "RAND_bytes() failed"); + ngx_shmtx_unlock(&shpool->mutex); + return NGX_ERROR; + } + + key->shared = 1; + key->expire = expire; + key->size = 80; + ngx_memcpy(key->name, buf, 16); + ngx_memcpy(key->hmac_key, buf + 16, 32); + ngx_memcpy(key->aes_key, buf + 48, 32); + + ngx_explicit_memzero(&buf, 80); + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, log, 0, + "ssl ticket key: \"%*xs\"", + (size_t) 16, key->name); + } + + /* + * update expiration of the current key: it is going to be needed + * at least till the session being created expires + */ + + if (expire > key[0].expire) { + key[0].expire = expire; + } + + /* sync keys to the worker process memory */ + + ngx_memcpy(keys->elts, cache->ticket_keys, + 2 * sizeof(ngx_ssl_ticket_key_t)); + + ngx_shmtx_unlock(&shpool->mutex); + + return NGX_OK; +} + + static void ngx_ssl_ticket_keys_cleanup(void *data) { diff -r e13a271bdd40 -r 0f3d98e4bcc5 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Wed Oct 12 20:14:51 2022 +0300 +++ b/src/event/ngx_event_openssl.h Wed Oct 12 20:14:53 2022 +0300 @@ -147,25 +147,24 @@ struct ngx_ssl_sess_id_s { typedef struct { + u_char name[16]; + u_char hmac_key[32]; + u_char aes_key[32]; + time_t expire; + unsigned size:8; + unsigned shared:1; +} ngx_ssl_ticket_key_t; + + +typedef struct { ngx_rbtree_t session_rbtree; ngx_rbtree_node_t sentinel; ngx_queue_t expire_queue; + ngx_ssl_ticket_key_t ticket_keys[2]; time_t fail_time; } ngx_ssl_session_cache_t; -#ifdef SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB - -typedef struct { - size_t size; - u_char name[16]; - u_char hmac_key[32]; - u_char aes_key[32]; -} ngx_ssl_ticket_key_t; - -#endif - - #define NGX_SSL_SSLv2 0x0002 #define NGX_SSL_SSLv3 0x0004 #define NGX_SSL_TLSv1 0x0008 From 
pluknet at nginx.com Thu Oct 13 10:57:30 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:30 +0000 Subject: [nginx] SSL: optimized rotation of session ticket keys. Message-ID: details: https://hg.nginx.org/nginx/rev/043006e5a0b1 branches: changeset: 8085:043006e5a0b1 user: Maxim Dounin date: Wed Oct 12 20:14:55 2022 +0300 description: SSL: optimized rotation of session ticket keys. Instead of syncing keys with shared memory on each ticket operation, the code now does this only when the worker is going to change expiration of the current key, or going to switch to a new key: that is, usually at most once per second. To do so without races, the code maintains 3 keys: current, previous, and next. If a worker will switch to the next key earlier, other workers will still be able to decrypt new tickets, since they will be encrypted with the next key. diffstat: src/event/ngx_event_openssl.c | 64 ++++++++++++++++++++++++++++++------------ src/event/ngx_event_openssl.h | 2 +- 2 files changed, 47 insertions(+), 19 deletions(-) diffs (159 lines): diff -r 0f3d98e4bcc5 -r 043006e5a0b1 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:53 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:55 2022 +0300 @@ -3773,6 +3773,7 @@ ngx_ssl_session_cache_init(ngx_shm_zone_ cache->ticket_keys[0].expire = 0; cache->ticket_keys[1].expire = 0; + cache->ticket_keys[2].expire = 0; cache->fail_time = 0; @@ -4250,7 +4251,7 @@ ngx_ssl_session_ticket_keys(ngx_conf_t * return NGX_OK; } - keys = ngx_array_create(cf->pool, paths ? paths->nelts : 2, + keys = ngx_array_create(cf->pool, paths ? paths->nelts : 3, sizeof(ngx_ssl_ticket_key_t)); if (keys == NULL) { return NGX_ERROR; @@ -4285,9 +4286,13 @@ ngx_ssl_session_ticket_keys(ngx_conf_t * /* placeholder for keys in shared memory */ - key = ngx_array_push_n(keys, 2); + key = ngx_array_push_n(keys, 3); key[0].shared = 1; + key[0].expire = 0; key[1].shared = 1; + key[1].expire = 0; + key[2].shared = 1; + key[2].expire = 0; return NGX_OK; } @@ -4347,6 +4352,7 @@ ngx_ssl_session_ticket_keys(ngx_conf_t * } key->shared = 0; + key->expire = 1; if (size == 48) { key->size = 48; @@ -4514,7 +4520,7 @@ ngx_ssl_ticket_key_callback(ngx_ssl_conn /* renew if non-default key */ - if (i != 0) { + if (i != 0 && key[i].expire) { return 2; } @@ -4545,9 +4551,21 @@ ngx_ssl_rotate_ticket_keys(SSL_CTX *ssl_ return NGX_OK; } + /* + * if we don't need to update expiration of the current key + * and the previous key is still needed, don't sync with shared + * memory to save some work; in the worst case other worker process + * will switch to the next key, but this process will still be able + * to decrypt tickets encrypted with it + */ + now = ngx_time(); expire = now + SSL_CTX_get_timeout(ssl_ctx); + if (key[0].expire >= expire && key[1].expire >= now) { + return NGX_OK; + } + shm_zone = SSL_CTX_get_ex_data(ssl_ctx, ngx_ssl_session_cache_index); cache = shm_zone->data; @@ -4567,28 +4585,38 @@ ngx_ssl_rotate_ticket_keys(SSL_CTX *ssl_ return NGX_ERROR; } - key->shared = 1; - key->expire = expire; - key->size = 80; - ngx_memcpy(key->name, buf, 16); - ngx_memcpy(key->hmac_key, buf + 16, 32); - ngx_memcpy(key->aes_key, buf + 48, 32); + key[0].shared = 1; + key[0].expire = expire; + key[0].size = 80; + ngx_memcpy(key[0].name, buf, 16); + ngx_memcpy(key[0].hmac_key, buf + 16, 32); + ngx_memcpy(key[0].aes_key, buf + 48, 32); ngx_explicit_memzero(&buf, 80); ngx_log_debug2(NGX_LOG_DEBUG_EVENT, log, 0, "ssl ticket key: \"%*xs\"", - 
(size_t) 16, key->name); + (size_t) 16, key[0].name); + + /* + * copy the current key to the next key, as initialization of + * the previous key will replace the current key with the next + * key + */ + + key[2] = key[0]; } if (key[1].expire < now) { /* * if the previous key is no longer needed (or not initialized), - * replace it with the current key and generate new current key + * replace it with the current key, replace the current key with + * the next key, and generate new next key */ key[1] = key[0]; + key[0] = key[2]; if (RAND_bytes(buf, 80) != 1) { ngx_ssl_error(NGX_LOG_ALERT, log, 0, "RAND_bytes() failed"); @@ -4596,18 +4624,18 @@ ngx_ssl_rotate_ticket_keys(SSL_CTX *ssl_ return NGX_ERROR; } - key->shared = 1; - key->expire = expire; - key->size = 80; - ngx_memcpy(key->name, buf, 16); - ngx_memcpy(key->hmac_key, buf + 16, 32); - ngx_memcpy(key->aes_key, buf + 48, 32); + key[2].shared = 1; + key[2].expire = 0; + key[2].size = 80; + ngx_memcpy(key[2].name, buf, 16); + ngx_memcpy(key[2].hmac_key, buf + 16, 32); + ngx_memcpy(key[2].aes_key, buf + 48, 32); ngx_explicit_memzero(&buf, 80); ngx_log_debug2(NGX_LOG_DEBUG_EVENT, log, 0, "ssl ticket key: \"%*xs\"", - (size_t) 16, key->name); + (size_t) 16, key[2].name); } /* diff -r 0f3d98e4bcc5 -r 043006e5a0b1 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Wed Oct 12 20:14:53 2022 +0300 +++ b/src/event/ngx_event_openssl.h Wed Oct 12 20:14:55 2022 +0300 @@ -160,7 +160,7 @@ typedef struct { ngx_rbtree_t session_rbtree; ngx_rbtree_node_t sentinel; ngx_queue_t expire_queue; - ngx_ssl_ticket_key_t ticket_keys[2]; + ngx_ssl_ticket_key_t ticket_keys[3]; time_t fail_time; } ngx_ssl_session_cache_t; From pluknet at nginx.com Thu Oct 13 10:57:33 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 10:57:33 +0000 Subject: [nginx] SSL: workaround for session timeout handling with TLSv1.3. Message-ID: details: https://hg.nginx.org/nginx/rev/496241338da5 branches: changeset: 8086:496241338da5 user: Maxim Dounin date: Wed Oct 12 20:14:57 2022 +0300 description: SSL: workaround for session timeout handling with TLSv1.3. OpenSSL with TLSv1.3 updates the session creation time on session resumption and keeps the session timeout unmodified, making it possible to maintain the session forever, bypassing client certificate expiration and revocation. To make sure session timeouts are actually used, we now update the session creation time and reduce the session timeout accordingly. BoringSSL with TLSv1.3 ignores configured session timeouts and uses a hardcoded timeout instead, 7 days. So we update session timeout to the configured value as soon as a session is created. 
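In effect, the session's timeout is clamped to the configured value and reduced by however much of its lifetime has already elapsed, so resumption can no longer extend the overall lifetime. A simplified sketch of the arithmetic (hypothetical variable names, not the committed code; the actual change is in the diff below):

    timeout = ngx_min(SSL_SESSION_get_timeout(sess), conf_timeout);
                                 /* clamp BoringSSL's hardcoded 7-day timeout */
    elapsed = now - SSL_SESSION_get_time(sess);

    if (elapsed >= timeout) {
        /* already expired: the session is made non-resumable */

    } else {
        SSL_SESSION_set_time(sess, now);                   /* re-anchor creation time */
        SSL_SESSION_set_timeout(sess, timeout - elapsed);  /* keep the original deadline */
    }

For example, with "ssl_session_timeout 10m", a session created 7 minutes ago is left with a 3-minute timeout after the adjustment, so it still expires 10 minutes after it was originally created.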
diffstat: src/event/ngx_event_openssl.c | 47 +++++++++++++++++++++++++++++++++++++++++++ src/event/ngx_event_openssl.h | 1 + 2 files changed, 48 insertions(+), 0 deletions(-) diffs (68 lines): diff -r 043006e5a0b1 -r 496241338da5 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:55 2022 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 12 20:14:57 2022 +0300 @@ -1086,6 +1086,53 @@ ngx_ssl_info_callback(const ngx_ssl_conn #endif +#ifdef TLS1_3_VERSION + + if ((where & SSL_CB_ACCEPT_LOOP) == SSL_CB_ACCEPT_LOOP + && SSL_version(ssl_conn) == TLS1_3_VERSION) + { + time_t now, time, timeout, conf_timeout; + SSL_SESSION *sess; + + /* + * OpenSSL with TLSv1.3 updates the session creation time on + * session resumption and keeps the session timeout unmodified, + * making it possible to maintain the session forever, bypassing + * client certificate expiration and revocation. To make sure + * session timeouts are actually used, we now update the session + * creation time and reduce the session timeout accordingly. + * + * BoringSSL with TLSv1.3 ignores configured session timeouts + * and uses a hardcoded timeout instead, 7 days. So we update + * session timeout to the configured value as soon as a session + * is created. + */ + + c = ngx_ssl_get_connection((ngx_ssl_conn_t *) ssl_conn); + sess = SSL_get0_session(ssl_conn); + + if (!c->ssl->session_timeout_set && sess) { + c->ssl->session_timeout_set = 1; + + now = ngx_time(); + time = SSL_SESSION_get_time(sess); + timeout = SSL_SESSION_get_timeout(sess); + conf_timeout = SSL_CTX_get_timeout(c->ssl->session_ctx); + + timeout = ngx_min(timeout, conf_timeout); + + if (now - time >= timeout) { + SSL_SESSION_set1_id_context(sess, (unsigned char *) "", 0); + + } else { + SSL_SESSION_set_time(sess, now); + SSL_SESSION_set_timeout(sess, timeout - (now - time)); + } + } + } + +#endif + if ((where & SSL_CB_ACCEPT_LOOP) == SSL_CB_ACCEPT_LOOP) { c = ngx_ssl_get_connection((ngx_ssl_conn_t *) ssl_conn); diff -r 043006e5a0b1 -r 496241338da5 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Wed Oct 12 20:14:55 2022 +0300 +++ b/src/event/ngx_event_openssl.h Wed Oct 12 20:14:57 2022 +0300 @@ -114,6 +114,7 @@ struct ngx_ssl_connection_s { unsigned no_send_shutdown:1; unsigned shutdown_without_free:1; unsigned handshake_buffer_set:1; + unsigned session_timeout_set:1; unsigned try_early_data:1; unsigned in_early:1; unsigned in_ocsp:1; From pluknet at nginx.com Thu Oct 13 12:22:42 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 13 Oct 2022 12:22:42 +0000 Subject: [nginx] SSL: removed cast not needed after 5ffd76a9ccf3. Message-ID: details: https://hg.nginx.org/nginx/rev/81b4326daac7 branches: changeset: 8087:81b4326daac7 user: Sergey Kandaurov date: Thu Oct 13 16:18:56 2022 +0400 description: SSL: removed cast not needed after 5ffd76a9ccf3. 
diffstat: src/event/ngx_event_openssl.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 496241338da5 -r 81b4326daac7 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Oct 12 20:14:57 2022 +0300 +++ b/src/event/ngx_event_openssl.c Thu Oct 13 16:18:56 2022 +0400 @@ -3894,7 +3894,7 @@ ngx_ssl_new_session(ngx_ssl_conn_t *ssl_ /* do not cache too big session */ - if (len > (int) NGX_SSL_MAX_SESSION_SIZE) { + if (len > NGX_SSL_MAX_SESSION_SIZE) { return 0; } From pluknet at nginx.com Thu Oct 13 13:02:42 2022 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Thu, 13 Oct 2022 17:02:42 +0400 Subject: [PATCH] SSL: improved validation of ssl_session_cache and ssl_ocsp_cache Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1665665717 -14400 # Thu Oct 13 16:55:17 2022 +0400 # Node ID b2eba2994ddcbf9084075f9ae32c3332a628ca7a # Parent 81b4326daac70d6de70abbc3fe36d4f6e3da54a2 SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. Now it properly detects invalid shared zone configuration with omitted size. Previously it used to read outside of the buffer boundary. Found with AddressSanitizer. diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c --- a/src/http/modules/ngx_http_ssl_module.c +++ b/src/http/modules/ngx_http_ssl_module.c @@ -1039,10 +1039,10 @@ ngx_http_ssl_session_cache(ngx_conf_t *c { ngx_http_ssl_srv_conf_t *sscf = conf; - size_t len; + u_char *p; ngx_str_t *value, name, size; ngx_int_t n; - ngx_uint_t i, j; + ngx_uint_t i; value = cf->args->elts; @@ -1083,25 +1083,20 @@ ngx_http_ssl_session_cache(ngx_conf_t *c && ngx_strncmp(value[i].data, "shared:", sizeof("shared:") - 1) == 0) { - len = 0; + name.data = value[i].data + sizeof("shared:") - 1; + + p = (u_char *) ngx_strchr(name.data, ':'); - for (j = sizeof("shared:") - 1; j < value[i].len; j++) { - if (value[i].data[j] == ':') { - break; - } - - len++; + if (p == NULL) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid zone size \"%V\"", &value[i]); + return NGX_CONF_ERROR; } - if (len == 0) { - goto invalid; - } + name.len = p - name.data; - name.len = len; - name.data = value[i].data + sizeof("shared:") - 1; - - size.len = value[i].len - j - 1; - size.data = name.data + len + 1; + size.data = p + 1; + size.len = value[i].data + value[i].len - size.data; n = ngx_parse_size(&size); @@ -1151,10 +1146,9 @@ ngx_http_ssl_ocsp_cache(ngx_conf_t *cf, { ngx_http_ssl_srv_conf_t *sscf = conf; - size_t len; - ngx_int_t n; - ngx_str_t *value, name, size; - ngx_uint_t j; + u_char *p; + ngx_int_t n; + ngx_str_t *value, name, size; if (sscf->ocsp_cache_zone != NGX_CONF_UNSET_PTR) { return "is duplicate"; @@ -1173,25 +1167,20 @@ ngx_http_ssl_ocsp_cache(ngx_conf_t *cf, goto invalid; } - len = 0; + name.data = value[1].data + sizeof("shared:") - 1; + + p = (u_char *) ngx_strchr(name.data, ':'); - for (j = sizeof("shared:") - 1; j < value[1].len; j++) { - if (value[1].data[j] == ':') { - break; - } - - len++; + if (p == NULL) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid zone size \"%V\"", &value[1]); + return NGX_CONF_ERROR; } - if (len == 0) { - goto invalid; - } + name.len = p - name.data; - name.len = len; - name.data = value[1].data + sizeof("shared:") - 1; - - size.len = value[1].len - j - 1; - size.data = name.data + len + 1; + size.data = p + 1; + size.len = value[1].data + value[1].len - size.data; n = ngx_parse_size(&size); diff --git a/src/mail/ngx_mail_ssl_module.c 
b/src/mail/ngx_mail_ssl_module.c --- a/src/mail/ngx_mail_ssl_module.c +++ b/src/mail/ngx_mail_ssl_module.c @@ -628,10 +628,10 @@ ngx_mail_ssl_session_cache(ngx_conf_t *c { ngx_mail_ssl_conf_t *scf = conf; - size_t len; + u_char *p; ngx_str_t *value, name, size; ngx_int_t n; - ngx_uint_t i, j; + ngx_uint_t i; value = cf->args->elts; @@ -672,25 +672,20 @@ ngx_mail_ssl_session_cache(ngx_conf_t *c && ngx_strncmp(value[i].data, "shared:", sizeof("shared:") - 1) == 0) { - len = 0; + name.data = value[i].data + sizeof("shared:") - 1; + + p = (u_char *) ngx_strchr(name.data, ':'); - for (j = sizeof("shared:") - 1; j < value[i].len; j++) { - if (value[i].data[j] == ':') { - break; - } - - len++; + if (p == NULL) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid zone size \"%V\"", &value[i]); + return NGX_CONF_ERROR; } - if (len == 0) { - goto invalid; - } + name.len = p - name.data; - name.len = len; - name.data = value[i].data + sizeof("shared:") - 1; - - size.len = value[i].len - j - 1; - size.data = name.data + len + 1; + size.data = p + 1; + size.len = value[i].data + value[i].len - size.data; n = ngx_parse_size(&size); diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c +++ b/src/stream/ngx_stream_ssl_module.c @@ -1019,10 +1019,10 @@ ngx_stream_ssl_session_cache(ngx_conf_t { ngx_stream_ssl_conf_t *scf = conf; - size_t len; + u_char *p; ngx_str_t *value, name, size; ngx_int_t n; - ngx_uint_t i, j; + ngx_uint_t i; value = cf->args->elts; @@ -1063,25 +1063,20 @@ ngx_stream_ssl_session_cache(ngx_conf_t && ngx_strncmp(value[i].data, "shared:", sizeof("shared:") - 1) == 0) { - len = 0; + name.data = value[i].data + sizeof("shared:") - 1; + + p = (u_char *) ngx_strchr(name.data, ':'); - for (j = sizeof("shared:") - 1; j < value[i].len; j++) { - if (value[i].data[j] == ':') { - break; - } - - len++; + if (p == NULL) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid zone size \"%V\"", &value[i]); + return NGX_CONF_ERROR; } - if (len == 0) { - goto invalid; - } + name.len = p - name.data; - name.len = len; - name.data = value[i].data + sizeof("shared:") - 1; - - size.len = value[i].len - j - 1; - size.data = name.data + len + 1; + size.data = p + 1; + size.len = value[i].data + value[i].len - size.data; n = ngx_parse_size(&size); From yar at nginx.com Thu Oct 13 18:41:25 2022 From: yar at nginx.com (Yaroslav Zhuravlev) Date: Thu, 13 Oct 2022 19:41:25 +0100 Subject: [PATCH] Documented automatic rotation of TLS session ticket keys In-Reply-To: <42481660-0678-462A-A683-452BC044440C@nginx.com> References: <547c4be44f0db08923b7.1664828246@ORK-ML-00007151> <42481660-0678-462A-A683-452BC044440C@nginx.com> Message-ID: > On 4 Oct 2022, at 15:22, Sergey Kandaurov wrote: > >> >> On 4 Oct 2022, at 00:17, Yaroslav Zhuravlev wrote: >> >> xml/en/docs/http/ngx_http_ssl_module.xml | 5 ++++- >> xml/ru/docs/http/ngx_http_ssl_module.xml | 5 ++++- >> 2 files changed, 8 insertions(+), 2 deletions(-) >> >> >> # HG changeset patch >> # User Yaroslav Zhuravlev >> # Date 1664828002 -3600 >> # Mon Oct 03 21:13:22 2022 +0100 >> # Node ID 547c4be44f0db08923b7dd33bca262d009219a3a >> # Parent 9708787aafc70744296baceb2aa0092401a4ef34 >> Documented automatic rotation of TLS session ticket keys. 
>> >> diff --git a/xml/en/docs/http/ngx_http_ssl_module.xml b/xml/en/docs/http/ngx_http_ssl_module.xml >> --- a/xml/en/docs/http/ngx_http_ssl_module.xml >> +++ b/xml/en/docs/http/ngx_http_ssl_module.xml >> @@ -10,7 +10,7 @@ >> > link="/en/docs/http/ngx_http_ssl_module.html" >> lang="en" >> - rev="58"> >> + rev="59"> >> >>
>> >> @@ -690,6 +690,9 @@ >> about 4000 sessions. >> Each shared cache should have an arbitrary name. >> A cache with the same name can be used in several virtual servers. >> +In shared cache, > > This part looks redundant, as it's already dedicated to shared cache. > >> +TLS session ticket keys >> +are automatically generated, stored, and periodically rotated. > > - missed > - need to clarify relationship with ssl_session_ticket_key, e.g.: > > Additionally, TLS session ticket keys > are automatically generated, stored, and periodically rotated > unless explicitly configured using the > directive (1.23.2). > > [..] Thank you, the patch updated: # HG changeset patch # User Yaroslav Zhuravlev # Date 1665685813 -3600 # Thu Oct 13 19:30:13 2022 +0100 # Node ID 6dfa05c1a12ac43bdc89fa999509a5dcc879db4d # Parent 3cd9ec612c4a982ca1b74a7f5adc24bc69025483 Documented automatic rotation of TLS session ticket keys. diff --git a/xml/en/docs/http/ngx_http_ssl_module.xml b/xml/en/docs/http/ngx_http_ssl_module.xml --- a/xml/en/docs/http/ngx_http_ssl_module.xml +++ b/xml/en/docs/http/ngx_http_ssl_module.xml @@ -10,7 +10,7 @@ + rev="59">
@@ -690,6 +690,10 @@ about 4000 sessions. Each shared cache should have an arbitrary name. A cache with the same name can be used in several virtual servers. +It is also used to automatically generate, store, and +periodically rotate TLS session ticket keys (1.23.2) +unless configured explicitly +using the directive. diff --git a/xml/ru/docs/http/ngx_http_ssl_module.xml b/xml/ru/docs/http/ngx_http_ssl_module.xml --- a/xml/ru/docs/http/ngx_http_ssl_module.xml +++ b/xml/ru/docs/http/ngx_http_ssl_module.xml @@ -10,7 +10,7 @@ + rev="59">
@@ -696,6 +696,10 @@ У каждого разделяемого кэша должно быть произвольное название. Кэш с одинаковым названием может использоваться в нескольких виртуальных серверах. +Также он используется для автоматического создания, хранения и +периодического обновления ключей TLS session tickets (1.23.2), +если они не указаны явно +с помощью директивы . > > -- > Sergey Kandaurov > > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org From mdounin at mdounin.ru Thu Oct 13 20:30:47 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 13 Oct 2022 23:30:47 +0300 Subject: [PATCH] SSL: improved validation of ssl_session_cache and ssl_ocsp_cache In-Reply-To: References: Message-ID: Hello! On Thu, Oct 13, 2022 at 05:02:42PM +0400, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1665665717 -14400 > # Thu Oct 13 16:55:17 2022 +0400 > # Node ID b2eba2994ddcbf9084075f9ae32c3332a628ca7a > # Parent 81b4326daac70d6de70abbc3fe36d4f6e3da54a2 > SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. > > Now it properly detects invalid shared zone configuration with omitted size. > Previously it used to read outside of the buffer boundary. > > Found with AddressSanitizer. > > diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c > --- a/src/http/modules/ngx_http_ssl_module.c > +++ b/src/http/modules/ngx_http_ssl_module.c > @@ -1039,10 +1039,10 @@ ngx_http_ssl_session_cache(ngx_conf_t *c > { > ngx_http_ssl_srv_conf_t *sscf = conf; > > - size_t len; > + u_char *p; > ngx_str_t *value, name, size; > ngx_int_t n; > - ngx_uint_t i, j; > + ngx_uint_t i; > > value = cf->args->elts; > > @@ -1083,25 +1083,20 @@ ngx_http_ssl_session_cache(ngx_conf_t *c > && ngx_strncmp(value[i].data, "shared:", sizeof("shared:") - 1) > == 0) > { > - len = 0; > + name.data = value[i].data + sizeof("shared:") - 1; > + > + p = (u_char *) ngx_strchr(name.data, ':'); > > - for (j = sizeof("shared:") - 1; j < value[i].len; j++) { > - if (value[i].data[j] == ':') { > - break; > - } > - > - len++; > + if (p == NULL) { > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "invalid zone size \"%V\"", &value[i]); > + return NGX_CONF_ERROR; goto invalid? This seems to be more in line with both previous handling of the "len == 0" case, and the remaining handling of the "n == NGX_ERROR" case. > } > > - if (len == 0) { > - goto invalid; > - } > + name.len = p - name.data; This makes it possible to create a shared memory zone with an empty name, which was previously forbidden. Note that limit_req_zone / limit_conn_zone parsing you've copied does not allow shared memory zones with empty names due to the additional name.len check after parsing of all arguments. While I don't think that empty names are fatal, they are certainly confusing at least in logs, and it might be a good idea to preserve the name length check. > > - name.len = len; > - name.data = value[i].data + sizeof("shared:") - 1; > - > - size.len = value[i].len - j - 1; > - size.data = name.data + len + 1; > + size.data = p + 1; > + size.len = value[i].data + value[i].len - size.data; > > n = ngx_parse_size(&size); > [...] 
-- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Fri Oct 14 09:19:34 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 14 Oct 2022 13:19:34 +0400 Subject: [PATCH] Documented automatic rotation of TLS session ticket keys In-Reply-To: References: <547c4be44f0db08923b7.1664828246@ORK-ML-00007151> <42481660-0678-462A-A683-452BC044440C@nginx.com> Message-ID: > On 13 Oct 2022, at 22:41, Yaroslav Zhuravlev wrote: > > Thank you, the patch updated: > > # HG changeset patch > # User Yaroslav Zhuravlev > # Date 1665685813 -3600 > # Thu Oct 13 19:30:13 2022 +0100 > # Node ID 6dfa05c1a12ac43bdc89fa999509a5dcc879db4d > # Parent 3cd9ec612c4a982ca1b74a7f5adc24bc69025483 > Documented automatic rotation of TLS session ticket keys. > > diff --git a/xml/en/docs/http/ngx_http_ssl_module.xml b/xml/en/docs/http/ngx_http_ssl_module.xml > --- a/xml/en/docs/http/ngx_http_ssl_module.xml > +++ b/xml/en/docs/http/ngx_http_ssl_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_ssl_module.html" > lang="en" > - rev="58"> > + rev="59"> > >
> > @@ -690,6 +690,10 @@ > about 4000 sessions. > Each shared cache should have an arbitrary name. > A cache with the same name can be used in several virtual servers. > +It is also used to automatically generate, store, and > +periodically rotate TLS session ticket keys (1.23.2) > +unless configured explicitly > +using the directive. > > > > diff --git a/xml/ru/docs/http/ngx_http_ssl_module.xml b/xml/ru/docs/http/ngx_http_ssl_module.xml > --- a/xml/ru/docs/http/ngx_http_ssl_module.xml > +++ b/xml/ru/docs/http/ngx_http_ssl_module.xml > @@ -10,7 +10,7 @@ > link="/ru/docs/http/ngx_http_ssl_module.html" > lang="ru" > - rev="58"> > + rev="59"> > >
> > @@ -696,6 +696,10 @@ > У каждого разделяемого кэша должно быть произвольное название. > Кэш с одинаковым названием может использоваться в нескольких > виртуальных серверах. > +Также он используется для автоматического создания, хранения и > +периодического обновления ключей TLS session tickets (1.23.2), > +если они не указаны явно > +с помощью директивы . > > > Looks good. -- Sergey Kandaurov From eagle-china at hotmail.com Fri Oct 14 09:30:20 2022 From: eagle-china at hotmail.com (=?gb2312?B?WCDJvdOl?=) Date: Fri, 14 Oct 2022 09:30:20 +0000 Subject: reply Message-ID: confirm a715e69efc7c731644ec10f79b7d2e45c536cb76 reply From m15860198213 at 163.com Fri Oct 14 12:29:24 2022 From: m15860198213 at 163.com (yang) Date: Fri, 14 Oct 2022 20:29:24 +0800 (CST) Subject: reply In-Reply-To: References: Message-ID: <2c36d6bf.3996.183d6787e62.Coremail.m15860198213@163.com> a715e69efc7c731644ec10f79b7d2e45c536cb76 At 2022-10-14 17:30:20, "X 山鹰" wrote: > confirm a715e69efc7c731644ec10f79b7d2e45c536cb76 >reply >_______________________________________________ >nginx-devel mailing list -- nginx-devel at nginx.org >To unsubscribe send an email to nginx-devel-leave at nginx.org -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Fri Oct 14 12:33:00 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 14 Oct 2022 16:33:00 +0400 Subject: [PATCH] SSL: improved validation of ssl_session_cache and ssl_ocsp_cache In-Reply-To: References: Message-ID: <7EFE2F98-BFAB-413A-ADD1-8689F9F0A159@nginx.com> > On 14 Oct 2022, at 00:30, Maxim Dounin wrote: > > Hello! > > On Thu, Oct 13, 2022 at 05:02:42PM +0400, Sergey Kandaurov wrote: > >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1665665717 -14400 >> # Thu Oct 13 16:55:17 2022 +0400 >> # Node ID b2eba2994ddcbf9084075f9ae32c3332a628ca7a >> # Parent 81b4326daac70d6de70abbc3fe36d4f6e3da54a2 >> SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. >> >> Now it properly detects invalid shared zone configuration with omitted size. >> Previously it used to read outside of the buffer boundary. >> >> Found with AddressSanitizer. >> >> diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c >> --- a/src/http/modules/ngx_http_ssl_module.c >> +++ b/src/http/modules/ngx_http_ssl_module.c >> @@ -1039,10 +1039,10 @@ ngx_http_ssl_session_cache(ngx_conf_t *c >> { >> ngx_http_ssl_srv_conf_t *sscf = conf; >> >> - size_t len; >> + u_char *p; >> ngx_str_t *value, name, size; >> ngx_int_t n; >> - ngx_uint_t i, j; >> + ngx_uint_t i; >> >> value = cf->args->elts; >> >> @@ -1083,25 +1083,20 @@ ngx_http_ssl_session_cache(ngx_conf_t *c >> && ngx_strncmp(value[i].data, "shared:", sizeof("shared:") - 1) >> == 0) >> { >> - len = 0; >> + name.data = value[i].data + sizeof("shared:") - 1; >> + >> + p = (u_char *) ngx_strchr(name.data, ':'); >> >> - for (j = sizeof("shared:") - 1; j < value[i].len; j++) { >> - if (value[i].data[j] == ':') { >> - break; >> - } >> - >> - len++; >> + if (p == NULL) { >> + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, >> + "invalid zone size \"%V\"", &value[i]); >> + return NGX_CONF_ERROR; > > goto invalid? > > This seems to be more in line with both previous handling of the > "len == 0" case, and the remaining handling of the "n == > NGX_ERROR" case. Agree. 
> >> } >> >> - if (len == 0) { >> - goto invalid; >> - } >> + name.len = p - name.data; > > This makes it possible to create a shared memory zone with an > empty name, which was previously forbidden. > Thanks, that's certainly an omission. diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c --- a/src/http/modules/ngx_http_ssl_module.c +++ b/src/http/modules/ngx_http_ssl_module.c @@ -1088,13 +1088,15 @@ ngx_http_ssl_session_cache(ngx_conf_t *c p = (u_char *) ngx_strchr(name.data, ':'); if (p == NULL) { - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "invalid zone size \"%V\"", &value[i]); - return NGX_CONF_ERROR; + goto invalid; } name.len = p - name.data; + if (name.len == 0) { + goto invalid; + } + size.data = p + 1; size.len = value[i].data + value[i].len - size.data; (with intention to update other places.) > Note that limit_req_zone / limit_conn_zone parsing you've copied > does not allow shared memory zones with empty names due to the > additional name.len check after parsing of all arguments. Indeed, this is to bring similarity in parsing, that's why it comes with such a huge diff. Alternatively (my initial version), is to add a simple check. Given that the resulting code has subtle differences comparing to limit_req/limit_conn, I tend to think it has little sense to unify. That said, below is a different approach: # HG changeset patch # User Sergey Kandaurov # Date 1665749669 -14400 # Fri Oct 14 16:14:29 2022 +0400 # Node ID 68bc1f8b35a9709a2b8bef6c2d60b33ac7c2712b # Parent 81b4326daac70d6de70abbc3fe36d4f6e3da54a2 SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. Now it properly detects invalid shared zone configuration with omitted size. Previously it used to read outside of the buffer boundary. Found with AddressSanitizer. diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c --- a/src/http/modules/ngx_http_ssl_module.c +++ b/src/http/modules/ngx_http_ssl_module.c @@ -1097,6 +1097,10 @@ ngx_http_ssl_session_cache(ngx_conf_t *c goto invalid; } + if (j == value[i].len) { + goto invalid; + } + name.len = len; name.data = value[i].data + sizeof("shared:") - 1; @@ -1187,6 +1191,10 @@ ngx_http_ssl_ocsp_cache(ngx_conf_t *cf, goto invalid; } + if (j == value[1].len) { + goto invalid; + } + name.len = len; name.data = value[1].data + sizeof("shared:") - 1; diff --git a/src/mail/ngx_mail_ssl_module.c b/src/mail/ngx_mail_ssl_module.c --- a/src/mail/ngx_mail_ssl_module.c +++ b/src/mail/ngx_mail_ssl_module.c @@ -686,6 +686,10 @@ ngx_mail_ssl_session_cache(ngx_conf_t *c goto invalid; } + if (j == value[i].len) { + goto invalid; + } + name.len = len; name.data = value[i].data + sizeof("shared:") - 1; diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c +++ b/src/stream/ngx_stream_ssl_module.c @@ -1077,6 +1077,10 @@ ngx_stream_ssl_session_cache(ngx_conf_t goto invalid; } + if (j == value[i].len) { + goto invalid; + } + name.len = len; name.data = value[i].data + sizeof("shared:") - 1; -- Sergey Kandaurov From mdounin at mdounin.ru Fri Oct 14 16:30:43 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 14 Oct 2022 19:30:43 +0300 Subject: [PATCH] SSL: improved validation of ssl_session_cache and ssl_ocsp_cache In-Reply-To: <7EFE2F98-BFAB-413A-ADD1-8689F9F0A159@nginx.com> References: <7EFE2F98-BFAB-413A-ADD1-8689F9F0A159@nginx.com> Message-ID: Hello! 
On Fri, Oct 14, 2022 at 04:33:00PM +0400, Sergey Kandaurov wrote: > > On 14 Oct 2022, at 00:30, Maxim Dounin wrote: > > > > Hello! > > > > On Thu, Oct 13, 2022 at 05:02:42PM +0400, Sergey Kandaurov wrote: > > > >> # HG changeset patch > >> # User Sergey Kandaurov > >> # Date 1665665717 -14400 > >> # Thu Oct 13 16:55:17 2022 +0400 > >> # Node ID b2eba2994ddcbf9084075f9ae32c3332a628ca7a > >> # Parent 81b4326daac70d6de70abbc3fe36d4f6e3da54a2 > >> SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. > >> > >> Now it properly detects invalid shared zone configuration with omitted size. > >> Previously it used to read outside of the buffer boundary. > >> > >> Found with AddressSanitizer. > >> > >> diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c > >> --- a/src/http/modules/ngx_http_ssl_module.c > >> +++ b/src/http/modules/ngx_http_ssl_module.c > >> @@ -1039,10 +1039,10 @@ ngx_http_ssl_session_cache(ngx_conf_t *c > >> { > >> ngx_http_ssl_srv_conf_t *sscf = conf; > >> > >> - size_t len; > >> + u_char *p; > >> ngx_str_t *value, name, size; > >> ngx_int_t n; > >> - ngx_uint_t i, j; > >> + ngx_uint_t i; > >> > >> value = cf->args->elts; > >> > >> @@ -1083,25 +1083,20 @@ ngx_http_ssl_session_cache(ngx_conf_t *c > >> && ngx_strncmp(value[i].data, "shared:", sizeof("shared:") - 1) > >> == 0) > >> { > >> - len = 0; > >> + name.data = value[i].data + sizeof("shared:") - 1; > >> + > >> + p = (u_char *) ngx_strchr(name.data, ':'); > >> > >> - for (j = sizeof("shared:") - 1; j < value[i].len; j++) { > >> - if (value[i].data[j] == ':') { > >> - break; > >> - } > >> - > >> - len++; > >> + if (p == NULL) { > >> + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > >> + "invalid zone size \"%V\"", &value[i]); > >> + return NGX_CONF_ERROR; > > > > goto invalid? > > > > This seems to be more in line with both previous handling of the > > "len == 0" case, and the remaining handling of the "n == > > NGX_ERROR" case. > > Agree. > > > > >> } > >> > >> - if (len == 0) { > >> - goto invalid; > >> - } > >> + name.len = p - name.data; > > > > This makes it possible to create a shared memory zone with an > > empty name, which was previously forbidden. > > > > Thanks, that's certainly an omission. > > diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c > --- a/src/http/modules/ngx_http_ssl_module.c > +++ b/src/http/modules/ngx_http_ssl_module.c > @@ -1088,13 +1088,15 @@ ngx_http_ssl_session_cache(ngx_conf_t *c > p = (u_char *) ngx_strchr(name.data, ':'); > > if (p == NULL) { > - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > - "invalid zone size \"%V\"", &value[i]); > - return NGX_CONF_ERROR; > + goto invalid; > } > > name.len = p - name.data; > > + if (name.len == 0) { > + goto invalid; > + } > + > size.data = p + 1; > size.len = value[i].data + value[i].len - size.data; > > > (with intention to update other places.) Looks good. > > Note that limit_req_zone / limit_conn_zone parsing you've copied > > does not allow shared memory zones with empty names due to the > > additional name.len check after parsing of all arguments. > > Indeed, this is to bring similarity in parsing, > that's why it comes with such a huge diff. > > Alternatively (my initial version), is to add a simple check. > Given that the resulting code has subtle differences comparing to > limit_req/limit_conn, I tend to think it has little sense to unify. 
> That said, below is a different approach: I generally tend to think that ngx_strchr() approach as used in limit_req_zone / limit_conn_zone is more readable compared to the explicit for loop in ssl_session_cache. But the idea of changing the existing code is indeed questionable. > > # HG changeset patch > # User Sergey Kandaurov > # Date 1665749669 -14400 > # Fri Oct 14 16:14:29 2022 +0400 > # Node ID 68bc1f8b35a9709a2b8bef6c2d60b33ac7c2712b > # Parent 81b4326daac70d6de70abbc3fe36d4f6e3da54a2 > SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. > > Now it properly detects invalid shared zone configuration with omitted size. > Previously it used to read outside of the buffer boundary. > > Found with AddressSanitizer. > > diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c > --- a/src/http/modules/ngx_http_ssl_module.c > +++ b/src/http/modules/ngx_http_ssl_module.c > @@ -1097,6 +1097,10 @@ ngx_http_ssl_session_cache(ngx_conf_t *c > goto invalid; > } > > + if (j == value[i].len) { > + goto invalid; > + } > + > name.len = len; > name.data = value[i].data + sizeof("shared:") - 1; > May be just @@ -1093,7 +1093,7 @@ ngx_http_ssl_session_cache(ngx_conf_t *c len++; } - if (len == 0) { + if (len == 0 || j == value[i].len) { goto invalid; } ? Either way, looks good. Feel free to commit the variant you prefer. -- Maxim Dounin http://mdounin.ru/ From arut at nginx.com Mon Oct 17 11:07:59 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 17 Oct 2022 15:07:59 +0400 Subject: [PATCH 2 of 4] QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL In-Reply-To: References: Message-ID: <20221017110759.egniy6o45n7vuuym@N00W24XTQX> Hi, On Tue, Oct 11, 2022 at 02:35:51PM +0400, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1665442922 -14400 > # Tue Oct 11 03:02:02 2022 +0400 > # Branch quic > # Node ID caced81ce0a9cb218ae8cdd6176c12e0614acee9 > # Parent 82b03006a7bd93c3b5c962a3afac89e0639b0c12 > QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL. > > This function is present in QuicTLS only. After SSL_READ_EARLY_DATA_SUCCESS > became visible in LibreSSL together with experimental QUIC API, this required > to revise the conditional compilation test to use more narrow macros. > > diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c > --- a/src/event/quic/ngx_event_quic_ssl.c > +++ b/src/event/quic/ngx_event_quic_ssl.c > @@ -557,7 +557,7 @@ ngx_quic_init_connection(ngx_connection_ > return NGX_ERROR; > } > > -#ifdef SSL_READ_EARLY_DATA_SUCCESS > +#if (!defined LIBRESSL_VERSION_NUMBER && !defined OPENSSL_IS_BORINGSSL) What about the macro OPENSSL_INFO_QUIC? It's only defined in QuicTLS. > if (SSL_CTX_get_max_early_data(qc->conf->ssl->ctx)) { > SSL_set_quic_early_data_enabled(ssl_conn, 1); > } -- Roman Arutyunyan From pluknet at nginx.com Mon Oct 17 12:26:37 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 17 Oct 2022 12:26:37 +0000 Subject: [nginx] SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. Message-ID: details: https://hg.nginx.org/nginx/rev/e32b48848add branches: changeset: 8088:e32b48848add user: Sergey Kandaurov date: Mon Oct 17 16:24:53 2022 +0400 description: SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. Now it properly detects invalid shared zone configuration with omitted size. Previously it used to read outside of the buffer boundary. Found with AddressSanitizer. 
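For context, the shared zone argument of these directives takes the form "shared:name:size"; a configuration with the trailing size omitted, which previously caused the read outside of the buffer boundary, is now rejected at configuration parse time. A hypothetical example (arbitrary zone names and sizes):

    ssl_session_cache shared:SSL;         # rejected: zone size is missing
    ssl_session_cache shared:SSL:10m;     # valid: zone name "SSL", size 10m
    ssl_ocsp_cache    shared:OCSP:1m;     # the same validation applies here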
diffstat: src/http/modules/ngx_http_ssl_module.c | 4 ++-- src/mail/ngx_mail_ssl_module.c | 2 +- src/stream/ngx_stream_ssl_module.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diffs (45 lines): diff -r 81b4326daac7 -r e32b48848add src/http/modules/ngx_http_ssl_module.c --- a/src/http/modules/ngx_http_ssl_module.c Thu Oct 13 16:18:56 2022 +0400 +++ b/src/http/modules/ngx_http_ssl_module.c Mon Oct 17 16:24:53 2022 +0400 @@ -1093,7 +1093,7 @@ ngx_http_ssl_session_cache(ngx_conf_t *c len++; } - if (len == 0) { + if (len == 0 || j == value[i].len) { goto invalid; } @@ -1183,7 +1183,7 @@ ngx_http_ssl_ocsp_cache(ngx_conf_t *cf, len++; } - if (len == 0) { + if (len == 0 || j == value[1].len) { goto invalid; } diff -r 81b4326daac7 -r e32b48848add src/mail/ngx_mail_ssl_module.c --- a/src/mail/ngx_mail_ssl_module.c Thu Oct 13 16:18:56 2022 +0400 +++ b/src/mail/ngx_mail_ssl_module.c Mon Oct 17 16:24:53 2022 +0400 @@ -682,7 +682,7 @@ ngx_mail_ssl_session_cache(ngx_conf_t *c len++; } - if (len == 0) { + if (len == 0 || j == value[i].len) { goto invalid; } diff -r 81b4326daac7 -r e32b48848add src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c Thu Oct 13 16:18:56 2022 +0400 +++ b/src/stream/ngx_stream_ssl_module.c Mon Oct 17 16:24:53 2022 +0400 @@ -1073,7 +1073,7 @@ ngx_stream_ssl_session_cache(ngx_conf_t len++; } - if (len == 0) { + if (len == 0 || j == value[i].len) { goto invalid; } From pluknet at nginx.com Mon Oct 17 12:27:42 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 17 Oct 2022 16:27:42 +0400 Subject: [PATCH] SSL: improved validation of ssl_session_cache and ssl_ocsp_cache In-Reply-To: References: <7EFE2F98-BFAB-413A-ADD1-8689F9F0A159@nginx.com> Message-ID: <37A34C93-F5C3-4268-A20C-AD2FED01DF6D@nginx.com> > On 14 Oct 2022, at 20:30, Maxim Dounin wrote: > > Hello! > > On Fri, Oct 14, 2022 at 04:33:00PM +0400, Sergey Kandaurov wrote: > >>> On 14 Oct 2022, at 00:30, Maxim Dounin wrote: >>> >>> Hello! >>> >>> On Thu, Oct 13, 2022 at 05:02:42PM +0400, Sergey Kandaurov wrote: >>> >>>> # HG changeset patch >>>> # User Sergey Kandaurov >>>> # Date 1665665717 -14400 >>>> # Thu Oct 13 16:55:17 2022 +0400 >>>> # Node ID b2eba2994ddcbf9084075f9ae32c3332a628ca7a >>>> # Parent 81b4326daac70d6de70abbc3fe36d4f6e3da54a2 >>>> SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. >>>> >>>> Now it properly detects invalid shared zone configuration with omitted size. >>>> Previously it used to read outside of the buffer boundary. >>>> >>>> Found with AddressSanitizer. 
>>>> >>>> diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c >>>> --- a/src/http/modules/ngx_http_ssl_module.c >>>> +++ b/src/http/modules/ngx_http_ssl_module.c >>>> @@ -1039,10 +1039,10 @@ ngx_http_ssl_session_cache(ngx_conf_t *c >>>> { >>>> ngx_http_ssl_srv_conf_t *sscf = conf; >>>> >>>> - size_t len; >>>> + u_char *p; >>>> ngx_str_t *value, name, size; >>>> ngx_int_t n; >>>> - ngx_uint_t i, j; >>>> + ngx_uint_t i; >>>> >>>> value = cf->args->elts; >>>> >>>> @@ -1083,25 +1083,20 @@ ngx_http_ssl_session_cache(ngx_conf_t *c >>>> && ngx_strncmp(value[i].data, "shared:", sizeof("shared:") - 1) >>>> == 0) >>>> { >>>> - len = 0; >>>> + name.data = value[i].data + sizeof("shared:") - 1; >>>> + >>>> + p = (u_char *) ngx_strchr(name.data, ':'); >>>> >>>> - for (j = sizeof("shared:") - 1; j < value[i].len; j++) { >>>> - if (value[i].data[j] == ':') { >>>> - break; >>>> - } >>>> - >>>> - len++; >>>> + if (p == NULL) { >>>> + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, >>>> + "invalid zone size \"%V\"", &value[i]); >>>> + return NGX_CONF_ERROR; >>> >>> goto invalid? >>> >>> This seems to be more in line with both previous handling of the >>> "len == 0" case, and the remaining handling of the "n == >>> NGX_ERROR" case. >> >> Agree. >> >>> >>>> } >>>> >>>> - if (len == 0) { >>>> - goto invalid; >>>> - } >>>> + name.len = p - name.data; >>> >>> This makes it possible to create a shared memory zone with an >>> empty name, which was previously forbidden. >>> >> >> Thanks, that's certainly an omission. >> >> diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c >> --- a/src/http/modules/ngx_http_ssl_module.c >> +++ b/src/http/modules/ngx_http_ssl_module.c >> @@ -1088,13 +1088,15 @@ ngx_http_ssl_session_cache(ngx_conf_t *c >> p = (u_char *) ngx_strchr(name.data, ':'); >> >> if (p == NULL) { >> - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, >> - "invalid zone size \"%V\"", &value[i]); >> - return NGX_CONF_ERROR; >> + goto invalid; >> } >> >> name.len = p - name.data; >> >> + if (name.len == 0) { >> + goto invalid; >> + } >> + >> size.data = p + 1; >> size.len = value[i].data + value[i].len - size.data; >> >> >> (with intention to update other places.) > > Looks good. > >>> Note that limit_req_zone / limit_conn_zone parsing you've copied >>> does not allow shared memory zones with empty names due to the >>> additional name.len check after parsing of all arguments. >> >> Indeed, this is to bring similarity in parsing, >> that's why it comes with such a huge diff. >> >> Alternatively (my initial version), is to add a simple check. >> Given that the resulting code has subtle differences comparing to >> limit_req/limit_conn, I tend to think it has little sense to unify. >> That said, below is a different approach: > > I generally tend to think that ngx_strchr() approach as used in > limit_req_zone / limit_conn_zone is more readable compared to the > explicit for loop in ssl_session_cache. But the idea of changing > the existing code is indeed questionable. > >> >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1665749669 -14400 >> # Fri Oct 14 16:14:29 2022 +0400 >> # Node ID 68bc1f8b35a9709a2b8bef6c2d60b33ac7c2712b >> # Parent 81b4326daac70d6de70abbc3fe36d4f6e3da54a2 >> SSL: improved validation of ssl_session_cache and ssl_ocsp_cache. >> >> Now it properly detects invalid shared zone configuration with omitted size. >> Previously it used to read outside of the buffer boundary. >> >> Found with AddressSanitizer. 
>> >> diff --git a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c >> --- a/src/http/modules/ngx_http_ssl_module.c >> +++ b/src/http/modules/ngx_http_ssl_module.c >> @@ -1097,6 +1097,10 @@ ngx_http_ssl_session_cache(ngx_conf_t *c >> goto invalid; >> } >> >> + if (j == value[i].len) { >> + goto invalid; >> + } >> + >> name.len = len; >> name.data = value[i].data + sizeof("shared:") - 1; >> > > May be just > > @@ -1093,7 +1093,7 @@ ngx_http_ssl_session_cache(ngx_conf_t *c > len++; > } > > - if (len == 0) { > + if (len == 0 || j == value[i].len) { > goto invalid; > } > > ? > > Either way, looks good. Yep, committed this variant. Thanks for review. -- Sergey Kandaurov From pluknet at nginx.com Mon Oct 17 13:29:36 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 17 Oct 2022 17:29:36 +0400 Subject: [PATCH] Documented behaviour of a single server in upstream with keepalive In-Reply-To: References: Message-ID: > On 11 Oct 2022, at 18:50, Yaroslav Zhuravlev wrote: > >> On 10 Oct 2022, at 00:55, Maxim Dounin wrote: >> >> Hello! >> >> On Mon, Oct 03, 2022 at 09:21:52PM +0100, Yaroslav Zhuravlev wrote: >> > [..] >> >>> diff --git a/xml/en/docs/http/ngx_http_upstream_module.xml b/xml/en/docs/http/ngx_http_upstream_module.xml >>> --- a/xml/en/docs/http/ngx_http_upstream_module.xml >>> +++ b/xml/en/docs/http/ngx_http_upstream_module.xml >>> @@ -10,7 +10,7 @@ >>> >> link="/en/docs/http/ngx_http_upstream_module.html" >>> lang="en" >>> - rev="88"> >>> + rev="89"> >>> >>>
>>> >>> @@ -351,6 +351,11 @@ >>> If there is only a single server in a group, max_fails, >>> fail_timeout and slow_start parameters >>> are ignored, and such a server will never be considered unavailable. >>> +If an error occurred while trying to reuse a >>> +keepalive connection >>> +with a single server, and the request is allowed to be passed to the >>> +next server >>> +on error, such server will be selected again. >>> >>> >> >> If an error occurs? >> >> The "with a single server" clause looks wrong, we are talking >> about a group with only a single server here. It probably should >> be either "with such server" or "the server" (probably "... with >> such server ... the server will be ..." would be good enough >> considering the whole sentence). > > Thanks, updated: > > # HG changeset patch > # User Yaroslav Zhuravlev > # Date 1663861151 -3600 > # Thu Sep 22 16:39:11 2022 +0100 > # Node ID 3b878f0c18cc277bfccb6095afd2cc7dc0cdec0f > # Parent 9708787aafc70744296baceb2aa0092401a4ef34 > Documented behaviour of a single server in upstream with keepalive. > > diff --git a/xml/en/docs/http/ngx_http_upstream_module.xml b/xml/en/docs/http/ngx_http_upstream_module.xml > --- a/xml/en/docs/http/ngx_http_upstream_module.xml > +++ b/xml/en/docs/http/ngx_http_upstream_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_upstream_module.html" > lang="en" > - rev="88"> > + rev="89"> > >
> > @@ -350,7 +350,13 @@ > > If there is only a single server in a group, max_fails, > fail_timeout and slow_start parameters > -are ignored, and such a server will never be considered unavailable. > +are ignored, and such server will never be considered unavailable. > +If an error occurs > +while passing a request through > +a keepalive connection to such server > +and the request is allowed to be passed to the > +next > +server on error, the server will be selected again. I am in doubt about the note "on error", as such condition can be insufficient to express the desired next server logic, e.g.: proxy_next_upstream error non_idempotent; So it could be enough to write "and the request is allowed to be passed to the next server ...". Next, usually we don't refer to the specific proxy module from the upstream module documentation. Consider the following existing text for the upstream block: : If an error occurs during communication with a server, the request will : be passed to the next server, and so on until all of the functioning : servers will be tried. BTW, this text could be used instead as a basis to expand on to cached connections, so far as the next server logic belongs to the upstream group, not to a distinct server. For example, consider the next addition under the above citation: : If an error occurs during communication with a single server in a group : through a keepalive connection, that means : the request will be passed to the next cached connection instead, : and so on until all cached connections will be tried first. Or just: : : In case of communicating with a single server through : a keepalive connection, that means : the request will be passed to the next cached connection instead, : and so on until all cached connections will be tried first. : -- Sergey Kandaurov From arut at nginx.com Mon Oct 17 13:31:30 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 17 Oct 2022 17:31:30 +0400 Subject: [PATCH 3 of 4] QUIC: support for setting QUIC methods with LibreSSL In-Reply-To: References: Message-ID: <20221017133130.ba76bkilt2brt66j@N00W24XTQX> Hi, On Tue, Oct 11, 2022 at 02:35:52PM +0400, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1665484414 -14400 > # Tue Oct 11 14:33:34 2022 +0400 > # Branch quic > # Node ID c0165ddcb1c6981f8e5230081f03a277f62d20c3 > # Parent caced81ce0a9cb218ae8cdd6176c12e0614acee9 > QUIC: support for setting QUIC methods with LibreSSL. > > Setting QUIC methods is converted to use C99 designated initializers > for simplicity, as LibreSSL 3.6.0 has different SSL_QUIC_METHOD layout. > > Additionally, it's stick with set_read_secret/set_write_secret callbacks. > LibreSSL prefers set_encryption_secrets over them but has unexpectedly > incompatible behaviour expressed in passing read and write secrets split > in separate calls, unlike this is documented in old BoringSSL sources. Why do you think it prefres set_encryption_secrets()? The source code references it as "old", see this comment from tls13_quic_set_read_traffic_key(): /* Handle both the new (BoringSSL) and old (quictls) APIs. 
*/ > > diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c > --- a/src/event/quic/ngx_event_quic_ssl.c > +++ b/src/event/quic/ngx_event_quic_ssl.c > @@ -18,7 +18,7 @@ > #define NGX_QUIC_MAX_BUFFERED 65535 > > > -#if BORINGSSL_API_VERSION >= 10 > +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER > static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, > enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, > const uint8_t *secret, size_t secret_len); > @@ -40,19 +40,19 @@ static ngx_int_t ngx_quic_crypto_input(n > > > static SSL_QUIC_METHOD quic_method = { > -#if BORINGSSL_API_VERSION >= 10 > - ngx_quic_set_read_secret, > - ngx_quic_set_write_secret, > +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER > + .set_read_secret = ngx_quic_set_read_secret, > + .set_write_secret = ngx_quic_set_write_secret, > #else > - ngx_quic_set_encryption_secrets, > + .set_encryption_secrets = ngx_quic_set_encryption_secrets, > #endif > - ngx_quic_add_handshake_data, > - ngx_quic_flush_flight, > - ngx_quic_send_alert, > + .add_handshake_data = ngx_quic_add_handshake_data, > + .flush_flight = ngx_quic_flush_flight, > + .send_alert = ngx_quic_send_alert, > }; > > > -#if BORINGSSL_API_VERSION >= 10 > +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER > > static int > ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, > > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org -- Roman Arutyunyan From pluknet at nginx.com Mon Oct 17 14:04:36 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 17 Oct 2022 18:04:36 +0400 Subject: [PATCH 2 of 4] QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL In-Reply-To: <20221017110759.egniy6o45n7vuuym@N00W24XTQX> References: <20221017110759.egniy6o45n7vuuym@N00W24XTQX> Message-ID: > On 17 Oct 2022, at 15:07, Roman Arutyunyan wrote: > > Hi, > > On Tue, Oct 11, 2022 at 02:35:51PM +0400, Sergey Kandaurov wrote: >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1665442922 -14400 >> # Tue Oct 11 03:02:02 2022 +0400 >> # Branch quic >> # Node ID caced81ce0a9cb218ae8cdd6176c12e0614acee9 >> # Parent 82b03006a7bd93c3b5c962a3afac89e0639b0c12 >> QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL. >> >> This function is present in QuicTLS only. After SSL_READ_EARLY_DATA_SUCCESS >> became visible in LibreSSL together with experimental QUIC API, this required >> to revise the conditional compilation test to use more narrow macros. >> >> diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c >> --- a/src/event/quic/ngx_event_quic_ssl.c >> +++ b/src/event/quic/ngx_event_quic_ssl.c >> @@ -557,7 +557,7 @@ ngx_quic_init_connection(ngx_connection_ >> return NGX_ERROR; >> } >> >> -#ifdef SSL_READ_EARLY_DATA_SUCCESS >> +#if (!defined LIBRESSL_VERSION_NUMBER && !defined OPENSSL_IS_BORINGSSL) > > What about the macro OPENSSL_INFO_QUIC? It's only defined in QuicTLS. > >> if (SSL_CTX_get_max_early_data(qc->conf->ssl->ctx)) { >> SSL_set_quic_early_data_enabled(ssl_conn, 1); >> } I wonder how long will it take to use this macro in the upstream OpenSSL. Given that QuicTLS may be considered as something interim, I think it should be ok for now. And it respects the no-quic QuicTLS build option. Together with the adjusted log summary: QUIC: using SSL_set_quic_early_data_enabled() only with QuicTLS. 
This function is present in QuicTLS only. After SSL_READ_EARLY_DATA_SUCCESS became visible in LibreSSL together with experimental QUIC API, this required to revise the conditional compilation test to use more narrow macros. diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c --- a/src/event/quic/ngx_event_quic_ssl.c +++ b/src/event/quic/ngx_event_quic_ssl.c @@ -557,7 +557,7 @@ ngx_quic_init_connection(ngx_connection_ return NGX_ERROR; } -#ifdef SSL_READ_EARLY_DATA_SUCCESS +#ifdef OPENSSL_INFO_QUIC if (SSL_CTX_get_max_early_data(qc->conf->ssl->ctx)) { SSL_set_quic_early_data_enabled(ssl_conn, 1); } -- Sergey Kandaurov From pluknet at nginx.com Mon Oct 17 14:26:41 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 17 Oct 2022 18:26:41 +0400 Subject: [PATCH 3 of 4] QUIC: support for setting QUIC methods with LibreSSL In-Reply-To: <20221017133130.ba76bkilt2brt66j@N00W24XTQX> References: <20221017133130.ba76bkilt2brt66j@N00W24XTQX> Message-ID: <7F24085F-2EB1-450A-8945-F1A161F31D25@nginx.com> > On 17 Oct 2022, at 17:31, Roman Arutyunyan wrote: > > Hi, > > On Tue, Oct 11, 2022 at 02:35:52PM +0400, Sergey Kandaurov wrote: >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1665484414 -14400 >> # Tue Oct 11 14:33:34 2022 +0400 >> # Branch quic >> # Node ID c0165ddcb1c6981f8e5230081f03a277f62d20c3 >> # Parent caced81ce0a9cb218ae8cdd6176c12e0614acee9 >> QUIC: support for setting QUIC methods with LibreSSL. >> >> Setting QUIC methods is converted to use C99 designated initializers >> for simplicity, as LibreSSL 3.6.0 has different SSL_QUIC_METHOD layout. >> >> Additionally, it's stick with set_read_secret/set_write_secret callbacks. >> LibreSSL prefers set_encryption_secrets over them but has unexpectedly >> incompatible behaviour expressed in passing read and write secrets split >> in separate calls, unlike this is documented in old BoringSSL sources. > > Why do you think it prefres set_encryption_secrets()? The source code > references it as "old", see this comment from tls13_quic_set_read_traffic_key(): > > /* Handle both the new (BoringSSL) and old (quictls) APIs. */ > Tnx, looks like a false memory from before applying the patch. Anyway, it's still worth to leave only the new API. This updates the last paragraph of the change description: : Additionally, only set_read_secret/set_write_secret callbacks are set. : Although they are preferred in LibreSSL over set_encryption_secrets, : better be on a safe side as LibreSSL has unexpectedly incompatible : set_encryption_secrets calling convention expressed in passing read : and write secrets split in separate calls, unlike this is documented : in old BoringSSL sources. To avoid introducing further changes for : the old API, it is simply disabled. 
>> >> diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c >> --- a/src/event/quic/ngx_event_quic_ssl.c >> +++ b/src/event/quic/ngx_event_quic_ssl.c >> @@ -18,7 +18,7 @@ >> #define NGX_QUIC_MAX_BUFFERED 65535 >> >> >> -#if BORINGSSL_API_VERSION >= 10 >> +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER >> static int ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, >> enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, >> const uint8_t *secret, size_t secret_len); >> @@ -40,19 +40,19 @@ static ngx_int_t ngx_quic_crypto_input(n >> >> >> static SSL_QUIC_METHOD quic_method = { >> -#if BORINGSSL_API_VERSION >= 10 >> - ngx_quic_set_read_secret, >> - ngx_quic_set_write_secret, >> +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER >> + .set_read_secret = ngx_quic_set_read_secret, >> + .set_write_secret = ngx_quic_set_write_secret, >> #else >> - ngx_quic_set_encryption_secrets, >> + .set_encryption_secrets = ngx_quic_set_encryption_secrets, >> #endif >> - ngx_quic_add_handshake_data, >> - ngx_quic_flush_flight, >> - ngx_quic_send_alert, >> + .add_handshake_data = ngx_quic_add_handshake_data, >> + .flush_flight = ngx_quic_flush_flight, >> + .send_alert = ngx_quic_send_alert, >> }; >> >> >> -#if BORINGSSL_API_VERSION >= 10 >> +#if BORINGSSL_API_VERSION >= 10 || defined LIBRESSL_VERSION_NUMBER >> >> static int >> ngx_quic_set_read_secret(ngx_ssl_conn_t *ssl_conn, >> -- Sergey Kandaurov From mdounin at mdounin.ru Mon Oct 17 19:43:37 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 17 Oct 2022 22:43:37 +0300 Subject: nginx-1.23.2 changes draft Message-ID: Hello! Changes with nginx 1.23.2 19 Oct 2022 *) Feature: the "$proxy_protocol_tlv_..." variables. *) Feature: TLS session tickets encryption keys are now automatically rotated when using shared memory in the "ssl_session_cache" directive. *) Change: the logging level of the "bad record type" SSL errors has been lowered from "crit" to "info". Thanks to Murilo Andrade. *) Change: now when using shared memory in the "ssl_session_cache" directive the "could not allocate new session" errors are logged at the "warn" level instead of "alert" and not more often than once per second. *) Bugfix: nginx/Windows could not be built with OpenSSL 3.0.x. *) Bugfix: in logging of the PROXY protocol errors. Thanks to Sergey Brester. *) Workaround: shared memory from the "ssl_session_cache" directive was spent on sessions using TLS session tickets when using TLSv1.3 with OpenSSL. *) Workaround: timeout specified with the "ssl_session_timeout" directive did not work when using TLSv1.3 with OpenSSL or BoringSSL. Изменения в nginx 1.23.2 19.10.2022 *) Добавление: переменные "$proxy_protocol_tlv_...". *) Добавление: ключи шифрования TLS session tickets теперь автоматически меняются при использовании разделяемой памяти в ssl_session_cache. *) Изменение: уровень логгирования ошибок SSL "bad record type" понижен с уровня crit до info. Спасибо Murilo Andrade. *) Изменение: теперь при использовании разделяемой памяти в ssl_session_cache сообщения "could not allocate new session" логгируются на уровне warn вместо alert и не чаще одного раза в секунду. *) Исправление: nginx/Windows не собирался с OpenSSL 3.0.x. *) Исправление: в логгировании ошибок протокола PROXY. Спасибо Сергею Брестеру. *) Изменение: при использовании TLSv1.3 с OpenSSL разделяемая память из ssl_session_cache расходовалась в том числе на сессии, использующие TLS session tickets. 
*) Изменение: таймаут, заданный с помощью директивы ssl_session_timeout, не работал при использовании TLSv1.3 с OpenSSL или BoringSSL. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Mon Oct 17 21:31:34 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 18 Oct 2022 01:31:34 +0400 Subject: nginx-1.23.2 changes draft In-Reply-To: References: Message-ID: <5859618C-42FA-4E2D-A51D-FA70595455DD@nginx.com> > On 17 Oct 2022, at 23:43, Maxim Dounin wrote: > > Hello! > > > Changes with nginx 1.23.2 19 Oct 2022 > > [..] > > Изменения в nginx 1.23.2 19.10.2022 > > [..] > Looks good. -- Sergey Kandaurov From arut at nginx.com Tue Oct 18 11:46:34 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 18 Oct 2022 15:46:34 +0400 Subject: [PATCH 2 of 4] QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL In-Reply-To: References: <20221017110759.egniy6o45n7vuuym@N00W24XTQX> Message-ID: <20221018114634.5gs3zl4axl2i6v2l@N00W24XTQX> Hi, On Mon, Oct 17, 2022 at 06:04:36PM +0400, Sergey Kandaurov wrote: > > > On 17 Oct 2022, at 15:07, Roman Arutyunyan wrote: > > > > Hi, > > > > On Tue, Oct 11, 2022 at 02:35:51PM +0400, Sergey Kandaurov wrote: > >> # HG changeset patch > >> # User Sergey Kandaurov > >> # Date 1665442922 -14400 > >> # Tue Oct 11 03:02:02 2022 +0400 > >> # Branch quic > >> # Node ID caced81ce0a9cb218ae8cdd6176c12e0614acee9 > >> # Parent 82b03006a7bd93c3b5c962a3afac89e0639b0c12 > >> QUIC: do not use SSL_set_quic_early_data_enabled() with LibreSSL. > >> > >> This function is present in QuicTLS only. After SSL_READ_EARLY_DATA_SUCCESS > >> became visible in LibreSSL together with experimental QUIC API, this required > >> to revise the conditional compilation test to use more narrow macros. > >> > >> diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c > >> --- a/src/event/quic/ngx_event_quic_ssl.c > >> +++ b/src/event/quic/ngx_event_quic_ssl.c > >> @@ -557,7 +557,7 @@ ngx_quic_init_connection(ngx_connection_ > >> return NGX_ERROR; > >> } > >> > >> -#ifdef SSL_READ_EARLY_DATA_SUCCESS > >> +#if (!defined LIBRESSL_VERSION_NUMBER && !defined OPENSSL_IS_BORINGSSL) > > > > What about the macro OPENSSL_INFO_QUIC? It's only defined in QuicTLS. > > > >> if (SSL_CTX_get_max_early_data(qc->conf->ssl->ctx)) { > >> SSL_set_quic_early_data_enabled(ssl_conn, 1); > >> } > > I wonder how long will it take to use this macro in the upstream OpenSSL. > Given that QuicTLS may be considered as something interim, > I think it should be ok for now. > And it respects the no-quic QuicTLS build option. > > Together with the adjusted log summary: > > QUIC: using SSL_set_quic_early_data_enabled() only with QuicTLS. > > This function is present in QuicTLS only. After SSL_READ_EARLY_DATA_SUCCESS > became visible in LibreSSL together with experimental QUIC API, this required > to revise the conditional compilation test to use more narrow macros. > > diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c > --- a/src/event/quic/ngx_event_quic_ssl.c > +++ b/src/event/quic/ngx_event_quic_ssl.c > @@ -557,7 +557,7 @@ ngx_quic_init_connection(ngx_connection_ > return NGX_ERROR; > } > > -#ifdef SSL_READ_EARLY_DATA_SUCCESS > +#ifdef OPENSSL_INFO_QUIC > if (SSL_CTX_get_max_early_data(qc->conf->ssl->ctx)) { > SSL_set_quic_early_data_enabled(ssl_conn, 1); > } Looks ok. Other patches are ok as well. 
-- Roman Arutyunyan From yar at nginx.com Wed Oct 19 09:01:43 2022 From: yar at nginx.com (=?utf-8?q?Yaroslav_Zhuravlev?=) Date: Wed, 19 Oct 2022 10:01:43 +0100 Subject: [PATCH] Documented the $proxy_protocol_tlv_ variable Message-ID: <881627fb56bfff009297.1666170103@ORK-ML-00007151> xml/en/docs/http/ngx_http_core_module.xml | 107 +++++++++++++++++++++++++- xml/en/docs/stream/ngx_stream_core_module.xml | 105 +++++++++++++++++++++++++- xml/ru/docs/http/ngx_http_core_module.xml | 106 +++++++++++++++++++++++++- xml/ru/docs/stream/ngx_stream_core_module.xml | 106 +++++++++++++++++++++++++- 4 files changed, 420 insertions(+), 4 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx.org.patch Type: text/x-patch Size: 14619 bytes Desc: not available URL: From thresh at nginx.com Wed Oct 19 12:03:11 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 19 Oct 2022 12:03:11 +0000 Subject: [nginx] Mp4: disabled duplicate atoms. Message-ID: details: https://hg.nginx.org/nginx/rev/4032c1bdfa14 branches: changeset: 8089:4032c1bdfa14 user: Roman Arutyunyan date: Wed Oct 19 10:53:17 2022 +0300 description: Mp4: disabled duplicate atoms. Most atoms should not appear more than once in a container. Previously, this was not enforced by the module, which could result in worker process crash, memory corruption and disclosure. diffstat: src/http/modules/ngx_http_mp4_module.c | 147 +++++++++++++++++++++++++++++++++ 1 files changed, 147 insertions(+), 0 deletions(-) diffs (297 lines): diff -r e32b48848add -r 4032c1bdfa14 src/http/modules/ngx_http_mp4_module.c --- a/src/http/modules/ngx_http_mp4_module.c Mon Oct 17 16:24:53 2022 +0400 +++ b/src/http/modules/ngx_http_mp4_module.c Wed Oct 19 10:53:17 2022 +0300 @@ -1121,6 +1121,12 @@ ngx_http_mp4_read_ftyp_atom(ngx_http_mp4 return NGX_ERROR; } + if (mp4->ftyp_atom.buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 ftyp atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom_size = sizeof(ngx_mp4_atom_header_t) + (size_t) atom_data_size; ftyp_atom = ngx_palloc(mp4->request->pool, atom_size); @@ -1179,6 +1185,12 @@ ngx_http_mp4_read_moov_atom(ngx_http_mp4 return NGX_DECLINED; } + if (mp4->moov_atom.buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 moov atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + conf = ngx_http_get_module_loc_conf(mp4->request, ngx_http_mp4_module); if (atom_data_size > mp4->buffer_size) { @@ -1246,6 +1258,12 @@ ngx_http_mp4_read_mdat_atom(ngx_http_mp4 ngx_log_debug0(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, "mp4 mdat atom"); + if (mp4->mdat_atom.buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 mdat atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + data = &mp4->mdat_data_buf; data->file = &mp4->file; data->in_file = 1; @@ -1372,6 +1390,12 @@ ngx_http_mp4_read_mvhd_atom(ngx_http_mp4 ngx_log_debug0(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, "mp4 mvhd atom"); + if (mp4->mvhd_atom.buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 mvhd atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom_header = ngx_mp4_atom_header(mp4); mvhd_atom = (ngx_mp4_mvhd_atom_t *) atom_header; mvhd64_atom = (ngx_mp4_mvhd64_atom_t *) atom_header; @@ -1637,6 +1661,13 @@ ngx_http_mp4_read_tkhd_atom(ngx_http_mp4 atom_size = sizeof(ngx_mp4_atom_header_t) + (size_t) atom_data_size; trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_TKHD_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, 
mp4->file.log, 0, + "duplicate mp4 tkhd atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->tkhd_size = atom_size; trak->movie_duration = duration; @@ -1676,6 +1707,12 @@ ngx_http_mp4_read_mdia_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_MDIA_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 mdia atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->mdia_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1799,6 +1836,13 @@ ngx_http_mp4_read_mdhd_atom(ngx_http_mp4 atom_size = sizeof(ngx_mp4_atom_header_t) + (size_t) atom_data_size; trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_MDHD_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 mdhd atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->mdhd_size = atom_size; trak->timescale = timescale; trak->duration = duration; @@ -1862,6 +1906,12 @@ ngx_http_mp4_read_hdlr_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_HDLR_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 hdlr atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->hdlr_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1890,6 +1940,12 @@ ngx_http_mp4_read_minf_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_MINF_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 minf atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->minf_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1933,6 +1989,15 @@ ngx_http_mp4_read_vmhd_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_VMHD_ATOM].buf + || trak->out[NGX_HTTP_MP4_SMHD_ATOM].buf) + { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 vmhd/smhd atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->vmhd_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1964,6 +2029,15 @@ ngx_http_mp4_read_smhd_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_VMHD_ATOM].buf + || trak->out[NGX_HTTP_MP4_SMHD_ATOM].buf) + { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 vmhd/smhd atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->smhd_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1995,6 +2069,12 @@ ngx_http_mp4_read_dinf_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_DINF_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 dinf atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->dinf_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -2023,6 +2103,12 @@ ngx_http_mp4_read_stbl_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_STBL_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stbl atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->stbl_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -2144,6 +2230,12 @@ ngx_http_mp4_read_stsd_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_STSD_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stsd atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->stsd_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -2212,6 +2304,13 @@ 
ngx_http_mp4_read_stts_atom(ngx_http_mp4 atom_end = atom_table + entries * sizeof(ngx_mp4_stts_entry_t); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STTS_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stts atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->time_to_sample_entries = entries; atom = &trak->stts_atom_buf; @@ -2480,6 +2579,13 @@ ngx_http_mp4_read_stss_atom(ngx_http_mp4 "sync sample entries:%uD", entries); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STSS_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stss atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->sync_samples_entries = entries; atom_table = atom_header + sizeof(ngx_http_mp4_stss_atom_t); @@ -2678,6 +2784,13 @@ ngx_http_mp4_read_ctts_atom(ngx_http_mp4 "composition offset entries:%uD", entries); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_CTTS_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 ctts atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->composition_offset_entries = entries; atom_table = atom_header + sizeof(ngx_mp4_ctts_atom_t); @@ -2881,6 +2994,13 @@ ngx_http_mp4_read_stsc_atom(ngx_http_mp4 atom_end = atom_table + entries * sizeof(ngx_mp4_stsc_entry_t); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STSC_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stsc atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->sample_to_chunk_entries = entries; atom = &trak->stsc_atom_buf; @@ -3213,6 +3333,13 @@ ngx_http_mp4_read_stsz_atom(ngx_http_mp4 "sample uniform size:%uD, entries:%uD", size, entries); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STSZ_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stsz atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->sample_sizes_entries = entries; atom_table = atom_header + sizeof(ngx_mp4_stsz_atom_t); @@ -3396,6 +3523,16 @@ ngx_http_mp4_read_stco_atom(ngx_http_mp4 atom_end = atom_table + entries * sizeof(uint32_t); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STCO_ATOM].buf + || trak->out[NGX_HTTP_MP4_CO64_ATOM].buf) + { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stco/co64 atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + trak->chunks = entries; atom = &trak->stco_atom_buf; @@ -3602,6 +3739,16 @@ ngx_http_mp4_read_co64_atom(ngx_http_mp4 atom_end = atom_table + entries * sizeof(uint64_t); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STCO_ATOM].buf + || trak->out[NGX_HTTP_MP4_CO64_ATOM].buf) + { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stco/co64 atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + trak->chunks = entries; atom = &trak->co64_atom_buf; From thresh at nginx.com Wed Oct 19 12:03:14 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 19 Oct 2022 12:03:14 +0000 Subject: [nginx] nginx-1.23.2-RELEASE Message-ID: details: https://hg.nginx.org/nginx/rev/aa901551a7eb branches: changeset: 8090:aa901551a7eb user: Maxim Dounin date: Wed Oct 19 10:56:20 2022 +0300 description: nginx-1.23.2-RELEASE diffstat: docs/xml/nginx/changes.xml | 114 +++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 114 insertions(+), 0 deletions(-) diffs (124 lines): diff -r 4032c1bdfa14 -r aa901551a7eb docs/xml/nginx/changes.xml --- 
a/docs/xml/nginx/changes.xml Wed Oct 19 10:53:17 2022 +0300 +++ b/docs/xml/nginx/changes.xml Wed Oct 19 10:56:20 2022 +0300 @@ -5,6 +5,120 @@ + + + + +обработка специально созданного mp4-файла модулем ngx_http_mp4_module +могла приводить к падению рабочего процесса, +отправке клиенту части содержимого памяти рабочего процесса, +а также потенциально могла иметь другие последствия +(CVE-2022-41741, CVE-2022-41742). + + +processing of a specially crafted mp4 file by the ngx_http_mp4_module +might cause a worker process crash, +worker process memory disclosure, +or might have potential other impact +(CVE-2022-41741, CVE-2022-41742). + + + + + +переменные "$proxy_protocol_tlv_...". + + +the "$proxy_protocol_tlv_..." variables. + + + + + +ключи шифрования TLS session tickets теперь автоматически меняются +при использовании разделяемой памяти в ssl_session_cache. + + +TLS session tickets encryption keys are now automatically rotated +when using shared memory in the "ssl_session_cache" directive. + + + + + +уровень логгирования ошибок SSL "bad record type" +понижен с уровня crit до info.
+Спасибо Murilo Andrade. +
+ +the logging level of the "bad record type" SSL errors +has been lowered from "crit" to "info".
+Thanks to Murilo Andrade. +
+
+ + + +теперь при использовании разделяемой памяти в ssl_session_cache +сообщения "could not allocate new session" +логгируются на уровне warn вместо alert +и не чаще одного раза в секунду. + + +now when using shared memory in the "ssl_session_cache" directive +the "could not allocate new session" errors +are logged at the "warn" level instead of "alert" +and not more often than once per second. + + + + + +nginx/Windows не собирался с OpenSSL 3.0.x. + + +nginx/Windows could not be built with OpenSSL 3.0.x. + + + + + +в логгировании ошибок протокола PROXY.
+Спасибо Сергею Брестеру. +
+ +in logging of the PROXY protocol errors.
+Thanks to Sergey Brester. +
+
+ + + +при использовании TLSv1.3 с OpenSSL +разделяемая память из ssl_session_cache расходовалась +в том числе на сессии, использующие TLS session tickets. + + +shared memory from the "ssl_session_cache" directive +was spent on sessions using TLS session tickets +when using TLSv1.3 with OpenSSL. + + + + + +таймаут, заданный с помощью директивы ssl_session_timeout, +не работал при использовании TLSv1.3 с OpenSSL или BoringSSL. + + +timeout specified with the "ssl_session_timeout" directive +did not work when using TLSv1.3 with OpenSSL or BoringSSL. + + + +
+ + From thresh at nginx.com Wed Oct 19 12:03:17 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 19 Oct 2022 12:03:17 +0000 Subject: [nginx] release-1.23.2 tag Message-ID: details: https://hg.nginx.org/nginx/rev/1ae25660c0c7 branches: changeset: 8091:1ae25660c0c7 user: Maxim Dounin date: Wed Oct 19 10:56:21 2022 +0300 description: release-1.23.2 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r aa901551a7eb -r 1ae25660c0c7 .hgtags --- a/.hgtags Wed Oct 19 10:56:20 2022 +0300 +++ b/.hgtags Wed Oct 19 10:56:21 2022 +0300 @@ -469,3 +469,4 @@ d986378168fd4d70e0121cabac274c560cca9bdf 714eb4b2c09e712fb2572a2164ce2bf67638ccac release-1.21.6 5da2c0902e8e2aa4534008a582a60c61c135960e release-1.23.0 a63d0a70afea96813ba6667997bc7d68b5863f0d release-1.23.1 +aa901551a7ebad1e8b0f8c11cb44e3424ba29707 release-1.23.2 From thresh at nginx.com Wed Oct 19 12:03:20 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 19 Oct 2022 12:03:20 +0000 Subject: [nginx] Version bump. Message-ID: details: https://hg.nginx.org/nginx/rev/2b08b48ecc23 branches: stable-1.22 changeset: 8092:2b08b48ecc23 user: Maxim Dounin date: Wed Oct 19 10:59:37 2022 +0300 description: Version bump. diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r 2d3ed138ce65 -r 2b08b48ecc23 src/core/nginx.h --- a/src/core/nginx.h Tue May 24 02:59:19 2022 +0300 +++ b/src/core/nginx.h Wed Oct 19 10:59:37 2022 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1022000 -#define NGINX_VERSION "1.22.0" +#define nginx_version 1022001 +#define NGINX_VERSION "1.22.1" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From thresh at nginx.com Wed Oct 19 12:03:23 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 19 Oct 2022 12:03:23 +0000 Subject: [nginx] Updated OpenSSL used for win32 builds. Message-ID: details: https://hg.nginx.org/nginx/rev/fc08fa6757e6 branches: stable-1.22 changeset: 8093:fc08fa6757e6 user: Maxim Dounin date: Tue Jun 21 17:09:34 2022 +0300 description: Updated OpenSSL used for win32 builds. diffstat: misc/GNUmakefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 2b08b48ecc23 -r fc08fa6757e6 misc/GNUmakefile --- a/misc/GNUmakefile Wed Oct 19 10:59:37 2022 +0300 +++ b/misc/GNUmakefile Tue Jun 21 17:09:34 2022 +0300 @@ -6,7 +6,7 @@ TEMP = tmp CC = cl OBJS = objs.msvc8 -OPENSSL = openssl-1.1.1o +OPENSSL = openssl-1.1.1p ZLIB = zlib-1.2.12 PCRE = pcre2-10.39 From thresh at nginx.com Wed Oct 19 12:03:26 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 19 Oct 2022 12:03:26 +0000 Subject: [nginx] Updated OpenSSL used for win32 builds. Message-ID: details: https://hg.nginx.org/nginx/rev/89a716b4fe0c branches: stable-1.22 changeset: 8094:89a716b4fe0c user: Maxim Dounin date: Tue Jul 19 17:03:30 2022 +0300 description: Updated OpenSSL used for win32 builds. diffstat: misc/GNUmakefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r fc08fa6757e6 -r 89a716b4fe0c misc/GNUmakefile --- a/misc/GNUmakefile Tue Jun 21 17:09:34 2022 +0300 +++ b/misc/GNUmakefile Tue Jul 19 17:03:30 2022 +0300 @@ -6,7 +6,7 @@ TEMP = tmp CC = cl OBJS = objs.msvc8 -OPENSSL = openssl-1.1.1p +OPENSSL = openssl-1.1.1q ZLIB = zlib-1.2.12 PCRE = pcre2-10.39 From thresh at nginx.com Wed Oct 19 12:03:29 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 19 Oct 2022 12:03:29 +0000 Subject: [nginx] Mp4: disabled duplicate atoms. 
Message-ID: details: https://hg.nginx.org/nginx/rev/adae1da17749 branches: stable-1.22 changeset: 8095:adae1da17749 user: Roman Arutyunyan date: Wed Oct 19 10:53:17 2022 +0300 description: Mp4: disabled duplicate atoms. Most atoms should not appear more than once in a container. Previously, this was not enforced by the module, which could result in worker process crash, memory corruption and disclosure. diffstat: src/http/modules/ngx_http_mp4_module.c | 147 +++++++++++++++++++++++++++++++++ 1 files changed, 147 insertions(+), 0 deletions(-) diffs (297 lines): diff -r 89a716b4fe0c -r adae1da17749 src/http/modules/ngx_http_mp4_module.c --- a/src/http/modules/ngx_http_mp4_module.c Tue Jul 19 17:03:30 2022 +0300 +++ b/src/http/modules/ngx_http_mp4_module.c Wed Oct 19 10:53:17 2022 +0300 @@ -1121,6 +1121,12 @@ ngx_http_mp4_read_ftyp_atom(ngx_http_mp4 return NGX_ERROR; } + if (mp4->ftyp_atom.buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 ftyp atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom_size = sizeof(ngx_mp4_atom_header_t) + (size_t) atom_data_size; ftyp_atom = ngx_palloc(mp4->request->pool, atom_size); @@ -1179,6 +1185,12 @@ ngx_http_mp4_read_moov_atom(ngx_http_mp4 return NGX_DECLINED; } + if (mp4->moov_atom.buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 moov atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + conf = ngx_http_get_module_loc_conf(mp4->request, ngx_http_mp4_module); if (atom_data_size > mp4->buffer_size) { @@ -1246,6 +1258,12 @@ ngx_http_mp4_read_mdat_atom(ngx_http_mp4 ngx_log_debug0(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, "mp4 mdat atom"); + if (mp4->mdat_atom.buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 mdat atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + data = &mp4->mdat_data_buf; data->file = &mp4->file; data->in_file = 1; @@ -1372,6 +1390,12 @@ ngx_http_mp4_read_mvhd_atom(ngx_http_mp4 ngx_log_debug0(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, "mp4 mvhd atom"); + if (mp4->mvhd_atom.buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 mvhd atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom_header = ngx_mp4_atom_header(mp4); mvhd_atom = (ngx_mp4_mvhd_atom_t *) atom_header; mvhd64_atom = (ngx_mp4_mvhd64_atom_t *) atom_header; @@ -1637,6 +1661,13 @@ ngx_http_mp4_read_tkhd_atom(ngx_http_mp4 atom_size = sizeof(ngx_mp4_atom_header_t) + (size_t) atom_data_size; trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_TKHD_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 tkhd atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->tkhd_size = atom_size; trak->movie_duration = duration; @@ -1676,6 +1707,12 @@ ngx_http_mp4_read_mdia_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_MDIA_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 mdia atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->mdia_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1799,6 +1836,13 @@ ngx_http_mp4_read_mdhd_atom(ngx_http_mp4 atom_size = sizeof(ngx_mp4_atom_header_t) + (size_t) atom_data_size; trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_MDHD_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 mdhd atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->mdhd_size = atom_size; trak->timescale = timescale; trak->duration = duration; @@ -1862,6 +1906,12 @@ 
ngx_http_mp4_read_hdlr_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_HDLR_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 hdlr atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->hdlr_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1890,6 +1940,12 @@ ngx_http_mp4_read_minf_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_MINF_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 minf atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->minf_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1933,6 +1989,15 @@ ngx_http_mp4_read_vmhd_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_VMHD_ATOM].buf + || trak->out[NGX_HTTP_MP4_SMHD_ATOM].buf) + { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 vmhd/smhd atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->vmhd_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1964,6 +2029,15 @@ ngx_http_mp4_read_smhd_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_VMHD_ATOM].buf + || trak->out[NGX_HTTP_MP4_SMHD_ATOM].buf) + { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 vmhd/smhd atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->smhd_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -1995,6 +2069,12 @@ ngx_http_mp4_read_dinf_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_DINF_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 dinf atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->dinf_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -2023,6 +2103,12 @@ ngx_http_mp4_read_stbl_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_STBL_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stbl atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->stbl_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -2144,6 +2230,12 @@ ngx_http_mp4_read_stsd_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); + if (trak->out[NGX_HTTP_MP4_STSD_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stsd atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + atom = &trak->stsd_atom_buf; atom->temporary = 1; atom->pos = atom_header; @@ -2212,6 +2304,13 @@ ngx_http_mp4_read_stts_atom(ngx_http_mp4 atom_end = atom_table + entries * sizeof(ngx_mp4_stts_entry_t); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STTS_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stts atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->time_to_sample_entries = entries; atom = &trak->stts_atom_buf; @@ -2480,6 +2579,13 @@ ngx_http_mp4_read_stss_atom(ngx_http_mp4 "sync sample entries:%uD", entries); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STSS_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stss atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->sync_samples_entries = entries; atom_table = atom_header + sizeof(ngx_http_mp4_stss_atom_t); @@ -2678,6 +2784,13 @@ ngx_http_mp4_read_ctts_atom(ngx_http_mp4 "composition offset entries:%uD", entries); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_CTTS_ATOM].buf) { + 
ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 ctts atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->composition_offset_entries = entries; atom_table = atom_header + sizeof(ngx_mp4_ctts_atom_t); @@ -2881,6 +2994,13 @@ ngx_http_mp4_read_stsc_atom(ngx_http_mp4 atom_end = atom_table + entries * sizeof(ngx_mp4_stsc_entry_t); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STSC_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stsc atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->sample_to_chunk_entries = entries; atom = &trak->stsc_atom_buf; @@ -3213,6 +3333,13 @@ ngx_http_mp4_read_stsz_atom(ngx_http_mp4 "sample uniform size:%uD, entries:%uD", size, entries); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STSZ_ATOM].buf) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stsz atom in \"%s\"", mp4->file.name.data); + return NGX_ERROR; + } + trak->sample_sizes_entries = entries; atom_table = atom_header + sizeof(ngx_mp4_stsz_atom_t); @@ -3396,6 +3523,16 @@ ngx_http_mp4_read_stco_atom(ngx_http_mp4 atom_end = atom_table + entries * sizeof(uint32_t); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STCO_ATOM].buf + || trak->out[NGX_HTTP_MP4_CO64_ATOM].buf) + { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stco/co64 atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + trak->chunks = entries; atom = &trak->stco_atom_buf; @@ -3602,6 +3739,16 @@ ngx_http_mp4_read_co64_atom(ngx_http_mp4 atom_end = atom_table + entries * sizeof(uint64_t); trak = ngx_mp4_last_trak(mp4); + + if (trak->out[NGX_HTTP_MP4_STCO_ATOM].buf + || trak->out[NGX_HTTP_MP4_CO64_ATOM].buf) + { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "duplicate mp4 stco/co64 atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + trak->chunks = entries; atom = &trak->co64_atom_buf; From thresh at nginx.com Wed Oct 19 12:03:35 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 19 Oct 2022 12:03:35 +0000 Subject: [nginx] release-1.22.1 tag Message-ID: details: https://hg.nginx.org/nginx/rev/6b81c065e2d3 branches: stable-1.22 changeset: 8097:6b81c065e2d3 user: Maxim Dounin date: Wed Oct 19 11:02:20 2022 +0300 description: release-1.22.1 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r af7a3fb7558f -r 6b81c065e2d3 .hgtags --- a/.hgtags Wed Oct 19 11:02:20 2022 +0300 +++ b/.hgtags Wed Oct 19 11:02:20 2022 +0300 @@ -468,3 +468,4 @@ 39be8a682c58308d9399cddd57e37f9fdb7bdf3e d986378168fd4d70e0121cabac274c560cca9bdf release-1.21.5 714eb4b2c09e712fb2572a2164ce2bf67638ccac release-1.21.6 f669c9c2a617d80daf753e012265ab5290df0d9b release-1.22.0 +af7a3fb7558f28b3e74631f460995a09d529578a release-1.22.1 From thresh at nginx.com Wed Oct 19 12:03:32 2022 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 19 Oct 2022 12:03:32 +0000 Subject: [nginx] nginx-1.22.1-RELEASE Message-ID: details: https://hg.nginx.org/nginx/rev/af7a3fb7558f branches: stable-1.22 changeset: 8096:af7a3fb7558f user: Maxim Dounin date: Wed Oct 19 11:02:20 2022 +0300 description: nginx-1.22.1-RELEASE diffstat: docs/xml/nginx/changes.xml | 22 ++++++++++++++++++++++ 1 files changed, 22 insertions(+), 0 deletions(-) diffs (32 lines): diff -r adae1da17749 -r af7a3fb7558f docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml Wed Oct 19 10:53:17 2022 +0300 +++ b/docs/xml/nginx/changes.xml Wed Oct 19 11:02:20 2022 +0300 @@ -5,6 +5,28 @@ + + + + 
+обработка специально созданного mp4-файла модулем ngx_http_mp4_module +могла приводить к падению рабочего процесса, +отправке клиенту части содержимого памяти рабочего процесса, +а также потенциально могла иметь другие последствия +(CVE-2022-41741, CVE-2022-41742). + + +processing of a specially crafted mp4 file by the ngx_http_mp4_module +might cause a worker process crash, +worker process memory disclosure, +or might have potential other impact +(CVE-2022-41741, CVE-2022-41742). + + + + + + From pluknet at nginx.com Thu Oct 20 11:34:44 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 20 Oct 2022 15:34:44 +0400 Subject: [PATCH 07 of 10] HTTP/3: unified hq code with regular HTTP/3 code In-Reply-To: <1dd6fabfdcb5b52af495.1662627994@arut-laptop> References: <1dd6fabfdcb5b52af495.1662627994@arut-laptop> Message-ID: <20221020113444.nsgt4fhc5xqfqhk3@Y9MQ9X2QVV> On Thu, Sep 08, 2022 at 01:06:34PM +0400, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1662626426 -14400 > # Thu Sep 08 12:40:26 2022 +0400 > # Branch quic > # Node ID 1dd6fabfdcb5b52af495f9d8fc00f64ae36a537c > # Parent 28437cb91bd8624b30a4c841852504110b0f3f7d > HTTP/3: unified hq code with regular HTTP/3 code. > > The change removes hq-specific request handler. Now hq requests are handled > by the HTTP/3 request handler. This brings missing keepalive timeout feature > to hq. As discussed in private, this doesn't introduce keepalive timeout for hq, as it's installed though HTTP/3-specific ngx_http_v3_wait_request_handler(), while hq uses ngx_http_wait_request_handler(). Though, it appears later by other means while reworking code for init callback. > > diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c > --- a/src/http/v3/ngx_http_v3.c > +++ b/src/http/v3/ngx_http_v3.c > @@ -17,10 +17,13 @@ static void ngx_http_v3_cleanup_session( > ngx_int_t > ngx_http_v3_init_session(ngx_connection_t *c) > { > - ngx_connection_t *pc; > - ngx_pool_cleanup_t *cln; > - ngx_http_connection_t *hc; > - ngx_http_v3_session_t *h3c; > + ngx_connection_t *pc; > + ngx_pool_cleanup_t *cln; > + ngx_http_connection_t *hc; > + ngx_http_v3_session_t *h3c; > +#if (NGX_HTTP_V3_HQ) > + ngx_http_v3_srv_conf_t *h3scf; > +#endif > > pc = c->quic->parent; > hc = pc->data; > @@ -39,6 +42,13 @@ ngx_http_v3_init_session(ngx_connection_ > h3c->max_push_id = (uint64_t) -1; > h3c->goaway_push_id = (uint64_t) -1; > > +#if (NGX_HTTP_V3_HQ) > + h3scf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_v3_module); > + if (h3scf->hq) { > + h3c->hq = 1; > + } > +#endif > + > ngx_queue_init(&h3c->blocked); > ngx_queue_init(&h3c->pushing); > > @@ -61,6 +71,12 @@ ngx_http_v3_init_session(ngx_connection_ > > hc->v3_session = h3c; > > +#if (NGX_HTTP_V3_HQ) > + if (h3c->hq) { > + return NGX_OK; > + } > +#endif > + > return ngx_http_v3_send_settings(c); > > failed: > diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h > --- a/src/http/v3/ngx_http_v3.h > +++ b/src/http/v3/ngx_http_v3.h > @@ -145,7 +145,10 @@ struct ngx_http_v3_session_s { > off_t total_bytes; > off_t payload_bytes; > > - ngx_uint_t goaway; /* unsigned goaway:1; */ > + unsigned goaway:1; > +#if (NGX_HTTP_V3_HQ) > + unsigned hq:1; > +#endif > > ngx_connection_t *known_streams[NGX_HTTP_V3_MAX_KNOWN_STREAM]; > }; > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > --- a/src/http/v3/ngx_http_v3_request.c > +++ b/src/http/v3/ngx_http_v3_request.c > @@ -10,9 +10,6 @@ > #include > > > -#if (NGX_HTTP_V3_HQ) > -static 
void ngx_http_v3_init_hq_stream(ngx_connection_t *c); > -#endif > static void ngx_http_v3_init_request_stream(ngx_connection_t *c); > static void ngx_http_v3_wait_request_handler(ngx_event_t *rev); > static void ngx_http_v3_cleanup_request(void *data); > @@ -86,13 +83,6 @@ ngx_http_v3_init(ngx_connection_t *c) > ngx_set_connection_log(c, clcf->error_log); > } > > -#if (NGX_HTTP_V3_HQ) > - if (h3scf->hq) { > - ngx_http_v3_init_hq_stream(c); > - return; > - } > -#endif > - > if (ngx_http_v3_init_session(c) != NGX_OK) { > ngx_http_close_connection(c); > return; > @@ -107,83 +97,12 @@ ngx_http_v3_init(ngx_connection_t *c) > } > > > -#if (NGX_HTTP_V3_HQ) > - > -static void > -ngx_http_v3_init_hq_stream(ngx_connection_t *c) > -{ > - uint64_t n; > - ngx_event_t *rev; > - ngx_http_connection_t *hc; > - ngx_http_core_loc_conf_t *clcf; > - ngx_http_core_srv_conf_t *cscf; > - > - ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 init hq stream"); > - > -#if (NGX_STAT_STUB) > - (void) ngx_atomic_fetch_add(ngx_stat_active, 1); > -#endif > - > - hc = c->data; > - > - /* Use HTTP/3 General Protocol Error Code 0x101 for finalization */ > - > - if (c->quic->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) { > - ngx_quic_finalize_connection(c->quic->parent, > - NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR, > - "unexpected uni stream"); > - ngx_http_close_connection(c); > - return; > - } > - > - clcf = ngx_http_get_module_loc_conf(hc->conf_ctx, ngx_http_core_module); > - > - n = c->quic->id >> 2; > - > - if (n >= clcf->keepalive_requests) { > - ngx_quic_finalize_connection(c->quic->parent, > - NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR, > - "reached maximum number of requests"); > - ngx_http_close_connection(c); > - return; > - } > - > - if (ngx_current_msec - c->quic->parent->start_time > - > clcf->keepalive_time) > - { > - ngx_quic_finalize_connection(c->quic->parent, > - NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR, > - "reached maximum time for requests"); > - ngx_http_close_connection(c); > - return; > - } > - > - rev = c->read; > - > - if (rev->ready) { > - rev->handler(rev); > - return; > - } > - > - cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); > - > - ngx_add_timer(rev, cscf->client_header_timeout); > - ngx_reusable_connection(c, 1); > - > - if (ngx_handle_read_event(rev, 0) != NGX_OK) { > - ngx_http_close_connection(c); > - return; > - } > -} > - > -#endif > - > - > static void > ngx_http_v3_init_request_stream(ngx_connection_t *c) > { > uint64_t n; > ngx_event_t *rev; > + ngx_connection_t *pc; > ngx_http_connection_t *hc; > ngx_http_v3_session_t *h3c; > ngx_http_core_loc_conf_t *clcf; > @@ -216,15 +135,21 @@ ngx_http_v3_init_request_stream(ngx_conn > return; > } > > + pc = c->quic->parent; > + > if (n + 1 == clcf->keepalive_requests > - || ngx_current_msec - c->quic->parent->start_time > - > clcf->keepalive_time) > + || ngx_current_msec - pc->start_time > clcf->keepalive_time) > { > h3c->goaway = 1; > > - if (ngx_http_v3_send_goaway(c, (n + 1) << 2) != NGX_OK) { > - ngx_http_close_connection(c); > - return; > +#if (NGX_HTTP_V3_HQ) > + if (!h3c->hq) > +#endif > + { > + if (ngx_http_v3_send_goaway(c, (n + 1) << 2) != NGX_OK) { > + ngx_http_close_connection(c); > + return; > + } > } > > ngx_http_v3_shutdown_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, > @@ -232,8 +157,14 @@ ngx_http_v3_init_request_stream(ngx_conn > } > > rev = c->read; > - rev->handler = ngx_http_v3_wait_request_handler; > - c->write->handler = ngx_http_empty_handler; > + > +#if (NGX_HTTP_V3_HQ) > + if (!h3c->hq) > +#endif > + { > + 
rev->handler = ngx_http_v3_wait_request_handler; > + c->write->handler = ngx_http_empty_handler; > + } > > if (rev->ready) { > rev->handler(rev); > @@ -261,8 +192,8 @@ ngx_http_v3_wait_request_handler(ngx_eve > ngx_connection_t *c; > ngx_pool_cleanup_t *cln; > ngx_http_request_t *r; > + ngx_http_v3_session_t *h3c; > ngx_http_connection_t *hc; > - ngx_http_v3_session_t *h3c; > ngx_http_core_srv_conf_t *cscf; > > c = rev->data; > @@ -401,13 +332,10 @@ ngx_http_v3_reset_connection(ngx_connect > > h3scf = ngx_http_v3_get_module_srv_conf(c, ngx_http_v3_module); > > + if (h3scf->max_table_capacity > 0 && !c->read->eof > #if (NGX_HTTP_V3_HQ) > - if (h3scf->hq) { > - return; > - } > + && !h3scf->hq > #endif > - > - if (h3scf->max_table_capacity > 0 && !c->read->eof > && (c->quic->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) == 0) > { > (void) ngx_http_v3_send_cancel_stream(c, c->quic->id); > diff --git a/src/http/v3/ngx_http_v3_uni.c b/src/http/v3/ngx_http_v3_uni.c > --- a/src/http/v3/ngx_http_v3_uni.c > +++ b/src/http/v3/ngx_http_v3_uni.c > @@ -37,8 +37,23 @@ void > ngx_http_v3_init_uni_stream(ngx_connection_t *c) > { > uint64_t n; > +#if (NGX_HTTP_V3_HQ) > + ngx_http_v3_session_t *h3c; > +#endif > ngx_http_v3_uni_stream_t *us; > > +#if (NGX_HTTP_V3_HQ) > + h3c = ngx_http_v3_get_session(c); > + if (h3c->hq) { > + ngx_http_v3_finalize_connection(c, > + NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, > + "uni stream in hq mode"); > + c->data = NULL; > + ngx_http_v3_close_uni_stream(c); > + return; > + } > +#endif > + > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 init uni stream"); > > n = c->quic->id >> 2; > > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org From pluknet at nginx.com Thu Oct 20 11:50:15 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 20 Oct 2022 15:50:15 +0400 Subject: [PATCH 08 of 10] QUIC: idle mode for main connection In-Reply-To: References: Message-ID: <20221020115015.azqjb5wd7fisgm2f@Y9MQ9X2QVV> On Thu, Sep 08, 2022 at 01:06:35PM +0400, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1662627133 -14400 > # Thu Sep 08 12:52:13 2022 +0400 > # Branch quic > # Node ID e0634a484d9a2d82d43f565d64a0a22e989ac1cb > # Parent 1dd6fabfdcb5b52af495f9d8fc00f64ae36a537c > QUIC: idle mode for main connection. > > Now main QUIC connection for HTTP/3 always has c->idle flag set. This allows > the connection to receive worker shutdown notification. It is passed to > application level via a new conf->shutdown() callback. > > The HTTP/3 shutdown callback sends GOAWAY to client and gracefully shuts down > the QUIC connection. > > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > --- a/src/event/quic/ngx_event_quic.c > +++ b/src/event/quic/ngx_event_quic.c > @@ -341,6 +341,7 @@ ngx_quic_new_connection(ngx_connection_t > return NULL; > } > > + c->idle = 1; > ngx_reusable_connection(c, 1); > > ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, > @@ -420,9 +421,9 @@ ngx_quic_input_handler(ngx_event_t *rev) > } > > if (c->close) { > - qc->error = NGX_QUIC_ERR_NO_ERROR; > - qc->error_reason = "graceful shutdown"; > - ngx_quic_close_connection(c, NGX_ERROR); > + if (qc->conf->shutdown) { As previously discussed in private, this will need an additional check that we are not yet in qc->closing. 
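
For concreteness, here is a minimal standalone model of the guard being asked for: run the application shutdown callback at most once, and only if a close is not already in progress. The types and field names are stand-ins for illustration, not the actual nginx structures.

#include <stdio.h>

typedef struct conn_s  conn_t;

typedef void (*shutdown_pt)(conn_t *c);

struct conn_s {
    unsigned     close:1;      /* graceful worker shutdown requested */
    unsigned     closing:1;    /* connection close already in progress */
    shutdown_pt  shutdown;     /* optional application callback */
};

static void
app_shutdown(conn_t *c)
{
    c->closing = 1;
    printf("send GOAWAY, close gracefully\n");
}

static void
input_handler(conn_t *c)
{
    if (c->close) {
        c->close = 0;          /* keep handling later packets normally */

        if (!c->closing && c->shutdown) {
            c->shutdown(c);
        }

        return;
    }

    printf("process datagram as usual\n");
}

int
main(void)
{
    conn_t  c = { .close = 1, .closing = 0, .shutdown = app_shutdown };

    input_handler(&c);    /* shutdown callback fires once */

    c.close = 1;
    input_handler(&c);    /* already closing: callback is skipped */

    return 0;
}
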
> + qc->conf->shutdown(c); > + } > return; > } > > diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h > --- a/src/event/quic/ngx_event_quic.h > +++ b/src/event/quic/ngx_event_quic.h > @@ -28,6 +28,9 @@ > #define NGX_QUIC_STREAM_UNIDIRECTIONAL 0x02 > > > +typedef void (*ngx_quic_shutdown_pt)(ngx_connection_t *c); > + > + > typedef enum { > NGX_QUIC_STREAM_SEND_READY = 0, > NGX_QUIC_STREAM_SEND_SEND, > @@ -74,6 +77,8 @@ typedef struct { > ngx_int_t stream_reject_code_uni; > ngx_int_t stream_reject_code_bidi; > > + ngx_quic_shutdown_pt shutdown; > + > u_char av_token_key[NGX_QUIC_AV_KEY_LEN]; > u_char sr_token_key[NGX_QUIC_SR_KEY_LEN]; > } ngx_quic_conf_t; > diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h > --- a/src/http/v3/ngx_http_v3.h > +++ b/src/http/v3/ngx_http_v3.h > @@ -141,6 +141,7 @@ struct ngx_http_v3_session_s { > uint64_t next_push_id; > uint64_t max_push_id; > uint64_t goaway_push_id; > + uint64_t next_request_id; > > off_t total_bytes; > off_t payload_bytes; > @@ -158,6 +159,7 @@ void ngx_http_v3_init(ngx_connection_t * > void ngx_http_v3_reset_connection(ngx_connection_t *c); > ngx_int_t ngx_http_v3_init_session(ngx_connection_t *c); > ngx_int_t ngx_http_v3_check_flood(ngx_connection_t *c); > +void ngx_http_v3_shutdown(ngx_connection_t *c); > > ngx_int_t ngx_http_v3_read_request_body(ngx_http_request_t *r); > ngx_int_t ngx_http_v3_read_unbuffered_request_body(ngx_http_request_t *r); > diff --git a/src/http/v3/ngx_http_v3_module.c b/src/http/v3/ngx_http_v3_module.c > --- a/src/http/v3/ngx_http_v3_module.c > +++ b/src/http/v3/ngx_http_v3_module.c > @@ -249,6 +249,8 @@ ngx_http_v3_create_srv_conf(ngx_conf_t * > h3scf->quic.stream_reject_code_bidi = NGX_HTTP_V3_ERR_REQUEST_REJECTED; > h3scf->quic.active_connection_id_limit = NGX_CONF_UNSET_UINT; > > + h3scf->quic.shutdown = ngx_http_v3_shutdown; > + > return h3scf; > } > > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > --- a/src/http/v3/ngx_http_v3_request.c > +++ b/src/http/v3/ngx_http_v3_request.c > @@ -97,6 +97,37 @@ ngx_http_v3_init(ngx_connection_t *c) > } > > > +void > +ngx_http_v3_shutdown(ngx_connection_t *c) > +{ > + ngx_http_v3_session_t *h3c; extra indent > + > + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 shutdown"); > + > + h3c = ngx_http_v3_get_session(c); > + > + if (h3c == NULL) { > + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, > + "connection shutdown"); > + return; > + } > + > + if (!h3c->goaway) { > + h3c->goaway = 1; > + > +#if (NGX_HTTP_V3_HQ) > + if (!h3c->hq) > +#endif > + { > + (void) ngx_http_v3_send_goaway(c, h3c->next_request_id); > + } > + > + ngx_http_v3_shutdown_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, > + "connection shutdown"); > + } Note that this callback is used to be called from a read event as part of graceful shutdown. With ngx_quic_finalize_connection() remade in patch #4 (reusable mode) to defer closing QUIC connection to a posted event, this call now results in a posted event, which no one can fulfill, hence no further action until quic idle timeout fires. It could be fixed by executing known posted events after shutdown callback or more globally - as part of graceful shutdown itself. 
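
The problem just described can be seen in a simplified standalone model: when "finalize" merely posts an event, the close only happens if something later drains the posted-event queue. The toy queue below is a stand-in for illustration, not nginx's actual event machinery.

#include <stdio.h>
#include <stddef.h>

typedef struct event_s  event_t;

struct event_s {
    void      (*handler)(event_t *ev);
    event_t    *next;
    unsigned    posted;
};

static event_t  *posted_events;

static void
post_event(event_t *ev)
{
    if (!ev->posted) {
        ev->posted = 1;
        ev->next = posted_events;
        posted_events = ev;
    }
}

static void
process_posted_events(void)
{
    event_t  *ev;

    while (posted_events) {
        ev = posted_events;
        posted_events = ev->next;
        ev->posted = 0;
        ev->handler(ev);
    }
}

static void
close_handler(event_t *ev)
{
    (void) ev;
    printf("connection actually closed\n");
}

static event_t  close_event = { close_handler, NULL, 0 };

static void
finalize_connection(void)
{
    /* deferred: only takes effect once posted events are processed */
    post_event(&close_event);
}

int
main(void)
{
    finalize_connection();        /* e.g. called from a shutdown callback */

    /* without a call like this, the close handler never runs and the
       connection lingers until some timeout expires */
    process_posted_events();

    return 0;
}
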
> +} > + > + > static void > ngx_http_v3_init_request_stream(ngx_connection_t *c) > { > @@ -137,6 +168,8 @@ ngx_http_v3_init_request_stream(ngx_conn > > pc = c->quic->parent; > > + h3c->next_request_id = c->quic->id + 0x04; > + > if (n + 1 == clcf->keepalive_requests > || ngx_current_msec - pc->start_time > clcf->keepalive_time) > { > @@ -146,7 +179,7 @@ ngx_http_v3_init_request_stream(ngx_conn > if (!h3c->hq) > #endif > { > - if (ngx_http_v3_send_goaway(c, (n + 1) << 2) != NGX_OK) { > + if (ngx_http_v3_send_goaway(c, h3c->next_request_id) != NGX_OK) { > ngx_http_close_connection(c); > return; > } > > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org From pluknet at nginx.com Thu Oct 20 11:51:54 2022 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 20 Oct 2022 15:51:54 +0400 Subject: [PATCH 10 of 10] QUIC: application init() callback In-Reply-To: <8e58a27b320807aae001.1662627997@arut-laptop> References: <8e58a27b320807aae001.1662627997@arut-laptop> Message-ID: <20221020115154.ygd4gzyuykvrgqsy@Y9MQ9X2QVV> On Thu, Sep 08, 2022 at 01:06:37PM +0400, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1662627905 -14400 > # Thu Sep 08 13:05:05 2022 +0400 > # Branch quic > # Node ID 8e58a27b320807aae00194b82e2c997287e3ad42 > # Parent 861d6897151fe6773898db6cfdb36f56403302c5 > QUIC: application init() callback. > > It's called after handshake completion or prior to the first early data stream > creation. The callback should initialize application-level data before > creating streams. > > HTTP/3 callback implementation sets keepalive timer and sends SETTINGS. > > Also, this allows to limit max handshake time in ngx_http_v3_init_stream(). Also brings header timeout (to limit handshake time) and keepalive timeout in hq mode. 
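
The general shape of such an init() hook can be sketched in a standalone model: an optional callback that runs exactly once, before the first stream is handled, so session-wide state (timers, SETTINGS and the like) can be set up there. The types below are stand-ins for illustration, not the actual nginx QUIC code.

#include <stdio.h>

typedef struct session_s  session_t;

typedef int (*init_pt)(session_t *s);

struct session_s {
    init_pt   init;          /* optional application callback */
    int       initialized;
};

static int
app_init(session_t *s)
{
    (void) s;
    printf("arm keepalive timer, send SETTINGS\n");
    return 0;
}

static int
handle_stream(session_t *s, int id)
{
    if (!s->initialized) {
        if (s->init && s->init(s) != 0) {
            return -1;
        }

        s->initialized = 1;
    }

    printf("stream %d initialized\n", id);
    return 0;
}

int
main(void)
{
    session_t  s = { app_init, 0 };

    handle_stream(&s, 0);    /* init callback runs here, once */
    handle_stream(&s, 4);    /* not again */

    return 0;
}
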
> > diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h > --- a/src/event/quic/ngx_event_quic.h > +++ b/src/event/quic/ngx_event_quic.h > @@ -28,6 +28,7 @@ > #define NGX_QUIC_STREAM_UNIDIRECTIONAL 0x02 > > > +typedef ngx_int_t (*ngx_quic_init_pt)(ngx_connection_t *c); > typedef void (*ngx_quic_shutdown_pt)(ngx_connection_t *c); > > > @@ -77,6 +78,7 @@ typedef struct { > ngx_int_t stream_reject_code_uni; > ngx_int_t stream_reject_code_bidi; > > + ngx_quic_init_pt init; > ngx_quic_shutdown_pt shutdown; > > u_char av_token_key[NGX_QUIC_AV_KEY_LEN]; > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > --- a/src/event/quic/ngx_event_quic_streams.c > +++ b/src/event/quic/ngx_event_quic_streams.c > @@ -21,6 +21,7 @@ static ngx_quic_stream_t *ngx_quic_get_s > static ngx_int_t ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id); > static void ngx_quic_init_stream_handler(ngx_event_t *ev); > static void ngx_quic_init_streams_handler(ngx_connection_t *c); > +static ngx_int_t ngx_quic_do_init_streams(ngx_connection_t *c); > static ngx_quic_stream_t *ngx_quic_create_stream(ngx_connection_t *c, > uint64_t id); > static void ngx_quic_empty_handler(ngx_event_t *ev); > @@ -555,15 +556,22 @@ ngx_quic_init_streams(ngx_connection_t * > return NGX_OK; > } > > - ngx_quic_init_streams_handler(c); > - > - return NGX_OK; > + return ngx_quic_do_init_streams(c); > } > > > static void > ngx_quic_init_streams_handler(ngx_connection_t *c) > { > + if (ngx_quic_do_init_streams(c) != NGX_OK) { > + ngx_quic_close_connection(c, NGX_ERROR); > + } > +} > + > + > +static ngx_int_t > +ngx_quic_do_init_streams(ngx_connection_t *c) > +{ > ngx_queue_t *q; > ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > @@ -572,6 +580,12 @@ ngx_quic_init_streams_handler(ngx_connec > > qc = ngx_quic_get_connection(c); > > + if (qc->conf->init) { > + if (qc->conf->init(c) != NGX_OK) { > + return NGX_ERROR; > + } > + } > + > for (q = ngx_queue_head(&qc->streams.uninitialized); > q != ngx_queue_sentinel(&qc->streams.uninitialized); > q = ngx_queue_next(q)) > @@ -581,6 +595,8 @@ ngx_quic_init_streams_handler(ngx_connec > } > > qc->streams.initialized = 1; > + > + return NGX_OK; > } > > > diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c > --- a/src/http/v3/ngx_http_v3.c > +++ b/src/http/v3/ngx_http_v3.c > @@ -17,7 +17,6 @@ static void ngx_http_v3_cleanup_session( > ngx_int_t > ngx_http_v3_init_session(ngx_connection_t *c) > { > - ngx_connection_t *pc; > ngx_pool_cleanup_t *cln; > ngx_http_connection_t *hc; > ngx_http_v3_session_t *h3c; > @@ -25,16 +24,11 @@ ngx_http_v3_init_session(ngx_connection_ > ngx_http_v3_srv_conf_t *h3scf; > #endif > > - pc = c->quic->parent; > - hc = pc->data; > - > - if (hc->v3_session) { > - return NGX_OK; > - } > + hc = c->data; > > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 init session"); > > - h3c = ngx_pcalloc(pc->pool, sizeof(ngx_http_v3_session_t)); > + h3c = ngx_pcalloc(c->pool, sizeof(ngx_http_v3_session_t)); > if (h3c == NULL) { > goto failed; > } > @@ -52,16 +46,16 @@ ngx_http_v3_init_session(ngx_connection_ > ngx_queue_init(&h3c->blocked); > ngx_queue_init(&h3c->pushing); > > - h3c->keepalive.log = pc->log; > - h3c->keepalive.data = pc; > + h3c->keepalive.log = c->log; > + h3c->keepalive.data = c; > h3c->keepalive.handler = ngx_http_v3_keepalive_handler; > h3c->keepalive.cancelable = 1; > > - h3c->table.send_insert_count.log = pc->log; > - h3c->table.send_insert_count.data = pc; > + 
h3c->table.send_insert_count.log = c->log; > + h3c->table.send_insert_count.data = c; > h3c->table.send_insert_count.handler = ngx_http_v3_inc_insert_count_handler; > > - cln = ngx_pool_cleanup_add(pc->pool, 0); > + cln = ngx_pool_cleanup_add(c->pool, 0); > if (cln == NULL) { > goto failed; > } > @@ -71,13 +65,7 @@ ngx_http_v3_init_session(ngx_connection_ > > hc->v3_session = h3c; > > -#if (NGX_HTTP_V3_HQ) > - if (h3c->hq) { > - return NGX_OK; > - } > -#endif > - > - return ngx_http_v3_send_settings(c); > + return NGX_OK; > > failed: > > diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h > --- a/src/http/v3/ngx_http_v3.h > +++ b/src/http/v3/ngx_http_v3.h > @@ -159,6 +159,7 @@ void ngx_http_v3_init_stream(ngx_connect > void ngx_http_v3_reset_stream(ngx_connection_t *c); > ngx_int_t ngx_http_v3_init_session(ngx_connection_t *c); > ngx_int_t ngx_http_v3_check_flood(ngx_connection_t *c); > +ngx_int_t ngx_http_v3_init(ngx_connection_t *c); > void ngx_http_v3_shutdown(ngx_connection_t *c); > > ngx_int_t ngx_http_v3_read_request_body(ngx_http_request_t *r); > diff --git a/src/http/v3/ngx_http_v3_module.c b/src/http/v3/ngx_http_v3_module.c > --- a/src/http/v3/ngx_http_v3_module.c > +++ b/src/http/v3/ngx_http_v3_module.c > @@ -249,6 +249,7 @@ ngx_http_v3_create_srv_conf(ngx_conf_t * > h3scf->quic.stream_reject_code_bidi = NGX_HTTP_V3_ERR_REQUEST_REJECTED; > h3scf->quic.active_connection_id_limit = NGX_CONF_UNSET_UINT; > > + h3scf->quic.init = ngx_http_v3_init; > h3scf->quic.shutdown = ngx_http_v3_shutdown; > > return h3scf; > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > --- a/src/http/v3/ngx_http_v3_request.c > +++ b/src/http/v3/ngx_http_v3_request.c > @@ -57,18 +57,29 @@ static const struct { > void > ngx_http_v3_init_stream(ngx_connection_t *c) > { > + ngx_http_v3_session_t *h3c; > ngx_http_connection_t *hc, *phc; > ngx_http_v3_srv_conf_t *h3scf; > ngx_http_core_loc_conf_t *clcf; > + ngx_http_core_srv_conf_t *cscf; > > hc = c->data; > > hc->ssl = 1; > > clcf = ngx_http_get_module_loc_conf(hc->conf_ctx, ngx_http_core_module); > + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); > h3scf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_v3_module); > > if (c->quic == NULL) { > + if (ngx_http_v3_init_session(c) != NGX_OK) { > + ngx_http_close_connection(c); > + return; > + } > + > + h3c = hc->v3_session; > + ngx_add_timer(&h3c->keepalive, cscf->client_header_timeout); > + > h3scf->quic.timeout = clcf->keepalive_timeout; > ngx_quic_run(c, &h3scf->quic); > return; > @@ -83,11 +94,6 @@ ngx_http_v3_init_stream(ngx_connection_t > ngx_set_connection_log(c, clcf->error_log); > } > > - if (ngx_http_v3_init_session(c) != NGX_OK) { > - ngx_http_close_connection(c); > - return; > - } > - > if (c->quic->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) { > ngx_http_v3_init_uni_stream(c); > > @@ -97,6 +103,28 @@ ngx_http_v3_init_stream(ngx_connection_t > } > > > +ngx_int_t > +ngx_http_v3_init(ngx_connection_t *c) > +{ > + ngx_http_v3_session_t *h3c; > + ngx_http_core_loc_conf_t *clcf; > + > + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 init"); > + > + h3c = ngx_http_v3_get_session(c); > + clcf = ngx_http_v3_get_module_loc_conf(c, ngx_http_core_module); > + ngx_add_timer(&h3c->keepalive, clcf->keepalive_timeout); > + > +#if (NGX_HTTP_V3_HQ) > + if (h3c->hq) { > + return NGX_OK; > + } > +#endif > + > + return ngx_http_v3_send_settings(c); > +} > + > + > void > ngx_http_v3_shutdown(ngx_connection_t *c) > { > > 
_______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org From arut at nginx.com Thu Oct 20 13:54:21 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 20 Oct 2022 17:54:21 +0400 Subject: [PATCH 07 of 10] HTTP/3: unified hq code with regular HTTP/3 code In-Reply-To: <20221020113444.nsgt4fhc5xqfqhk3@Y9MQ9X2QVV> References: <1dd6fabfdcb5b52af495.1662627994@arut-laptop> <20221020113444.nsgt4fhc5xqfqhk3@Y9MQ9X2QVV> Message-ID: <20221020135421.of6fo3hm43rqvoch@N00W24XTQX> On Thu, Oct 20, 2022 at 03:34:44PM +0400, Sergey Kandaurov wrote: > On Thu, Sep 08, 2022 at 01:06:34PM +0400, Roman Arutyunyan wrote: > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1662626426 -14400 > > # Thu Sep 08 12:40:26 2022 +0400 > > # Branch quic > > # Node ID 1dd6fabfdcb5b52af495f9d8fc00f64ae36a537c > > # Parent 28437cb91bd8624b30a4c841852504110b0f3f7d > > HTTP/3: unified hq code with regular HTTP/3 code. > > > > The change removes hq-specific request handler. Now hq requests are handled > > by the HTTP/3 request handler. This brings missing keepalive timeout feature > > to hq. > > As discussed in private, this doesn't introduce keepalive timeout for hq, > as it's installed though HTTP/3-specific ngx_http_v3_wait_request_handler(), > while hq uses ngx_http_wait_request_handler(). Though, it appears later > by other means while reworking code for init callback. Thanks. This is a leftover from previous versions of this patch. Removed the last sentence: HTTP/3: unified hq code with regular HTTP/3 code. The change removes hq-specific request handler. Now hq requests are handled by the HTTP/3 request handler. [..] -- Roman From arut at nginx.com Thu Oct 20 14:25:03 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 20 Oct 2022 18:25:03 +0400 Subject: [PATCH 08 of 10] QUIC: idle mode for main connection In-Reply-To: <20221020115015.azqjb5wd7fisgm2f@Y9MQ9X2QVV> References: <20221020115015.azqjb5wd7fisgm2f@Y9MQ9X2QVV> Message-ID: <20221020142503.d327lsrunx6hzw63@N00W24XTQX> Hi, On Thu, Oct 20, 2022 at 03:50:15PM +0400, Sergey Kandaurov wrote: > On Thu, Sep 08, 2022 at 01:06:35PM +0400, Roman Arutyunyan wrote: > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1662627133 -14400 > > # Thu Sep 08 12:52:13 2022 +0400 > > # Branch quic > > # Node ID e0634a484d9a2d82d43f565d64a0a22e989ac1cb > > # Parent 1dd6fabfdcb5b52af495f9d8fc00f64ae36a537c > > QUIC: idle mode for main connection. > > > > Now main QUIC connection for HTTP/3 always has c->idle flag set. This allows > > the connection to receive worker shutdown notification. It is passed to > > application level via a new conf->shutdown() callback. > > > > The HTTP/3 shutdown callback sends GOAWAY to client and gracefully shuts down > > the QUIC connection. 
> > > > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > > --- a/src/event/quic/ngx_event_quic.c > > +++ b/src/event/quic/ngx_event_quic.c > > @@ -341,6 +341,7 @@ ngx_quic_new_connection(ngx_connection_t > > return NULL; > > } > > > > + c->idle = 1; > > ngx_reusable_connection(c, 1); > > > > ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, > > @@ -420,9 +421,9 @@ ngx_quic_input_handler(ngx_event_t *rev) > > } > > > > if (c->close) { > > - qc->error = NGX_QUIC_ERR_NO_ERROR; > > - qc->error_reason = "graceful shutdown"; > > - ngx_quic_close_connection(c, NGX_ERROR); > > + if (qc->conf->shutdown) { > > As previously discussed in private, this will need an additional check > that we are not yet in qc->closing. > > > + qc->conf->shutdown(c); > > + } > > return; > > } Yes, added the check. Also, c->close is reset here similarly to HTTP/2 since we want to be able to handle future packets normally. Also, current code which closes the connection instantly should remain for connection reuse. To tell reuse from shutdown we can check ngx_exiting. Assuming reuse does not make sense in shutdown mode, this will work good. > > diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h > > --- a/src/event/quic/ngx_event_quic.h > > +++ b/src/event/quic/ngx_event_quic.h > > @@ -28,6 +28,9 @@ > > #define NGX_QUIC_STREAM_UNIDIRECTIONAL 0x02 > > > > > > +typedef void (*ngx_quic_shutdown_pt)(ngx_connection_t *c); > > + > > + > > typedef enum { > > NGX_QUIC_STREAM_SEND_READY = 0, > > NGX_QUIC_STREAM_SEND_SEND, > > @@ -74,6 +77,8 @@ typedef struct { > > ngx_int_t stream_reject_code_uni; > > ngx_int_t stream_reject_code_bidi; > > > > + ngx_quic_shutdown_pt shutdown; > > + > > u_char av_token_key[NGX_QUIC_AV_KEY_LEN]; > > u_char sr_token_key[NGX_QUIC_SR_KEY_LEN]; > > } ngx_quic_conf_t; > > diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h > > --- a/src/http/v3/ngx_http_v3.h > > +++ b/src/http/v3/ngx_http_v3.h > > @@ -141,6 +141,7 @@ struct ngx_http_v3_session_s { > > uint64_t next_push_id; > > uint64_t max_push_id; > > uint64_t goaway_push_id; > > + uint64_t next_request_id; > > > > off_t total_bytes; > > off_t payload_bytes; > > @@ -158,6 +159,7 @@ void ngx_http_v3_init(ngx_connection_t * > > void ngx_http_v3_reset_connection(ngx_connection_t *c); > > ngx_int_t ngx_http_v3_init_session(ngx_connection_t *c); > > ngx_int_t ngx_http_v3_check_flood(ngx_connection_t *c); > > +void ngx_http_v3_shutdown(ngx_connection_t *c); > > > > ngx_int_t ngx_http_v3_read_request_body(ngx_http_request_t *r); > > ngx_int_t ngx_http_v3_read_unbuffered_request_body(ngx_http_request_t *r); > > diff --git a/src/http/v3/ngx_http_v3_module.c b/src/http/v3/ngx_http_v3_module.c > > --- a/src/http/v3/ngx_http_v3_module.c > > +++ b/src/http/v3/ngx_http_v3_module.c > > @@ -249,6 +249,8 @@ ngx_http_v3_create_srv_conf(ngx_conf_t * > > h3scf->quic.stream_reject_code_bidi = NGX_HTTP_V3_ERR_REQUEST_REJECTED; > > h3scf->quic.active_connection_id_limit = NGX_CONF_UNSET_UINT; > > > > + h3scf->quic.shutdown = ngx_http_v3_shutdown; > > + > > return h3scf; > > } > > > > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > > --- a/src/http/v3/ngx_http_v3_request.c > > +++ b/src/http/v3/ngx_http_v3_request.c > > @@ -97,6 +97,37 @@ ngx_http_v3_init(ngx_connection_t *c) > > } > > > > > > +void > > +ngx_http_v3_shutdown(ngx_connection_t *c) > > +{ > > + ngx_http_v3_session_t *h3c; > > extra indent > > > + > > + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 
shutdown"); > > + > > + h3c = ngx_http_v3_get_session(c); > > + > > + if (h3c == NULL) { > > + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, > > + "connection shutdown"); > > + return; > > + } > > + > > + if (!h3c->goaway) { > > + h3c->goaway = 1; > > + > > +#if (NGX_HTTP_V3_HQ) > > + if (!h3c->hq) > > +#endif > > + { > > + (void) ngx_http_v3_send_goaway(c, h3c->next_request_id); > > + } > > + > > + ngx_http_v3_shutdown_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, > > + "connection shutdown"); > > + } > > Note that this callback is used to be called from a read event as part of > graceful shutdown. > With ngx_quic_finalize_connection() remade in patch #4 (reusable mode) > to defer closing QUIC connection to a posted event, this call now results > in a posted event, which no one can fulfill, hence no further action until > quic idle timeout fires. > It could be fixed by executing known posted events after shutdown callback > or more globally - as part of graceful shutdown itself. Yes, events posted from ngx_close_idle_connections() are not handled right away. Instead, they are handled at the end of the next cycle, which normally happens after a timeout. There seems to be no pretty way to fix this, unless we handle posted events in ngx_worker_process_cycle() right after ngx_close_idle_connections(). We are trying to avoid global changes like this. I suggest posting current connection read event as a next posted event. This will effectively set next cycle timeout to be zero and eliminate the problem. > > +} > > + > > + > > static void > > ngx_http_v3_init_request_stream(ngx_connection_t *c) > > { > > @@ -137,6 +168,8 @@ ngx_http_v3_init_request_stream(ngx_conn > > > > pc = c->quic->parent; > > > > + h3c->next_request_id = c->quic->id + 0x04; > > + > > if (n + 1 == clcf->keepalive_requests > > || ngx_current_msec - pc->start_time > clcf->keepalive_time) > > { > > @@ -146,7 +179,7 @@ ngx_http_v3_init_request_stream(ngx_conn > > if (!h3c->hq) > > #endif > > { > > - if (ngx_http_v3_send_goaway(c, (n + 1) << 2) != NGX_OK) { > > + if (ngx_http_v3_send_goaway(c, h3c->next_request_id) != NGX_OK) { > > ngx_http_close_connection(c); > > return; > > } > > > > _______________________________________________ > > nginx-devel mailing list -- nginx-devel at nginx.org > > To unsubscribe send an email to nginx-devel-leave at nginx.org > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org Attached is a diff to the current patch. 
-- Roman -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1666273166 -14400 # Thu Oct 20 17:39:26 2022 +0400 # Branch quic # Node ID d6c725081a0b024886822e1cc722fdace9c32621 # Parent a4ba2ac5fa55ef94bb75a66e66e0b19d792fed10 [mq]: quic-idle-fix1 diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -421,9 +421,22 @@ ngx_quic_input_handler(ngx_event_t *rev) } if (c->close) { - if (qc->conf->shutdown) { + c->close = 0; + + if (!ngx_exiting) { + qc->error = NGX_QUIC_ERR_NO_ERROR; + qc->error_reason = "graceful shutdown"; + ngx_quic_close_connection(c, NGX_ERROR); + return; + } + + if (!qc->closing && qc->conf->shutdown) { + /* do not delay events posted by shutdown() */ + ngx_post_event(rev, &ngx_posted_next_events); + qc->conf->shutdown(c); } + return; } diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c --- a/src/http/v3/ngx_http_v3_request.c +++ b/src/http/v3/ngx_http_v3_request.c @@ -100,7 +100,7 @@ ngx_http_v3_init(ngx_connection_t *c) void ngx_http_v3_shutdown(ngx_connection_t *c) { - ngx_http_v3_session_t *h3c; + ngx_http_v3_session_t *h3c; ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 shutdown"); From arut at nginx.com Thu Oct 20 14:33:22 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 20 Oct 2022 18:33:22 +0400 Subject: [PATCH 10 of 10] QUIC: application init() callback In-Reply-To: <20221020115154.ygd4gzyuykvrgqsy@Y9MQ9X2QVV> References: <8e58a27b320807aae001.1662627997@arut-laptop> <20221020115154.ygd4gzyuykvrgqsy@Y9MQ9X2QVV> Message-ID: <20221020143322.b2c2g44dg2nofap2@N00W24XTQX> Hi, On Thu, Oct 20, 2022 at 03:51:54PM +0400, Sergey Kandaurov wrote: > On Thu, Sep 08, 2022 at 01:06:37PM +0400, Roman Arutyunyan wrote: > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1662627905 -14400 > > # Thu Sep 08 13:05:05 2022 +0400 > > # Branch quic > > # Node ID 8e58a27b320807aae00194b82e2c997287e3ad42 > > # Parent 861d6897151fe6773898db6cfdb36f56403302c5 > > QUIC: application init() callback. > > > > It's called after handshake completion or prior to the first early data stream > > creation. The callback should initialize application-level data before > > creating streams. > > > > HTTP/3 callback implementation sets keepalive timer and sends SETTINGS. > > > > Also, this allows to limit max handshake time in ngx_http_v3_init_stream(). > > Also brings header timeout (to limit handshake time) > and keepalive timeout in hq mode. Looks like for hq keepalive timeout is now set in ngx_http_v3_init_stream() when main connection is created and in ngx_http_v3_init() at the end of handshake. After that it's neither set nor deleted. The code which does this, only works for http/3, but not hq. This should be addressed. [..] -- Roman From jojy_varghese at apple.com Thu Oct 20 17:51:06 2022 From: jojy_varghese at apple.com (Jojy Varghese) Date: Thu, 20 Oct 2022 10:51:06 -0700 Subject: Http2 idle connection and worker shutdown Message-ID: Hi We noticed that the Http2 idle flag (https://github.com/nginx/nginx/blob/master/src/http/v2/ngx_http_v2.h#L126) is initialized to `1` (https://github.com/nginx/nginx/blob/master/src/http/v2/ngx_http_v2.c#L335) but not reset by the http2 state machine. It also looks like Nginx does not set keep alive timer for http2 as the keep alive setter code path is short circuited for http2 (https://github.com/nginx/nginx/blob/master/src/http/ngx_http_request.c#L2706). 
During worker shutdown when idle connections are reaped (https://github.com/nginx/nginx/blob/master/src/core/ngx_connection.c#L1363), due to the fact that http2 connections are always `idle`, ongoing streams end up prematurely getting closed on the read side(sends GOAWAY frame). It looks like the `idle` connection implementation as it is today only answers the question - “Can nginx prematurely close the streams when shutting down?” Is the above behavior intentional? It appears from the name of the flag (`idle`) that the intent is to reflect the dynamic state of the connection (are reads and writes happening for the connection for any of the streams) but the implementation does not reflect that. What is the intended behavior ? We would have liked a `idle` flag with its corresponding timeout configuration that lets us control the behavior during shutdown (and other flows). Thanks in advance Jojy -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Oct 20 21:19:13 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 21 Oct 2022 00:19:13 +0300 Subject: Http2 idle connection and worker shutdown In-Reply-To: References: Message-ID: Hello! On Thu, Oct 20, 2022 at 10:51:06AM -0700, Jojy Varghese via nginx-devel wrote: > Hi > We noticed that the Http2 idle flag > (https://github.com/nginx/nginx/blob/master/src/http/v2/ngx_http_v2.h#L126) > is initialized to `1` > (https://github.com/nginx/nginx/blob/master/src/http/v2/ngx_http_v2.c#L335) > but not reset by the http2 state machine. It also looks like > Nginx does not set keep alive timer for http2 as the keep > alive setter code path is short circuited for http2 > (https://github.com/nginx/nginx/blob/master/src/http/ngx_http_request.c#L2706). > > During worker shutdown when idle connections are reaped > (https://github.com/nginx/nginx/blob/master/src/core/ngx_connection.c#L1363), > due to the fact that http2 connections are always `idle`, > ongoing streams end up prematurely getting closed on the read > side(sends GOAWAY frame). No streams are closed by the http2 connection read handler when it's called by ngx_close_idle_connections(). Consider re-checking the ngx_http_v2_read_handler() code: http://hg.nginx.org/nginx/file/tip/src/http/v2/ngx_http_v2.c#l365 Instead, the handler closes the connection as long as there are no active streams. If there are any active streams, it sends the GOAWAY frame to the connection, so the client will know that no additional streams will be accepted, and proceeds with normal handling of the remaining streams. > It looks like the `idle` connection implementation as it is > today only answers the question - “Can nginx prematurely close > the streams when shutting down?” Is the above behavior > intentional? > > It appears from the name of the flag (`idle`) that the intent is > to reflect the dynamic state of the connection (are reads and > writes happening for the connection for any of the streams) but > the implementation does not reflect that. > > What is the intended behavior ? We would have liked a `idle` > flag with its corresponding timeout configuration that lets us > control the behavior during shutdown (and other flows). The "idle" flag used to be a way to distinguish HTTP connections without active requests and therefore prepared to be closed during shutdown by calling c->read->handler() with c->close set. 
With HTTP/2, essentially all connections are prepared to be closed during shutdown (in some cases this may take a while though), so all HTTP/2 connections have c->idle flag set. While from linguistic point of view this isn't exactly correct, it's certainly correct from semantic point of view and believed to result in correct behaviour. If you think that nginx does something wrong, you may want to elaborate on the details of the incorrect behaviour you observe (as well as why you think it's incorrect, and how would you like to improve it). Hope this helps. -- Maxim Dounin http://mdounin.ru/ From v.zhestikov at f5.com Thu Oct 20 23:42:43 2022 From: v.zhestikov at f5.com (Vadim Zhestikov) Date: Thu, 20 Oct 2022 23:42:43 +0000 Subject: [njs] Fixed break label for if statement. Message-ID: details: https://hg.nginx.org/njs/rev/dc6bc4de1185 branches: changeset: 1978:dc6bc4de1185 user: Vadim Zhestikov date: Thu Oct 20 16:40:35 2022 -0700 description: Fixed break label for if statement. This fixes #591 issue on Github. diffstat: src/njs_generator.c | 10 ++++++++++ src/test/njs_unit_test.c | 6 ++++++ 2 files changed, 16 insertions(+), 0 deletions(-) diffs (43 lines): diff -r 0b87c0309b37 -r dc6bc4de1185 src/njs_generator.c --- a/src/njs_generator.c Mon Oct 10 19:01:56 2022 -0700 +++ b/src/njs_generator.c Thu Oct 20 16:40:35 2022 -0700 @@ -1127,6 +1127,14 @@ static njs_int_t njs_generate_if_statement(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node) { + njs_int_t ret; + + ret = njs_generate_start_block(vm, generator, NJS_GENERATOR_BLOCK, + &node->name); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + /* The condition expression. */ njs_generator_next(generator, njs_generate, node->left); @@ -1231,6 +1239,8 @@ njs_generate_if_statement_else(njs_vm_t njs_code_set_offset(generator, label_offset, *jump_offset); + njs_generate_patch_block_exit(vm, generator); + return njs_generator_stack_pop(vm, generator, generator->context); } diff -r 0b87c0309b37 -r dc6bc4de1185 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Mon Oct 10 19:01:56 2022 -0700 +++ b/src/test/njs_unit_test.c Thu Oct 20 16:40:35 2022 -0700 @@ -3188,6 +3188,12 @@ static njs_unit_test_t njs_test[] = njs_str("1") }, #endif + { njs_str("var r='ok'; a:if(1){break a; r='!ok'}; r"), + njs_str("ok") }, + + { njs_str("var r='ok'; a:if(0){break a; r='!ok1'} else {break a; r='!ok2'}; r"), + njs_str("ok") }, + { njs_str("var a = 0; a:{a++}; a"), njs_str("1") }, From mdounin at mdounin.ru Fri Oct 21 00:10:03 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 21 Oct 2022 03:10:03 +0300 Subject: [PATCH 3 of 4] QUIC: support for setting QUIC methods with LibreSSL In-Reply-To: References: Message-ID: Hello! On Tue, Oct 11, 2022 at 02:35:52PM +0400, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1665484414 -14400 > # Tue Oct 11 14:33:34 2022 +0400 > # Branch quic > # Node ID c0165ddcb1c6981f8e5230081f03a277f62d20c3 > # Parent caced81ce0a9cb218ae8cdd6176c12e0614acee9 > QUIC: support for setting QUIC methods with LibreSSL. > > Setting QUIC methods is converted to use C99 designated initializers > for simplicity, as LibreSSL 3.6.0 has different SSL_QUIC_METHOD layout. I'm somewhat sceptical about C99 designated initializers. These aren't supported by MSVC till 2012: in particular, this includes all MSVC versions available via wineticks, as well as MSVC versions currently used to build nginx win32 binaries. A more portable solution might be to use run-time initialization instead. [...] 
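For illustration, a rough sketch of what run-time initialization could look like here; this is not a proposed patch, the structure fields follow the BoringSSL-style SSL_QUIC_METHOD API, and the ngx_quic_* callback and function names are placeholders. Assigning members one by one compiles with pre-C99 compilers such as older MSVC and, as long as the member names match, works regardless of how a particular library lays the structure out, so no per-library initializer is needed:

    static SSL_QUIC_METHOD  ngx_quic_method;

    static void
    ngx_quic_init_ssl_method(void)
    {
        ngx_memzero(&ngx_quic_method, sizeof(SSL_QUIC_METHOD));

        ngx_quic_method.set_read_secret = ngx_quic_set_read_secret;
        ngx_quic_method.set_write_secret = ngx_quic_set_write_secret;
        ngx_quic_method.add_handshake_data = ngx_quic_add_handshake_data;
        ngx_quic_method.flush_flight = ngx_quic_flush_flight;
        ngx_quic_method.send_alert = ngx_quic_send_alert;
    }

    /* later, once per context: SSL_CTX_set_quic_method(ssl_ctx, &ngx_quic_method); */
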
-- Maxim Dounin http://mdounin.ru/ From jojy_varghese at apple.com Fri Oct 21 01:09:11 2022 From: jojy_varghese at apple.com (Jojy Varghese) Date: Thu, 20 Oct 2022 18:09:11 -0700 Subject: Http2 idle connection and worker shutdown In-Reply-To: References: Message-ID: <5F0F8B26-EB47-462E-B14B-F627E78BD4A4@apple.com> Hi Maxim Thanks for the response. Answers inline. -jojy > On Oct 20, 2022, at 2:19 PM, Maxim Dounin wrote: > > Hello! > > On Thu, Oct 20, 2022 at 10:51:06AM -0700, Jojy Varghese via nginx-devel wrote: > >> Hi >> We noticed that the Http2 idle flag >> (https://github.com/nginx/nginx/blob/master/src/http/v2/ngx_http_v2.h#L126) >> is initialized to `1` >> (https://github.com/nginx/nginx/blob/master/src/http/v2/ngx_http_v2.c#L335) >> but not reset by the http2 state machine. It also looks like >> Nginx does not set keep alive timer for http2 as the keep >> alive setter code path is short circuited for http2 >> (https://github.com/nginx/nginx/blob/master/src/http/ngx_http_request.c#L2706). >> >> During worker shutdown when idle connections are reaped >> (https://github.com/nginx/nginx/blob/master/src/core/ngx_connection.c#L1363), >> due to the fact that http2 connections are always `idle`, >> ongoing streams end up prematurely getting closed on the read >> side(sends GOAWAY frame). > > No streams are closed by the http2 connection read handler when > it's called by ngx_close_idle_connections(). Consider re-checking > the ngx_http_v2_read_handler() code: > > http://hg.nginx.org/nginx/file/tip/src/http/v2/ngx_http_v2.c#l365 > > Instead, the handler closes the connection as long as there are no > active streams. If there are any active streams, it sends the > GOAWAY frame to the connection, so the client will know that no > additional streams will be accepted, and proceeds with normal > handling of the remaining streams. I should have been more clear. I meant that the connection sends a GOAWAY frame due to which during the finalizing of the request, nginx closes the server end of the connection (we have slightly older nginx version that closes the connection as opposed to lingering close). > >> It looks like the `idle` connection implementation as it is >> today only answers the question - “Can nginx prematurely close >> the streams when shutting down?” Is the above behavior >> intentional? >> >> It appears from the name of the flag (`idle`) that the intent is >> to reflect the dynamic state of the connection (are reads and >> writes happening for the connection for any of the streams) but >> the implementation does not reflect that. >> >> What is the intended behavior ? We would have liked a `idle` >> flag with its corresponding timeout configuration that lets us >> control the behavior during shutdown (and other flows). > > The "idle" flag used to be a way to distinguish HTTP connections > without active requests and therefore prepared to be closed during > shutdown by calling c->read->handler() with c->close set. With > HTTP/2, essentially all connections are prepared to be closed > during shutdown (in some cases this may take a while though), so > all HTTP/2 connections have c->idle flag set. While from > linguistic point of view this isn't exactly correct, it's > certainly correct from semantic point of view and believed to > result in correct behaviour. > > If you think that nginx does something wrong, you may want to > elaborate on the details of the incorrect behaviour you observe > (as well as why you think it's incorrect, and how would you like > to improve it). 
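To make the race concrete, below is the idea behind lingering close as a minimal, generic POSIX sketch, not the actual nginx code (nginx drives this from its event loop and bounds it with the lingering_time and lingering_timeout settings). Once the response is complete, the server half-closes the socket and keeps reading for a short period, so a late client frame such as a trailing WINDOW_UPDATE is consumed instead of arriving at an already-closed socket and turning into a TCP RST:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <poll.h>
    #include <unistd.h>

    static void
    lingering_close(int fd, int timeout_ms)
    {
        char           buf[4096];
        ssize_t        n;
        struct pollfd  pfd;

        (void) shutdown(fd, SHUT_WR);        /* send FIN, keep reading */

        pfd.fd = fd;
        pfd.events = POLLIN;
        pfd.revents = 0;

        for ( ;; ) {
            /* a real implementation also caps the total lingering time */
            if (poll(&pfd, 1, timeout_ms) <= 0) {
                break;                       /* quiet long enough, or error */
            }

            n = recv(fd, buf, sizeof(buf), 0);

            if (n <= 0) {
                break;                       /* EOF or error: client is done */
            }

            /* late data (e.g. a WINDOW_UPDATE frame) is read and discarded */
        }

        (void) close(fd);
    }
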
We ran into a problem where the connection close (as described above) causes some clients (chrome browser) to fail long running (large) downloads using h2. This is I think because these clients send a WINDOW UPDATE towards the end and Nginx sends a TCP RST since the connection is closed. We were wondering if there is any reason for `idle` flag to be always 1. Or we could introduce a “lingering” time concept so that we can control if we want to set the idle flag after the lingering time is over. > > Hope this helps. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Oct 21 01:43:44 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 21 Oct 2022 04:43:44 +0300 Subject: Http2 idle connection and worker shutdown In-Reply-To: <5F0F8B26-EB47-462E-B14B-F627E78BD4A4@apple.com> References: <5F0F8B26-EB47-462E-B14B-F627E78BD4A4@apple.com> Message-ID: Hello! On Thu, Oct 20, 2022 at 06:09:11PM -0700, Jojy Varghese via nginx-devel wrote: [...] > > If you think that nginx does something wrong, you may want to > > elaborate on the details of the incorrect behaviour you observe > > (as well as why you think it's incorrect, and how would you like > > to improve it). > > We ran into a problem where the connection close (as described > above) causes some clients (chrome browser) to fail long running > (large) downloads using h2. This is I think because these > clients send a WINDOW UPDATE towards the end and Nginx sends a > TCP RST since the connection is closed. So it looks like you are running into the race condition the lingering close is expected to solve. You may want to upgrade nginx then. Note well that lingering close support in HTTP/2 was added in nginx 1.19.1, more than two years ago, and the last nginx version without it is not supported for more than a year. -- Maxim Dounin http://mdounin.ru/ From fino.meng at intel.com Fri Oct 21 13:42:27 2022 From: fino.meng at intel.com (Meng, Fino) Date: Fri, 21 Oct 2022 21:42:27 +0800 Subject: [PATCH] Intel DLB(Dynamic Load Banlander) demo in NGINX Message-ID: <20221021134227.5696-1-fino.meng@intel.com> Intel® Dynamic Load Balancer (Intel® DLB) is a hardware managed system of queues and arbiters connecting producers and consumers. It is a PCI device envisaged to live in the server CPU uncore and can interact with software running on cores, and potentially with other devices. This demo need run on next generation Xeon processer(code name Sapphire Rapids). Signed-off-by: Meng, Fino --- auto/configure | 1 + auto/dlbque | 20 ++ auto/options | 4 + src/core/ngx_thread_pool.c | 454 ++++++++++++++++++++++++- src/core/ngx_thread_pool.h | 3 + src/http/ngx_http_copy_filter_module.c | 3 + src/http/ngx_http_file_cache.c | 3 + src/http/ngx_http_parse.c | 24 ++ src/http/ngx_http_request.c | 15 + src/http/ngx_http_request.h | 3 + src/http/ngx_http_upstream.c | 3 + 11 files changed, 523 insertions(+), 10 deletions(-) create mode 100644 auto/dlbque diff --git a/auto/configure b/auto/configure index 474d69e8..92a86de2 100755 --- a/auto/configure +++ b/auto/configure @@ -59,6 +59,7 @@ if [ "$NGX_PLATFORM" != win32 ]; then fi . auto/threads +. auto/dlbque . auto/modules . 
auto/lib/conf diff --git a/auto/dlbque b/auto/dlbque new file mode 100644 index 00000000..af2c4d25 --- /dev/null +++ b/auto/dlbque @@ -0,0 +1,20 @@ + +# Copyright (C) Nginx, Inc. + +if [ $USE_DLBQUE = YES ]; then + + if [ "$NGX_PLATFORM" = win32 ]; then + cat << END + +$0: --with-dlbque is not supported on Windows + +END + exit 1 + fi + + have=NGX_DLBQUE . auto/have + #CORE_DEPS="$CORE_DEPS $DLBQUE_DEPS" + #CORE_SRCS="$CORE_SRCS $DLBQUE_SRCS" + CORE_INCS="$CORE_INCS $DLBQUE_LIB_PATH" + CORE_LIBS="$CORE_LIBS -L $DLBQUE_LIB_PATH -ldlb -lrt" +fi diff --git a/auto/options b/auto/options index 48f3a1a4..06738680 100644 --- a/auto/options +++ b/auto/options @@ -42,6 +42,8 @@ EVENT_SELECT=NO EVENT_POLL=NO USE_THREADS=NO +USE_DLBQUE=NO +DLBQUE_LIB_PATH= NGX_FILE_AIO=NO @@ -208,6 +210,8 @@ do --without-poll_module) EVENT_POLL=NONE ;; --with-threads) USE_THREADS=YES ;; + --with-dlbque) USE_DLBQUE=YES ;; + --dlbque-lib-path=*) DLBQUE_LIB_PATH="$value" ;; --with-file-aio) NGX_FILE_AIO=YES ;; diff --git a/src/core/ngx_thread_pool.c b/src/core/ngx_thread_pool.c index 7fb0f7f8..412ce282 100644 --- a/src/core/ngx_thread_pool.c +++ b/src/core/ngx_thread_pool.c @@ -11,6 +11,44 @@ #include +#if (NGX_DLBQUE) + +#include "dlb.h" + +#define DLB2 1 +#define NUM_EVENTS_PER_BATCH 1 +#define DLB_NUM 4 + +#define CQ_DEPTH 2 +static int partial_resources = 4; + +typedef struct { + int dev_id; + int domain_id; + int queue_id; + int tx_port_id; + int rx_port_id; + dlb_hdl_t dlb; + dlb_domain_hdl_t domain; + dlb_port_hdl_t tx_port; + dlb_port_hdl_t rx_port; + dlb_dev_cap_t cap; + dlb_resources_t rsrcs; + int ldb_pool_id; + int dir_pool_id; + int num_credit_combined; + int num_credit_ldb; + int num_credit_dir; +}ngx_ldb_t; + +enum wait_mode_t { + POLL, + INTERRUPT, +} wait_mode = POLL; + +#endif + + typedef struct { ngx_array_t pools; } ngx_thread_pool_conf_t; @@ -40,8 +78,11 @@ struct ngx_thread_pool_s { u_char *file; ngx_uint_t line; -}; +#if (NGX_DLBQUE) + ngx_ldb_t loadbal_queue; +#endif +}; static ngx_int_t ngx_thread_pool_init(ngx_thread_pool_t *tp, ngx_log_t *log, ngx_pool_t *pool); @@ -102,6 +143,203 @@ static ngx_uint_t ngx_thread_pool_task_id; static ngx_atomic_t ngx_thread_pool_done_lock; static ngx_thread_pool_queue_t ngx_thread_pool_done; +#if (NGX_DLBQUE) + +static int create_sched_domain( + dlb_hdl_t dlb, + ngx_ldb_t *ldb_p) +{ + dlb_create_sched_domain_t args; + int p_rsrsc = partial_resources; + + args.num_ldb_queues = 1; + args.num_ldb_ports = 2; + args.num_dir_ports = 2; + args.num_ldb_event_state_entries = 2 * args.num_ldb_ports * CQ_DEPTH; + + if (!ldb_p->cap.combined_credits) { + args.num_ldb_credits = ldb_p->rsrcs.max_contiguous_ldb_credits * p_rsrsc / 100; + args.num_dir_credits = ldb_p->rsrcs.max_contiguous_dir_credits * p_rsrsc / 100; + args.num_ldb_credit_pools = 1; + args.num_dir_credit_pools = 1; + } else { + args.num_credits = ldb_p->rsrcs.num_credits * p_rsrsc / 100; + args.num_credit_pools = 1; + } + + args.num_sn_slots[0] = ldb_p->rsrcs.num_sn_slots[0] * p_rsrsc / 100; + args.num_sn_slots[1] = ldb_p->rsrcs.num_sn_slots[1] * p_rsrsc / 100; + + return dlb_create_sched_domain(dlb, &args); +} + +static int create_ldb_queue( + dlb_domain_hdl_t domain) +{ + dlb_create_ldb_queue_t args = {0}; + args.num_sequence_numbers = 0; + return dlb_create_ldb_queue(domain, &args); +} + +static int create_ldb_port( + ngx_ldb_t *ldb_p, + dlb_domain_hdl_t domain, + int ldb_pool, + int dir_pool) +{ + dlb_create_port_t args; + + if (!ldb_p->cap.combined_credits) { + args.ldb_credit_pool_id = ldb_pool; + 
args.dir_credit_pool_id = dir_pool; + } else { + args.credit_pool_id = ldb_pool; + } + args.cq_depth = CQ_DEPTH; + args.num_ldb_event_state_entries = CQ_DEPTH*2; +#ifdef DLB2 + args.cos_id = DLB_PORT_COS_ID_ANY; +#endif + return dlb_create_ldb_port(domain, &args); +} + +static ngx_int_t +ngx_loadbalance_init(ngx_thread_pool_t *tp, int index) +{ + int domain_id, tx_port_id, rx_port_id; + int ret; + ngx_ldb_t *ldb_p = &tp->loadbal_queue; + static int dlb_index = 0; + + ldb_p->num_credit_combined = 2048; + ldb_p->num_credit_ldb = 2048; + ldb_p->num_credit_dir = 1024; + index = 0; + + ldb_p->dev_id = index % DLB_NUM; + + while( index < DLB_NUM ) { + ngx_log_error(NGX_LOG_INFO,tp->log, 0, "ngx_loadbalance_init index %d dlb_indx %d ldb_p %lx", index, dlb_index, ldb_p); + + ret = dlb_open(ldb_p->dev_id, &ldb_p->dlb); + if (ret < 0) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_open fail %d errno %d", ret, errno); + goto retry; + } + + if (dlb_get_dev_capabilities(ldb_p->dlb, &ldb_p->cap)) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_get_dev_capabilities errno %u, return", errno); + return NGX_ERROR; + } + + if (dlb_get_num_resources(ldb_p->dlb, &ldb_p->rsrcs)) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_get_dev_capabilities errno %u, return", errno); + return NGX_ERROR; + } + + unsigned int sns_per_queue; + ret = dlb_get_ldb_sequence_number_allocation(ldb_p->dlb, 0, &sns_per_queue); + if (ret < 0 ) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_set_ldb_sequence_number_allocation errno %d, return", ret, errno); + return NGX_ERROR; + } + + domain_id = create_sched_domain(ldb_p->dlb, ldb_p); + if (domain_id < 0) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "create_sched_domain errno %d, index %d ", errno, index); + goto retry; + } else { + ldb_p->domain_id = domain_id; + ldb_p->domain = dlb_attach_sched_domain(ldb_p->dlb, domain_id); + if (ldb_p->domain == NULL) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_attach_sched_domain errno %d", errno); + return NGX_ERROR; + } + if (!ldb_p->cap.combined_credits) { + int max_ldb_credits = ldb_p->rsrcs.num_ldb_credits * partial_resources / 100; + int max_dir_credits = ldb_p->rsrcs.num_dir_credits * partial_resources / 100; + + ldb_p->ldb_pool_id = dlb_create_ldb_credit_pool(ldb_p->domain, max_ldb_credits); + if (ldb_p->ldb_pool_id == -1) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_create_ldb_credit_pool number %d, errno %d", max_ldb_credits, errno); + goto retry; + } + ldb_p->dir_pool_id = dlb_create_dir_credit_pool(ldb_p->domain, max_dir_credits); + if (ldb_p->dir_pool_id == -1) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_create_dir_credit_pool errno %d", errno); + goto retry; + } + } else { + int max_credits = ldb_p->rsrcs.num_credits * partial_resources / 100; + + ldb_p->ldb_pool_id = dlb_create_credit_pool(ldb_p->domain, max_credits); + if (ldb_p->ldb_pool_id == -1) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_create_credit_pool errno %d", errno); + goto retry; + } + } + break; + } + +retry: + ldb_p->dev_id = ++index; + } + + if (index >= DLB_NUM) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "no DLB resource left, used up to %d", index); + return NGX_ERROR; + } + + ldb_p->queue_id = create_ldb_queue(ldb_p->domain); + if (ldb_p->queue_id == -1) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "create_ldb_queue %d errno %d", ldb_p->queue_id, errno); + return NGX_ERROR; + } + + tx_port_id = create_ldb_port(ldb_p, ldb_p->domain, ldb_p->ldb_pool_id, ldb_p->dir_pool_id); + if (tx_port_id == -1) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_create_credit_pool 
errno %d", errno); + return NGX_ERROR; + } + + ldb_p->tx_port = dlb_attach_ldb_port(ldb_p->domain, tx_port_id); + if (ldb_p->tx_port == NULL) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_create_credit_pool errno %d", errno); + return NGX_ERROR; + } + + rx_port_id = create_ldb_port(ldb_p, ldb_p->domain, ldb_p->ldb_pool_id, ldb_p->dir_pool_id); + if (rx_port_id == -1) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_create_credit_pool errno %d", errno); + return NGX_ERROR; + } + + ldb_p->rx_port = dlb_attach_ldb_port(ldb_p->domain, rx_port_id); + if (ldb_p->rx_port == NULL) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_create_credit_pool errno %d", errno); + return NGX_ERROR; + } + + if (dlb_link_queue(ldb_p->rx_port, ldb_p->queue_id, 0) == -1) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_link_queue errno %d", errno); + return NGX_ERROR; + } + + if (dlb_launch_domain_alert_thread(ldb_p->domain, NULL, NULL)) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_launch_domain_alert_thread errno %d", errno); + return NGX_ERROR; + } + + if (dlb_start_sched_domain(ldb_p->domain)) { + ngx_log_error(NGX_LOG_ERR,tp->log, 0, "dlb_start_sched_domain errno %d", errno); + return NGX_ERROR; + } + + ngx_log_error(NGX_LOG_INFO,tp->log, 0, "DLB created Success on device %d", ldb_p->dev_id); + + return NGX_OK; +} +#endif static ngx_int_t ngx_thread_pool_init(ngx_thread_pool_t *tp, ngx_log_t *log, ngx_pool_t *pool) @@ -144,20 +382,13 @@ ngx_thread_pool_init(ngx_thread_pool_t *tp, ngx_log_t *log, ngx_pool_t *pool) return NGX_ERROR; } -#if 0 - err = pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN); - if (err) { - ngx_log_error(NGX_LOG_ALERT, log, err, - "pthread_attr_setstacksize() failed"); - return NGX_ERROR; - } -#endif - for (n = 0; n < tp->threads; n++) { + err = pthread_create(&tid, &attr, ngx_thread_pool_cycle, tp); if (err) { ngx_log_error(NGX_LOG_ALERT, log, err, "pthread_create() failed"); + return NGX_ERROR; } } @@ -194,6 +425,34 @@ ngx_thread_pool_destroy(ngx_thread_pool_t *tp) task.event.active = 0; } +#if (NGX_DLBQUE) + + if (dlb_disable_port(tp->loadbal_queue.rx_port)) + ngx_log_error(NGX_LOG_ERR, tp->log, 0, + "dlb_disable_port() rx_port failed with errno %d", errno); + + if (dlb_detach_port(tp->loadbal_queue.rx_port) == -1) + ngx_log_error(NGX_LOG_ERR, tp->log, 0, + "dlb_detach_port() rx_port failed with errno %d", errno); + + if (dlb_detach_port(tp->loadbal_queue.tx_port) == -1) + ngx_log_error(NGX_LOG_ERR, tp->log, 0, + "dlb_detach_port() tx_port failed with errno %d", errno); + + if (dlb_detach_sched_domain(tp->loadbal_queue.domain) == -1) + ngx_log_error(NGX_LOG_ERR, tp->log, 0, + "dlb_detach_sched_domain() failed with errno %d", errno); + + if (dlb_reset_sched_domain(tp->loadbal_queue.dlb, tp->loadbal_queue.domain_id) == -1) + ngx_log_error(NGX_LOG_ERR, tp->log, 0, + "dlb_reset_sched_domain() failed with errno %d", errno); + + if(dlb_close(tp->loadbal_queue.dlb) == -1) + ngx_log_error(NGX_LOG_ERR, tp->log, 0, + "dlb_close() failed with errno %d", errno); + +#endif + (void) ngx_thread_cond_destroy(&tp->cond, tp->log); (void) ngx_thread_mutex_destroy(&tp->mtx, tp->log); @@ -226,6 +485,79 @@ ngx_thread_task_alloc(ngx_pool_t *pool, size_t size) return task; } +#if (NGX_DLBQUE) + +ngx_int_t +ngx_thread_task_post(ngx_thread_pool_t *tp, ngx_thread_task_t *task) +{ + int ret; + dlb_event_t dlb_events[NUM_EVENTS_PER_BATCH]; + static uint16_t i = 0; + + task->event.active = 1; + task->id = ngx_thread_pool_task_id++; + task->next = NULL; + + //send stored task first + while(tp->queue.first != NULL) { 
+ task = tp->queue.first; + tp->queue.first = task->next; + if (tp->queue.first == NULL) { + tp->queue.last = &tp->queue.first; + } + tp->waiting --; + + dlb_events[0].send.queue_id = tp->loadbal_queue.queue_id; + dlb_events[0].send.sched_type = SCHED_UNORDERED; + dlb_events[0].send.priority = task->priority * 2; //priority map to 0/2/4/6 + dlb_events[0].send.udata64 = (uint64_t)task; + dlb_events[0].send.udata64 = (uint64_t)task; + dlb_events[0].send.udata64 = (uint64_t)task; + dlb_events[0].send.udata16 = (uint16_t)++i; + ret = dlb_send(tp->loadbal_queue.tx_port, 1, &dlb_events[0]); + if (ret <= 0) { + //dlb_send failed case, just put the task in one software queue + if (tp->waiting >= tp->max_queue) { + ngx_log_error(NGX_LOG_ERR, tp->log, 0, + "thread pool \"%V\" queue overflow: %i tasks waiting", + &tp->name, tp->waiting); + return NGX_ERROR; + } + //put the task in the queue again + *tp->queue.last = task; + tp->queue.last = &task->next; + tp->waiting++; + ngx_log_error(NGX_LOG_ALERT, tp->log, 0, "resend task %u failed with ret %d errno %d",task->id, ret, errno); + return NGX_OK; + } + } + + dlb_events[0].send.queue_id = tp->loadbal_queue.queue_id; + dlb_events[0].send.sched_type = SCHED_UNORDERED; + dlb_events[0].send.priority = task->priority * 2; //priority map to 0/2/4/6 + dlb_events[0].send.udata64 = (uint64_t)task; + dlb_events[0].send.udata16 = (uint16_t)++i; + + ret = dlb_send(tp->loadbal_queue.tx_port, 1, &dlb_events[0]); + if (ret <= 0) { + //dlb_send failed case, just put the task in one software queue + if (tp->waiting >= tp->max_queue) { + ngx_log_error(NGX_LOG_ERR, tp->log, 0, + "thread pool \"%V\" queue overflow: %i tasks waiting", + &tp->name, tp->waiting); + return NGX_ERROR; + } + *tp->queue.last = task; + tp->queue.last = &task->next; + tp->waiting++; + ngx_log_error(NGX_LOG_ALERT, tp->log, 0, + "task %lu send failed, store in the sw queue first", task->id); + } + + return NGX_OK; +} + +#else ngx_int_t ngx_thread_task_post(ngx_thread_pool_t *tp, ngx_thread_task_t *task) @@ -273,6 +605,99 @@ ngx_thread_task_post(ngx_thread_pool_t *tp, ngx_thread_task_t *task) return NGX_OK; } +#endif + + + +#if (NGX_DLBQUE) + +static void * +ngx_thread_pool_cycle(void *data) +{ + ngx_thread_pool_t* tp = (ngx_thread_pool_t*)data; + ngx_thread_task_t *task; + sigset_t set; + int ret; + static uint16_t seq=0; + + dlb_event_t events[NUM_EVENTS_PER_BATCH]; + + ngx_log_debug1(NGX_LOG_DEBUG_CORE, tp->log, 0, + "ldb thread in pool \"%V\" started", &tp->name); + + sigfillset(&set); + + sigdelset(&set, SIGILL); + sigdelset(&set, SIGFPE); + sigdelset(&set, SIGSEGV); + sigdelset(&set, SIGBUS); + + ret = pthread_sigmask(SIG_BLOCK, &set, NULL); + if (ret) { + ngx_log_error(NGX_LOG_ALERT, tp->log, ret, "ldb pthread_sigmask() failed"); + return NULL; + } + + for ( ;; ) { + + if (ngx_thread_mutex_lock(&tp->mtx, tp->log) != NGX_OK) { + return NULL; + } + + ret = dlb_recv(tp->loadbal_queue.rx_port, + NUM_EVENTS_PER_BATCH, + (wait_mode == POLL), + events); + + /* The port was disabled, indicating the thread should return */ + if (ret == -1 && errno == EACCES){ + ngx_log_error(NGX_LOG_ALERT, tp->log, ret, "dlb_recv() error"); + exit(1); + } + + if (dlb_release(tp->loadbal_queue.rx_port, ret) != ret) { + ngx_log_error(NGX_LOG_ALERT, tp->log, ret, "Failed to release all %d events !\n"); + return NULL; + } + + if (ngx_thread_mutex_unlock(&tp->mtx, tp->log) != NGX_OK) { + return NULL; + } + + for (int i =0; i < ret; i++){ + task = (ngx_thread_task_t*)events[i].recv.udata64; + + 
ngx_log_debug2(NGX_LOG_DEBUG_CORE, tp->log, 0, + "run task #%ui in ldb thread pool \"%V\"", + task->id, &tp->name); + + task->handler(task->ctx, tp->log); + + seq++; + + ngx_log_debug2(NGX_LOG_DEBUG_CORE, tp->log, 0, + "complete task #%ui in ldb thread pool \"%V\"", + task->id, &tp->name); + + task->next = NULL; + + ngx_spinlock(&ngx_thread_pool_done_lock, 1, 2048); + + *ngx_thread_pool_done.last = task; + ngx_thread_pool_done.last = &task->next; + + ngx_memory_barrier(); + + ngx_unlock(&ngx_thread_pool_done_lock); + + (void) ngx_notify(ngx_thread_pool_handler); + + } + } +} + + +#else static void * ngx_thread_pool_cycle(void *data) @@ -360,6 +785,7 @@ ngx_thread_pool_cycle(void *data) } } +#endif static void ngx_thread_pool_handler(ngx_event_t *ev) @@ -604,6 +1030,14 @@ ngx_thread_pool_init_worker(ngx_cycle_t *cycle) tpp = tcf->pools.elts; for (i = 0; i < tcf->pools.nelts; i++) { + tpp[i]->log = cycle->log; + +#if (NGX_DLBQUE) + if (ngx_loadbalance_init(tpp[i], i) != NGX_OK) { + return NGX_ERROR; + } +#endif + if (ngx_thread_pool_init(tpp[i], cycle->log, cycle->pool) != NGX_OK) { return NGX_ERROR; } diff --git a/src/core/ngx_thread_pool.h b/src/core/ngx_thread_pool.h index 5e5adf62..96a39889 100644 --- a/src/core/ngx_thread_pool.h +++ b/src/core/ngx_thread_pool.h @@ -20,6 +20,9 @@ struct ngx_thread_task_s { void *ctx; void (*handler)(void *data, ngx_log_t *log); ngx_event_t event; +#if (NGX_DLBQUE) + int16_t priority; +#endif }; diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c index bd3028bc..636b133f 100644 --- a/src/http/ngx_http_copy_filter_module.c +++ b/src/http/ngx_http_copy_filter_module.c @@ -259,6 +259,9 @@ ngx_http_copy_thread_handler(ngx_thread_task_t *task, ngx_file_t *file) task->event.data = r; task->event.handler = ngx_http_copy_thread_event_handler; +#if (NGX_DLBQUE) + task->priority = r->priority; +#endif if (ngx_thread_task_post(tp, task) != NGX_OK) { return NGX_ERROR; diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c index 4d2f6c42..055996a4 100644 --- a/src/http/ngx_http_file_cache.c +++ b/src/http/ngx_http_file_cache.c @@ -781,6 +781,9 @@ ngx_http_cache_thread_handler(ngx_thread_task_t *task, ngx_file_t *file) task->event.data = r; task->event.handler = ngx_http_cache_thread_event_handler; +#if (NGX_DLBQUE) + task->priority = r->priority; +#endif if (ngx_thread_task_post(tp, task) != NGX_OK) { return NGX_ERROR; diff --git a/src/http/ngx_http_parse.c b/src/http/ngx_http_parse.c index d4f2dae8..f0569c8c 100644 --- a/src/http/ngx_http_parse.c +++ b/src/http/ngx_http_parse.c @@ -100,6 +100,29 @@ static uint32_t usual[] = { /* gcc, icc, msvc and others compile these switches as an jump table */ +static void http_set_priority(ngx_http_request_t* r, u_char* m, ngx_buf_t *b) +{ + + u_char * p; + for (p = m+10; p < b->last; p++) { + if(ngx_str5cmp(p, 'V', 'I', 'D', 'E', 'O')) { + r->priority = 0; + break; + } + else if(ngx_str5cmp(p, 'A', 'U', 'D', 'I', 'O')) { + r->priority = 1; + break; + } + else if (ngx_str4cmp(p, 'T', 'E', 'X', 'T')) { + r->priority = 2; + break; + } + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "URI priority : \"%d\"", r->priority); +} + ngx_int_t ngx_http_parse_request_line(ngx_http_request_t *r, ngx_buf_t *b) { @@ -164,6 +187,7 @@ ngx_http_parse_request_line(ngx_http_request_t *r, ngx_buf_t *b) case 3: if (ngx_str3_cmp(m, 'G', 'E', 'T', ' ')) { r->method = NGX_HTTP_GET; + http_set_priority(r, m, b); break; } diff --git a/src/http/ngx_http_request.c 
b/src/http/ngx_http_request.c index 131a2c83..bffacabe 100644 --- a/src/http/ngx_http_request.c +++ b/src/http/ngx_http_request.c @@ -573,6 +573,8 @@ ngx_http_alloc_request(ngx_connection_t *c) r->header_in = hc->busy ? hc->busy->buf : c->buffer; + r->priority = 3; + if (ngx_list_init(&r->headers_out.headers, r->pool, 20, sizeof(ngx_table_elt_t)) != NGX_OK) @@ -1323,6 +1325,19 @@ ngx_http_process_request_uri(ngx_http_request_t *r) ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http args: \"%V\"", &r->args); + ngx_str_t dlb_priority; + if (ngx_http_arg(r, (u_char *) "dlb_priority", 12, &dlb_priority) == NGX_OK) { + ngx_int_t dlb_priority_int = ngx_atoi(dlb_priority.data, dlb_priority.len); + if (dlb_priority_int >=0 && dlb_priority_int <=255) { + r->priority = dlb_priority_int; + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "dlb_priority: \"%V\", %d", &dlb_priority, dlb_priority_int); + } else { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "invalid dlb_priority: %d", dlb_priority_int); + } + } + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http exten: \"%V\"", &r->exten); diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h index 8c9eed24..15b11a1a 100644 --- a/src/http/ngx_http_request.h +++ b/src/http/ngx_http_request.h @@ -597,6 +597,9 @@ struct ngx_http_request_s { u_char *port_start; u_char *port_end; + //priority + u_char priority; //0~3 + unsigned http_minor:16; unsigned http_major:16; }; diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c index 3ae822bb..ea7b8252 100644 --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -3920,6 +3920,9 @@ ngx_http_upstream_thread_handler(ngx_thread_task_t *task, ngx_file_t *file) task->event.data = r; task->event.handler = ngx_http_upstream_thread_event_handler; +#if (NGX_DLBQUE) + task->priority = r->priority; +#endif if (ngx_thread_task_post(tp, task) != NGX_OK) { return NGX_ERROR; -- 2.34.1 From alvn at alvn.dk Fri Oct 21 12:12:48 2022 From: alvn at alvn.dk (Anders Nicolaisen) Date: Fri, 21 Oct 2022 14:12:48 +0200 Subject: [nginx] allowing auth_request to proxy TOO_MANY_REQUESTS In-Reply-To: References: <142666848.1.1665486276619@alvn> Message-ID: This works! Although this still requires an extra line for each new location, which might be forgotten by a collaborator, but this configuration is definitely viable. Thanks a lot! On Wed, 12 Oct 2022 at 15:44, Maxim Dounin wrote: > Hello! > > On Wed, Oct 12, 2022 at 11:04:50AM +0200, Anders Nicolaisen via > nginx-devel wrote: > > > Thanks! > > This does make sense, and one might be able to create a somewhat working > > example using this. > > > > However, this seems to introduce a couple of drawbacks, and kind of > > breaks the semantics of the 'auth_request'. > > > > Let me illustrate: > > > > First of all, having auth_request in the server context guards against > > any newly added locations that might yet be missing rules handled > > by the authentication server. > > So, whenever a new location is added, the authentication server needs > > to be updated as well before any requests can be redirected. > > This will most often actually be a good thing in an environment with > > a lot of rules and multiple developers. > > > > Second, if multiple developers are editing the configurations, they are > > not required to remember the 'internal' in order to bar these from > > outsiders, > > as this would be automatically imposed via auth_request. 
> > > > It seems to be more in line with the current semantics of auth_request, > > and also by far cleaner code/configurations, by having auth_request be > > able to relay this one more status code. > > Sure, details of the X-Accel-Redirect semantics is different from > the one provided by auth_request. > > If you prefer auth_request semantics, you can do the same with > auth_request and appropriate handling of the 403 errors, for > example (assuming the auth backend cannot be modified and returns > 429): > > server { > listen 8080; > > location / { > auth_request /auth; > error_page 403 = /error; > proxy_pass ... > } > > location = /auth { > error_page 429 = /limit; > proxy_intercept_errors on; > proxy_pass http://127.0.0.1:8081; > } > > location = /limit { > set $limit 1; > return 403; > } > > location = /error { > if ($limit) { > return 429; > } > return 403; > } > } > > server { > listen 8081; > > # an example X-Accel-Redirect server > # which rejects requests with 'foo' header set to a true > # value > > if ($http_foo) { > return 429; > } > > return 204; > } > > The general issue with "having auth_request be able to relay this > one more status code" as I see it is that it's not just one status > code. For example, request limiting in nginx by default uses 503 > status code, and it is not clear why 429 handling should be > different. Further, there is the Retry-After header field, which > is optional, though may appear in both 429 and 503 responses. > Further, there are other temporary conditions which might be > considered, such as 413 (with Retry-After) or 502/504 errors. > Trying to extend auth_reqest to handle unrelated response codes is > going to result in a lot of additional logic there, which is not > needed in most configurations and will complicate things. And > this is something I would prefer to avoid, especially given that > the desired handling can be easily implemented simply by writing > an appropriate configuration. > > > P.S.: > > I tried to test your suggestion with this simple conf: > > ----- > > server { > > > > location / { > > proxy_pass http://localhost:8888/auth; > > } > > location @content { > > proxy_pass http://localhost:8888/; > > } > > } > > ---- > > > > And got this error: > > > > === > > 2022/10/12 08:51:09 [emerg] 1451#1451: "proxy_pass" cannot have URI part > in > > location given by regular expression, or inside named location, or inside > > "if" statement, or inside "limit_except" block > > === > > > > I'm guessing I just did something wrong, but the error message seems to > > tell me that it is > > not possible to do it this way. > > In named locations there are no location prefix to replace with > the URI part specified in proxy_pass, so you should use proxy_pass > without URI part, that is, "proxy_pass http://localhost:8888;", > note no "/" at the end. > > See here for details: > > http://nginx.org/r/proxy_pass > > Hope this helps. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From e.grebenshchikov at f5.com Fri Oct 21 14:39:23 2022 From: e.grebenshchikov at f5.com (=?iso-8859-1?q?Eugene_Grebenschikov?=) Date: Fri, 21 Oct 2022 07:39:23 -0700 Subject: [PATCH] Tests: variables for proxy protocol v2 TLVs Message-ID: <4f282294d8aec6fc20d6.1666363163@DHNVMN3.localdomain> A non-text attachment was scrubbed... Name: nginx-tests.patch Type: text/x-patch Size: 4840 bytes Desc: not available URL: From e.grebenshchikov at f5.com Fri Oct 21 14:55:34 2022 From: e.grebenshchikov at f5.com (=?iso-8859-1?q?Eugene_Grebenschikov?=) Date: Fri, 21 Oct 2022 07:55:34 -0700 Subject: [PATCH] Tests: clearing of pre-existing Content-Range headers Message-ID: <20ef9719316bdee06a6f.1666364134@DHNVMN3.localdomain> A non-text attachment was scrubbed... Name: nginx-tests.patch Type: text/x-patch Size: 2065 bytes Desc: not available URL: From mdounin at mdounin.ru Fri Oct 21 15:08:00 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 21 Oct 2022 18:08:00 +0300 Subject: [PATCH] Intel DLB(Dynamic Load Banlander) demo in NGINX In-Reply-To: <20221021134227.5696-1-fino.meng@intel.com> References: <20221021134227.5696-1-fino.meng@intel.com> Message-ID: Hello! On Fri, Oct 21, 2022 at 09:42:27PM +0800, Meng, Fino wrote: > Intel® Dynamic Load Balancer (Intel® DLB) is a hardware managed system of queues > and arbiters connecting producers and consumers. It is a PCI device envisaged to > live in the server CPU uncore and can interact with software running on cores, > and potentially with other devices. This demo need run on next generation > Xeon processer(code name Sapphire Rapids). > > Signed-off-by: Meng, Fino While it is clear from the patch that it is not intended to be committed or even reviewed, it might be a good idea describe what this demo is expected to do and what are potential benefits. Thanks! -- Maxim Dounin http://mdounin.ru/ From jojy_varghese at apple.com Fri Oct 21 16:13:55 2022 From: jojy_varghese at apple.com (Jojy Varghese) Date: Fri, 21 Oct 2022 09:13:55 -0700 Subject: Http2 idle connection and worker shutdown In-Reply-To: References: <5F0F8B26-EB47-462E-B14B-F627E78BD4A4@apple.com> Message-ID: Thanks again Maxim. Response inlined. > On Oct 20, 2022, at 6:43 PM, Maxim Dounin wrote: > > Hello! > > On Thu, Oct 20, 2022 at 06:09:11PM -0700, Jojy Varghese via nginx-devel wrote: > > [...] > >>> If you think that nginx does something wrong, you may want to >>> elaborate on the details of the incorrect behaviour you observe >>> (as well as why you think it's incorrect, and how would you like >>> to improve it). >> >> We ran into a problem where the connection close (as described >> above) causes some clients (chrome browser) to fail long running >> (large) downloads using h2. This is I think because these >> clients send a WINDOW UPDATE towards the end and Nginx sends a >> TCP RST since the connection is closed. > > So it looks like you are running into the race condition the > lingering close is expected to solve. You may want to upgrade > nginx then. Indeed looks like that :). I think the lingering close should solve the problem/race condition we are seeing. > > Note well that lingering close support in HTTP/2 was added in > nginx 1.19.1, more than two years ago, and the last nginx version > without it is not supported for more than a year. 
> > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org From mdounin at mdounin.ru Fri Oct 21 22:12:38 2022 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 22 Oct 2022 01:12:38 +0300 Subject: [PATCH] Tests: clearing of pre-existing Content-Range headers In-Reply-To: <20ef9719316bdee06a6f.1666364134@DHNVMN3.localdomain> References: <20ef9719316bdee06a6f.1666364134@DHNVMN3.localdomain> Message-ID: Hello! On Fri, Oct 21, 2022 at 07:55:34AM -0700, Eugene Grebenschikov via nginx-devel wrote: > # HG changeset patch > # User Eugene Grebenschikov > # Date 1666307094 25200 > # Thu Oct 20 16:04:54 2022 -0700 > # Node ID 20ef9719316bdee06a6faf731bf76c1777bc98ba > # Parent 4f282294d8aec6fc20d6d68690bdf800629ad606 > Tests: clearing of pre-existing Content-Range headers > > diff -r 4f282294d8ae -r 20ef9719316b range.t > --- a/range.t Wed Oct 19 10:27:19 2022 -0700 > +++ b/range.t Thu Oct 20 16:04:54 2022 -0700 > @@ -21,7 +21,7 @@ > select STDERR; $| = 1; > select STDOUT; $| = 1; > > -my $t = Test::Nginx->new()->has(qw/http charset/)->plan(41); > +my $t = Test::Nginx->new()->has(qw/http charset proxy cache/)->plan(44); > > $t->write_file_expand('nginx.conf', <<'EOF'); > > @@ -39,6 +39,9 @@ > 58 59; # X -> Y > } > > + proxy_cache_path %%TESTDIR%%/cache levels=1:2 > + keys_zone=NAME:1m; > + > server { > listen 127.0.0.1:8080; > server_name localhost; > @@ -55,6 +58,18 @@ > location /t4.html { > max_ranges 0; > } > + > + location /t5.html { > + proxy_pass http://127.0.0.1:8080/stub; > + proxy_cache NAME; > + proxy_cache_valid 200 1m; > + } > + > + location /stub { > + add_header Content-Range stub; > + add_header Accept-Ranges bytes; > + return 200 "SEE-THIS"; > + } > } > } > > @@ -154,6 +169,20 @@ > unlike(http_get_range('/t4.html', 'Range: bytes=0-9'), qr/ 206 /, > 'max_ranges zero'); > > +# clearing of pre-existing Content-Range headers > + > +TODO: { > +local $TODO = 'not yet' unless $t->has_version('1.23.1'); > + > +like(http_get_range('/t5.html', 'Range: bytes=0-4'), > + qr/ 206 (?!.*stub)/s, 'content range cleared - range request'); > +like(http_get_range('/t5.html', 'Range: bytes=0-2,4-'), > + qr/ 206 (?!.*stub)/s, 'content range cleared - multipart'); > +like(http_get_range('/t5.html', 'Range: bytes=1000-'), > + qr/ 416 (?!.*stub)/s, 'content range cleared - not satisfable'); > + > +} > + > ############################################################################### > > sub http_get_range { It might be a good idea to keep this separate from the basic range filter tests. E.g., in a separate file, similarly to range_charset.t tests. Or in a generic test file for tests with proxying, that is, combined with range_charset.t. This approach ensures that even a stripped-down nginx version, such as one compiled with "--without-http_proxy_module", can be properly tested by the test suite. -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Sat Oct 22 01:49:10 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Sat, 22 Oct 2022 01:49:10 +0000 Subject: [njs] Fixed shell tests after changes in backtraces and function names. Message-ID: details: https://hg.nginx.org/njs/rev/c42cdba1626d branches: changeset: 1979:c42cdba1626d user: Dmitry Volyntsev date: Fri Oct 21 18:48:39 2022 -0700 description: Fixed shell tests after changes in backtraces and function names. 
diffstat: test/shell_test.exp | 12 ++++++------ 1 files changed, 6 insertions(+), 6 deletions(-) diffs (45 lines): diff -r dc6bc4de1185 -r c42cdba1626d test/shell_test.exp --- a/test/shell_test.exp Thu Oct 20 16:40:35 2022 -0700 +++ b/test/shell_test.exp Fri Oct 21 18:48:39 2022 -0700 @@ -313,9 +313,9 @@ njs_test { {"var print = console.dump.bind(console); print(1, 'a', [1, 2])\r\n" "1 a \\\[\r\n 1,\r\n 2\r\n]\r\nundefined\r\n>> "} {"var print = console.log.bind(console); print(console.a.a)\r\n" - "TypeError: cannot get property \"a\" of undefined*at console.log"} + "TypeError: cannot get property \"a\" of undefined"} {"print(console.a.a)\r\n" - "TypeError: cannot get property \"a\" of undefined*at console.log"} + "TypeError: cannot get property \"a\" of undefined"} } # Backtraces for external objects @@ -329,7 +329,7 @@ njs_test { {"var o = {toString: function(){}, log: console.log}\r\n" "undefined\r\n>> "} {"o\r\n" - "o\r\n{\r\n toString: \\\[Function],\r\n log: \\\[Function: log]\r\n}"} + "o\r\n{\r\n toString: \\\[Function: toString],\r\n log: \\\[Function: log]\r\n}"} } njs_test { @@ -387,7 +387,7 @@ njs_test { {"Object.keys(null)\r\n" "Thrown:\r\nTypeError: cannot convert null argument to object"} {"e\r\n" - "TypeError: cannot get property \"a\" of undefined*at f (shell:1)"} + "TypeError: cannot get property \"a\" of undefined"} } # Non-ASCII characters @@ -541,9 +541,9 @@ njs_test { {"import ref from 'ref_exception.js'\r\n" "ReferenceError: \"undeclared\" is not defined"} {"var ref\r\n" - "undefined\r\n"} + "SyntaxError: \"ref\" has already been declared"} {"import ref from 'ref_exception.js'\r\n" - "ReferenceError: \"undeclared\" is not defined"} + "SyntaxError: \"ref\" has already been declared"} } "-p test/js/module/ -p test/js/module/libs/" # quiet mode From xeioex at nginx.com Sat Oct 22 01:49:12 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Sat, 22 Oct 2022 01:49:12 +0000 Subject: [njs] Added njs.memoryStats object. Message-ID: details: https://hg.nginx.org/njs/rev/ebc48ac9f656 branches: changeset: 1980:ebc48ac9f656 user: Dmitry Volyntsev date: Fri Oct 21 18:48:39 2022 -0700 description: Added njs.memoryStats object. njs.memoryStats.size is a number of bytes a VM claimed from the OS. 
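The new accounting is reachable from C through njs_mp_stat(), declared in src/njs_mp.h in the diff below; the JavaScript-visible njs.memoryStats object exercised by the unit tests is built on top of it. A minimal sketch of reading the counters from a standalone pool follows. The pool parameters (4096, 128, 512, 16) are illustrative only, not the values the VM itself uses, and the example assumes it is built inside the njs source tree so that njs_main.h is on the include path.

    /*
     * Sketch: query the new njs_mp_stat() counters for a standalone
     * memory pool.  Pool parameters are illustrative only.
     */
    #include <njs_main.h>
    #include <stdio.h>

    static void
    report_pool_usage(void)
    {
        njs_mp_t       *mp;
        njs_mp_stat_t   stat;

        mp = njs_mp_create(4096, 128, 512, 16);
        if (mp == NULL) {
            return;
        }

        /* claim some memory so the counters are non-trivial */
        (void) njs_mp_alloc(mp, 1024);

        njs_mp_stat(mp, &stat);

        printf("blocks: %zu, bytes claimed from the OS: %zu\n",
               stat.nblocks, stat.size);

        njs_mp_destroy(mp);
    }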
diffstat: src/njs_builtin.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++ src/njs_mp.c | 24 ++++++++++++++++ src/njs_mp.h | 9 ++++++ src/test/njs_unit_test.c | 16 ++++++++++ 4 files changed, 119 insertions(+), 0 deletions(-) diffs (173 lines): diff -r c42cdba1626d -r ebc48ac9f656 src/njs_builtin.c --- a/src/njs_builtin.c Fri Oct 21 18:48:39 2022 -0700 +++ b/src/njs_builtin.c Fri Oct 21 18:48:39 2022 -0700 @@ -912,6 +912,69 @@ njs_ext_on(njs_vm_t *vm, njs_value_t *ar static njs_int_t +njs_ext_memory_stats(njs_vm_t *vm, njs_object_prop_t *prop, + njs_value_t *unused, njs_value_t *unused2, njs_value_t *retval) +{ + njs_int_t ret; + njs_value_t object, value; + njs_object_t *stat; + njs_mp_stat_t mp_stat; + + static const njs_value_t size_string = njs_string("size"); + static const njs_value_t nblocks_string = njs_string("nblocks"); + static const njs_value_t page_string = njs_string("page_size"); + static const njs_value_t cluster_string = njs_string("cluster_size"); + + stat = njs_object_alloc(vm); + if (njs_slow_path(stat == NULL)) { + return NJS_ERROR; + } + + njs_set_object(&object, stat); + + njs_mp_stat(vm->mem_pool, &mp_stat); + + njs_set_number(&value, mp_stat.size); + + ret = njs_value_property_set(vm, &object, njs_value_arg(&size_string), + &value); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + njs_set_number(&value, mp_stat.nblocks); + + ret = njs_value_property_set(vm, &object, njs_value_arg(&nblocks_string), + &value); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + njs_set_number(&value, mp_stat.cluster_size); + + ret = njs_value_property_set(vm, &object, njs_value_arg(&cluster_string), + &value); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + njs_set_number(&value, mp_stat.page_size); + + ret = njs_value_property_set(vm, &object, njs_value_arg(&page_string), + &value); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + njs_set_object(retval, stat); + + return NJS_OK; +} + + + + +static njs_int_t njs_global_this_prop_handler(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *global, njs_value_t *setval, njs_value_t *retval) { @@ -1725,6 +1788,13 @@ static const njs_object_prop_t njs_njs_ .value = njs_native_function(njs_ext_on, 0), .configurable = 1, }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("memoryStats"), + .value = njs_prop_handler(njs_ext_memory_stats), + }, + }; diff -r c42cdba1626d -r ebc48ac9f656 src/njs_mp.c --- a/src/njs_mp.c Fri Oct 21 18:48:39 2022 -0700 +++ b/src/njs_mp.c Fri Oct 21 18:48:39 2022 -0700 @@ -285,6 +285,30 @@ njs_mp_destroy(njs_mp_t *mp) } +void +njs_mp_stat(njs_mp_t *mp, njs_mp_stat_t *stat) +{ + njs_mp_block_t *block; + njs_rbtree_node_t *node; + + stat->size = 0; + stat->nblocks = 0; + stat->cluster_size = mp->cluster_size; + stat->page_size = mp->page_size; + + node = njs_rbtree_min(&mp->blocks); + + while (njs_rbtree_is_there_successor(&mp->blocks, node)) { + block = (njs_mp_block_t *) node; + + stat->nblocks++; + stat->size += block->size; + + node = njs_rbtree_node_successor(&mp->blocks, node); + } +} + + void * njs_mp_alloc(njs_mp_t *mp, size_t size) { diff -r c42cdba1626d -r ebc48ac9f656 src/njs_mp.h --- a/src/njs_mp.h Fri Oct 21 18:48:39 2022 -0700 +++ b/src/njs_mp.h Fri Oct 21 18:48:39 2022 -0700 @@ -20,6 +20,14 @@ struct njs_mp_cleanup_s { }; +typedef struct { + size_t size; + size_t nblocks; + size_t page_size; + size_t cluster_size; +} njs_mp_stat_t; + + NJS_EXPORT njs_mp_t *njs_mp_create(size_t cluster_size, size_t page_alignment, size_t 
page_size, size_t min_chunk_size) NJS_MALLOC_LIKE; NJS_EXPORT njs_mp_t * njs_mp_fast_create(size_t cluster_size, @@ -27,6 +35,7 @@ NJS_EXPORT njs_mp_t * njs_mp_fast_create NJS_MALLOC_LIKE; NJS_EXPORT njs_bool_t njs_mp_is_empty(njs_mp_t *mp); NJS_EXPORT void njs_mp_destroy(njs_mp_t *mp); +NJS_EXPORT void njs_mp_stat(njs_mp_t *mp, njs_mp_stat_t *stat); NJS_EXPORT void *njs_mp_alloc(njs_mp_t *mp, size_t size) NJS_MALLOC_LIKE; diff -r c42cdba1626d -r ebc48ac9f656 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Fri Oct 21 18:48:39 2022 -0700 +++ b/src/test/njs_unit_test.c Fri Oct 21 18:48:39 2022 -0700 @@ -18239,6 +18239,22 @@ static njs_unit_test_t njs_test[] = { njs_str("njs.on('exit', ()=>{}); 1"), njs_str("1") }, + /* njs.memoryStats. */ + + { njs_str("Object.keys(njs.memoryStats).sort()"), + njs_str("cluster_size,nblocks,page_size,size") }, + + { njs_str("typeof njs.memoryStats.size"), + njs_str("number") }, + + { njs_str("njs.memoryStats.size > 4096"), + njs_str("true") }, + + { njs_str("var size = njs.memoryStats.size;" + "new Array(2**15);" + "njs.memoryStats.size > size"), + njs_str("true") }, + /* Built-in methods name. */ { njs_str( From arut at nginx.com Sun Oct 23 04:28:23 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Sun, 23 Oct 2022 08:28:23 +0400 Subject: [PATCH] Documented the $proxy_protocol_tlv_ variable In-Reply-To: <881627fb56bfff009297.1666170103@ORK-ML-00007151> References: <881627fb56bfff009297.1666170103@ORK-ML-00007151> Message-ID: Hi, > On 19 Oct 2022, at 13:01, Yaroslav Zhuravlev wrote: > > xml/en/docs/http/ngx_http_core_module.xml | 107 +++++++++++++++++++++++++- > xml/en/docs/stream/ngx_stream_core_module.xml | 105 +++++++++++++++++++++++++- > xml/ru/docs/http/ngx_http_core_module.xml | 106 +++++++++++++++++++++++++- > xml/ru/docs/stream/ngx_stream_core_module.xml | 106 +++++++++++++++++++++++++- > 4 files changed, 420 insertions(+), 4 deletions(-) > > > # HG changeset patch > # User Yaroslav Zhuravlev > # Date 1666118553 -3600 > # Tue Oct 18 19:42:33 2022 +0100 > # Node ID 881627fb56bfff009297b6cc15cd1f5d58ebc030 > # Parent 3f5e91af4a52c5b8bbc75c0ea31e3ceac0bebea8 > Documented the $proxy_protocol_tlv_ variable. > > diff --git a/xml/en/docs/http/ngx_http_core_module.xml b/xml/en/docs/http/ngx_http_core_module.xml > --- a/xml/en/docs/http/ngx_http_core_module.xml > +++ b/xml/en/docs/http/ngx_http_core_module.xml > @@ -10,7 +10,7 @@ > link="/en/docs/http/ngx_http_core_module.html" > lang="en" > - rev="100"> > + rev="101"> > >
> > @@ -3228,6 +3228,111 @@ > > > > +$proxy_protocol_tlv_name > + > +TLV from the PROXY Protocol header (1.23.2). > +The name can be a TLV type name or its numeric value. > +In the latter case, the value is hexadecimal > +and should be prefixed with 0x: > + > + > +$proxy_protocol_tlv_alpn > +$proxy_protocol_tlv_0x01 > + > +SSL TLVs can also be accessed by TLV type name > +or its numeric value, > +both prefixed by ssl_: > + > +$proxy_protocol_tlv_ssl_version > +$proxy_protocol_tlv_ssl_0x22 > + > + > + > +The following TLV type names are supported: > + > + > + > +alpn (0x01)— > +upper layer protocol used over the connection > + > + > + > +authority (0x02)— > +host name value passed by the client > + > + > + > +unique_id (0x05)— > +unique connection id > + > + > + > +netns (0x30)— > +name of the namespace > + > + > + > +ssl (0x20)— > +client SSL fields as binary > + > + > + > + > + > + > +The following SSL TLV type names are supported: > + > + > + > +ssl_version (0x21)— > +SSL version used in client connection > + > + > + > +ssl_cn (0x22)— > +SSL certificate Common Name > + > + > + > +ssl_cipher (0x23)— > +name of the used cipher > + > + > + > +ssl_sig_alg (0x24)— > +algorithm used to sign the certificate > + > + > + > +ssl_key_alg (0x25)— > +public-key algorithm > + > + > + > + > + > + > +Also, the following special SSL TLV type name is supported: > + > + > + > +ssl_verify— > +client SSL certificate verification result, > +0 if the client presented a certificate > +and it was successfully verified, > +non-zero otherwise. > + > + > + > + > + > + > +The PROXY protocol must be previously enabled by setting the > +proxy_protocol parameter > +in the directive. > + > + > + > $query_string > > same as $args > diff --git a/xml/en/docs/stream/ngx_stream_core_module.xml b/xml/en/docs/stream/ngx_stream_core_module.xml > --- a/xml/en/docs/stream/ngx_stream_core_module.xml > +++ b/xml/en/docs/stream/ngx_stream_core_module.xml > @@ -9,7 +9,7 @@ > link="/en/docs/stream/ngx_stream_core_module.html" > lang="en" > - rev="35"> > + rev="36"> > >
> > @@ -587,6 +587,109 @@ > > > > +$proxy_protocol_tlv_name > + > +TLV from the PROXY Protocol header (1.23.2). > +The name can be a TLV type name or its numeric value. > +In the latter case, the value is hexadecimal > +and should be prefixed with 0x: > + > + > +$proxy_protocol_tlv_alpn > +$proxy_protocol_tlv_0x01 > + > +SSL TLVs can also be accessed by TLV type name or its numeric value, > +both prefixed by ssl_: > + > +$proxy_protocol_tlv_ssl_version > +$proxy_protocol_tlv_ssl_0x22 > + > + > + > +The following TLV type names are supported: > + > + > + > +alpn (0x01)— > +upper layer protocol used over the connection > + > + > + > +authority (0x02)— > +host name value passed by the client > + > + > + > +unique_id (0x05)— > +unique connection id > + > + > + > +netns (0x30)— > +name of the namespace > + > + > + > +ssl (0x20)— > +client SSL fields as binary > + > + > + > + > + > + > +The following SSL TLV type names are supported: > + > + > + > +ssl_version (0x21)— > +SSL version used in client connection > + > + > + > +ssl_cn (0x22)— > +SSL certificate Common Name > + > + > + > +ssl_cipher (0x23)— > +name of the used cipher > + > + > + > +ssl_sig_alg (0x24)— > +algorithm used to sign the certificate > + > + > + > +ssl_key_alg (0x25)— > +public-key algorithm > + > + > + > + > + > + > +Also, the following special SSL TLV type name is supported: > + > + > + > +ssl_verify— > +client SSL certificate verification result, > +zero if the client presented a certificate > +and it was successfully verified, and non-zero otherwise > + > + > + > + > + > + > +The PROXY protocol must be previously enabled by setting the > +proxy_protocol parameter > +in the directive. > + > + > + > $remote_addr > > client address > diff --git a/xml/ru/docs/http/ngx_http_core_module.xml b/xml/ru/docs/http/ngx_http_core_module.xml > --- a/xml/ru/docs/http/ngx_http_core_module.xml > +++ b/xml/ru/docs/http/ngx_http_core_module.xml > @@ -10,7 +10,7 @@ > link="/ru/docs/http/ngx_http_core_module.html" > lang="ru" > - rev="100"> > + rev="101"> > >
> > @@ -3221,6 +3221,110 @@ > > > > +$proxy_protocol_tlv_имя > + > +TLV, полученный из заголовка протокола PROXY (1.23.2). > +Имя может быть именем типа TLV или его числовым значением. > +В последнем случае значение задаётся в шестнадцатеричном виде > +и должно начинаться с 0x: > + > + > +$proxy_protocol_tlv_alpn > +$proxy_protocol_tlv_0x01 > + > +SSL TLV могут также быть доступны как по имени типа TLV, > +так и по его числовому значению, > +оба должны начинаться с ssl_: > + > +$proxy_protocol_tlv_ssl_version > +$proxy_protocol_tlv_ssl_0x22 > + > + > + > +Поддерживаются следующие имена типов TLV: > + > + > + > +alpn (0x01)— > +протокол более высокого уровня, используемый поверх соединения > + > + > + > +authority (0x02)— > +значение имени хоста, передаваемое клиентом > + > + > + > +unique_id (0x05)— > +уникальный идентификатор соединения > + > + > + > +netns (0x30)— > +имя пространства имён > + > + > + > +ssl (0x20)— > +клиентские SSL поля в бинарном виде > + > + > + > + > + > + > +Поддерживаются следующие имена типов SSL TLV: > + > + > + > +ssl_version (0x21)— > +версия SSL, используемая в клиентском соединении > + > + > + > +ssl_cn (0x22)— > +Common Name сертификата > + > + > + > +ssl_cipher (0x23)— > +имя используемого шифра > + > + > + > +ssl_sig_alg (0x24)— > +алгоритм, используемый для подписи сертификата > + > + > + > +ssl_key_alg (0x25)— > +алгоритм публичного ключа > + > + > + > + > + > + > +Также поддерживается следующее специальное имя типа SSL TLV: > + > + > + > +ssl_verify— > +результат проверки клиентского сертификата: > +0, если клиент предоставил сертификат > +и он был успешно верифицирован, > +либо ненулевое значение > + > + > + > + > + > + > +Протокол PROXY должен быть предварительно включён при помощи установки > +параметра proxy_protocol в директиве . > + > + > + > $query_string > > то же, что и $args > diff --git a/xml/ru/docs/stream/ngx_stream_core_module.xml b/xml/ru/docs/stream/ngx_stream_core_module.xml > --- a/xml/ru/docs/stream/ngx_stream_core_module.xml > +++ b/xml/ru/docs/stream/ngx_stream_core_module.xml > @@ -9,7 +9,7 @@ > link="/ru/docs/stream/ngx_stream_core_module.html" > lang="ru" > - rev="35"> > + rev="36"> > >
> > @@ -591,6 +591,110 @@ > > > > +$proxy_protocol_tlv_имя > + > +TLV, полученный из заголовка протокола PROXY (1.23.2). > +Имя может быть именем типа TLV или его числовым значением. > +В последнем случае значение задаётся в шестнадцатеричном виде > +и должно начинаться с 0x: > + > + > +$proxy_protocol_tlv_alpn > +$proxy_protocol_tlv_0x01 > + > +SSL TLV могут также быть доступны как по имени типа TLV, > +так и по его числовому значению, > +оба должны начинаться с ssl_: > + > +$proxy_protocol_tlv_ssl_version > +$proxy_protocol_tlv_ssl_0x22 > + > + > + > +Поддерживаются следующие имена типов TLV: > + > + > + > +alpn (0x01)— > +протокол более высокого уровня, используемый поверх соединения > + > + > + > +authority (0x02)— > +значение имени хоста, передаваемое клиентом > + > + > + > +unique_id (0x05)— > +уникальный идентификатор соединения > + > + > + > +netns (0x30)— > +имя пространства имён > + > + > + > +ssl (0x20)— > +клиентские SSL поля в бинарном виде > + > + > + > + > + > + > +Поддерживаются следующие имена типов SSL TLV: > + > + > + > +ssl_version (0x21)— > +версия SSL, используемая в клиентском соединении > + > + > + > +ssl_cn (0x22)— > +Common Name сертификата > + > + > + > +ssl_cipher (0x23)— > +имя используемого шифра > + > + > + > +ssl_sig_alg (0x24)— > +алгоритм, используемый для подписи сертификата > + > + > + > +ssl_key_alg (0x25)— > +алгоритм публичного ключа > + > + > + > + > + > + > +Также поддерживается следующее специальное имя типа SSL TLV: > + > + > + > +ssl_verify— > +результат проверки клиентского сертификата: > +0, если клиент предоставил сертификат > +и он был успешно верифицирован, > +либо ненулевое значение > + > + > + > + > + > + > +Протокол PROXY должен быть предварительно включён при помощи установки > +параметра proxy_protocol в директиве . > + > + > + > $remote_addr > > адрес клиента > _______________________________________________ > nginx-devel mailing list -- nginx-devel at nginx.org > To unsubscribe send an email to nginx-devel-leave at nginx.org This has been slightly updated during internal review, Overall, looks good to me. ---- Roman Arutyunyan arut at nginx.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From v.zhestikov at f5.com Mon Oct 24 17:27:20 2022 From: v.zhestikov at f5.com (Vadim Zhestikov) Date: Mon, 24 Oct 2022 17:27:20 +0000 Subject: [njs] Fixed labeled empty statement. Message-ID: details: https://hg.nginx.org/njs/rev/594821c68b83 branches: changeset: 1981:594821c68b83 user: Vadim Zhestikov date: Mon Oct 24 07:48:28 2022 -0700 description: Fixed labeled empty statement. This fixes #593 issue on Github. 
diffstat: src/njs_parser.c | 64 ++++++++++++++++++++++++----------------------- src/test/njs_unit_test.c | 3 ++ 2 files changed, 36 insertions(+), 31 deletions(-) diffs (89 lines): diff -r ebc48ac9f656 -r 594821c68b83 src/njs_parser.c --- a/src/njs_parser.c Fri Oct 21 18:48:39 2022 -0700 +++ b/src/njs_parser.c Mon Oct 24 07:48:28 2022 -0700 @@ -4701,39 +4701,41 @@ njs_parser_statement_after(njs_parser_t new_node = parser->node; - if (new_node->hoist) { - child = &njs_parser_chain_top(parser); - - while (*child != NULL) { - node = *child; - - if (node->hoist) { - break; + if (new_node != NULL) { + if (new_node->hoist) { + child = &njs_parser_chain_top(parser); + + while (*child != NULL) { + node = *child; + + if (node->hoist) { + break; + } + + child = &node->left; } - child = &node->left; - } - - last = *child; - } - - stmt = njs_parser_node_new(parser, NJS_TOKEN_STATEMENT); - if (njs_slow_path(stmt == NULL)) { - return NJS_ERROR; - } - - stmt->hoist = new_node->hoist; - stmt->left = last; - stmt->right = new_node; - - *child = stmt; - - top = (child != &parser->target) ? njs_parser_chain_top(parser) - : stmt; - - parser->node = top; - - njs_parser_chain_top_set(parser, top); + last = *child; + } + + stmt = njs_parser_node_new(parser, NJS_TOKEN_STATEMENT); + if (njs_slow_path(stmt == NULL)) { + return NJS_ERROR; + } + + stmt->hoist = new_node->hoist; + stmt->left = last; + stmt->right = new_node; + + *child = stmt; + + top = (child != &parser->target) ? njs_parser_chain_top(parser) + : stmt; + + parser->node = top; + + njs_parser_chain_top_set(parser, top); + } return njs_parser_stack_pop(parser); } diff -r ebc48ac9f656 -r 594821c68b83 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Fri Oct 21 18:48:39 2022 -0700 +++ b/src/test/njs_unit_test.c Mon Oct 24 07:48:28 2022 -0700 @@ -3159,6 +3159,9 @@ static njs_unit_test_t njs_test[] = { njs_str("a:\n\n1"), njs_str("1") }, + { njs_str("a:;"), + njs_str("undefined") }, + { njs_str("a:\n\n"), njs_str("SyntaxError: Unexpected end of input in 3") }, From xeioex at nginx.com Tue Oct 25 05:50:21 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 25 Oct 2022 05:50:21 +0000 Subject: [njs] Version 0.7.8. Message-ID: details: https://hg.nginx.org/njs/rev/3308415d7de8 branches: changeset: 1982:3308415d7de8 user: Dmitry Volyntsev date: Mon Oct 24 21:29:44 2022 -0700 description: Version 0.7.8. diffstat: CHANGES | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 50 insertions(+), 0 deletions(-) diffs (57 lines): diff -r 594821c68b83 -r 3308415d7de8 CHANGES --- a/CHANGES Mon Oct 24 07:48:28 2022 -0700 +++ b/CHANGES Mon Oct 24 21:29:44 2022 -0700 @@ -1,3 +1,53 @@ +Changes with njs 0.7.8 25 Oct 2022 + + nginx modules: + + *) Feature: added js_preload_object directive. + + *) Feature: added ngx.conf_prefix property. + + *) Feature: added s.sendUpstream() and s.sendDownstream() + in stream module. + + *) Feature: added support for HEAD method in Fetch API. + + *) Improvement: improved async callback support for s.send() + in stream module. + + Core: + + *) Feature: added "name" instance property for a function + object. + + *) Feature: added njs.memoryStats object. + + *) Bugfix: fixed String.prototype.trimEnd() with unicode + string. + + *) Bugfix: fixed Object.freeze() with fast arrays. + + *) Bugfix: fixed Object.defineProperty() with fast arrays. + + *) Bugfix: fixed async token as a property name of an object. + + *) Bugfix: fixed property set instruction when key modifies + base binding. 
+ + *) Bugfix: fixed complex assignments. + + *) Bugfix: fixed handling of unhandled promise rejection. + + *) Bugfix: fixed process.env when duplicate environ variables + are present. + + *) Bugfix: fixed double declaration detection in modules. + + *) Bugfix: fixed bound function calls according to the spec. + + *) Bugfix: fixed break label for if statement. + + *) Bugfix: fixed labeled empty statements. + Changes with njs 0.7.7 30 Aug 2022 nginx modules: From xeioex at nginx.com Tue Oct 25 05:50:23 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 25 Oct 2022 05:50:23 +0000 Subject: [njs] Added tag 0.7.8 for changeset 3308415d7de8 Message-ID: details: https://hg.nginx.org/njs/rev/ac02f9219df3 branches: changeset: 1983:ac02f9219df3 user: Dmitry Volyntsev date: Mon Oct 24 22:49:55 2022 -0700 description: Added tag 0.7.8 for changeset 3308415d7de8 diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r 3308415d7de8 -r ac02f9219df3 .hgtags --- a/.hgtags Mon Oct 24 21:29:44 2022 -0700 +++ b/.hgtags Mon Oct 24 22:49:55 2022 -0700 @@ -53,3 +53,4 @@ b5198f7f11a3b5e174f9e75a7bd50394fa354fb0 63c258c456ca018385b13f352faefdf25c7bd3bb 0.7.5 461dfb0bb60e531d361319f30993f29860c19f55 0.7.6 1592d46d9076aa832b2d37d50b90f5edfca67030 0.7.7 +3308415d7de83c3c0c7c65405bec4836685a71de 0.7.8 From arut at nginx.com Tue Oct 25 09:11:02 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 25 Oct 2022 13:11:02 +0400 Subject: [PATCH 10 of 10] QUIC: application init() callback In-Reply-To: <20221020143322.b2c2g44dg2nofap2@N00W24XTQX> References: <8e58a27b320807aae001.1662627997@arut-laptop> <20221020115154.ygd4gzyuykvrgqsy@Y9MQ9X2QVV> <20221020143322.b2c2g44dg2nofap2@N00W24XTQX> Message-ID: <20221025091102.b52ydaz5cifjuwf6@N00W24XTQX> Hi, On Thu, Oct 20, 2022 at 06:33:22PM +0400, Roman Arutyunyan wrote: > Hi, > > On Thu, Oct 20, 2022 at 03:51:54PM +0400, Sergey Kandaurov wrote: > > On Thu, Sep 08, 2022 at 01:06:37PM +0400, Roman Arutyunyan wrote: > > > # HG changeset patch > > > # User Roman Arutyunyan > > > # Date 1662627905 -14400 > > > # Thu Sep 08 13:05:05 2022 +0400 > > > # Branch quic > > > # Node ID 8e58a27b320807aae00194b82e2c997287e3ad42 > > > # Parent 861d6897151fe6773898db6cfdb36f56403302c5 > > > QUIC: application init() callback. > > > > > > It's called after handshake completion or prior to the first early data stream > > > creation. The callback should initialize application-level data before > > > creating streams. > > > > > > HTTP/3 callback implementation sets keepalive timer and sends SETTINGS. > > > > > > Also, this allows to limit max handshake time in ngx_http_v3_init_stream(). > > > > Also brings header timeout (to limit handshake time) > > and keepalive timeout in hq mode. > > Looks like for hq keepalive timeout is now set in ngx_http_v3_init_stream() > when main connection is created and in ngx_http_v3_init() at the end of > handshake. After that it's neither set nor deleted. The code which does this, > only works for http/3, but not hq. This should be addressed. [..] Attached is a patch which fixes this. Now there's a connection cleanup which works for hq as well. The patch should be applied before the patch #10. -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1666687929 -14400 # Tue Oct 25 12:52:09 2022 +0400 # Branch quic # Node ID 16896749d64305c5e873434e420c95f58823dd4f # Parent e9285a39ff34aab034bb11b10e6ad723fd9cc2fa HTTP/3: implement keepalive for hq. 
Previously, keepalive timer was deleted in ngx_http_v3_wait_request_handler() and set in request cleanup handler. This worked for HTTP/3 connections, but not for hq connections. Now keepalive timer is deleted in ngx_http_v3_init_request_stream() and set in connection cleanup handler, which works both for HTTP/3 and hq. diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c --- a/src/http/v3/ngx_http_v3_request.c +++ b/src/http/v3/ngx_http_v3_request.c @@ -12,6 +12,7 @@ static void ngx_http_v3_init_request_stream(ngx_connection_t *c); static void ngx_http_v3_wait_request_handler(ngx_event_t *rev); +static void ngx_http_v3_cleanup_connection(void *data); static void ngx_http_v3_cleanup_request(void *data); static void ngx_http_v3_process_request(ngx_event_t *rev); static ngx_int_t ngx_http_v3_process_header(ngx_http_request_t *r, @@ -134,6 +135,7 @@ ngx_http_v3_init_request_stream(ngx_conn uint64_t n; ngx_event_t *rev; ngx_connection_t *pc; + ngx_pool_cleanup_t *cln; ngx_http_connection_t *hc; ngx_http_v3_session_t *h3c; ngx_http_core_loc_conf_t *clcf; @@ -189,6 +191,21 @@ ngx_http_v3_init_request_stream(ngx_conn "reached maximum number of requests"); } + cln = ngx_pool_cleanup_add(c->pool, 0); + if (cln == NULL) { + ngx_http_close_connection(c); + return; + } + + cln->handler = ngx_http_v3_cleanup_connection; + cln->data = c; + + h3c->nrequests++; + + if (h3c->keepalive.timer_set) { + ngx_del_timer(&h3c->keepalive); + } + rev = c->read; #if (NGX_HTTP_V3_HQ) @@ -225,7 +242,6 @@ ngx_http_v3_wait_request_handler(ngx_eve ngx_connection_t *c; ngx_pool_cleanup_t *cln; ngx_http_request_t *r; - ngx_http_v3_session_t *h3c; ngx_http_connection_t *hc; ngx_http_core_srv_conf_t *cscf; @@ -346,13 +362,6 @@ ngx_http_v3_wait_request_handler(ngx_eve cln->handler = ngx_http_v3_cleanup_request; cln->data = r; - h3c = ngx_http_v3_get_session(c); - h3c->nrequests++; - - if (h3c->keepalive.timer_set) { - ngx_del_timer(&h3c->keepalive); - } - rev->handler = ngx_http_v3_process_request; ngx_http_v3_process_request(rev); } @@ -387,20 +396,13 @@ ngx_http_v3_reset_stream(ngx_connection_ static void -ngx_http_v3_cleanup_request(void *data) +ngx_http_v3_cleanup_connection(void *data) { - ngx_http_request_t *r = data; + ngx_connection_t *c = data; - ngx_connection_t *c; ngx_http_v3_session_t *h3c; ngx_http_core_loc_conf_t *clcf; - c = r->connection; - - if (!r->response_sent) { - c->error = 1; - } - h3c = ngx_http_v3_get_session(c); if (--h3c->nrequests == 0) { @@ -411,6 +413,17 @@ ngx_http_v3_cleanup_request(void *data) static void +ngx_http_v3_cleanup_request(void *data) +{ + ngx_http_request_t *r = data; + + if (!r->response_sent) { + r->connection->error = 1; + } +} + + +static void ngx_http_v3_process_request(ngx_event_t *rev) { u_char *p; From thresh at nginx.com Tue Oct 25 11:26:49 2022 From: thresh at nginx.com (=?iso-8859-1?q?Konstantin_Pavlov?=) Date: Tue, 25 Oct 2022 15:26:49 +0400 Subject: [PATCH] Linux packages: added Ubuntu 22.10 "kinetic" Message-ID: # HG changeset patch # User Konstantin Pavlov # Date 1666697160 -14400 # Tue Oct 25 15:26:00 2022 +0400 # Node ID ba6c27b903c7cd1b7277e6fcebf2308e863e6c64 # Parent e4a87f3a05d851f874bcbe8750280929eb5f9894 Linux packages: added Ubuntu 22.10 "kinetic". diff -r e4a87f3a05d8 -r ba6c27b903c7 xml/en/linux_packages.xml --- a/xml/en/linux_packages.xml Fri Oct 21 16:33:37 2022 -0700 +++ b/xml/en/linux_packages.xml Tue Oct 25 15:26:00 2022 +0400 @@ -7,7 +7,7 @@
+ rev="81">
@@ -87,6 +87,11 @@ versions: x86_64, aarch64/arm64, s390x + +22.10 “kinetic” +x86_64, aarch64/arm64 + + diff -r e4a87f3a05d8 -r ba6c27b903c7 xml/ru/linux_packages.xml --- a/xml/ru/linux_packages.xml Fri Oct 21 16:33:37 2022 -0700 +++ b/xml/ru/linux_packages.xml Tue Oct 25 15:26:00 2022 +0400 @@ -7,7 +7,7 @@
+ rev="81">
@@ -87,6 +87,11 @@ x86_64, aarch64/arm64, s390x + +22.10 “kinetic” +x86_64, aarch64/arm64 + + From v.zhestikov at f5.com Tue Oct 25 13:53:32 2022 From: v.zhestikov at f5.com (Vadim Zhestikov) Date: Tue, 25 Oct 2022 13:53:32 +0000 Subject: [njs] Computed goto support added to vmcode. Message-ID: details: https://hg.nginx.org/njs/rev/86784a68e8c8 branches: changeset: 1984:86784a68e8c8 user: Vadim Zhestikov date: Tue Oct 25 06:43:10 2022 -0700 description: Computed goto support added to vmcode. diffstat: auto/clang | 19 + auto/computed_goto | 25 + auto/help | 4 + auto/options | 4 + auto/summary | 5 + configure | 1 + src/njs_clang.h | 8 + src/njs_disassembler.c | 2 - src/njs_parser.c | 2 +- src/njs_vmcode.c | 2633 ++++++++++++++++++++++++++++++----------------- src/njs_vmcode.h | 29 +- 11 files changed, 1748 insertions(+), 984 deletions(-) diffs (truncated from 2948 to 1000 lines): diff -r ac02f9219df3 -r 86784a68e8c8 auto/clang --- a/auto/clang Mon Oct 24 22:49:55 2022 -0700 +++ b/auto/clang Tue Oct 25 06:43:10 2022 -0700 @@ -142,6 +142,25 @@ njs_feature_test="struct __attribute__(( . auto/feature +njs_feature="GCC __attribute__ fallthrough" +njs_feature_name=NJS_HAVE_GCC_ATTRIBUTE_FALLTHROUGH +njs_feature_run=no +njs_feature_path= +njs_feature_libs= +njs_feature_test="int main(int argc, char *argv[]) { + switch (argc) { + case 0: + argc++; + __attribute__((fallthrough)); + default: + argc++; + } + + return argc; + }" +. auto/feature + + njs_feature="Address sanitizer" njs_feature_name=NJS_HAVE_ADDRESS_SANITIZER njs_feature_run=no diff -r ac02f9219df3 -r 86784a68e8c8 auto/computed_goto --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/auto/computed_goto Tue Oct 25 06:43:10 2022 -0700 @@ -0,0 +1,25 @@ + +# Copyright (C) Vadim Zhestikov +# Copyright (C) NGINX, Inc. + + +NJS_HAVE_COMPUTED_GOTO=NO + + +if [ $NJS_TRY_GOTO = YES ]; then + + njs_feature="Computed goto" + njs_feature_name=NJS_HAVE_COMPUTED_GOTO + njs_feature_run=no + njs_feature_incs= + njs_feature_libs= + njs_feature_test="int main(void) { + void *ptr; + ptr = &&label; + goto *ptr; + label: + return 0; + }" + . auto/feature + +fi diff -r ac02f9219df3 -r 86784a68e8c8 auto/help --- a/auto/help Mon Oct 24 22:49:55 2022 -0700 +++ b/auto/help Tue Oct 25 06:43:10 2022 -0700 @@ -27,6 +27,10 @@ default: "$NJS_LD_OPT" When this option is enabled only PCRE library is discovered. + --no-goto disables computed goto discovery. + When this option is enabled 'switch' statement + will be always used in instead of computed goto. + --no-openssl disables OpenSSL discovery. When this option is enabled OpenSSL dependant code is not built as a part of libnjs.a. diff -r ac02f9219df3 -r 86784a68e8c8 auto/options --- a/auto/options Mon Oct 24 22:49:55 2022 -0700 +++ b/auto/options Tue Oct 25 06:43:10 2022 -0700 @@ -20,6 +20,8 @@ NJS_OPENSSL=YES NJS_PCRE=YES NJS_TRY_PCRE2=YES +NJS_TRY_GOTO=YES + NJS_CONFIGURE_OPTIONS= for njs_option @@ -50,6 +52,8 @@ do --no-pcre) NJS_PCRE=NO ;; --no-pcre2) NJS_TRY_PCRE2=NO ;; + --no-goto) NJS_TRY_GOTO=NO ;; + --help) . 
auto/help exit 0 diff -r ac02f9219df3 -r 86784a68e8c8 auto/summary --- a/auto/summary Mon Oct 24 22:49:55 2022 -0700 +++ b/auto/summary Tue Oct 25 06:43:10 2022 -0700 @@ -22,6 +22,11 @@ if [ $NJS_HAVE_OPENSSL = YES ]; then echo " + using OpenSSL library: $NJS_OPENSSL_LIB" fi +if [ $NJS_HAVE_COMPUTED_GOTO = YES ]; then + echo " + using computed goto" +fi + + echo echo " njs build dir: $NJS_BUILD_DIR" echo " njs CLI: $NJS_BUILD_DIR/njs" diff -r ac02f9219df3 -r 86784a68e8c8 configure --- a/configure Mon Oct 24 22:49:55 2022 -0700 +++ b/configure Tue Oct 25 06:43:10 2022 -0700 @@ -46,6 +46,7 @@ NJS_LIB_AUX_LIBS= . auto/memalign . auto/getrandom . auto/stat +. auto/computed_goto . auto/explicit_bzero . auto/pcre . auto/readline diff -r ac02f9219df3 -r 86784a68e8c8 src/njs_clang.h --- a/src/njs_clang.h Mon Oct 24 22:49:55 2022 -0700 +++ b/src/njs_clang.h Tue Oct 25 06:43:10 2022 -0700 @@ -146,6 +146,14 @@ njs_leading_zeros64(uint64_t x) #endif +#if (NJS_HAVE_GCC_ATTRIBUTE_FALLTHROUGH) +#define NJS_FALLTHROUGH __attribute__((fallthrough)) + +#else +#define NJS_FALLTHROUGH +#endif + + #if (NJS_HAVE_GCC_ATTRIBUTE_MALLOC) #define NJS_MALLOC_LIKE __attribute__((__malloc__)) diff -r ac02f9219df3 -r 86784a68e8c8 src/njs_disassembler.c --- a/src/njs_disassembler.c Mon Oct 24 22:49:55 2022 -0700 +++ b/src/njs_disassembler.c Tue Oct 25 06:43:10 2022 -0700 @@ -23,8 +23,6 @@ static njs_code_name_t code_names[] = { njs_str("OBJECT ") }, { NJS_VMCODE_FUNCTION, sizeof(njs_vmcode_function_t), njs_str("FUNCTION ") }, - { NJS_VMCODE_THIS, sizeof(njs_vmcode_this_t), - njs_str("THIS ") }, { NJS_VMCODE_ARGUMENTS, sizeof(njs_vmcode_arguments_t), njs_str("ARGUMENTS ") }, { NJS_VMCODE_REGEXP, sizeof(njs_vmcode_regexp_t), diff -r ac02f9219df3 -r 86784a68e8c8 src/njs_parser.c --- a/src/njs_parser.c Mon Oct 24 22:49:55 2022 -0700 +++ b/src/njs_parser.c Tue Oct 25 06:43:10 2022 -0700 @@ -4527,7 +4527,7 @@ njs_parser_expression_comma(njs_parser_t njs_parser_next(parser, njs_parser_assignment_expression); return njs_parser_expression_node(parser, token, current, NJS_TOKEN_COMMA, - NJS_VMCODE_NOP, + 0, njs_parser_expression_comma); } diff -r ac02f9219df3 -r 86784a68e8c8 src/njs_vmcode.c --- a/src/njs_vmcode.c Mon Oct 24 22:49:55 2022 -0700 +++ b/src/njs_vmcode.c Tue Oct 25 06:43:10 2022 -0700 @@ -108,7 +108,6 @@ njs_vmcode_interpreter(njs_vm_t *vm, u_c njs_vmcode_variable_t *var; njs_vmcode_prop_get_t *get; njs_vmcode_prop_set_t *set; - njs_vmcode_operation_t op; njs_vmcode_prop_next_t *pnext; njs_vmcode_test_jump_t *test_jump; njs_vmcode_equal_jump_t *equal; @@ -121,980 +120,1694 @@ njs_vmcode_interpreter(njs_vm_t *vm, u_c njs_vmcode_debug(vm, pc, "ENTER"); -next: - - for ( ;; ) { - - vmcode = (njs_vmcode_generic_t *) pc; - - /* - * The first operand is passed as is in value2 to - * NJS_VMCODE_JUMP, - * NJS_VMCODE_IF_TRUE_JUMP, - * NJS_VMCODE_IF_FALSE_JUMP, - * NJS_VMCODE_FUNCTION_FRAME, - * NJS_VMCODE_FUNCTION_CALL, - * NJS_VMCODE_RETURN, - * NJS_VMCODE_TRY_START, - * NJS_VMCODE_TRY_CONTINUE, - * NJS_VMCODE_TRY_BREAK, - * NJS_VMCODE_TRY_END, - * NJS_VMCODE_CATCH, - * NJS_VMCODE_THROW, - * NJS_VMCODE_STOP. - */ - value2 = (njs_value_t *) vmcode->operand1; - value1 = NULL; - - switch (vmcode->code.operands) { - - case NJS_VMCODE_3OPERANDS: - njs_vmcode_operand(vm, vmcode->operand3, value2); - - /* Fall through. 
*/ - - case NJS_VMCODE_2OPERANDS: - njs_vmcode_operand(vm, vmcode->operand2, value1); +#if !defined(NJS_HAVE_COMPUTED_GOTO) + #define SWITCH(op) switch (op) + #define CASE(op) case op + #define BREAK pc += ret; NEXT + + #define NEXT vmcode = (njs_vmcode_generic_t *) pc; \ + goto next + + #define NEXT_LBL next: + #define FALLTHROUGH NJS_FALLTHROUGH + +#else + #define SWITCH(op) goto *switch_tbl[(uint8_t) op]; + #define CASE(op) case_ ## op + #define BREAK pc += ret; NEXT + + #define NEXT vmcode = (njs_vmcode_generic_t *) pc; \ + SWITCH (vmcode->code.operation) + + #define NEXT_LBL + #define FALLTHROUGH + + #define NJS_GOTO_ROW(name) [ (uint8_t) name ] = &&case_ ## name + + static const void * const switch_tbl[NJS_VMCODES] = { + + NJS_GOTO_ROW(NJS_VMCODE_PUT_ARG), + NJS_GOTO_ROW(NJS_VMCODE_STOP), + NJS_GOTO_ROW(NJS_VMCODE_JUMP), + NJS_GOTO_ROW(NJS_VMCODE_PROPERTY_SET), + NJS_GOTO_ROW(NJS_VMCODE_PROPERTY_ACCESSOR), + NJS_GOTO_ROW(NJS_VMCODE_IF_TRUE_JUMP), + NJS_GOTO_ROW(NJS_VMCODE_IF_FALSE_JUMP), + NJS_GOTO_ROW(NJS_VMCODE_IF_EQUAL_JUMP), + NJS_GOTO_ROW(NJS_VMCODE_PROPERTY_INIT), + NJS_GOTO_ROW(NJS_VMCODE_RETURN), + NJS_GOTO_ROW(NJS_VMCODE_FUNCTION_COPY), + NJS_GOTO_ROW(NJS_VMCODE_FUNCTION_FRAME), + NJS_GOTO_ROW(NJS_VMCODE_METHOD_FRAME), + NJS_GOTO_ROW(NJS_VMCODE_FUNCTION_CALL), + NJS_GOTO_ROW(NJS_VMCODE_PROPERTY_NEXT), + NJS_GOTO_ROW(NJS_VMCODE_ARGUMENTS), + NJS_GOTO_ROW(NJS_VMCODE_PROTO_INIT), + NJS_GOTO_ROW(NJS_VMCODE_TO_PROPERTY_KEY), + NJS_GOTO_ROW(NJS_VMCODE_TO_PROPERTY_KEY_CHK), + NJS_GOTO_ROW(NJS_VMCODE_SET_FUNCTION_NAME), + NJS_GOTO_ROW(NJS_VMCODE_IMPORT), + NJS_GOTO_ROW(NJS_VMCODE_AWAIT), + NJS_GOTO_ROW(NJS_VMCODE_TRY_START), + NJS_GOTO_ROW(NJS_VMCODE_THROW), + NJS_GOTO_ROW(NJS_VMCODE_TRY_BREAK), + NJS_GOTO_ROW(NJS_VMCODE_TRY_CONTINUE), + NJS_GOTO_ROW(NJS_VMCODE_TRY_END), + NJS_GOTO_ROW(NJS_VMCODE_CATCH), + NJS_GOTO_ROW(NJS_VMCODE_FINALLY), + NJS_GOTO_ROW(NJS_VMCODE_LET), + NJS_GOTO_ROW(NJS_VMCODE_LET_UPDATE), + NJS_GOTO_ROW(NJS_VMCODE_INITIALIZATION_TEST), + NJS_GOTO_ROW(NJS_VMCODE_NOT_INITIALIZED), + NJS_GOTO_ROW(NJS_VMCODE_ASSIGNMENT_ERROR), + NJS_GOTO_ROW(NJS_VMCODE_ERROR), + NJS_GOTO_ROW(NJS_VMCODE_MOVE), + NJS_GOTO_ROW(NJS_VMCODE_PROPERTY_GET), + NJS_GOTO_ROW(NJS_VMCODE_INCREMENT), + NJS_GOTO_ROW(NJS_VMCODE_POST_INCREMENT), + NJS_GOTO_ROW(NJS_VMCODE_DECREMENT), + NJS_GOTO_ROW(NJS_VMCODE_POST_DECREMENT), + NJS_GOTO_ROW(NJS_VMCODE_TRY_RETURN), + NJS_GOTO_ROW(NJS_VMCODE_GLOBAL_GET), + NJS_GOTO_ROW(NJS_VMCODE_LESS), + NJS_GOTO_ROW(NJS_VMCODE_GREATER), + NJS_GOTO_ROW(NJS_VMCODE_LESS_OR_EQUAL), + NJS_GOTO_ROW(NJS_VMCODE_GREATER_OR_EQUAL), + NJS_GOTO_ROW(NJS_VMCODE_ADDITION), + NJS_GOTO_ROW(NJS_VMCODE_EQUAL), + NJS_GOTO_ROW(NJS_VMCODE_NOT_EQUAL), + NJS_GOTO_ROW(NJS_VMCODE_SUBSTRACTION), + NJS_GOTO_ROW(NJS_VMCODE_MULTIPLICATION), + NJS_GOTO_ROW(NJS_VMCODE_EXPONENTIATION), + NJS_GOTO_ROW(NJS_VMCODE_DIVISION), + NJS_GOTO_ROW(NJS_VMCODE_REMAINDER), + NJS_GOTO_ROW(NJS_VMCODE_BITWISE_AND), + NJS_GOTO_ROW(NJS_VMCODE_BITWISE_OR), + NJS_GOTO_ROW(NJS_VMCODE_BITWISE_XOR), + NJS_GOTO_ROW(NJS_VMCODE_LEFT_SHIFT), + NJS_GOTO_ROW(NJS_VMCODE_RIGHT_SHIFT), + NJS_GOTO_ROW(NJS_VMCODE_UNSIGNED_RIGHT_SHIFT), + NJS_GOTO_ROW(NJS_VMCODE_OBJECT_COPY), + NJS_GOTO_ROW(NJS_VMCODE_TEMPLATE_LITERAL), + NJS_GOTO_ROW(NJS_VMCODE_PROPERTY_IN), + NJS_GOTO_ROW(NJS_VMCODE_PROPERTY_DELETE), + NJS_GOTO_ROW(NJS_VMCODE_PROPERTY_FOREACH), + NJS_GOTO_ROW(NJS_VMCODE_STRICT_EQUAL), + NJS_GOTO_ROW(NJS_VMCODE_STRICT_NOT_EQUAL), + NJS_GOTO_ROW(NJS_VMCODE_TEST_IF_TRUE), + NJS_GOTO_ROW(NJS_VMCODE_TEST_IF_FALSE), + 
NJS_GOTO_ROW(NJS_VMCODE_COALESCE), + NJS_GOTO_ROW(NJS_VMCODE_UNARY_PLUS), + NJS_GOTO_ROW(NJS_VMCODE_UNARY_NEGATION), + NJS_GOTO_ROW(NJS_VMCODE_BITWISE_NOT), + NJS_GOTO_ROW(NJS_VMCODE_LOGICAL_NOT), + NJS_GOTO_ROW(NJS_VMCODE_OBJECT), + NJS_GOTO_ROW(NJS_VMCODE_ARRAY), + NJS_GOTO_ROW(NJS_VMCODE_FUNCTION), + NJS_GOTO_ROW(NJS_VMCODE_REGEXP), + NJS_GOTO_ROW(NJS_VMCODE_INSTANCE_OF), + NJS_GOTO_ROW(NJS_VMCODE_TYPEOF), + NJS_GOTO_ROW(NJS_VMCODE_VOID), + NJS_GOTO_ROW(NJS_VMCODE_DELETE), + NJS_GOTO_ROW(NJS_VMCODE_DEBUGGER), + }; + +#endif + + vmcode = (njs_vmcode_generic_t *) pc; + +NEXT_LBL; + + SWITCH (vmcode->code.operation) { + + CASE (NJS_VMCODE_MOVE): + njs_vmcode_debug_opcode(); + + njs_vmcode_operand(vm, vmcode->operand2, value1); + njs_vmcode_operand(vm, vmcode->operand1, retval); + *retval = *value1; + + pc += sizeof(njs_vmcode_move_t); + NEXT; + + CASE (NJS_VMCODE_PROPERTY_GET): + njs_vmcode_debug_opcode(); + + njs_vmcode_operand(vm, vmcode->operand3, value2); + njs_vmcode_operand(vm, vmcode->operand2, value1); + + get = (njs_vmcode_prop_get_t *) pc; + njs_vmcode_operand(vm, get->value, retval); + + if (njs_slow_path(!njs_is_index_or_key(value2))) { + if (njs_slow_path(njs_is_null_or_undefined(value1))) { + (void) njs_throw_cannot_property(vm, value1, value2, "get"); + goto error; + } + + ret = njs_value_to_key(vm, &primitive1, value2); + if (njs_slow_path(ret != NJS_OK)) { + goto error; + } + + value2 = &primitive1; } - op = vmcode->code.operation; - - /* - * On success an operation returns size of the bytecode, - * a jump offset or zero after the call or return operations. - * Jumps can return a negative offset. Compilers can generate - * (ret < 0 && ret >= NJS_PREEMPT) - * as a single unsigned comparision. - */ - -#ifdef NJS_DEBUG_OPCODE - if (vm->options.opcode_debug) { - njs_disassemble(pc, NULL, 1, NULL); + ret = njs_value_property(vm, value1, value2, retval); + if (njs_slow_path(ret == NJS_ERROR)) { + goto error; } -#endif - - if (op > NJS_VMCODE_NORET) { - - if (op == NJS_VMCODE_MOVE) { - njs_vmcode_operand(vm, vmcode->operand1, retval); - *retval = *value1; - - pc += sizeof(njs_vmcode_move_t); - goto next; + + pc += sizeof(njs_vmcode_prop_get_t); + NEXT; + + CASE (NJS_VMCODE_INCREMENT): + njs_vmcode_debug_opcode(); + + njs_vmcode_operand(vm, vmcode->operand3, value2); + njs_vmcode_operand(vm, vmcode->operand2, value1); + + if (njs_slow_path(!njs_is_numeric(value2))) { + ret = njs_value_to_numeric(vm, value2, &numeric1); + if (njs_slow_path(ret != NJS_OK)) { + goto error; } - if (op == NJS_VMCODE_PROPERTY_GET) { - get = (njs_vmcode_prop_get_t *) pc; - njs_vmcode_operand(vm, get->value, retval); - - if (njs_slow_path(!njs_is_index_or_key(value2))) { - if (njs_slow_path(njs_is_null_or_undefined(value1))) { - (void) njs_throw_cannot_property(vm, value1, value2, - "get"); - goto error; - } - - ret = njs_value_to_key(vm, &primitive1, value2); - if (njs_slow_path(ret != NJS_OK)) { - goto error; - } - - value2 = &primitive1; - } - - ret = njs_value_property(vm, value1, value2, retval); - if (njs_slow_path(ret == NJS_ERROR)) { - goto error; - } - - pc += sizeof(njs_vmcode_prop_get_t); - goto next; + num = njs_number(&numeric1); + + } else { + num = njs_number(value2); + } + + njs_set_number(value1, num + 1); + + njs_vmcode_operand(vm, vmcode->operand1, retval); + + *retval = *value1; + + pc += sizeof(njs_vmcode_3addr_t); + NEXT; + + CASE (NJS_VMCODE_POST_INCREMENT): + njs_vmcode_debug_opcode(); + + njs_vmcode_operand(vm, vmcode->operand3, value2); + njs_vmcode_operand(vm, vmcode->operand2, 
value1); + + if (njs_slow_path(!njs_is_numeric(value2))) { + ret = njs_value_to_numeric(vm, value2, &numeric1); + if (njs_slow_path(ret != NJS_OK)) { + goto error; + } + + num = njs_number(&numeric1); + + } else { + num = njs_number(value2); + } + + njs_set_number(value1, num + 1); + + njs_vmcode_operand(vm, vmcode->operand1, retval); + + njs_set_number(retval, num); + + pc += sizeof(njs_vmcode_3addr_t); + NEXT; + + CASE (NJS_VMCODE_DECREMENT): + njs_vmcode_debug_opcode(); + + njs_vmcode_operand(vm, vmcode->operand3, value2); + njs_vmcode_operand(vm, vmcode->operand2, value1); + + if (njs_slow_path(!njs_is_numeric(value2))) { + ret = njs_value_to_numeric(vm, value2, &numeric1); + if (njs_slow_path(ret != NJS_OK)) { + goto error; } - switch (op) { - case NJS_VMCODE_INCREMENT: - case NJS_VMCODE_POST_INCREMENT: - case NJS_VMCODE_DECREMENT: - case NJS_VMCODE_POST_DECREMENT: - if (njs_slow_path(!njs_is_numeric(value2))) { - ret = njs_value_to_numeric(vm, value2, &numeric1); - if (njs_slow_path(ret != NJS_OK)) { - goto error; - } - - num = njs_number(&numeric1); - - } else { - num = njs_number(value2); - } - - njs_set_number(value1, - num + (1 - 2 * ((op - NJS_VMCODE_INCREMENT) >> 1))); - - njs_vmcode_operand(vm, vmcode->operand1, retval); - - if (op & 1) { - njs_set_number(retval, num); - - } else { - *retval = *value1; - } - - pc += sizeof(njs_vmcode_3addr_t); - goto next; - - case NJS_VMCODE_GLOBAL_GET: - get = (njs_vmcode_prop_get_t *) pc; - njs_vmcode_operand(vm, get->value, retval); - - ret = njs_value_property(vm, value1, value2, retval); - if (njs_slow_path(ret == NJS_ERROR)) { - goto error; - } - - pc += sizeof(njs_vmcode_prop_get_t); - - if (ret == NJS_OK) { - pc += sizeof(njs_vmcode_error_t); - } - - goto next; - - /* - * njs_vmcode_try_return() saves a return value to use it later by - * njs_vmcode_finally(), and jumps to the nearest try_break block. 
- */ - case NJS_VMCODE_TRY_RETURN: - njs_vmcode_operand(vm, vmcode->operand1, retval); - *retval = *value1; - - try_return = (njs_vmcode_try_return_t *) pc; - pc += try_return->offset; - goto next; - - case NJS_VMCODE_LESS: - case NJS_VMCODE_GREATER: - case NJS_VMCODE_LESS_OR_EQUAL: - case NJS_VMCODE_GREATER_OR_EQUAL: - case NJS_VMCODE_ADDITION: - if (njs_slow_path(!njs_is_primitive(value1))) { - hint = (op == NJS_VMCODE_ADDITION) && njs_is_date(value1); - ret = njs_value_to_primitive(vm, &primitive1, value1, hint); - if (ret != NJS_OK) { - goto error; - } - - value1 = &primitive1; - } - - if (njs_slow_path(!njs_is_primitive(value2))) { - hint = (op == NJS_VMCODE_ADDITION) && njs_is_date(value2); - ret = njs_value_to_primitive(vm, &primitive2, value2, hint); - if (ret != NJS_OK) { - goto error; - } - - value2 = &primitive2; - } - - if (njs_slow_path(njs_is_symbol(value1) - || njs_is_symbol(value2))) - { - njs_symbol_conversion_failed(vm, - (op == NJS_VMCODE_ADDITION) && - (njs_is_string(value1) || njs_is_string(value2))); - - goto error; - } - - njs_vmcode_operand(vm, vmcode->operand1, retval); - - if (op == NJS_VMCODE_ADDITION) { - if (njs_fast_path(njs_is_numeric(value1) - && njs_is_numeric(value2))) - { - njs_set_number(retval, njs_number(value1) - + njs_number(value2)); - pc += sizeof(njs_vmcode_3addr_t); - goto next; - } - - if (njs_is_string(value1)) { - s1 = value1; - s2 = &dst; - src = value2; - - } else { - s1 = &dst; - s2 = value2; - src = value1; - } - - ret = njs_primitive_value_to_string(vm, &dst, src); - if (njs_slow_path(ret != NJS_OK)) { - goto error; - } - - ret = njs_string_concat(vm, s1, s2); - if (njs_slow_path(ret == NJS_ERROR)) { - goto error; - } - - *retval = vm->retval; - - pc += ret; - goto next; - } - - if ((uint8_t) (op - NJS_VMCODE_GREATER) < 2) { - /* NJS_VMCODE_GREATER, NJS_VMCODE_LESS_OR_EQUAL */ - src = value1; - value1 = value2; - value2 = src; - } - - ret = njs_primitive_values_compare(vm, value1, value2); - - if (op < NJS_VMCODE_LESS_OR_EQUAL) { - ret = ret > 0; - - } else { - ret = ret == 0; - } - - njs_set_boolean(retval, ret); - - pc += sizeof(njs_vmcode_3addr_t); - goto next; - - case NJS_VMCODE_EQUAL: - case NJS_VMCODE_NOT_EQUAL: - ret = njs_values_equal(vm, value1, value2); - if (njs_slow_path(ret < 0)) { - goto error; - } - - ret ^= op - NJS_VMCODE_EQUAL; - - njs_vmcode_operand(vm, vmcode->operand1, retval); - njs_set_boolean(retval, ret); - - pc += sizeof(njs_vmcode_3addr_t); - goto next; - - case NJS_VMCODE_SUBSTRACTION: - case NJS_VMCODE_MULTIPLICATION: - case NJS_VMCODE_EXPONENTIATION: - case NJS_VMCODE_DIVISION: - case NJS_VMCODE_REMAINDER: - case NJS_VMCODE_BITWISE_AND: - case NJS_VMCODE_BITWISE_OR: - case NJS_VMCODE_BITWISE_XOR: - case NJS_VMCODE_LEFT_SHIFT: - case NJS_VMCODE_RIGHT_SHIFT: - case NJS_VMCODE_UNSIGNED_RIGHT_SHIFT: - if (njs_slow_path(!njs_is_numeric(value1))) { - ret = njs_value_to_numeric(vm, value1, &numeric1); - if (njs_slow_path(ret != NJS_OK)) { - goto error; - } - - value1 = &numeric1; - } - - if (njs_slow_path(!njs_is_numeric(value2))) { - ret = njs_value_to_numeric(vm, value2, &numeric2); - if (njs_slow_path(ret != NJS_OK)) { - goto error; - } - - value2 = &numeric2; - } - - num = njs_number(value1); - - njs_vmcode_operand(vm, vmcode->operand1, retval); - pc += sizeof(njs_vmcode_3addr_t); - - switch (op) { - case NJS_VMCODE_SUBSTRACTION: - num -= njs_number(value2); - break; - - case NJS_VMCODE_MULTIPLICATION: - num *= njs_number(value2); - break; - - case NJS_VMCODE_EXPONENTIATION: - exponent = njs_number(value2); - - 
/* - * According to ES7: - * 1. If exponent is NaN, the result should be NaN; - * 2. The result of +/-1 ** +/-Infinity should be NaN. - */ - valid = njs_expect(1, fabs(num) != 1 - || (!isnan(exponent) - && !isinf(exponent))); - - num = valid ? pow(num, exponent) : NAN; - break; - - case NJS_VMCODE_DIVISION: - num /= njs_number(value2); - break; - - case NJS_VMCODE_REMAINDER: - num = fmod(num, njs_number(value2)); - break; - - case NJS_VMCODE_BITWISE_AND: - case NJS_VMCODE_BITWISE_OR: - case NJS_VMCODE_BITWISE_XOR: - i32 = njs_number_to_int32(njs_number(value2)); - - switch (op) { - case NJS_VMCODE_BITWISE_AND: - i32 &= njs_number_to_int32(num); - break; - - case NJS_VMCODE_BITWISE_OR: - i32 |= njs_number_to_int32(num); - break; - - case NJS_VMCODE_BITWISE_XOR: - i32 ^= njs_number_to_int32(num); - break; - } - - njs_set_int32(retval, i32); - goto next; - - default: - u32 = njs_number_to_uint32(njs_number(value2)) & 0x1f; - - switch (op) { - case NJS_VMCODE_LEFT_SHIFT: - case NJS_VMCODE_RIGHT_SHIFT: - i32 = njs_number_to_int32(num); - - if (op == NJS_VMCODE_LEFT_SHIFT) { - /* Shifting of negative numbers is undefined. */ - i32 = (uint32_t) i32 << u32; - } else { - i32 >>= u32; - } - - njs_set_int32(retval, i32); - break; - - default: /* NJS_VMCODE_UNSIGNED_RIGHT_SHIFT */ - njs_set_uint32(retval, - njs_number_to_uint32(num) >> u32); - } - - goto next; - } - - njs_set_number(retval, num); - goto next; - - case NJS_VMCODE_OBJECT_COPY: - ret = njs_vmcode_object_copy(vm, value1, value2); - break; - - case NJS_VMCODE_TEMPLATE_LITERAL: - ret = njs_vmcode_template_literal(vm, value1, value2); - break; - - case NJS_VMCODE_PROPERTY_IN: - ret = njs_vmcode_property_in(vm, value1, value2); - break; - - case NJS_VMCODE_PROPERTY_DELETE: - ret = njs_value_property_delete(vm, value1, value2, NULL, 1); - if (njs_fast_path(ret != NJS_ERROR)) { - vm->retval = njs_value_true; - - ret = sizeof(njs_vmcode_3addr_t); - } - - break; - - case NJS_VMCODE_PROPERTY_FOREACH: - ret = njs_vmcode_property_foreach(vm, value1, value2, pc); - break; - - case NJS_VMCODE_STRICT_EQUAL: - case NJS_VMCODE_STRICT_NOT_EQUAL: - ret = njs_values_strict_equal(value1, value2); - - ret ^= op - NJS_VMCODE_STRICT_EQUAL; - - njs_vmcode_operand(vm, vmcode->operand1, retval); - njs_set_boolean(retval, ret); - - pc += sizeof(njs_vmcode_3addr_t); - goto next; - - case NJS_VMCODE_TEST_IF_TRUE: - case NJS_VMCODE_TEST_IF_FALSE: - case NJS_VMCODE_COALESCE: - if (op == NJS_VMCODE_COALESCE) { - ret = !njs_is_null_or_undefined(value1); - - } else { - ret = njs_is_true(value1); - ret ^= op - NJS_VMCODE_TEST_IF_TRUE; - } - - if (ret) { - test_jump = (njs_vmcode_test_jump_t *) pc; - ret = test_jump->offset; - - } else { - ret = sizeof(njs_vmcode_3addr_t); - } - - njs_vmcode_operand(vm, vmcode->operand1, retval); - *retval = *value1; - - pc += ret; - goto next; - - case NJS_VMCODE_UNARY_PLUS: - case NJS_VMCODE_UNARY_NEGATION: - case NJS_VMCODE_BITWISE_NOT: - if (njs_slow_path(!njs_is_numeric(value1))) { - ret = njs_value_to_numeric(vm, value1, &numeric1); - if (njs_slow_path(ret != NJS_OK)) { - goto error; - } - - value1 = &numeric1; - } - - num = njs_number(value1); - njs_vmcode_operand(vm, vmcode->operand1, retval); - - switch (op) { - case NJS_VMCODE_UNARY_NEGATION: - num = -num; - - /* Fall through. 
*/ - case NJS_VMCODE_UNARY_PLUS: - njs_set_number(retval, num); - break; - - case NJS_VMCODE_BITWISE_NOT: - njs_set_int32(retval, ~njs_number_to_uint32(num)); - } - - pc += sizeof(njs_vmcode_2addr_t); - goto next; - - case NJS_VMCODE_LOGICAL_NOT: - njs_vmcode_operand(vm, vmcode->operand1, retval); - njs_set_boolean(retval, !njs_is_true(value1)); - - pc += sizeof(njs_vmcode_2addr_t); - goto next; - - case NJS_VMCODE_OBJECT: - ret = njs_vmcode_object(vm); - break; - - case NJS_VMCODE_ARRAY: - ret = njs_vmcode_array(vm, pc); - break; - - case NJS_VMCODE_FUNCTION: - ret = njs_vmcode_function(vm, pc); - break; - - case NJS_VMCODE_REGEXP: - ret = njs_vmcode_regexp(vm, pc); - break; - - case NJS_VMCODE_INSTANCE_OF: - ret = njs_vmcode_instance_of(vm, value1, value2); - break; - - case NJS_VMCODE_TYPEOF: - ret = njs_vmcode_typeof(vm, value1, value2); - break; - - case NJS_VMCODE_VOID: - njs_set_undefined(&vm->retval); - - ret = sizeof(njs_vmcode_2addr_t); - break; - - case NJS_VMCODE_DELETE: - njs_release(vm, value1); - vm->retval = njs_value_true; - - ret = sizeof(njs_vmcode_2addr_t); - break; - - case NJS_VMCODE_DEBUGGER: - ret = njs_vmcode_debugger(vm); - break; - - default: - njs_internal_error(vm, "%d has retval", op); + num = njs_number(&numeric1); + + } else { + num = njs_number(value2); + } + + njs_set_number(value1, num - 1); + + njs_vmcode_operand(vm, vmcode->operand1, retval); + + *retval = *value1; + + pc += sizeof(njs_vmcode_3addr_t); + NEXT; + + CASE (NJS_VMCODE_POST_DECREMENT): + njs_vmcode_debug_opcode(); + + njs_vmcode_operand(vm, vmcode->operand3, value2); + njs_vmcode_operand(vm, vmcode->operand2, value1); + + if (njs_slow_path(!njs_is_numeric(value2))) { + ret = njs_value_to_numeric(vm, value2, &numeric1); + if (njs_slow_path(ret != NJS_OK)) { + goto error; + } + + num = njs_number(&numeric1); + + } else { + num = njs_number(value2); + } + + njs_set_number(value1, num - 1); + + njs_vmcode_operand(vm, vmcode->operand1, retval); + + njs_set_number(retval, num); + + pc += sizeof(njs_vmcode_3addr_t); + NEXT; + + CASE (NJS_VMCODE_GLOBAL_GET): + njs_vmcode_debug_opcode(); + + njs_vmcode_operand(vm, vmcode->operand3, value2); + njs_vmcode_operand(vm, vmcode->operand2, value1); + + get = (njs_vmcode_prop_get_t *) pc; + njs_vmcode_operand(vm, get->value, retval); + + ret = njs_value_property(vm, value1, value2, retval); + if (njs_slow_path(ret == NJS_ERROR)) { + goto error; + } + + pc += sizeof(njs_vmcode_prop_get_t); + + if (ret == NJS_OK) { + pc += sizeof(njs_vmcode_error_t); + } + + NEXT; + + /* + * njs_vmcode_try_return() saves a return value to use it later by + * njs_vmcode_finally(), and jumps to the nearest try_break block. + */ + CASE (NJS_VMCODE_TRY_RETURN): + njs_vmcode_debug_opcode(); + + njs_vmcode_operand(vm, vmcode->operand2, value1); + + njs_vmcode_operand(vm, vmcode->operand1, retval); + *retval = *value1; + + try_return = (njs_vmcode_try_return_t *) pc; + pc += try_return->offset; + NEXT; + + CASE (NJS_VMCODE_LESS): + njs_vmcode_debug_opcode(); From xeioex at nginx.com Tue Oct 25 17:03:38 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 25 Oct 2022 17:03:38 +0000 Subject: [njs] Version bump. Message-ID: details: https://hg.nginx.org/njs/rev/96a0be3dfb0b branches: changeset: 1985:96a0be3dfb0b user: Dmitry Volyntsev date: Tue Oct 25 09:19:14 2022 -0700 description: Version bump. 
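A side note on the dispatch technique introduced by changeset 86784a68e8c8 above: with the GCC/Clang "labels as values" extension, every opcode handler becomes a label, a static table maps opcode numbers to label addresses, and each handler jumps directly to the next one instead of returning to a central switch, which typically helps branch prediction in interpreter loops. The stripped-down illustration below is not njs code; the three-opcode "VM" and all of its names are invented for the example. The --no-goto configure option added in the same changeset keeps the plain switch dispatch for compilers without this extension.

    /*
     * Minimal computed-goto dispatch (GCC/Clang extension), the same
     * technique njs_vmcode_interpreter() now uses.  The toy VM below
     * is invented for the example.
     */
    #include <stdio.h>

    enum { OP_INC, OP_DEC, OP_STOP };

    static int
    run(const unsigned char *pc)
    {
        int  acc = 0;

        static const void *const  dispatch[] = {
            [OP_INC]  = &&do_inc,
            [OP_DEC]  = &&do_dec,
            [OP_STOP] = &&do_stop,
        };

    #define NEXT  goto *dispatch[*pc++]

        NEXT;

    do_inc:
        acc++;
        NEXT;

    do_dec:
        acc--;
        NEXT;

    do_stop:
        return acc;
    }

    int
    main(void)
    {
        static const unsigned char  code[] = { OP_INC, OP_INC, OP_DEC, OP_STOP };

        printf("%d\n", run(code));    /* prints 1 */
        return 0;
    }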
diffstat: src/njs.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r 86784a68e8c8 -r 96a0be3dfb0b src/njs.h --- a/src/njs.h Tue Oct 25 06:43:10 2022 -0700 +++ b/src/njs.h Tue Oct 25 09:19:14 2022 -0700 @@ -11,8 +11,8 @@ #include -#define NJS_VERSION "0.7.8" -#define NJS_VERSION_NUMBER 0x000708 +#define NJS_VERSION "0.7.9" +#define NJS_VERSION_NUMBER 0x000709 #include /* STDOUT_FILENO, STDERR_FILENO */ From xeioex at nginx.com Tue Oct 25 17:03:40 2022 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 25 Oct 2022 17:03:40 +0000 Subject: [njs] Refactored working with an object properties. Message-ID: details: https://hg.nginx.org/njs/rev/109e6cb4dea5 branches: changeset: 1986:109e6cb4dea5 user: Dmitry Volyntsev date: Tue Oct 25 09:19:32 2022 -0700 description: Refactored working with an object properties. 1) njs_object_prop_t is compacted from 72 to 40 bytes on 64bit platforms. 2) njs_object_prop_define() is revorked to accomodate fast property creation using njs_value_create_data_prop() which corresponds to CreateDataProperty() from the specs. diffstat: external/njs_fs_module.c | 241 +------ src/njs_array.c | 401 +++---------- src/njs_array_buffer.c | 77 +-- src/njs_async.c | 40 +- src/njs_boolean.c | 47 +- src/njs_buffer.c | 738 ++++++------------------- src/njs_builtin.c | 769 ++++++------------------- src/njs_date.c | 522 ++++------------- src/njs_encoding.c | 110 +--- src/njs_error.c | 508 +++-------------- src/njs_extern.c | 28 +- src/njs_function.c | 232 ++----- src/njs_iterator.c | 22 +- src/njs_json.c | 38 +- src/njs_main.h | 1 + src/njs_math.c | 383 ++---------- src/njs_number.c | 185 +----- src/njs_object.c | 372 +++--------- src/njs_object.h | 66 +- src/njs_object_prop.c | 269 +++++--- src/njs_object_prop_declare.h | 74 ++ src/njs_promise.c | 123 +--- src/njs_regexp.c | 144 +---- src/njs_string.c | 407 +++---------- src/njs_symbol.c | 183 +---- src/njs_typed_array.c | 1180 ++++++++++------------------------------ src/njs_value.c | 173 ++--- src/njs_value.h | 89 ++- src/njs_vm.c | 4 +- src/njs_vm.h | 9 - src/njs_vmcode.c | 6 +- src/test/njs_unit_test.c | 39 +- 32 files changed, 2076 insertions(+), 5404 deletions(-) diffs (truncated from 9903 to 1000 lines): diff -r 96a0be3dfb0b -r 109e6cb4dea5 external/njs_fs_module.c --- a/external/njs_fs_module.c Tue Oct 25 09:19:14 2022 -0700 +++ b/external/njs_fs_module.c Tue Oct 25 09:19:32 2022 -0700 @@ -3231,25 +3231,11 @@ njs_fs_dirent_constructor(njs_vm_t *vm, static const njs_object_prop_t njs_dirent_constructor_properties[] = { - { - .type = NJS_PROPERTY, - .name = njs_string("name"), - .value = njs_string("Dirent"), - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("length"), - .value = njs_value(NJS_NUMBER, 1, 2.0), - .configurable = 1, - }, - - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("prototype"), - .value = njs_prop_handler(njs_object_prototype_create), - }, + NJS_DECLARE_PROP_NAME("Dirent"), + + NJS_DECLARE_PROP_LENGTH(2), + + NJS_DECLARE_PROP_HANDLER("prototype", njs_object_prototype_create, 0, 0, 0), }; @@ -3427,7 +3413,7 @@ njs_fs_stats_prop(njs_vm_t *vm, njs_obje return NJS_DECLINED; } - switch (prop->value.data.magic32 & 0xf) { + switch (njs_prop_magic32(prop) & 0xf) { case NJS_FS_STAT_DEV: v = st->st_dev; break; @@ -3486,7 +3472,7 @@ njs_fs_stats_prop(njs_vm_t *vm, njs_obje break; } - switch (prop->value.data.magic32 >> 4) { + switch (njs_prop_magic32(prop) >> 4) { case NJS_NUMBER: njs_set_number(retval, v); break; @@ -3635,146 +3621,44 @@ 
njs_fs_bytes_written_create(njs_vm_t *vm static const njs_object_prop_t njs_fs_promises_properties[] = { - { - .type = NJS_PROPERTY, - .name = njs_string("readFile"), - .value = njs_native_function2(njs_fs_read_file, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("readSync"), - .value = njs_native_function2(njs_fs_read, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("appendFile"), - .value = njs_native_function2(njs_fs_write_file, 0, - njs_fs_magic(NJS_FS_PROMISE, NJS_FS_APPEND)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("writeFile"), - .value = njs_native_function2(njs_fs_write_file, 0, - njs_fs_magic(NJS_FS_PROMISE, NJS_FS_TRUNC)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("access"), - .value = njs_native_function2(njs_fs_access, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("mkdir"), - .value = njs_native_function2(njs_fs_mkdir, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("open"), - .value = njs_native_function2(njs_fs_open, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("close"), - .value = njs_native_function2(njs_fs_close, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("rename"), - .value = njs_native_function2(njs_fs_rename, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("rmdir"), - .value = njs_native_function2(njs_fs_rmdir, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("readdir"), - .value = njs_native_function2(njs_fs_readdir, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("fstat"), - .value = njs_native_function2(njs_fs_stat, 0, - njs_fs_magic(NJS_FS_PROMISE, NJS_FS_FSTAT)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("lstat"), - .value = njs_native_function2(njs_fs_stat, 0, - njs_fs_magic(NJS_FS_PROMISE, NJS_FS_LSTAT)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("stat"), - .value = njs_native_function2(njs_fs_stat, 0, - njs_fs_magic(NJS_FS_PROMISE, NJS_FS_STAT)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("symlink"), - .value = njs_native_function2(njs_fs_symlink, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("unlink"), - .value = njs_native_function2(njs_fs_unlink, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("realpath"), - .value = njs_native_function2(njs_fs_realpath, 0, NJS_FS_PROMISE), - .writable = 1, - .configurable = 1, - }, + NJS_DECLARE_PROP_NATIVE("readFile", njs_fs_read_file, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("readSync", njs_fs_read, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("appendFile", njs_fs_write_file, 0, + njs_fs_magic(NJS_FS_PROMISE, NJS_FS_APPEND)), + + NJS_DECLARE_PROP_NATIVE("writeFile", njs_fs_write_file, 0, + njs_fs_magic(NJS_FS_PROMISE, 
NJS_FS_TRUNC)), + + NJS_DECLARE_PROP_NATIVE("access", njs_fs_access, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("mkdir", njs_fs_mkdir, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("open", njs_fs_open, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("close", njs_fs_close, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("rename", njs_fs_rename, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("rmdir", njs_fs_rmdir, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("readdir", njs_fs_readdir, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("fstat", njs_fs_stat, 0, + njs_fs_magic(NJS_FS_PROMISE, NJS_FS_FSTAT)), + + NJS_DECLARE_PROP_NATIVE("lstat", njs_fs_stat, 0, + njs_fs_magic(NJS_FS_PROMISE, NJS_FS_LSTAT)), + + NJS_DECLARE_PROP_NATIVE("stat", njs_fs_stat, 0, + njs_fs_magic(NJS_FS_PROMISE, NJS_FS_STAT)), + + NJS_DECLARE_PROP_NATIVE("symlink", njs_fs_symlink, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("unlink", njs_fs_unlink, 0, NJS_FS_PROMISE), + + NJS_DECLARE_PROP_NATIVE("realpath", njs_fs_realpath, 0, NJS_FS_PROMISE), }; @@ -3794,30 +3678,17 @@ njs_fs_promises(njs_vm_t *vm, njs_object static const njs_object_prop_t njs_fs_constants_properties[] = { - { - .type = NJS_PROPERTY, - .name = njs_string("F_OK"), - .value = njs_value(NJS_NUMBER, 0, F_OK), - .enumerable = 1, - }, - { - .type = NJS_PROPERTY, - .name = njs_string("R_OK"), - .value = njs_value(NJS_NUMBER, 1, R_OK), - .enumerable = 1, - }, - { - .type = NJS_PROPERTY, - .name = njs_string("W_OK"), - .value = njs_value(NJS_NUMBER, 1, W_OK), - .enumerable = 1, - }, - { - .type = NJS_PROPERTY, - .name = njs_string("X_OK"), - .value = njs_value(NJS_NUMBER, 1, X_OK), - .enumerable = 1, - }, + NJS_DECLARE_PROP_VALUE("F_OK", njs_value(NJS_NUMBER, 0, F_OK), + NJS_OBJECT_PROP_VALUE_E), + + NJS_DECLARE_PROP_VALUE("R_OK", njs_value(NJS_NUMBER, 0, R_OK), + NJS_OBJECT_PROP_VALUE_E), + + NJS_DECLARE_PROP_VALUE("W_OK", njs_value(NJS_NUMBER, 0, W_OK), + NJS_OBJECT_PROP_VALUE_E), + + NJS_DECLARE_PROP_VALUE("X_OK", njs_value(NJS_NUMBER, 0, X_OK), + NJS_OBJECT_PROP_VALUE_E), }; diff -r 96a0be3dfb0b -r 109e6cb4dea5 src/njs_array.c --- a/src/njs_array.c Tue Oct 25 09:19:14 2022 -0700 +++ b/src/njs_array.c Tue Oct 25 09:19:32 2022 -0700 @@ -159,7 +159,7 @@ njs_array_convert_to_slow_array(njs_vm_t return NJS_ERROR; } - prop->value = array->start[i]; + njs_value_assign(njs_prop_value(prop), &array->start[i]); } } @@ -197,7 +197,7 @@ njs_array_length_redefine(njs_vm_t *vm, prop->enumerable = 0; prop->configurable = 0; - njs_value_number_set(&prop->value, length); + njs_value_number_set(njs_prop_value(prop), length); return NJS_OK; } @@ -224,7 +224,7 @@ njs_array_length_set(njs_vm_t *vm, njs_v return NJS_ERROR; } - ret = njs_value_to_length(vm, &prev->value, &prev_length); + ret = njs_value_to_length(vm, njs_prop_value(prev), &prev_length); if (njs_slow_path(ret != NJS_OK)) { return ret; } @@ -523,41 +523,15 @@ njs_array_of(njs_vm_t *vm, njs_value_t * static const njs_object_prop_t njs_array_constructor_properties[] = { - { - .type = NJS_PROPERTY, - .name = njs_string("name"), - .value = njs_string("Array"), - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("length"), - .value = njs_value(NJS_NUMBER, 1, 1.0), - .configurable = 1, - }, - - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("prototype"), - .value = njs_prop_handler(njs_object_prototype_create), - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("isArray"), - .value = njs_native_function(njs_array_is_array, 1), - .writable = 1, - .configurable = 1, - }, - 
- { - .type = NJS_PROPERTY, - .name = njs_string("of"), - .value = njs_native_function(njs_array_of, 0), - .writable = 1, - .configurable = 1, - }, + NJS_DECLARE_PROP_NAME("Array"), + + NJS_DECLARE_PROP_LENGTH(1), + + NJS_DECLARE_PROP_HANDLER("prototype", njs_object_prototype_create, 0, 0, 0), + + NJS_DECLARE_PROP_NATIVE("isArray", njs_array_is_array, 1, 0), + + NJS_DECLARE_PROP_NATIVE("of", njs_array_of, 0, 0), }; @@ -646,9 +620,9 @@ njs_array_length(njs_vm_t *vm,njs_object } prop->type = NJS_PROPERTY; - njs_set_number(&prop->value, length); - - *retval = *setval; + njs_set_number(njs_prop_value(prop), length); + + njs_value_assign(retval, setval); return NJS_OK; } @@ -2850,265 +2824,92 @@ njs_array_prototype_iterator_obj(njs_vm_ static const njs_object_prop_t njs_array_prototype_properties[] = { - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("length"), - .value = njs_prop_handler(njs_array_length), - .writable = 1, - }, - - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("constructor"), - .value = njs_prop_handler(njs_object_prototype_create_constructor), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("concat"), - .value = njs_native_function(njs_array_prototype_concat, 1), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("copyWithin"), - .value = njs_native_function(njs_array_prototype_copy_within, 2), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("entries"), - .value = njs_native_function2(njs_array_prototype_iterator_obj, 0, - NJS_ENUM_BOTH), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("every"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_func(NJS_ARRAY_EVERY)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("fill"), - .value = njs_native_function(njs_array_prototype_fill, 1), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("filter"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_func(NJS_ARRAY_FILTER)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("find"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_func(NJS_ARRAY_FIND)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("findIndex"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_func(NJS_ARRAY_FIND_INDEX)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("forEach"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_func(NJS_ARRAY_FOR_EACH)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("includes"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_arg(NJS_ARRAY_INCLUDES)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("indexOf"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_arg(NJS_ARRAY_INDEX_OF)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("join"), - .value = njs_native_function(njs_array_prototype_join, 1), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("keys"), - .value = 
njs_native_function2(njs_array_prototype_iterator_obj, 0, - NJS_ENUM_KEYS), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("lastIndexOf"), - .value = njs_native_function2(njs_array_prototype_reverse_iterator, 1, - NJS_ARRAY_LAST_INDEX_OF), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("map"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_func(NJS_ARRAY_MAP)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("pop"), - .value = njs_native_function(njs_array_prototype_pop, 0), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("push"), - .value = njs_native_function(njs_array_prototype_push, 1), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("reduce"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_func(NJS_ARRAY_REDUCE)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("reduceRight"), - .value = njs_native_function2(njs_array_prototype_reverse_iterator, 1, - NJS_ARRAY_REDUCE_RIGHT), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("reverse"), - .value = njs_native_function(njs_array_prototype_reverse, 0), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("shift"), - .value = njs_native_function(njs_array_prototype_shift, 0), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("slice"), - .value = njs_native_function(njs_array_prototype_slice, 2), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("some"), - .value = njs_native_function2(njs_array_prototype_iterator, 1, - njs_array_func(NJS_ARRAY_SOME)), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("sort"), - .value = njs_native_function(njs_array_prototype_sort, 1), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("splice"), - .value = njs_native_function(njs_array_prototype_splice, 2), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("toString"), - .value = njs_native_function(njs_array_prototype_to_string, 0), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("unshift"), - .value = njs_native_function(njs_array_prototype_unshift, 1), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("values"), - .value = njs_native_function2(njs_array_prototype_iterator_obj, 0, - NJS_ENUM_VALUES), - .writable = 1, - .configurable = 1, - }, + NJS_DECLARE_PROP_HANDLER("length", njs_array_length, 0, 0, + NJS_OBJECT_PROP_VALUE_W), + + NJS_DECLARE_PROP_HANDLER("constructor", + njs_object_prototype_create_constructor, + 0, 0, NJS_OBJECT_PROP_VALUE_CW), + + NJS_DECLARE_PROP_NATIVE("concat", njs_array_prototype_concat, 1, 0), + + NJS_DECLARE_PROP_NATIVE("copyWithin", njs_array_prototype_copy_within, 2, + 0), + + NJS_DECLARE_PROP_NATIVE("entries", njs_array_prototype_iterator_obj, 0, + NJS_ENUM_BOTH), + + NJS_DECLARE_PROP_NATIVE("every", njs_array_prototype_iterator, 1, + njs_array_func(NJS_ARRAY_EVERY)), + + NJS_DECLARE_PROP_NATIVE("fill", njs_array_prototype_fill, 1, 0), + + NJS_DECLARE_PROP_NATIVE("filter", njs_array_prototype_iterator, 
1, + njs_array_func(NJS_ARRAY_FILTER)), + + NJS_DECLARE_PROP_NATIVE("find", njs_array_prototype_iterator, 1, + njs_array_func(NJS_ARRAY_FIND)), + + NJS_DECLARE_PROP_NATIVE("findIndex", njs_array_prototype_iterator, 1, + njs_array_func(NJS_ARRAY_FIND_INDEX)), + + NJS_DECLARE_PROP_NATIVE("forEach", njs_array_prototype_iterator, 1, + njs_array_func(NJS_ARRAY_FOR_EACH)), + + NJS_DECLARE_PROP_NATIVE("includes", njs_array_prototype_iterator, 1, + njs_array_arg(NJS_ARRAY_INCLUDES)), + + NJS_DECLARE_PROP_NATIVE("indexOf", njs_array_prototype_iterator, 1, + njs_array_arg(NJS_ARRAY_INDEX_OF)), + + NJS_DECLARE_PROP_NATIVE("join", njs_array_prototype_join, 1, 0), + + NJS_DECLARE_PROP_NATIVE("keys", njs_array_prototype_iterator_obj, 0, + NJS_ENUM_KEYS), + + NJS_DECLARE_PROP_NATIVE("lastIndexOf", + njs_array_prototype_reverse_iterator, 1, + NJS_ARRAY_LAST_INDEX_OF), + + NJS_DECLARE_PROP_NATIVE("map", njs_array_prototype_iterator, 1, + njs_array_func(NJS_ARRAY_MAP)), + + NJS_DECLARE_PROP_NATIVE("pop", njs_array_prototype_pop, 0, 0), + + NJS_DECLARE_PROP_NATIVE("push", njs_array_prototype_push, 1, 0), + + NJS_DECLARE_PROP_NATIVE("reduce", njs_array_prototype_iterator, 1, + njs_array_func(NJS_ARRAY_REDUCE)), + + NJS_DECLARE_PROP_NATIVE("reduceRight", + njs_array_prototype_reverse_iterator, 1, + njs_array_func(NJS_ARRAY_REDUCE_RIGHT)), + + NJS_DECLARE_PROP_NATIVE("reverse", njs_array_prototype_reverse, 0, 0), + + NJS_DECLARE_PROP_NATIVE("shift", njs_array_prototype_shift, 0, 0), + + NJS_DECLARE_PROP_NATIVE("slice", njs_array_prototype_slice, 2, 0), + + NJS_DECLARE_PROP_NATIVE("some", njs_array_prototype_iterator, 1, + njs_array_func(NJS_ARRAY_SOME)), + + NJS_DECLARE_PROP_NATIVE("sort", njs_array_prototype_sort, 1, 0), + + NJS_DECLARE_PROP_NATIVE("splice", njs_array_prototype_splice, 2, 0), + + NJS_DECLARE_PROP_NATIVE("toString", njs_array_prototype_to_string, 0, 0), + + NJS_DECLARE_PROP_NATIVE("unshift", njs_array_prototype_unshift, 1, 0), + + NJS_DECLARE_PROP_NATIVE("values", njs_array_prototype_iterator_obj, 0, + NJS_ENUM_VALUES), { .type = NJS_PROPERTY, .name = njs_wellknown_symbol(NJS_SYMBOL_ITERATOR), - .value = njs_native_function2(njs_array_prototype_iterator_obj, 0, - NJS_ENUM_VALUES), + .u.value = njs_native_function2(njs_array_prototype_iterator_obj, 0, + NJS_ENUM_VALUES), .writable = 1, .configurable = 1, }, @@ -3123,12 +2924,8 @@ const njs_object_init_t njs_array_proto const njs_object_prop_t njs_array_instance_properties[] = { - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("length"), - .value = njs_prop_handler(njs_array_length), - .writable = 1 - }, + NJS_DECLARE_PROP_HANDLER("length", njs_array_length, 0, 0, + NJS_OBJECT_PROP_VALUE_W), }; diff -r 96a0be3dfb0b -r 109e6cb4dea5 src/njs_array_buffer.c --- a/src/njs_array_buffer.c Tue Oct 25 09:19:14 2022 -0700 +++ b/src/njs_array_buffer.c Tue Oct 25 09:19:32 2022 -0700 @@ -141,44 +141,21 @@ njs_array_buffer_writable(njs_vm_t *vm, static const njs_object_prop_t njs_array_buffer_constructor_properties[] = { - { - .type = NJS_PROPERTY, - .name = njs_string("name"), - .value = njs_string("ArrayBuffer"), - .configurable = 1, - }, + NJS_DECLARE_PROP_NAME("ArrayBuffer"), + + NJS_DECLARE_PROP_LENGTH(1), + + NJS_DECLARE_PROP_HANDLER("prototype", njs_object_prototype_create, 0, 0, 0), { - .type = NJS_PROPERTY, - .name = njs_string("length"), - .value = njs_value(NJS_NUMBER, 1, 1.0), + .type = NJS_ACCESSOR, + .name = njs_wellknown_symbol(NJS_SYMBOL_SPECIES), + .u.accessor = njs_getter(njs_array_buffer_get_this, 0), + .writable = 
NJS_ATTRIBUTE_UNSET, .configurable = 1, }, - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("prototype"), - .value = njs_prop_handler(njs_object_prototype_create), - }, - - { - .type = NJS_PROPERTY, - .name = njs_wellknown_symbol(NJS_SYMBOL_SPECIES), - .value = njs_value(NJS_INVALID, 1, NAN), - .getter = njs_native_function(njs_array_buffer_get_this, 0), - .setter = njs_value(NJS_UNDEFINED, 0, NAN), - .writable = NJS_ATTRIBUTE_UNSET, - .configurable = 1, - .enumerable = 0, - }, - - { - .type = NJS_PROPERTY, - .name = njs_string("isView"), - .value = njs_native_function(njs_array_buffer_is_view, 1), - .writable = 1, - .configurable = 1, - }, + NJS_DECLARE_PROP_NATIVE("isView", njs_array_buffer_is_view, 1, 0), }; @@ -265,39 +242,19 @@ njs_array_buffer_prototype_slice(njs_vm_ static const njs_object_prop_t njs_array_buffer_prototype_properties[] = { - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("constructor"), - .value = njs_prop_handler(njs_object_prototype_create_constructor), - .writable = 1, - .configurable = 1, - }, + NJS_DECLARE_PROP_HANDLER("constructor", + njs_object_prototype_create_constructor, + 0, 0, NJS_OBJECT_PROP_VALUE_CW), - { - .type = NJS_PROPERTY, - .name = njs_string("byteLength"), - .value = njs_value(NJS_INVALID, 1, NAN), - .getter = njs_native_function(njs_array_buffer_prototype_byte_length, - 0), - .setter = njs_value(NJS_UNDEFINED, 0, NAN), - .writable = NJS_ATTRIBUTE_UNSET, - .configurable = 1, - .enumerable = 0, - }, + NJS_DECLARE_PROP_GETTER("byteLength", + njs_array_buffer_prototype_byte_length, 0), - { - .type = NJS_PROPERTY, - .name = njs_string("slice"), - .value = njs_native_function(njs_array_buffer_prototype_slice, 2), - .writable = 1, - .configurable = 1, - .enumerable = 0, - }, + NJS_DECLARE_PROP_NATIVE("slice", njs_array_buffer_prototype_slice, 2, 0), { .type = NJS_PROPERTY, .name = njs_wellknown_symbol(NJS_SYMBOL_TO_STRING_TAG), - .value = njs_string("ArrayBuffer"), + .u.value = njs_string("ArrayBuffer"), .configurable = 1, }, }; diff -r 96a0be3dfb0b -r 109e6cb4dea5 src/njs_async.c --- a/src/njs_async.c Tue Oct 25 09:19:14 2022 -0700 +++ b/src/njs_async.c Tue Oct 25 09:19:32 2022 -0700 @@ -167,18 +167,9 @@ njs_async_context_free(njs_vm_t *vm, njs static const njs_object_prop_t njs_async_constructor_properties[] = { - { - .type = NJS_PROPERTY, - .name = njs_string("length"), - .value = njs_value(NJS_NUMBER, 1, 1.0), - .configurable = 1, - }, + NJS_DECLARE_PROP_LENGTH(1), - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("prototype"), - .value = njs_prop_handler(njs_object_prototype_create), - }, + NJS_DECLARE_PROP_HANDLER("prototype", njs_object_prototype_create, 0, 0, 0), }; @@ -193,16 +184,13 @@ static const njs_object_prop_t njs_asyn { .type = NJS_PROPERTY, .name = njs_wellknown_symbol(NJS_SYMBOL_TO_STRING_TAG), - .value = njs_string("AsyncFunction"), + .u.value = njs_string("AsyncFunction"), .configurable = 1, }, - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("constructor"), - .value = njs_prop_handler(njs_object_prototype_create_constructor), - .configurable = 1, - }, + NJS_DECLARE_PROP_HANDLER("constructor", + njs_object_prototype_create_constructor, + 0, 0, NJS_OBJECT_PROP_VALUE_CW), }; @@ -222,19 +210,11 @@ const njs_object_type_init_t njs_async_ const njs_object_prop_t njs_async_function_instance_properties[] = { - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("length"), - .value = njs_prop_handler(njs_function_instance_length), - .configurable = 1, - }, + NJS_DECLARE_PROP_HANDLER("length", 
njs_function_instance_length, 0, 0, + NJS_OBJECT_PROP_VALUE_C), - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("name"), - .value = njs_prop_handler(njs_function_instance_name), - .configurable = 1, - }, + NJS_DECLARE_PROP_HANDLER("name", njs_function_instance_name, 0, 0, + NJS_OBJECT_PROP_VALUE_C), }; diff -r 96a0be3dfb0b -r 109e6cb4dea5 src/njs_boolean.c --- a/src/njs_boolean.c Tue Oct 25 09:19:14 2022 -0700 +++ b/src/njs_boolean.c Tue Oct 25 09:19:32 2022 -0700 @@ -43,22 +43,18 @@ static const njs_object_prop_t njs_bool { .type = NJS_PROPERTY, .name = njs_string("name"), - .value = njs_string("Boolean"), + .u.value = njs_string("Boolean"), .configurable = 1, }, { .type = NJS_PROPERTY, .name = njs_string("length"), - .value = njs_value(NJS_NUMBER, 1, 1.0), + .u.value = njs_value(NJS_NUMBER, 1, 1.0), .configurable = 1, }, - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("prototype"), - .value = njs_prop_handler(njs_object_prototype_create), - }, + NJS_DECLARE_PROP_HANDLER("prototype", njs_object_prototype_create, 0, 0, 0), }; @@ -122,37 +118,16 @@ njs_boolean_prototype_to_string(njs_vm_t static const njs_object_prop_t njs_boolean_prototype_properties[] = { - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("__proto__"), - .value = njs_prop_handler(njs_primitive_prototype_get_proto), - .writable = 1, - .configurable = 1, - }, - - { - .type = NJS_PROPERTY_HANDLER, - .name = njs_string("constructor"), - .value = njs_prop_handler(njs_object_prototype_create_constructor), - .writable = 1, - .configurable = 1, - }, + NJS_DECLARE_PROP_HANDLER("__proto__", njs_primitive_prototype_get_proto, + 0, 0, NJS_OBJECT_PROP_VALUE_CW), - { - .type = NJS_PROPERTY, - .name = njs_string("valueOf"), - .value = njs_native_function(njs_boolean_prototype_value_of, 0), - .writable = 1, - .configurable = 1, - }, + NJS_DECLARE_PROP_HANDLER("constructor", + njs_object_prototype_create_constructor, + 0, 0, NJS_OBJECT_PROP_VALUE_CW), - { - .type = NJS_PROPERTY, - .name = njs_string("toString"), - .value = njs_native_function(njs_boolean_prototype_to_string, 0), - .writable = 1, - .configurable = 1, - }, + NJS_DECLARE_PROP_NATIVE("valueOf", njs_boolean_prototype_value_of, 0, 0), + + NJS_DECLARE_PROP_NATIVE("toString", njs_boolean_prototype_to_string, 0, 0), }; diff -r 96a0be3dfb0b -r 109e6cb4dea5 src/njs_buffer.c --- a/src/njs_buffer.c Tue Oct 25 09:19:14 2022 -0700 +++ b/src/njs_buffer.c Tue Oct 25 09:19:32 2022 -0700 @@ -310,7 +310,7 @@ next: && !(njs_is_object(&retval) && njs_object(&retval) == njs_object(value))) { - *value = retval; + njs_value_assign(value, &retval); goto next; } @@ -2340,467 +2340,154 @@ static const njs_object_prop_t njs_buff { .type = NJS_PROPERTY, From v.zhestikov at f5.com Wed Oct 26 23:35:25 2022 From: v.zhestikov at f5.com (Vadim Zhestikov) Date: Wed, 26 Oct 2022 23:35:25 +0000 Subject: [njs] Fixed static analyzer issues. Message-ID: details: https://hg.nginx.org/njs/rev/4f66a66ef300 branches: changeset: 1987:4f66a66ef300 user: Vadim Zhestikov date: Wed Oct 26 16:33:15 2022 -0700 description: Fixed static analyzer issues. 
diffstat: src/njs_builtin.c | 5 +++++ src/njs_object.c | 4 ++-- src/njs_object_prop.c | 2 +- src/njs_value.c | 4 ++-- src/njs_vmcode.c | 4 ---- 5 files changed, 10 insertions(+), 9 deletions(-) diffs (90 lines): diff -r 109e6cb4dea5 -r 4f66a66ef300 src/njs_builtin.c --- a/src/njs_builtin.c Tue Oct 25 09:19:32 2022 -0700 +++ b/src/njs_builtin.c Wed Oct 26 16:33:15 2022 -0700 @@ -390,6 +390,11 @@ njs_builtin_traverse(njs_vm_t *vm, njs_t } } + if (traverse == NULL) { + njs_type_error(vm, "njs_builtin_traverse() traverse arg is NULL"); + return NJS_ERROR; + } + n = 0; while (traverse != NULL) { diff -r 109e6cb4dea5 -r 4f66a66ef300 src/njs_object.c --- a/src/njs_object.c Tue Oct 25 09:19:32 2022 -0700 +++ b/src/njs_object.c Wed Oct 26 16:33:15 2022 -0700 @@ -2372,7 +2372,7 @@ njs_object_prototype_has_own_property(nj case NJS_ERROR: default: - return ret; + return NJS_ERROR; } } @@ -2420,7 +2420,7 @@ njs_object_prototype_prop_is_enumerable( case NJS_ERROR: default: - return ret; + return NJS_ERROR; } vm->retval = *retval; diff -r 109e6cb4dea5 -r 4f66a66ef300 src/njs_object_prop.c --- a/src/njs_object_prop.c Tue Oct 25 09:19:32 2022 -0700 +++ b/src/njs_object_prop.c Wed Oct 26 16:33:15 2022 -0700 @@ -842,7 +842,7 @@ njs_object_prop_descriptor(njs_vm_t *vm, case NJS_ERROR: default: - return ret; + return NJS_ERROR; } desc = njs_object_alloc(vm); diff -r 109e6cb4dea5 -r 4f66a66ef300 src/njs_value.c --- a/src/njs_value.c Tue Oct 25 09:19:32 2022 -0700 +++ b/src/njs_value.c Wed Oct 26 16:33:15 2022 -0700 @@ -1108,7 +1108,7 @@ slow_path: case NJS_ERROR: default: - return ret; + return NJS_ERROR; } return NJS_OK; @@ -1282,7 +1282,7 @@ slow_path: case NJS_ERROR: default: - return ret; + return NJS_ERROR; } if (njs_slow_path(!njs_object(value)->extensible)) { diff -r 109e6cb4dea5 -r 4f66a66ef300 src/njs_vmcode.c --- a/src/njs_vmcode.c Tue Oct 25 09:19:32 2022 -0700 +++ b/src/njs_vmcode.c Wed Oct 26 16:33:15 2022 -0700 @@ -103,7 +103,6 @@ njs_vmcode_interpreter(njs_vm_t *vm, u_c njs_native_frame_t *previous, *native; njs_property_next_t *next; njs_vmcode_import_t *import; - njs_vmcode_finally_t *finally; njs_vmcode_generic_t *vmcode; njs_vmcode_variable_t *var; njs_vmcode_prop_get_t *get; @@ -1722,9 +1721,6 @@ NEXT_LBL; value2 = (njs_value_t *) vmcode->operand1; - finally = (njs_vmcode_finally_t *) pc; - value1 = njs_scope_value(vm, finally->exit_value); - ret = njs_vmcode_finally(vm, NULL, value2, pc); switch (ret) { From mdounin at mdounin.ru Sun Oct 30 02:41:00 2022 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Sun, 30 Oct 2022 05:41:00 +0300 Subject: [PATCH] Filtering duplicate addresses in listen (ticket #2400) Message-ID: <55bcf8dc4ee35ccf40f5.1667097660@vm-bsd.mdounin.ru> # HG changeset patch # User Maxim Dounin # Date 1667097653 -10800 # Sun Oct 30 05:40:53 2022 +0300 # Node ID 55bcf8dc4ee35ccf40f5b8a7cffde63e7edb9494 # Parent 1ae25660c0c76edef14121ca64362f28b9d57a70 Filtering duplicate addresses in listen (ticket #2400). Due to the glibc bug[1], getaddrinfo("localhost") with AI_ADDRCONFIG on a typical host with glibc and without IPv6 returns two 127.0.0.1 addresses, and therefore "listen localhost:80;" used to result the "duplicate ... address and port pair" after 4f9b72a229c1. Fix is to explicitly filter out duplicate addresses returned during resolution of a name. 
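For illustration, a standalone sketch of the same deduplication idea outside of nginx: each address returned by getaddrinfo() is kept only if it does not compare equal to one already accepted. The helper names below (addr_equal, dedup_addrinfo) are hypothetical, and the byte-wise memcmp() is a simplification; the patch itself compares family-specific fields with ngx_cmp_sockaddr() over the u.addrs[] array in the same pairwise fashion, as shown in the diff below.

    #include <stddef.h>
    #include <string.h>
    #include <netdb.h>
    #include <sys/socket.h>

    /* simplified equality check: same length and same raw bytes */
    static int
    addr_equal(const struct sockaddr *a, socklen_t alen,
        const struct sockaddr *b, socklen_t blen)
    {
        return alen == blen && memcmp(a, b, alen) == 0;
    }

    /*
     * Copy unique addrinfo entries into "list" (sized by the caller to
     * hold all entries of "res"); duplicates after the first occurrence
     * are ignored, mirroring the loops added in the patch below.
     */
    static size_t
    dedup_addrinfo(struct addrinfo **list, struct addrinfo *res)
    {
        size_t            n, i;
        struct addrinfo  *ai;

        n = 0;

        for (ai = res; ai != NULL; ai = ai->ai_next) {
            for (i = 0; i < n; i++) {
                if (addr_equal(list[i]->ai_addr, list[i]->ai_addrlen,
                               ai->ai_addr, ai->ai_addrlen))
                {
                    break;
                }
            }

            if (i == n) {
                list[n++] = ai;
            }
        }

        return n;
    }
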
[1] https://sourceware.org/bugzilla/show_bug.cgi?id=14969 diff --git a/src/http/ngx_http_core_module.c b/src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c +++ b/src/http/ngx_http_core_module.c @@ -3963,7 +3963,7 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx ngx_str_t *value, size; ngx_url_t u; - ngx_uint_t n; + ngx_uint_t n, i; ngx_http_listen_opt_t lsopt; cscf->listen = 1; @@ -4289,6 +4289,16 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx } for (n = 0; n < u.naddrs; n++) { + + for (i = 0; i < n; i++) { + if (ngx_cmp_sockaddr(u.addrs[n].sockaddr, u.addrs[n].socklen, + u.addrs[i].sockaddr, u.addrs[i].socklen, 0) + == NGX_OK) + { + goto next; + } + } + lsopt.sockaddr = u.addrs[n].sockaddr; lsopt.socklen = u.addrs[n].socklen; lsopt.addr_text = u.addrs[n].name; @@ -4297,6 +4307,9 @@ ngx_http_core_listen(ngx_conf_t *cf, ngx if (ngx_http_add_listen(cf, cscf, &lsopt) != NGX_OK) { return NGX_CONF_ERROR; } + + next: + continue; } return NGX_CONF_OK; diff --git a/src/mail/ngx_mail_core_module.c b/src/mail/ngx_mail_core_module.c --- a/src/mail/ngx_mail_core_module.c +++ b/src/mail/ngx_mail_core_module.c @@ -308,7 +308,7 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx ngx_str_t *value, size; ngx_url_t u; ngx_uint_t i, n, m; - ngx_mail_listen_t *ls, *als; + ngx_mail_listen_t *ls, *als, *nls; ngx_mail_module_t *module; ngx_mail_core_main_conf_t *cmcf; @@ -333,7 +333,7 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx cmcf = ngx_mail_conf_get_module_main_conf(cf, ngx_mail_core_module); - ls = ngx_array_push_n(&cmcf->listen, u.naddrs); + ls = ngx_array_push(&cmcf->listen); if (ls == NULL) { return NGX_CONF_ERROR; } @@ -571,17 +571,37 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx als = cmcf->listen.elts; for (n = 0; n < u.naddrs; n++) { - ls[n] = ls[0]; + + for (i = 0; i < n; i++) { + if (ngx_cmp_sockaddr(u.addrs[n].sockaddr, u.addrs[n].socklen, + u.addrs[i].sockaddr, u.addrs[i].socklen, 0) + == NGX_OK) + { + goto next; + } + } - ls[n].sockaddr = u.addrs[n].sockaddr; - ls[n].socklen = u.addrs[n].socklen; - ls[n].addr_text = u.addrs[n].name; - ls[n].wildcard = ngx_inet_wildcard(ls[n].sockaddr); + if (n != 0) { + nls = ngx_array_push(&cmcf->listen); + if (nls == NULL) { + return NGX_CONF_ERROR; + } + + *nls = *ls; - for (i = 0; i < cmcf->listen.nelts - u.naddrs + n; i++) { + } else { + nls = ls; + } + + nls->sockaddr = u.addrs[n].sockaddr; + nls->socklen = u.addrs[n].socklen; + nls->addr_text = u.addrs[n].name; + nls->wildcard = ngx_inet_wildcard(nls->sockaddr); + + for (i = 0; i < cmcf->listen.nelts - 1; i++) { if (ngx_cmp_sockaddr(als[i].sockaddr, als[i].socklen, - ls[n].sockaddr, ls[n].socklen, 1) + nls->sockaddr, nls->socklen, 1) != NGX_OK) { continue; @@ -589,9 +609,12 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "duplicate \"%V\" address and port pair", - &ls[n].addr_text); + &nls->addr_text); return NGX_CONF_ERROR; } + + next: + continue; } return NGX_CONF_OK; diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -578,7 +578,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n ngx_str_t *value, size; ngx_url_t u; ngx_uint_t i, n, backlog; - ngx_stream_listen_t *ls, *als; + ngx_stream_listen_t *ls, *als, *nls; ngx_stream_core_main_conf_t *cmcf; cscf->listen = 1; @@ -602,7 +602,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n cmcf = ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); - ls = ngx_array_push_n(&cmcf->listen, u.naddrs); + ls = 
ngx_array_push(&cmcf->listen); if (ls == NULL) { return NGX_CONF_ERROR; } @@ -889,20 +889,40 @@ ngx_stream_core_listen(ngx_conf_t *cf, n als = cmcf->listen.elts; for (n = 0; n < u.naddrs; n++) { - ls[n] = ls[0]; + + for (i = 0; i < n; i++) { + if (ngx_cmp_sockaddr(u.addrs[n].sockaddr, u.addrs[n].socklen, + u.addrs[i].sockaddr, u.addrs[i].socklen, 0) + == NGX_OK) + { + goto next; + } + } - ls[n].sockaddr = u.addrs[n].sockaddr; - ls[n].socklen = u.addrs[n].socklen; - ls[n].addr_text = u.addrs[n].name; - ls[n].wildcard = ngx_inet_wildcard(ls[n].sockaddr); + if (n != 0) { + nls = ngx_array_push(&cmcf->listen); + if (nls == NULL) { + return NGX_CONF_ERROR; + } + + *nls = *ls; - for (i = 0; i < cmcf->listen.nelts - u.naddrs + n; i++) { - if (ls[n].type != als[i].type) { + } else { + nls = ls; + } + + nls->sockaddr = u.addrs[n].sockaddr; + nls->socklen = u.addrs[n].socklen; + nls->addr_text = u.addrs[n].name; + nls->wildcard = ngx_inet_wildcard(nls->sockaddr); + + for (i = 0; i < cmcf->listen.nelts - 1; i++) { + if (nls->type != als[i].type) { continue; } if (ngx_cmp_sockaddr(als[i].sockaddr, als[i].socklen, - ls[n].sockaddr, ls[n].socklen, 1) + nls->sockaddr, nls->socklen, 1) != NGX_OK) { continue; @@ -910,9 +930,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "duplicate \"%V\" address and port pair", - &ls[n].addr_text); + &nls->addr_text); return NGX_CONF_ERROR; } + + next: + continue; } return NGX_CONF_OK; From mdounin at mdounin.ru Sun Oct 30 02:41:38 2022 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Sun, 30 Oct 2022 05:41:38 +0300 Subject: [PATCH] Disabled cloning of sockets without master process (ticket #2403) Message-ID: # HG changeset patch # User Maxim Dounin # Date 1667097682 -10800 # Sun Oct 30 05:41:22 2022 +0300 # Node ID b73d95226c84b93e51f23f7b35782d98d3b516b9 # Parent 55bcf8dc4ee35ccf40f5b8a7cffde63e7edb9494 Disabled cloning of sockets without master process (ticket #2403). Cloning of listening sockets for each worker process does not make sense when working without master process, and causes some of the connections not to be accepted if worker_processes is set to more than one and there are listening sockets configured with the reuseport flag. Fix is to disable cloning when master process is disabled. diff --git a/src/event/ngx_event.c b/src/event/ngx_event.c --- a/src/event/ngx_event.c +++ b/src/event/ngx_event.c @@ -416,6 +416,7 @@ ngx_event_init_conf(ngx_cycle_t *cycle, { #if (NGX_HAVE_REUSEPORT) ngx_uint_t i; + ngx_core_conf_t *ccf; ngx_listening_t *ls; #endif @@ -442,7 +443,9 @@ ngx_event_init_conf(ngx_cycle_t *cycle, #if (NGX_HAVE_REUSEPORT) - if (!ngx_test_config) { + ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module); + + if (!ngx_test_config && ccf->master) { ls = cycle->listening.elts; for (i = 0; i < cycle->listening.nelts; i++) { From mdounin at mdounin.ru Sun Oct 30 02:42:33 2022 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Sun, 30 Oct 2022 05:42:33 +0300 Subject: [PATCH] Fixed segfault when switching off master process during upgrade Message-ID: # HG changeset patch # User Maxim Dounin # Date 1667097733 -10800 # Sun Oct 30 05:42:13 2022 +0300 # Node ID ef9c94be7fe4685f0eeee41f76b964ea252f519f # Parent b73d95226c84b93e51f23f7b35782d98d3b516b9 Fixed segfault when switching off master process during upgrade. 
Binary upgrades are not supported without master process, but it is, however, possible, that nginx running with master process is asked to upgrade binary, and the configuration file as available on disk at this time includes "master_process off;". If this happens, listening sockets inherited from the previous binary will have ls[i].previous set. But the old cycle on initial process startup, including startup after binary upgrade, is destroyed by ngx_init_cycle() once configuration parsing is complete. As a result, an attempt to dereference ls[i].previous in ngx_event_process_init() accesses already freed memory. Fix is to avoid looking into ls[i].previous if the old cycle is already freed. diff --git a/src/event/ngx_event.c b/src/event/ngx_event.c --- a/src/event/ngx_event.c +++ b/src/event/ngx_event.c @@ -813,7 +813,9 @@ ngx_event_process_init(ngx_cycle_t *cycl rev->deferred_accept = ls[i].deferred_accept; #endif - if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) { + if (!(ngx_event_flags & NGX_USE_IOCP_EVENT) + && cycle->old_cycle) + { if (ls[i].previous) { /* From arut at nginx.com Mon Oct 31 12:07:00 2022 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 31 Oct 2022 16:07:00 +0400 Subject: [PATCH] Core: support for reading PROXY protocol v2 TLVs In-Reply-To: References: <20220905132318.s27wgtof6wuqde7x@N00W24XTQX> <20220909154658.fpnpndo2opnnzywx@N00W24XTQX> <20220913150304.k2fjjdxgesgzbilu@N00W24XTQX> <20220927094125.w7oo4g2quw3yyqfh@N00W24XTQX> <20221011130111.oiljq55eydpp3dh6@N00W24XTQX> Message-ID: <20221031120700.z5o4el64rhhpfn3y@N00W24XTQX> Hi, While testing the feature, it became clear that 107 bytes as maximum PROXY protocol header size may not be enough. This limit came from v1 and stayed unchanged when v2 was added. With this limit, there are only 79 bytes left for TLVs in case of IPv4 and 55 bytes in case of IPv6. Attached is a patch that increases buffer size up to 65K while reading PROXY protocl header. Writing is not changed since only v1 is supported. On Tue, Oct 11, 2022 at 11:53:56PM +0300, Maxim Dounin wrote: > Hello! > > On Tue, Oct 11, 2022 at 05:01:11PM +0400, Roman Arutyunyan wrote: > > [...] > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1664263604 -14400 > > # Tue Sep 27 11:26:44 2022 +0400 > > # Node ID 2774f8d59b108635752f9f2dbe3a5394a3650b85 > > # Parent ba5cf8f73a2d0a3615565bf9545f3d65216a0530 > > PROXY protocol v2 TLV variables. > > > > The variables have prefix $proxy_protocol_tlv_ and are accessible by name > > and by type. Examples are: $proxy_protocol_tlv_0x01, $proxy_protocol_tlv_alpn. > > [..] -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1667216033 -14400 # Mon Oct 31 15:33:53 2022 +0400 # Node ID 8c99314f90eccc2ad5aaf4b3de5368e964c4ffe0 # Parent 81b4326daac70d6de70abbc3fe36d4f6e3da54a2 Increased maximum read PROXY protocol header size. Maximum size for reading the PROXY protocol header is increased to 65536 to accommodate a bigger number of TLVs, which are supported since cca4c8a715de. Maximum size for writing the PROXY protocol header is not changed since only version 1 is currently supported. 
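For context, a minimal standalone sketch (not nginx code) of where the 65536 figure comes from: in PROXY protocol v2 the fixed 16-byte part of the header ends with a 16-bit, network-order length of everything that follows (address block plus TLVs), so that remainder can be up to 65535 bytes. With the previous 107-byte limit, only 107 - 16 - 12 = 79 bytes were left for TLVs with IPv4 addresses and 107 - 16 - 36 = 55 bytes with IPv6, matching the figures above. The constant names below are hypothetical; the layout follows the PROXY protocol v2 specification.

    #include <stddef.h>
    #include <stdint.h>

    #define PP2_FIXED_LEN       16  /* 12-byte signature + ver/cmd + fam + 2-byte len */
    #define PP2_ADDR_INET_LEN   12  /* IPv4 src/dst addresses and ports */
    #define PP2_ADDR_INET6_LEN  36  /* IPv6 src/dst addresses and ports */

    /*
     * Return the number of bytes that follow the fixed part of a v2
     * header, i.e. the size of the address block plus TLVs, or -1 if
     * not enough bytes are available yet.  The worst case is 65535,
     * hence a read buffer of 65536 bytes.
     */
    static int
    pp2_remaining_len(const uint8_t *buf, size_t avail)
    {
        if (avail < PP2_FIXED_LEN) {
            return -1;
        }

        return (buf[14] << 8) | buf[15];    /* network byte order */
    }

The diff below also turns the read-side buffers in ngx_http_ssl_handshake(), ngx_mail_proxy_protocol_handler() and ngx_stream_proxy_protocol_handler() into static variables, presumably to keep a 64K buffer off the stack, while the write path keeps stack buffers of NGX_PROXY_PROTOCOL_V1_MAX_HEADER bytes since only version 1 is written.
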
diff --git a/src/core/ngx_proxy_protocol.c b/src/core/ngx_proxy_protocol.c --- a/src/core/ngx_proxy_protocol.c +++ b/src/core/ngx_proxy_protocol.c @@ -281,7 +281,7 @@ ngx_proxy_protocol_write(ngx_connection_ { ngx_uint_t port, lport; - if (last - buf < NGX_PROXY_PROTOCOL_MAX_HEADER) { + if (last - buf < NGX_PROXY_PROTOCOL_V1_MAX_HEADER) { return NULL; } diff --git a/src/core/ngx_proxy_protocol.h b/src/core/ngx_proxy_protocol.h --- a/src/core/ngx_proxy_protocol.h +++ b/src/core/ngx_proxy_protocol.h @@ -13,7 +13,8 @@ #include -#define NGX_PROXY_PROTOCOL_MAX_HEADER 107 +#define NGX_PROXY_PROTOCOL_V1_MAX_HEADER 107 +#define NGX_PROXY_PROTOCOL_MAX_HEADER 65536 struct ngx_proxy_protocol_s { diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c +++ b/src/http/ngx_http_request.c @@ -641,7 +641,7 @@ ngx_http_alloc_request(ngx_connection_t static void ngx_http_ssl_handshake(ngx_event_t *rev) { - u_char *p, buf[NGX_PROXY_PROTOCOL_MAX_HEADER + 1]; + u_char *p; size_t size; ssize_t n; ngx_err_t err; @@ -651,6 +651,7 @@ ngx_http_ssl_handshake(ngx_event_t *rev) ngx_http_ssl_srv_conf_t *sscf; ngx_http_core_loc_conf_t *clcf; ngx_http_core_srv_conf_t *cscf; + static u_char buf[NGX_PROXY_PROTOCOL_MAX_HEADER + 1]; c = rev->data; hc = c->data; diff --git a/src/mail/ngx_mail_handler.c b/src/mail/ngx_mail_handler.c --- a/src/mail/ngx_mail_handler.c +++ b/src/mail/ngx_mail_handler.c @@ -197,13 +197,14 @@ ngx_mail_init_connection(ngx_connection_ static void ngx_mail_proxy_protocol_handler(ngx_event_t *rev) { - u_char *p, buf[NGX_PROXY_PROTOCOL_MAX_HEADER]; + u_char *p; size_t size; ssize_t n; ngx_err_t err; ngx_connection_t *c; ngx_mail_session_t *s; ngx_mail_core_srv_conf_t *cscf; + static u_char buf[NGX_PROXY_PROTOCOL_MAX_HEADER]; c = rev->data; s = c->data; diff --git a/src/mail/ngx_mail_proxy_module.c b/src/mail/ngx_mail_proxy_module.c --- a/src/mail/ngx_mail_proxy_module.c +++ b/src/mail/ngx_mail_proxy_module.c @@ -890,7 +890,7 @@ ngx_mail_proxy_send_proxy_protocol(ngx_m u_char *p; ssize_t n, size; ngx_connection_t *c; - u_char buf[NGX_PROXY_PROTOCOL_MAX_HEADER]; + u_char buf[NGX_PROXY_PROTOCOL_V1_MAX_HEADER]; s->connection->log->action = "sending PROXY protocol header to upstream"; @@ -898,7 +898,7 @@ ngx_mail_proxy_send_proxy_protocol(ngx_m "mail proxy send PROXY protocol header"); p = ngx_proxy_protocol_write(s->connection, buf, - buf + NGX_PROXY_PROTOCOL_MAX_HEADER); + buf + NGX_PROXY_PROTOCOL_V1_MAX_HEADER); if (p == NULL) { ngx_mail_proxy_internal_server_error(s); return NGX_ERROR; diff --git a/src/stream/ngx_stream_handler.c b/src/stream/ngx_stream_handler.c --- a/src/stream/ngx_stream_handler.c +++ b/src/stream/ngx_stream_handler.c @@ -205,13 +205,14 @@ ngx_stream_init_connection(ngx_connectio static void ngx_stream_proxy_protocol_handler(ngx_event_t *rev) { - u_char *p, buf[NGX_PROXY_PROTOCOL_MAX_HEADER]; + u_char *p; size_t size; ssize_t n; ngx_err_t err; ngx_connection_t *c; ngx_stream_session_t *s; ngx_stream_core_srv_conf_t *cscf; + static u_char buf[NGX_PROXY_PROTOCOL_MAX_HEADER]; c = rev->data; s = c->data; diff --git a/src/stream/ngx_stream_proxy_module.c b/src/stream/ngx_stream_proxy_module.c --- a/src/stream/ngx_stream_proxy_module.c +++ b/src/stream/ngx_stream_proxy_module.c @@ -894,7 +894,7 @@ ngx_stream_proxy_init_upstream(ngx_strea return; } - p = ngx_pnalloc(c->pool, NGX_PROXY_PROTOCOL_MAX_HEADER); + p = ngx_pnalloc(c->pool, NGX_PROXY_PROTOCOL_V1_MAX_HEADER); if (p == NULL) { ngx_stream_proxy_finalize(s, 
NGX_STREAM_INTERNAL_SERVER_ERROR); return; @@ -902,7 +902,8 @@ ngx_stream_proxy_init_upstream(ngx_strea cl->buf->pos = p; - p = ngx_proxy_protocol_write(c, p, p + NGX_PROXY_PROTOCOL_MAX_HEADER); + p = ngx_proxy_protocol_write(c, p, + p + NGX_PROXY_PROTOCOL_V1_MAX_HEADER); if (p == NULL) { ngx_stream_proxy_finalize(s, NGX_STREAM_INTERNAL_SERVER_ERROR); return; @@ -946,14 +947,15 @@ ngx_stream_proxy_send_proxy_protocol(ngx ngx_connection_t *c, *pc; ngx_stream_upstream_t *u; ngx_stream_proxy_srv_conf_t *pscf; - u_char buf[NGX_PROXY_PROTOCOL_MAX_HEADER]; + u_char buf[NGX_PROXY_PROTOCOL_V1_MAX_HEADER]; c = s->connection; ngx_log_debug0(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream proxy send PROXY protocol header"); - p = ngx_proxy_protocol_write(c, buf, buf + NGX_PROXY_PROTOCOL_MAX_HEADER); + p = ngx_proxy_protocol_write(c, buf, + buf + NGX_PROXY_PROTOCOL_V1_MAX_HEADER); if (p == NULL) { ngx_stream_proxy_finalize(s, NGX_STREAM_INTERNAL_SERVER_ERROR); return NGX_ERROR;