From u5.horie at gmail.com Sun Oct 1 22:58:49 2023 From: u5.horie at gmail.com (u5h) Date: Mon, 2 Oct 2023 07:58:49 +0900 Subject: Fixed stucking cache status into UPDATING. #2162 Message-ID: # HG changeset patch # User Yugo Horie # Date 1696144340 -32400 # Sun Oct 01 16:12:20 2023 +0900 # Node ID f8f6290d60615a4dfe2d8b26246891228f19aa5d # Parent 3db945fda515014d220151046d02f3960bcfca0a Fixed stucking cache status into UPDATING. Fixed issue-2162's stucking cache status to introduce `last_updating_time` to record at the time to start the item last update. https://trac.nginx.org/nginx/ticket/2162#comment:6 diff -r 3db945fda515 -r f8f6290d6061 src/http/modules/ngx_http_proxy_module.c --- a/src/http/modules/ngx_http_proxy_module.c Fri Sep 22 19:23:57 2023 +0400 +++ b/src/http/modules/ngx_http_proxy_module.c Sun Oct 01 16:12:20 2023 +0900 @@ -564,6 +564,13 @@ offsetof(ngx_http_proxy_loc_conf_t, upstream.cache_use_stale), &ngx_http_proxy_next_upstream_masks }, + { ngx_string("proxy_cache_updating_timeout"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, + ngx_conf_set_msec_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_proxy_loc_conf_t, upstream.cache_updating_timeout), + NULL }, + { ngx_string("proxy_cache_methods"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_1MORE, ngx_conf_set_bitmask_slot, @@ -3388,6 +3395,7 @@ conf->upstream.no_cache = NGX_CONF_UNSET_PTR; conf->upstream.cache_valid = NGX_CONF_UNSET_PTR; conf->upstream.cache_lock = NGX_CONF_UNSET; + conf->upstream.cache_updating_timeout = NGX_CONF_UNSET_MSEC; conf->upstream.cache_lock_timeout = NGX_CONF_UNSET_MSEC; conf->upstream.cache_lock_age = NGX_CONF_UNSET_MSEC; conf->upstream.cache_revalidate = NGX_CONF_UNSET; diff -r 3db945fda515 -r f8f6290d6061 src/http/ngx_http_cache.h --- a/src/http/ngx_http_cache.h Fri Sep 22 19:23:57 2023 +0400 +++ b/src/http/ngx_http_cache.h Sun Oct 01 16:12:20 2023 +0900 @@ -59,6 +59,7 @@ size_t body_start; off_t fs_size; ngx_msec_t 
lock_time; + ngx_msec_t last_updating_time; } ngx_http_file_cache_node_t; @@ -122,6 +123,8 @@ unsigned stale_updating:1; unsigned stale_error:1; + + ngx_msec_t updating_timeout; }; diff -r 3db945fda515 -r f8f6290d6061 src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c Fri Sep 22 19:23:57 2023 +0400 +++ b/src/http/ngx_http_file_cache.c Sun Oct 01 16:12:20 2023 +0900 @@ -643,6 +643,10 @@ ngx_shmtx_lock(&cache->shpool->mutex); if (c->node->updating) { + if (c->updating_timeout != NGX_CONF_UNSET_MSEC + && ngx_current_msec - c->node->last_updating_time > c->updating_ timeout) { + c->node->updating = 0; + } rc = NGX_HTTP_CACHE_UPDATING; } else { @@ -1377,6 +1381,10 @@ cache = c->file_cache; + ngx_shmtx_lock(&cache->shpool->mutex); + c->node->last_updating_time = ngx_current_msec; + ngx_shmtx_unlock(&cache->shpool->mutex); + c->updated = 1; c->updating = 0; diff -r 3db945fda515 -r f8f6290d6061 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Fri Sep 22 19:23:57 2023 +0400 +++ b/src/http/ngx_http_upstream.c Sun Oct 01 16:12:20 2023 +0900 @@ -877,6 +877,7 @@ c->body_start = u->conf->buffer_size; c->min_uses = u->conf->cache_min_uses; c->file_cache = cache; + c->updating_timeout = u->conf->cache_updating_timeout; switch (ngx_http_test_predicates(r, u->conf->cache_bypass)) { diff -r 3db945fda515 -r f8f6290d6061 src/http/ngx_http_upstream.h --- a/src/http/ngx_http_upstream.h Fri Sep 22 19:23:57 2023 +0400 +++ b/src/http/ngx_http_upstream.h Sun Oct 01 16:12:20 2023 +0900 @@ -199,6 +199,8 @@ ngx_uint_t cache_use_stale; ngx_uint_t cache_methods; + ngx_msec_t cache_updating_timeout; + off_t cache_max_range_offset; ngx_flag_t cache_lock; -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From yar at nginx.com Tue Oct 3 09:18:22 2023 From: yar at nginx.com (Yaroslav Zhuravlev) Date: Tue, 3 Oct 2023 10:18:22 +0100 Subject: [PATCH] Updated OpenSSL and zlib versions In-Reply-To: References: <32a78db44cf00ab54063.1695734058@ORK-ML-00007151> Message-ID: [...] >> - --with-zlib=../zlib-1.2.11 >> + --with-zlib=../zlib-1.2.13 >> >> >> > > zlib 1.3 was recently released and works just fine, so this > probably should be bumped to 1.3 instead. > >> diff --git a/xml/en/docs/howto_build_on_win32.xml b/xml/en/docs/howto_build_on_win32.xml >> --- a/xml/en/docs/howto_build_on_win32.xml >> +++ b/xml/en/docs/howto_build_on_win32.xml >> @@ -9,7 +9,7 @@ >>
> link="/en/docs/howto_build_on_win32.html" >> lang="en" >> - rev="25"> >> + rev="26"> >> >>
>> >> @@ -81,8 +81,8 @@ >> mkdir objs/lib >> cd objs/lib >> tar -xzf ../../pcre2-10.39.tar.gz >> -tar -xzf ../../zlib-1.2.11.tar.gz >> -tar -xzf ../../openssl-1.1.1m.tar.gz >> +tar -xzf ../../zlib-1.2.13.tar.gz >> +tar -xzf ../../openssl-3.0.10.tar.gz >> >> >> >> @@ -105,8 +105,8 @@ >> --http-uwsgi-temp-path=temp/uwsgi_temp \ >> --with-cc-opt=-DFD_SETSIZE=1024 \ >> --with-pcre=objs/lib/pcre2-10.39 \ >> - --with-zlib=objs/lib/zlib-1.2.11 \ >> - --with-openssl=objs/lib/openssl-1.1.1m \ >> + --with-zlib=objs/lib/zlib-1.2.13 \ >> + --with-openssl=objs/lib/openssl-3.0.10 \ >> --with-openssl-opt=no-asm \ >> --with-http_ssl_module >> > > And this used to match latest release of nginx/Windows, so zlib > 1.2.13 and OpenSSL 3.0.10 are probably fine here. On the other > hand, bumping to zlib 1.3 and OpenSSL 3.0.11 might be good enough > as well (and will save an update to this file). > >> diff --git a/xml/ru/docs/configure.xml b/xml/ru/docs/configure.xml >> --- a/xml/ru/docs/configure.xml >> +++ b/xml/ru/docs/configure.xml >> @@ -8,7 +8,7 @@ >>
> link="/ru/docs/configure.html" >> lang="ru" >> - rev="22"> >> + rev="23"> >> >>
>> >> @@ -1250,7 +1250,7 @@ >> >> задаёт путь к исходным текстам библиотеки zlib. >> Дистрибутив библиотеки (версию >> -1.1.3—1.2.11) нужно взять на сайте >> +1.1.3—1.2.13) нужно взять на сайте >> zlib и распаковать. >> Всё остальное сделают ./configure nginx’а и >> make. >> @@ -1339,7 +1339,7 @@ >> --pid-path=/usr/local/nginx/nginx.pid >> --with-http_ssl_module >> --with-pcre=../pcre2-10.39 >> - --with-zlib=../zlib-1.2.11 >> + --with-zlib=../zlib-1.2.13 >> >> >> > > Otherwise looks good to me. Hi Maxim, Thank you for the review - zlib version updated to 1.3 and patch committed - http://hg.nginx.org/nginx.org/rev/f55e18559df9 Best regards, Yar From leamovret at gmail.com Tue Oct 3 22:46:05 2023 From: leamovret at gmail.com (=?iso-8859-1?q?Toshihito_Kikuchi?=) Date: Tue, 03 Oct 2023 15:46:05 -0700 Subject: [PATCH] Auth basic: Cache credentials if auth_basic_user_file is static Message-ID: # HG changeset patch # User Toshihito Kikuchi # Date 1696359541 25200 # Tue Oct 03 11:59:01 2023 -0700 # Node ID e397ea6cfa85e85ae0865c5061397dc295fb7df1 # Parent 3db945fda515014d220151046d02f3960bcfca0a Auth basic: Cache credentials if auth_basic_user_file is static. In the current design, when auth_basic is on, every HTTP request triggers file I/O (open, read, close) to the file specified in auth_basic_user_file. Probably this is to allow auth_basic_user_file to contain variables. If the value is just a static text, however, there is no reason to read the same file every request in every worker process. It unnecessarily consumes system resources. With this patch, if auth_basic_user_file does not have any variables, we cache its content in the location context at configuration time and use it in all subsequent requests. If auth_basic_user_file contain variables, we keep the original behavior. 
diff --git a/src/http/modules/ngx_http_auth_basic_module.c b/src/http/modules/ngx_http_auth_basic_module.c --- a/src/http/modules/ngx_http_auth_basic_module.c +++ b/src/http/modules/ngx_http_auth_basic_module.c @@ -15,11 +15,21 @@ typedef struct { + ngx_chain_t cache; ngx_http_complex_value_t *realm; ngx_http_complex_value_t *user_file; } ngx_http_auth_basic_loc_conf_t; +typedef struct { + off_t offset; + ngx_file_t file; + ngx_chain_t *chain; + ngx_http_request_t *r; + ngx_http_auth_basic_loc_conf_t *alcf; +} ngx_http_auth_basic_file_ctx_t; + + static ngx_int_t ngx_http_auth_basic_handler(ngx_http_request_t *r); static ngx_int_t ngx_http_auth_basic_crypt_handler(ngx_http_request_t *r, ngx_str_t *passwd, ngx_str_t *realm); @@ -31,6 +41,14 @@ static char *ngx_http_auth_basic_merge_l static ngx_int_t ngx_http_auth_basic_init(ngx_conf_t *cf); static char *ngx_http_auth_basic_user_file(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +static ngx_int_t ngx_http_auth_basic_read_file(ngx_pool_t *pool, + ngx_str_t *filename, ngx_log_t *log, ngx_chain_t *chain_out); +static ngx_int_t ngx_http_auth_basic_init_file_ctx( + ngx_http_auth_basic_file_ctx_t *ctx); +static ssize_t ngx_http_auth_basic_read_file_ctx( + ngx_http_auth_basic_file_ctx_t *ctx, u_char *buf_out, size_t size); +static void ngx_http_auth_basic_cleanup_file_ctx( + ngx_http_auth_basic_file_ctx_t *ctx); static ngx_command_t ngx_http_auth_basic_commands[] = { @@ -89,14 +107,11 @@ ngx_module_t ngx_http_auth_basic_module static ngx_int_t ngx_http_auth_basic_handler(ngx_http_request_t *r) { - off_t offset; ssize_t n; - ngx_fd_t fd; ngx_int_t rc; - ngx_err_t err; - ngx_str_t pwd, realm, user_file; - ngx_uint_t i, level, login, left, passwd; - ngx_file_t file; + ngx_str_t pwd, realm; + ngx_uint_t i, login, left, passwd; + ngx_http_auth_basic_file_ctx_t file_ctx; ngx_http_auth_basic_loc_conf_t *alcf; u_char buf[NGX_HTTP_AUTH_BUF_SIZE]; enum { @@ -133,47 +148,23 @@ ngx_http_auth_basic_handler(ngx_http_req return 
NGX_HTTP_INTERNAL_SERVER_ERROR; } - if (ngx_http_complex_value(r, alcf->user_file, &user_file) != NGX_OK) { - return NGX_ERROR; - } - - fd = ngx_open_file(user_file.data, NGX_FILE_RDONLY, NGX_FILE_OPEN, 0); - - if (fd == NGX_INVALID_FILE) { - err = ngx_errno; - - if (err == NGX_ENOENT) { - level = NGX_LOG_ERR; - rc = NGX_HTTP_FORBIDDEN; - - } else { - level = NGX_LOG_CRIT; - rc = NGX_HTTP_INTERNAL_SERVER_ERROR; - } - - ngx_log_error(level, r->connection->log, err, - ngx_open_file_n " \"%s\" failed", user_file.data); - + file_ctx.alcf = alcf; + file_ctx.r = r; + rc = ngx_http_auth_basic_init_file_ctx(&file_ctx); + if (rc != NGX_OK) { return rc; } - ngx_memzero(&file, sizeof(ngx_file_t)); - - file.fd = fd; - file.name = user_file; - file.log = r->connection->log; - state = sw_login; passwd = 0; login = 0; left = 0; - offset = 0; for ( ;; ) { i = left; - n = ngx_read_file(&file, buf + left, NGX_HTTP_AUTH_BUF_SIZE - left, - offset); + n = ngx_http_auth_basic_read_file_ctx(&file_ctx, buf + left, + NGX_HTTP_AUTH_BUF_SIZE - left); if (n == NGX_ERROR) { rc = NGX_HTTP_INTERNAL_SERVER_ERROR; @@ -245,8 +236,6 @@ ngx_http_auth_basic_handler(ngx_http_req } else { left = 0; } - - offset += n; } if (state == sw_passwd) { @@ -264,16 +253,13 @@ ngx_http_auth_basic_handler(ngx_http_req ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "user \"%V\" was not found in \"%s\"", - &r->headers_in.user, user_file.data); + &r->headers_in.user, &file_ctx.file.name.data); rc = ngx_http_auth_basic_set_realm(r, &realm); cleanup: - if (ngx_close_file(file.fd) == NGX_FILE_ERROR) { - ngx_log_error(NGX_LOG_ALERT, r->connection->log, ngx_errno, - ngx_close_file_n " \"%s\" failed", user_file.data); - } + ngx_http_auth_basic_cleanup_file_ctx(&file_ctx); ngx_explicit_memzero(buf, NGX_HTTP_AUTH_BUF_SIZE); @@ -374,6 +360,19 @@ ngx_http_auth_basic_merge_loc_conf(ngx_c ngx_conf_merge_ptr_value(conf->realm, prev->realm, NULL); ngx_conf_merge_ptr_value(conf->user_file, prev->user_file, NULL); + if 
(conf->user_file != NULL && conf->user_file->lengths == NULL) { + + /* + * If the given expression is a static text, we read the file at + * configuration time. + */ + + if (ngx_http_auth_basic_read_file(cf->pool, &conf->user_file->value, + cf->log, &conf->cache) != NGX_OK) { + return NGX_CONF_ERROR; + } + } + return NGX_CONF_OK; } @@ -430,3 +429,207 @@ ngx_http_auth_basic_user_file(ngx_conf_t return NGX_CONF_OK; } + + +static ngx_int_t +ngx_http_auth_basic_read_file(ngx_pool_t *pool, ngx_str_t *filename, + ngx_log_t *log, ngx_chain_t *chain_out) +{ + off_t offset; + ssize_t n; + ngx_fd_t fd; + ngx_buf_t *p; + ngx_int_t rc; + ngx_file_t file; + ngx_chain_t *cl; + u_char buf[NGX_HTTP_AUTH_BUF_SIZE]; + + if (chain_out == NULL + || chain_out->buf != NULL + || chain_out->next != NULL) { + ngx_log_error(NGX_LOG_ALERT, log, ngx_errno, + "ngx_http_auth_basic_read_file() accepts only an empty" + " chain."); + return NGX_ERROR; + } + + fd = ngx_open_file(filename->data, NGX_FILE_RDONLY, NGX_FILE_OPEN, 0); + if (fd == NGX_INVALID_FILE) { + ngx_log_error(NGX_LOG_ALERT, log, ngx_errno, + ngx_open_file_n " \"%V\" failed", filename); + return NGX_ENOENT; + } + + ngx_memzero(&file, sizeof(ngx_file_t)); + file.fd = fd; + file.name = *filename; + file.log = log; + + rc = NGX_OK; + + for (offset = 0; /* void */ ; offset += n) { + n = ngx_read_file(&file, buf, NGX_HTTP_AUTH_BUF_SIZE, offset); + if (n == 0) { + break; + + } else if (n == NGX_ERROR) { + rc = NGX_ERROR; + break; + } + + p = ngx_create_temp_buf(pool, n); + if (p == NULL) { + rc = NGX_ENOMEM; + ngx_log_error(NGX_LOG_ALERT, log, ngx_errno, + "Cannot allocate a buffer"); + break; + } + + ngx_memcpy(p->start, buf, n); + + if (chain_out->buf == NULL) { + + /* First chain is provided by the caller. No allocation needed. 
*/ + + chain_out->buf = p; + + } else { + cl = ngx_alloc_chain_link(pool); + if (cl == NULL) { + rc = NGX_ENOMEM; + ngx_log_error(NGX_LOG_ALERT, log, ngx_errno, + "Cannot allocate a chain"); + break; + } + + cl->buf = p; + chain_out->next = cl; + chain_out = cl; + } + } + + if (ngx_close_file(fd) == NGX_FILE_ERROR) { + ngx_log_error(NGX_LOG_ALERT, log, ngx_errno, + ngx_close_file_n " \"%V\" failed", filename); + } + + return rc; +} + + +static ngx_int_t +ngx_http_auth_basic_init_file_ctx(ngx_http_auth_basic_file_ctx_t *ctx) +{ + ngx_fd_t fd; + ngx_err_t err; + ngx_int_t rc; + ngx_str_t user_file; + ngx_uint_t level; + ngx_http_request_t *r; + ngx_http_auth_basic_loc_conf_t *alcf; + + r = ctx->r; + alcf = ctx->alcf; + + ngx_memzero(&ctx->file, sizeof(ngx_file_t)); + ctx->chain = NULL; + ctx->offset = 0; + + if (alcf->user_file->lengths == NULL) { + ctx->chain = &alcf->cache; + ctx->file.name = alcf->user_file->value; + return NGX_OK; + } + + if (ngx_http_complex_value(r, alcf->user_file, &user_file) != NGX_OK) { + return NGX_ERROR; + } + + fd = ngx_open_file(user_file.data, NGX_FILE_RDONLY, NGX_FILE_OPEN, 0); + + if (fd == NGX_INVALID_FILE) { + err = ngx_errno; + + if (err == NGX_ENOENT) { + level = NGX_LOG_ERR; + rc = NGX_HTTP_FORBIDDEN; + + } else { + level = NGX_LOG_CRIT; + rc = NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + ngx_log_error(level, r->connection->log, err, + ngx_open_file_n " \"%s\" failed", user_file.data); + + return rc; + } + + ctx->file.fd = fd; + ctx->file.name = user_file; + ctx->file.log = r->connection->log; + + return NGX_OK; +} + + +static ssize_t +ngx_http_auth_basic_read_file_ctx(ngx_http_auth_basic_file_ctx_t *ctx, + u_char *buf_out, size_t size) +{ + off_t offset; + size_t remaining; + u_char *p; + ssize_t n; + ngx_buf_t *buf; + ngx_chain_t *cl; + + if (!ctx->alcf->user_file->lengths) { + offset = ctx->offset; + p = buf_out; + n = 0; + + for (cl = ctx->chain; cl; cl = cl->next) { + buf = cl->buf; + remaining = buf->end - buf->start - offset; 
+ if (size <= remaining) { + ngx_memcpy(p, buf->start, size); + n += size; + offset += size; + break; + } + + ngx_memcpy(p, buf->start, remaining); + n += remaining; + p += remaining; + size -= remaining; + offset = 0; + } + + ctx->chain = cl; + ctx->offset = offset; + return n; + } + + n = ngx_read_file(&ctx->file, buf_out, size, ctx->offset); + if (n == NGX_ERROR) { + return NGX_ERROR; + } + + ctx->offset += n; + return n; +} + + +static void +ngx_http_auth_basic_cleanup_file_ctx(ngx_http_auth_basic_file_ctx_t *ctx) +{ + if (ctx->alcf->user_file->lengths == NULL) { + return; + } + + if (ngx_close_file(ctx->file.fd) == NGX_FILE_ERROR) { + ngx_log_error(NGX_LOG_ALERT, ctx->r->connection->log, ngx_errno, + ngx_close_file_n " \"%s\" failed", ctx->file.name.data); + } +} From mdounin at mdounin.ru Wed Oct 4 01:03:44 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 4 Oct 2023 04:03:44 +0300 Subject: [PATCH] Auth basic: Cache credentials if auth_basic_user_file is static In-Reply-To: References: Message-ID: Hello! On Tue, Oct 03, 2023 at 03:46:05PM -0700, Toshihito Kikuchi wrote: > # HG changeset patch > # User Toshihito Kikuchi > # Date 1696359541 25200 > # Tue Oct 03 11:59:01 2023 -0700 > # Node ID e397ea6cfa85e85ae0865c5061397dc295fb7df1 > # Parent 3db945fda515014d220151046d02f3960bcfca0a > Auth basic: Cache credentials if auth_basic_user_file is static. > > In the current design, when auth_basic is on, every HTTP request triggers > file I/O (open, read, close) to the file specified in auth_basic_user_file. > Probably this is to allow auth_basic_user_file to contain variables. > > If the value is just a static text, however, there is no reason to read the > same file every request in every worker process. It unnecessarily consumes > system resources. > > With this patch, if auth_basic_user_file does not have any variables, we > cache its content in the location context at configuration time and use it > in all subsequent requests. 
If auth_basic_user_file contain variables, we keep > the original behavior. As currently implemented, auth_basic_user_file is read at runtime, making it possible to change users and their passwords - which is a relatively common task - without reloading nginx itself. And this behaviour matches the one in Apache, which does the same. Changing this behaviour to read the password file while loading configuration (so any changes to the file won't be applied unless nginx is reloaded) would certainly break POLA, and needs some really good justification. Further, in typical setups the file is effectively cached by the OS itself, making the I/O operations mentioned almost free, especially compared to costs of typical password hash calculations. [...] -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Wed Oct 4 20:54:58 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Wed, 04 Oct 2023 20:54:58 +0000 Subject: [njs] Improved memory footprint of RegExp.prototype.test(). Message-ID: details: https://hg.nginx.org/njs/rev/a6410aad6eab branches: changeset: 2213:a6410aad6eab user: Dmitry Volyntsev date: Tue Oct 03 00:39:04 2023 -0700 description: Improved memory footprint of RegExp.prototype.test(). 
diffstat: src/njs_regexp.c | 102 ++++++++++++++++++++++++++++++++++++++---------------- src/njs_regexp.h | 2 - 2 files changed, 71 insertions(+), 33 deletions(-) diffs (228 lines): diff -r f076da3c7a6f -r a6410aad6eab src/njs_regexp.c --- a/src/njs_regexp.c Fri Sep 29 21:41:34 2023 -0700 +++ b/src/njs_regexp.c Tue Oct 03 00:39:04 2023 -0700 @@ -25,6 +25,9 @@ static u_char *njs_regexp_compile_trace_ njs_trace_data_t *td, u_char *start); static u_char *njs_regexp_match_trace_handler(njs_trace_t *trace, njs_trace_data_t *td, u_char *start); +#define NJS_REGEXP_FLAG_TEST 1 +static njs_int_t njs_regexp_exec(njs_vm_t *vm, njs_value_t *r, njs_value_t *s, + unsigned flags, njs_value_t *retval); static njs_array_t *njs_regexp_exec_result(njs_vm_t *vm, njs_value_t *r, njs_utf8_t utf8, njs_string_prop_t *string, njs_regex_match_data_t *data); static njs_int_t njs_regexp_string_create(njs_vm_t *vm, njs_value_t *value, @@ -849,7 +852,7 @@ njs_regexp_prototype_test(njs_vm_t *vm, return NJS_ERROR; } - ret = njs_regexp_exec(vm, r, string, &value); + ret = njs_regexp_exec(vm, r, string, NJS_REGEXP_FLAG_TEST, &value); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } @@ -865,10 +868,11 @@ njs_regexp_prototype_test(njs_vm_t *vm, */ static njs_int_t njs_regexp_builtin_exec(njs_vm_t *vm, njs_value_t *r, njs_value_t *s, - njs_value_t *retval) + unsigned flags, njs_value_t *retval) { - size_t length, offset; + size_t c, length, offset; int64_t last_index; + uint32_t index; njs_int_t ret; njs_utf8_t utf8; njs_value_t value; @@ -940,6 +944,32 @@ njs_regexp_builtin_exec(njs_vm_t *vm, nj ret = njs_regexp_match(vm, &pattern->regex[type], string.start, offset, string.size, match_data); if (ret >= 0) { + if (pattern->global || pattern->sticky) { + c = njs_regex_capture(match_data, 1); + + if (utf8 == NJS_STRING_UTF8) { + index = njs_string_index(&string, c); + + } else { + index = c; + } + + njs_set_number(&value, index); + ret = njs_value_property_set(vm, r, + 
njs_value_arg(&njs_string_lindex), + &value); + if (njs_slow_path(ret != NJS_OK)) { + njs_regex_match_data_free(match_data, vm->regex_generic_ctx); + return NJS_ERROR; + } + } + + if (flags & NJS_REGEXP_FLAG_TEST) { + njs_regex_match_data_free(match_data, vm->regex_generic_ctx); + njs_set_boolean(retval, 1); + return NJS_OK; + } + result = njs_regexp_exec_result(vm, r, utf8, &string, match_data); njs_regex_match_data_free(match_data, vm->regex_generic_ctx); @@ -975,6 +1005,12 @@ not_found: } +static njs_exotic_slots_t njs_regexp_prototype_exotic_slots = { + .prop_handler = NULL, + .keys = NULL, +}; + + static njs_array_t * njs_regexp_exec_result(njs_vm_t *vm, njs_value_t *r, njs_utf8_t utf8, njs_string_prop_t *string, njs_regex_match_data_t *match_data) @@ -986,7 +1022,7 @@ njs_regexp_exec_result(njs_vm_t *vm, njs njs_int_t ret; njs_uint_t i, n; njs_array_t *array; - njs_value_t name, value; + njs_value_t name; njs_object_t *groups; njs_regexp_t *regexp; njs_object_prop_t *prop; @@ -1005,6 +1041,8 @@ njs_regexp_exec_result(njs_vm_t *vm, njs goto fail; } + array->object.slots = &njs_regexp_prototype_exotic_slots; + for (i = 0; i < pattern->ncaptures; i++) { n = 2 * i; c = njs_regex_capture(match_data, n); @@ -1048,24 +1086,6 @@ njs_regexp_exec_result(njs_vm_t *vm, njs njs_set_number(&prop->u.value, index); - if (pattern->global || pattern->sticky) { - c = njs_regex_capture(match_data, 1); - - if (utf8 == NJS_STRING_UTF8) { - index = njs_string_index(string, c); - - } else { - index = c; - } - - njs_set_number(&value, index); - ret = njs_value_property_set(vm, r, njs_value_arg(&njs_string_lindex), - &value); - if (njs_slow_path(ret != NJS_OK)) { - goto fail; - } - } - lhq.key_hash = NJS_INDEX_HASH; lhq.key = njs_str_value("index"); lhq.replace = 0; @@ -1184,16 +1204,18 @@ njs_regexp_prototype_exec(njs_vm_t *vm, return ret; } - return njs_regexp_builtin_exec(vm, r, s, retval); + return njs_regexp_builtin_exec(vm, r, s, + njs_number(njs_arg(args, nargs, 2)), retval); } 
-njs_int_t -njs_regexp_exec(njs_vm_t *vm, njs_value_t *r, njs_value_t *s, +static njs_int_t +njs_regexp_exec(njs_vm_t *vm, njs_value_t *r, njs_value_t *s, unsigned flags, njs_value_t *retval) { njs_int_t ret; njs_value_t exec; + njs_value_t arguments[2]; static const njs_value_t string_exec = njs_string("exec"); @@ -1203,12 +1225,30 @@ njs_regexp_exec(njs_vm_t *vm, njs_value_ } if (njs_is_function(&exec)) { - ret = njs_function_call(vm, njs_function(&exec), r, s, 1, retval); + njs_value_assign(&arguments[0], s); + + if (flags) { + njs_set_number(&arguments[1], flags); + } + + ret = njs_function_call(vm, njs_function(&exec), r, arguments, + flags ? 2 : 1, retval); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } - if (njs_slow_path(!njs_is_object(retval) && !njs_is_null(retval))) { + if (njs_slow_path(!njs_is_null(retval) + && (flags & NJS_REGEXP_FLAG_TEST + && !njs_is_boolean(retval)))) + { + njs_type_error(vm, "unexpected \"%s\" retval in njs_regexp_exec()", + njs_type_string(retval->type)); + return NJS_ERROR; + } + + if (njs_slow_path(!njs_is_null(retval) + && (!flags && !njs_is_object(retval)))) + { njs_type_error(vm, "unexpected \"%s\" retval in njs_regexp_exec()", njs_type_string(retval->type)); return NJS_ERROR; @@ -1222,7 +1262,7 @@ njs_regexp_exec(njs_vm_t *vm, njs_value_ return NJS_ERROR; } - return njs_regexp_builtin_exec(vm, r, s, retval); + return njs_regexp_builtin_exec(vm, r, s, flags, retval); } @@ -1319,7 +1359,7 @@ njs_regexp_prototype_symbol_replace(njs_ goto exception; } - ret = njs_regexp_exec(vm, rx, string, r); + ret = njs_regexp_exec(vm, rx, string, 0, r); if (njs_slow_path(ret != NJS_OK)) { goto exception; } @@ -1630,7 +1670,7 @@ njs_regexp_prototype_symbol_split(njs_vm length = njs_string_prop(&s, string); if (njs_slow_path(s.size == 0)) { - ret = njs_regexp_exec(vm, rx, string, &z); + ret = njs_regexp_exec(vm, rx, string, NJS_REGEXP_FLAG_TEST, &z); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } @@ -1659,7 +1699,7 @@ 
njs_regexp_prototype_symbol_split(njs_vm return NJS_ERROR; } - ret = njs_regexp_exec(vm, rx, string, &z); + ret = njs_regexp_exec(vm, rx, string, 0, &z); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } diff -r f076da3c7a6f -r a6410aad6eab src/njs_regexp.h --- a/src/njs_regexp.h Fri Sep 29 21:41:34 2023 -0700 +++ b/src/njs_regexp.h Tue Oct 03 00:39:04 2023 -0700 @@ -17,8 +17,6 @@ njs_regexp_pattern_t *njs_regexp_pattern njs_int_t njs_regexp_match(njs_vm_t *vm, njs_regex_t *regex, const u_char *subject, size_t off, size_t len, njs_regex_match_data_t *d); njs_regexp_t *njs_regexp_alloc(njs_vm_t *vm, njs_regexp_pattern_t *pattern); -njs_int_t njs_regexp_exec(njs_vm_t *vm, njs_value_t *r, njs_value_t *s, - njs_value_t *retval); njs_int_t njs_regexp_prototype_exec(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); njs_int_t njs_regexp_prototype_symbol_replace(njs_vm_t *vm, From xeioex at nginx.com Wed Oct 4 20:55:00 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Wed, 04 Oct 2023 20:55:00 +0000 Subject: [njs] Improved memory footprint of RegExp.prototype.replace(). Message-ID: details: https://hg.nginx.org/njs/rev/cf85d0f8640a branches: changeset: 2214:cf85d0f8640a user: Dmitry Volyntsev date: Tue Oct 03 18:09:06 2023 -0700 description: Improved memory footprint of RegExp.prototype.replace(). 
diffstat: src/njs_flathsh.c | 9 +++++++++ src/njs_flathsh.h | 1 + src/njs_regexp.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 0 deletions(-) diffs (104 lines): diff -r a6410aad6eab -r cf85d0f8640a src/njs_flathsh.c --- a/src/njs_flathsh.c Tue Oct 03 00:39:04 2023 -0700 +++ b/src/njs_flathsh.c Tue Oct 03 18:09:06 2023 -0700 @@ -162,6 +162,15 @@ njs_flathsh_new(njs_flathsh_query_t *fhq } +void +njs_flathsh_destroy(njs_flathsh_t *fh, njs_flathsh_query_t *fhq) +{ + njs_flathsh_free(fhq, njs_flathsh_chunk(fh->slot)); + + fh->slot = NULL; +} + + static njs_flathsh_descr_t * njs_flathsh_alloc(njs_flathsh_query_t *fhq, size_t hash_size, size_t elts_size) { diff -r a6410aad6eab -r cf85d0f8640a src/njs_flathsh.h --- a/src/njs_flathsh.h Tue Oct 03 00:39:04 2023 -0700 +++ b/src/njs_flathsh.h Tue Oct 03 18:09:06 2023 -0700 @@ -130,6 +130,7 @@ NJS_EXPORT njs_flathsh_elt_t *njs_flaths njs_flathsh_query_t *fhq); NJS_EXPORT njs_flathsh_descr_t *njs_flathsh_new(njs_flathsh_query_t *fhq); +NJS_EXPORT void njs_flathsh_destroy(njs_flathsh_t *fh, njs_flathsh_query_t *fhq); /* Temporary backward compatibility .*/ diff -r a6410aad6eab -r cf85d0f8640a src/njs_regexp.c --- a/src/njs_regexp.c Tue Oct 03 00:39:04 2023 -0700 +++ b/src/njs_regexp.c Tue Oct 03 18:09:06 2023 -0700 @@ -1182,6 +1182,50 @@ done: } +static void +njs_regexp_exec_result_free(njs_vm_t *vm, njs_array_t *result) +{ + njs_uint_t n; + njs_value_t *start; + njs_flathsh_t *hash; + njs_object_prop_t *prop; + njs_lvlhsh_each_t lhe; + njs_lvlhsh_query_t lhq; + + if (result->object.fast_array) { + start = result->start; + + for (n = 0; n < result->length; n++) { + if (start[n].short_string.size == NJS_STRING_LONG) { + njs_mp_free(vm->mem_pool, start[n].long_string.data); + } + } + } + + njs_lvlhsh_each_init(&lhe, &njs_object_hash_proto); + + hash = &result->object.hash; + + for ( ;; ) { + prop = njs_flathsh_each(hash, &lhe); + + if (prop == NULL) { + break; + } + + 
njs_mp_free(vm->mem_pool, prop); + } + + + lhq.pool = vm->mem_pool; + lhq.proto = &njs_object_hash_proto; + + njs_flathsh_destroy(hash, &lhq); + + njs_array_destroy(vm, result); +} + + njs_int_t njs_regexp_prototype_exec(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) @@ -1497,6 +1541,10 @@ njs_regexp_prototype_symbol_replace(njs_ arguments, ncaptures, &groups, replace, retval); + if (njs_object_slots(r)) { + njs_regexp_exec_result_free(vm, njs_array(r)); + } + } else { ret = njs_array_expand(vm, array, 0, njs_is_defined(&groups) ? 3 : 2); @@ -1517,6 +1565,8 @@ njs_regexp_prototype_symbol_replace(njs_ arguments, n, retval); } + njs_array_destroy(vm, array); + if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } From xeioex at nginx.com Wed Oct 4 20:55:02 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Wed, 04 Oct 2023 20:55:02 +0000 Subject: [njs] Improved memory footprint of RegExp.prototype.split(). Message-ID: details: https://hg.nginx.org/njs/rev/c0aad58cfadb branches: changeset: 2215:c0aad58cfadb user: Dmitry Volyntsev date: Wed Oct 04 13:35:38 2023 -0700 description: Improved memory footprint of RegExp.prototype.split(). 
diffstat: src/njs_regexp.c | 16 ++++++++++++++++ 1 files changed, 16 insertions(+), 0 deletions(-) diffs (44 lines): diff -r cf85d0f8640a -r c0aad58cfadb src/njs_regexp.c --- a/src/njs_regexp.c Tue Oct 03 18:09:06 2023 -0700 +++ b/src/njs_regexp.c Wed Oct 04 13:35:38 2023 -0700 @@ -1773,6 +1773,10 @@ njs_regexp_prototype_symbol_split(njs_vm e = njs_min(e, length); if (e == p) { + if (njs_object_slots(&z)) { + njs_regexp_exec_result_free(vm, njs_array(&z)); + } + q = q + 1; continue; } @@ -1794,6 +1798,10 @@ njs_regexp_prototype_symbol_split(njs_vm } if (array->length == limit) { + if (njs_object_slots(&z)) { + njs_regexp_exec_result_free(vm, njs_array(&z)); + } + goto done; } @@ -1818,10 +1826,18 @@ njs_regexp_prototype_symbol_split(njs_vm } if (array->length == limit) { + if (njs_object_slots(&z)) { + njs_regexp_exec_result_free(vm, njs_array(&z)); + } + goto done; } } + if (njs_object_slots(&z)) { + njs_regexp_exec_result_free(vm, njs_array(&z)); + } + q = p; } From leamovret at gmail.com Wed Oct 4 21:32:26 2023 From: leamovret at gmail.com (Toshihito Kikuchi) Date: Wed, 4 Oct 2023 14:32:26 -0700 Subject: [PATCH] Auth basic: Cache credentials if auth_basic_user_file is static In-Reply-To: References: Message-ID: Hi, On Tue, Oct 3, 2023 at 6:03 PM Maxim Dounin wrote: > > Hello! > > On Tue, Oct 03, 2023 at 03:46:05PM -0700, Toshihito Kikuchi wrote: > > > # HG changeset patch > > # User Toshihito Kikuchi > > # Date 1696359541 25200 > > # Tue Oct 03 11:59:01 2023 -0700 > > # Node ID e397ea6cfa85e85ae0865c5061397dc295fb7df1 > > # Parent 3db945fda515014d220151046d02f3960bcfca0a > > Auth basic: Cache credentials if auth_basic_user_file is static. > > > > In the current design, when auth_basic is on, every HTTP request triggers > > file I/O (open, read, close) to the file specified in auth_basic_user_file. > > Probably this is to allow auth_basic_user_file to contain variables. 
> > > > If the value is just a static text, however, there is no reason to read the > > same file every request in every worker process. It unnecessarily consumes > > system resources. > > > > With this patch, if auth_basic_user_file does not have any variables, we > > cache its content in the location context at configuration time and use it > > in all subsequent requests. If auth_basic_user_file contains variables, we keep > > the original behavior. > > As currently implemented, auth_basic_user_file is read at runtime, > making it possible to change users and their passwords - which is > a relatively common task - without reloading nginx itself. And > this behaviour matches the one in Apache, which does the same. > Changing this behaviour to read the password file while loading > configuration (so any changes to the file won't be applied unless > nginx is reloaded) would certainly break POLA, and needs some > really good justification. Thank you for your comments! I understood the risk of changing the default behavior. Then the right approach would be to introduce a new directive such as "auth_basic_preload_mode" for users to opt in. Does that make sense? For justification, let me explain our use case. We're a blockchain node runner hosting private endpoints to multiple blockchains such as Ethereum, Polygon, etc. Technically each blockchain is a JSON-RPC server over HTTP, and we put them behind Nginx with Basic Authentication to prevent free riders from using our endpoints for free. Keeping the best performance is crucial for our business. We are observing a performance improvement after deploying this patch to our infrastructure. We believe this patch benefits other blockchain node runners and the industry. > > Further, in typical setups the file is effectively cached by the > OS itself, making the I/O operations mentioned almost free, > especially compared to costs of typical password hash > calculations. 
In our use case, prioritizing performance over the freerider risk, we use the plaintext format in our credential file. File I/O may be ignorable compared to password hash calculations, but we cannot say it's almost free because it still triggers context switches to execute syscalls, which is expensive. Thanks, Toshihito Kikuchi From xeioex at nginx.com Wed Oct 4 22:23:52 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Wed, 04 Oct 2023 22:23:52 +0000 Subject: [njs] XML: fixed exception handling. Message-ID: details: https://hg.nginx.org/njs/rev/632002c161b1 branches: changeset: 2216:632002c161b1 user: Dmitry Volyntsev date: Wed Oct 04 15:00:02 2023 -0700 description: XML: fixed exception handling. This closes #675 issue on Github. diffstat: external/njs_xml_module.c | 2 +- src/test/njs_unit_test.c | 4 ++++ 2 files changed, 5 insertions(+), 1 deletions(-) diffs (26 lines): diff -r c0aad58cfadb -r 632002c161b1 external/njs_xml_module.c --- a/external/njs_xml_module.c Wed Oct 04 13:35:38 2023 -0700 +++ b/external/njs_xml_module.c Wed Oct 04 15:00:02 2023 -0700 @@ -2000,7 +2000,7 @@ njs_xml_error(njs_vm_t *vm, njs_xml_doc_ err->int2); } - njs_vm_error(vm, "%s", p - errstr, errstr); + njs_vm_error(vm, "%*s", p - errstr, errstr); } diff -r c0aad58cfadb -r 632002c161b1 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Wed Oct 04 13:35:38 2023 -0700 +++ b/src/test/njs_unit_test.c Wed Oct 04 15:00:02 2023 -0700 @@ -22035,6 +22035,10 @@ static njs_unit_test_t njs_xml_test[] = njs_str("FOO,BAR,0,2") }, { njs_str("const xml = require('xml');" + "let doc = xml.parse(`GARBAGE`)"), + njs_str("Error: failed to parse XML (libxml2: \"Start tag expected, '<' not found\" at 1:1)") }, + + { njs_str("const xml = require('xml');" "let doc = xml.parse(`TEXT`);" "doc.r.$text"), njs_str("TEXT") }, From nojima at ynojima.com Thu Oct 5 01:51:26 2023 From: nojima at ynojima.com (Yusuke Nojima) Date: Thu, 5 Oct 2023 10:51:26 +0900 Subject: [PATCH] Improve performance 
when starting nginx with a lot of locations In-Reply-To: References: Message-ID: Thank you for your comment! > Could you please provide some more details about the use case, > such as how locations are used, and what is the approximate number > of locations being used? Our team provides development environments to our company's engineers and QA. In this environment, engineers and QA can freely create VMs and deploy applications on them. Our nginx has the role of routing requests from the internet to all applications deployed in this environment. Additionally, it allows setting IP address restrictions, BASIC authentication, TLS client authentication, and other configurations for each application. To implement these requirements, we generate a location for each application. Currently, there are approximately 40,000 locations in our environment. > Further, since each location contains configuration for > all modules, such configurations are expected to require a lot of > memory Each of our nginx processes was consuming 5GB of memory in terms of resident size. This is not a problem as our servers have sufficient memory. > Rather, I would suggest recursive top-bottom merge sort implementation > instead, which is much simpler and uses stack as temporary storage > (so it'll naturally die if there will be a queue which requires > more space for sorting than we have). > > Please take a look if it works for you: I think this implementation is simple and easy to understand. Although the number of traversals of the list will increase compared to bottom-up, it will not affect the order. I believe this will provide sufficient optimization in terms of speed. 2023年9月30日(土) 12:38 Maxim Dounin : > > Hello! 
> > On Fri, Sep 22, 2023 at 03:58:41PM +0900, Yusuke Nojima wrote: > > > # HG changeset patch > > # User Yusuke Nojima > > # Date 1679555707 -32400 > > # Thu Mar 23 16:15:07 2023 +0900 > > # Node ID 6aac98fb135e47ca9cf7ad7d780cf4a10e9aa55c > > # Parent 8771d35d55d0a2b1cefaab04401d6f837f5a05a2 > > Improve performance when starting nginx with a lot of locations > > > > Our team has a configuration file with a very large number of > > locations, and we found that starting nginx with this file takes an > > unacceptable amount of time. After investigating the issue, we > > discovered that the root cause of the long startup time is the sorting > > of the location list. > > Interesting. > > Could you please provide some more details about the use case, > such as how locations are used, and what is the approximate number > of locations being used? > > In my practice, it is extremely uncommon to use more than 1k-10k > prefix locations (and even these numbers are huge for normal > setups). Further, since each location contains configuration for > all modules, such configurations are expected to require a lot of > memory (currently about 5-10KB per empty location, so about > 50-100MB per 10k locations, and 0.5-1G per 100k locations). > Accordingly, using other approaches such as map (assuming exact > match is actually needed) might be beneficial regardless of the > sorting costs. > > Nevertheless, swapping the sorting algorithm to a faster one looks > like an obvious improvement. > > > > > Currently, the sorting algorithm used in nginx is insertion sort, > > which requires O(n^2) time for n locations. We have modified the > > sorting algorithm to use merge sort instead, which has a time > > complexity of O(n log n). > > > > We have tested the modified code using micro-benchmarks and confirmed > > that the new algorithm improves nginx startup time significantly > > (shown below). 
We believe that this change would be valuable for other > > users who are experiencing similar issues. > > > > > > Table: nginx startup time in seconds > > > > n current patched > > 2000 0.033 0.018 > > 4000 0.047 0.028 > > 6000 0.062 0.038 > > 8000 0.079 0.050 > > 10000 0.091 0.065 > > 12000 0.367 0.081 > > 14000 0.683 0.086 > > 16000 0.899 0.097 > > 18000 1.145 0.110 > > 20000 1.449 0.122 > > 22000 1.650 0.137 > > 24000 2.155 0.151 > > 26000 3.096 0.155 > > 28000 3.711 0.168 > > 30000 3.539 0.184 > > 32000 3.980 0.193 > > 34000 4.543 0.208 > > 36000 4.349 0.217 > > 38000 5.021 0.229 > > 40000 4.918 0.245 > > 42000 4.835 0.256 > > 44000 5.159 0.262 > > 46000 5.802 0.331 > > 48000 6.205 0.295 > > 50000 5.701 0.308 > > 52000 5.992 0.335 > > 54000 6.561 0.323 > > 56000 6.856 0.333 > > 58000 6.515 0.347 > > 60000 7.051 0.359 > > 62000 6.956 0.377 > > 64000 7.376 0.376 > > 66000 7.506 0.404 > > 68000 7.292 0.407 > > 70000 7.422 0.461 > > 72000 10.090 0.443 > > 74000 18.505 0.463 > > 76000 11.857 0.462 > > 78000 9.752 0.470 > > 80000 12.485 0.481 > > 82000 11.027 0.498 > > 84000 9.804 0.523 > > 86000 8.482 0.515 > > 88000 9.838 0.560 > > 90000 12.341 0.546 > > 92000 13.881 0.648 > > 94000 8.309 0.635 > > 96000 8.854 0.650 > > 98000 12.871 0.674 > > 100000 8.261 0.698 > > This probably can be reduced to something like 3-5 data points. > > > > > diff -r 8771d35d55d0 -r 6aac98fb135e src/core/ngx_queue.c > > --- a/src/core/ngx_queue.c Fri Mar 10 07:43:50 2023 +0300 > > +++ b/src/core/ngx_queue.c Thu Mar 23 16:15:07 2023 +0900 > > @@ -45,36 +45,103 @@ > > } > > > > > > -/* the stable insertion sort */ > > +/* merge queue2 into queue1. queue2 becomes empty after merge. */ > > + > > +static void > > +ngx_queue_merge(ngx_queue_t *queue1, ngx_queue_t *queue2, > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > +{ > > + ngx_queue_t *p1, *p2; > > Nitpicking: there are various style issues here and in other places. 
> > > + > > + p1 = ngx_queue_head(queue1); > > + p2 = ngx_queue_head(queue2); > > + > > + while (p1 != ngx_queue_sentinel(queue1) > > + && p2 != ngx_queue_sentinel(queue2)) { > > + > > + if (cmp(p1, p2) > 0) { > > + ngx_queue_t *next, *prev; > > + > > + next = ngx_queue_next(p2); > > + ngx_queue_remove(p2); > > + prev = ngx_queue_prev(p1); > > + ngx_queue_insert_after(prev, p2); > > + p2 = next; > > Nitpicking: there is no need to preserve "next" here, since p2 is > always the head of queue2 and, and the next element can be > obtained by ngx_queue_head(queue2). > > Also, instead of obtaining "prev" and using > ngx_queue_insert_after() it would be easier to use > ngx_queue_insert_before(). It is not currently defined, but it is > trivial to define one: it is an alias to ngx_queue_insert_tail(), > much like ngx_queue_insert_after() is an alias to > ngx_queue_insert_head(). > > > + } else { > > + p1 = ngx_queue_next(p1); > > + } > > + } > > + if (p2 != ngx_queue_sentinel(queue2)) { > > + ngx_queue_add(queue1, queue2); > > + ngx_queue_init(queue2); > > + } > > +} > > + > > + > > +/* move all elements from src to dest. dest should be empty before call. */ > > + > > +static void > > +ngx_queue_move(ngx_queue_t *dest, ngx_queue_t *src) > > +{ > > + *dest = *src; > > + ngx_queue_init(src); > > + > > + if (dest->next == src) { > > + dest->next = dest; > > + } else { > > + dest->next->prev = dest; > > + } > > + if (dest->prev == src) { > > + dest->prev = dest; > > + } else { > > + dest->prev->next = dest; > > + } > > +} > > This function looks strange to me. There is the ngx_queue_add() > macro, which probably should be used instead (if needed). 
> > > + > > + > > +/* the stable merge sort */ > > > > void > > ngx_queue_sort(ngx_queue_t *queue, > > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > { > > - ngx_queue_t *q, *prev, *next; > > + ngx_queue_t merged[64], *p, *last; > > > > - q = ngx_queue_head(queue); > > - > > - if (q == ngx_queue_last(queue)) { > > + if (ngx_queue_head(queue) == ngx_queue_last(queue)) { > > return; > > } > > > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { > > + last = merged; > > > > - prev = ngx_queue_prev(q); > > - next = ngx_queue_next(q); > > + while (!ngx_queue_empty(queue)) { > > + /* > > + * Loop invariant: > > + * merged[i] must have exactly 0 or 2^i elements in sorted order. > > + * For each iteration, we take one element from the given queue and > > + * insert it into merged without violating the invariant condition. > > + */ > > > > - ngx_queue_remove(q); > > + ngx_queue_t carry, *h; > > + > > + h = ngx_queue_head(queue); > > + ngx_queue_remove(h); > > + ngx_queue_init(&carry); > > + ngx_queue_insert_head(&carry, h); > > > > - do { > > - if (cmp(prev, q) <= 0) { > > - break; > > - } > > + for (p = merged; p != last && !ngx_queue_empty(p); p++) { > > + ngx_queue_merge(p, &carry, cmp); > > + ngx_queue_move(&carry, p); > > + } > > + if (p == last) { > > + ngx_queue_init(last); > > + last++; > > + } > > + ngx_queue_move(p, &carry); > > + } > > > > - prev = ngx_queue_prev(prev); > > - > > - } while (prev != ngx_queue_sentinel(queue)); > > - > > - ngx_queue_insert_after(prev, q); > > + /* Merge all queues into one queue */ > > + for (p = merged + 1; p != last; p++) { > > + ngx_queue_merge(p, p-1, cmp); > > } > > + ngx_queue_move(queue, last-1); > > } > > While bottom-up merge sort implementation might be more efficient, > I find it disturbing to use fixed array of queues without any > checks if we are within the array bounds. 
> > Rather, I would suggest recursive top-bottom merge sort implementation > instead, which is much simpler and uses stack as temporary storage > (so it'll naturally die if there will be a queue which requires > more space for sorting than we have). > > Please take a look if it works for you: > > diff --git a/src/core/ngx_queue.c b/src/core/ngx_queue.c > --- a/src/core/ngx_queue.c > +++ b/src/core/ngx_queue.c > @@ -9,6 +9,10 @@ > #include > > > +static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)); > + > + > /* > * find the middle queue element if the queue has odd number of elements > * or the first element of the queue's second part otherwise > @@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue) > } > > > -/* the stable insertion sort */ > +/* the stable merge sort */ > > void > ngx_queue_sort(ngx_queue_t *queue, > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > { > - ngx_queue_t *q, *prev, *next; > + ngx_queue_t *q, tail; > > q = ngx_queue_head(queue); > > @@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue, > return; > } > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { > + q = ngx_queue_middle(queue); > + > + ngx_queue_split(queue, q, &tail); > + > + ngx_queue_sort(queue, cmp); > + ngx_queue_sort(&tail, cmp); > + > + ngx_queue_merge(queue, &tail, cmp); > +} > > - prev = ngx_queue_prev(q); > - next = ngx_queue_next(q); > > - ngx_queue_remove(q); > +static void > +ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > +{ > + ngx_queue_t *q1, *q2; > + > + q1 = ngx_queue_head(queue); > + q2 = ngx_queue_head(tail); > > - do { > - if (cmp(prev, q) <= 0) { > - break; > - } > + for ( ;; ) { > + if (q1 == ngx_queue_sentinel(queue)) { > + ngx_queue_add(queue, tail); > + break; > + } > + > + if (q2 == ngx_queue_sentinel(tail)) { > + break; > + } > > - prev = ngx_queue_prev(prev); > 
+ if (cmp(q1, q2) <= 0) { > + q1 = ngx_queue_next(q1); > + continue; > + } > > - } while (prev != ngx_queue_sentinel(queue)); > + ngx_queue_remove(q2); > + ngx_queue_insert_before(q1, q2); > > - ngx_queue_insert_after(prev, q); > + q2 = ngx_queue_head(tail); > } > } > diff --git a/src/core/ngx_queue.h b/src/core/ngx_queue.h > --- a/src/core/ngx_queue.h > +++ b/src/core/ngx_queue.h > @@ -47,6 +47,9 @@ struct ngx_queue_s { > (h)->prev = x > > > +#define ngx_queue_insert_before ngx_queue_insert_tail > + > + > #define ngx_queue_head(h) \ > (h)->next > > > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From chipitsine at gmail.com Thu Oct 5 06:17:39 2023 From: chipitsine at gmail.com (=?UTF-8?B?0JjQu9GM0Y8g0KjQuNC/0LjRhtC40L0=?=) Date: Thu, 5 Oct 2023 08:17:39 +0200 Subject: [PATCH] Improve performance when starting nginx with a lot of locations In-Reply-To: References: Message-ID: чт, 5 окт. 2023 г. в 03:51, Yusuke Nojima : > Thank you for your comment! > > > Could you please provide some more details about the use case, > > such as how locations are used, and what is the approximate number > > of locations being used? > > Our team provides development environments to our company's engineers and > QA. > In this environment, engineers and QA can freely create VMs and deploy > applications on them. > > Our nginx has the role of routing requests from the internet to all > applications deployed in this environment. > Additionally, it allows setting IP address restrictions, BASIC > authentication, TLS client authentication, and other configurations > for each application. > BASIC auth is known to be CPU consuming as well, hash is calculated on every http request. what is a ratio of authenticated requests ? do you see high CPU consumption ? > > To implement these requirements, we generate a location for each > application. 
> Currently, there are approximately 40,000 locations in our environment. > > > Further, since each location contains configuration for > > all modules, such configurations are expected to require a lot of > > memory > > Each of our nginx processes was consuming 5GB of memory in terms of > resident size. > This is not a problem as our servers have sufficient memory. > > > Rather, I would suggest recursive top-bottom merge sort implementation > > instead, which is much simpler and uses stack as temporary storage > > (so it'll naturally die if there will be a queue which requires > > more space for sorting than we have). > > > > Please take a look if it works for you: > > I think this implementation is simple and easy to understand. > Although the number of traversals of the list will increase compared > to bottom-up, it will not affect the order. > I believe this will provide sufficient optimization in terms of speed. > > > 2023年9月30日(土) 12:38 Maxim Dounin : > > > > Hello! > > > > On Fri, Sep 22, 2023 at 03:58:41PM +0900, Yusuke Nojima wrote: > > > > > # HG changeset patch > > > # User Yusuke Nojima > > > # Date 1679555707 -32400 > > > # Thu Mar 23 16:15:07 2023 +0900 > > > # Node ID 6aac98fb135e47ca9cf7ad7d780cf4a10e9aa55c > > > # Parent 8771d35d55d0a2b1cefaab04401d6f837f5a05a2 > > > Improve performance when starting nginx with a lot of locations > > > > > > Our team has a configuration file with a very large number of > > > locations, and we found that starting nginx with this file takes an > > > unacceptable amount of time. After investigating the issue, we > > > discovered that the root cause of the long startup time is the sorting > > > of the location list. > > > > Interesting. > > > > Could you please provide some more details about the use case, > > such as how locations are used, and what is the approximate number > > of locations being used? 
> > > > In my practice, it is extremely uncommon to use more than 1k-10k > > prefix locations (and even these numbers are huge for normal > > setups). Further, since each location contains configuration for > > all modules, such configurations are expected to require a lot of > > memory (currently about 5-10KB per empty location, so about > > 50-100MB per 10k locations, and 0.5-1G per 100k locations). > > Accordingly, using other approaches such as map (assuming exact > > match is actually needed) might be beneficial regardless of the > > sorting costs. > > > > Nevertheless, swapping the sorting algorithm to a faster one looks > > like an obvious improvement. > > > > > > > > Currently, the sorting algorithm used in nginx is insertion sort, > > > which requires O(n^2) time for n locations. We have modified the > > > sorting algorithm to use merge sort instead, which has a time > > > complexity of O(n log n). > > > > > > We have tested the modified code using micro-benchmarks and confirmed > > > that the new algorithm improves nginx startup time significantly > > > (shown below). We believe that this change would be valuable for other > > > users who are experiencing similar issues. 
> > > > > > > > > Table: nginx startup time in seconds > > > > > > n current patched > > > 2000 0.033 0.018 > > > 4000 0.047 0.028 > > > 6000 0.062 0.038 > > > 8000 0.079 0.050 > > > 10000 0.091 0.065 > > > 12000 0.367 0.081 > > > 14000 0.683 0.086 > > > 16000 0.899 0.097 > > > 18000 1.145 0.110 > > > 20000 1.449 0.122 > > > 22000 1.650 0.137 > > > 24000 2.155 0.151 > > > 26000 3.096 0.155 > > > 28000 3.711 0.168 > > > 30000 3.539 0.184 > > > 32000 3.980 0.193 > > > 34000 4.543 0.208 > > > 36000 4.349 0.217 > > > 38000 5.021 0.229 > > > 40000 4.918 0.245 > > > 42000 4.835 0.256 > > > 44000 5.159 0.262 > > > 46000 5.802 0.331 > > > 48000 6.205 0.295 > > > 50000 5.701 0.308 > > > 52000 5.992 0.335 > > > 54000 6.561 0.323 > > > 56000 6.856 0.333 > > > 58000 6.515 0.347 > > > 60000 7.051 0.359 > > > 62000 6.956 0.377 > > > 64000 7.376 0.376 > > > 66000 7.506 0.404 > > > 68000 7.292 0.407 > > > 70000 7.422 0.461 > > > 72000 10.090 0.443 > > > 74000 18.505 0.463 > > > 76000 11.857 0.462 > > > 78000 9.752 0.470 > > > 80000 12.485 0.481 > > > 82000 11.027 0.498 > > > 84000 9.804 0.523 > > > 86000 8.482 0.515 > > > 88000 9.838 0.560 > > > 90000 12.341 0.546 > > > 92000 13.881 0.648 > > > 94000 8.309 0.635 > > > 96000 8.854 0.650 > > > 98000 12.871 0.674 > > > 100000 8.261 0.698 > > > > This probably can be reduced to something like 3-5 data points. > > > > > > > > diff -r 8771d35d55d0 -r 6aac98fb135e src/core/ngx_queue.c > > > --- a/src/core/ngx_queue.c Fri Mar 10 07:43:50 2023 +0300 > > > +++ b/src/core/ngx_queue.c Thu Mar 23 16:15:07 2023 +0900 > > > @@ -45,36 +45,103 @@ > > > } > > > > > > > > > -/* the stable insertion sort */ > > > +/* merge queue2 into queue1. queue2 becomes empty after merge. */ > > > + > > > +static void > > > +ngx_queue_merge(ngx_queue_t *queue1, ngx_queue_t *queue2, > > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > > +{ > > > + ngx_queue_t *p1, *p2; > > > > Nitpicking: there are various style issues here and in other places. 
> > > > > + > > > + p1 = ngx_queue_head(queue1); > > > + p2 = ngx_queue_head(queue2); > > > + > > > + while (p1 != ngx_queue_sentinel(queue1) > > > + && p2 != ngx_queue_sentinel(queue2)) { > > > + > > > + if (cmp(p1, p2) > 0) { > > > + ngx_queue_t *next, *prev; > > > + > > > + next = ngx_queue_next(p2); > > > + ngx_queue_remove(p2); > > > + prev = ngx_queue_prev(p1); > > > + ngx_queue_insert_after(prev, p2); > > > + p2 = next; > > > > Nitpicking: there is no need to preserve "next" here, since p2 is > > always the head of queue2 and, and the next element can be > > obtained by ngx_queue_head(queue2). > > > > Also, instead of obtaining "prev" and using > > ngx_queue_insert_after() it would be easier to use > > ngx_queue_insert_before(). It is not currently defined, but it is > > trivial to define one: it is an alias to ngx_queue_insert_tail(), > > much like ngx_queue_insert_after() is an alias to > > ngx_queue_insert_head(). > > > > > + } else { > > > + p1 = ngx_queue_next(p1); > > > + } > > > + } > > > + if (p2 != ngx_queue_sentinel(queue2)) { > > > + ngx_queue_add(queue1, queue2); > > > + ngx_queue_init(queue2); > > > + } > > > +} > > > + > > > + > > > +/* move all elements from src to dest. dest should be empty before > call. */ > > > + > > > +static void > > > +ngx_queue_move(ngx_queue_t *dest, ngx_queue_t *src) > > > +{ > > > + *dest = *src; > > > + ngx_queue_init(src); > > > + > > > + if (dest->next == src) { > > > + dest->next = dest; > > > + } else { > > > + dest->next->prev = dest; > > > + } > > > + if (dest->prev == src) { > > > + dest->prev = dest; > > > + } else { > > > + dest->prev->next = dest; > > > + } > > > +} > > > > This function looks strange to me. There is the ngx_queue_add() > > macro, which probably should be used instead (if needed). 
> > > > > + > > > + > > > +/* the stable merge sort */ > > > > > > void > > > ngx_queue_sort(ngx_queue_t *queue, > > > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > > { > > > - ngx_queue_t *q, *prev, *next; > > > + ngx_queue_t merged[64], *p, *last; > > > > > > - q = ngx_queue_head(queue); > > > - > > > - if (q == ngx_queue_last(queue)) { > > > + if (ngx_queue_head(queue) == ngx_queue_last(queue)) { > > > return; > > > } > > > > > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = > next) { > > > + last = merged; > > > > > > - prev = ngx_queue_prev(q); > > > - next = ngx_queue_next(q); > > > + while (!ngx_queue_empty(queue)) { > > > + /* > > > + * Loop invariant: > > > + * merged[i] must have exactly 0 or 2^i elements in sorted > order. > > > + * For each iteration, we take one element from the given > queue and > > > + * insert it into merged without violating the invariant > condition. > > > + */ > > > > > > - ngx_queue_remove(q); > > > + ngx_queue_t carry, *h; > > > + > > > + h = ngx_queue_head(queue); > > > + ngx_queue_remove(h); > > > + ngx_queue_init(&carry); > > > + ngx_queue_insert_head(&carry, h); > > > > > > - do { > > > - if (cmp(prev, q) <= 0) { > > > - break; > > > - } > > > + for (p = merged; p != last && !ngx_queue_empty(p); p++) { > > > + ngx_queue_merge(p, &carry, cmp); > > > + ngx_queue_move(&carry, p); > > > + } > > > + if (p == last) { > > > + ngx_queue_init(last); > > > + last++; > > > + } > > > + ngx_queue_move(p, &carry); > > > + } > > > > > > - prev = ngx_queue_prev(prev); > > > - > > > - } while (prev != ngx_queue_sentinel(queue)); > > > - > > > - ngx_queue_insert_after(prev, q); > > > + /* Merge all queues into one queue */ > > > + for (p = merged + 1; p != last; p++) { > > > + ngx_queue_merge(p, p-1, cmp); > > > } > > > + ngx_queue_move(queue, last-1); > > > } > > > > While bottom-up merge sort implementation might be more efficient, > > I find it disturbing to use fixed array of queues without any > > 
checks if we are within the array bounds. > > > > Rather, I would suggest recursive top-bottom merge sort implementation > > instead, which is much simpler and uses stack as temporary storage > > (so it'll naturally die if there will be a queue which requires > > more space for sorting than we have). > > > > Please take a look if it works for you: > > > > diff --git a/src/core/ngx_queue.c b/src/core/ngx_queue.c > > --- a/src/core/ngx_queue.c > > +++ b/src/core/ngx_queue.c > > @@ -9,6 +9,10 @@ > > #include > > > > > > +static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)); > > + > > + > > /* > > * find the middle queue element if the queue has odd number of elements > > * or the first element of the queue's second part otherwise > > @@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue) > > } > > > > > > -/* the stable insertion sort */ > > +/* the stable merge sort */ > > > > void > > ngx_queue_sort(ngx_queue_t *queue, > > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > { > > - ngx_queue_t *q, *prev, *next; > > + ngx_queue_t *q, tail; > > > > q = ngx_queue_head(queue); > > > > @@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue, > > return; > > } > > > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = > next) { > > + q = ngx_queue_middle(queue); > > + > > + ngx_queue_split(queue, q, &tail); > > + > > + ngx_queue_sort(queue, cmp); > > + ngx_queue_sort(&tail, cmp); > > + > > + ngx_queue_merge(queue, &tail, cmp); > > +} > > > > - prev = ngx_queue_prev(q); > > - next = ngx_queue_next(q); > > > > - ngx_queue_remove(q); > > +static void > > +ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > +{ > > + ngx_queue_t *q1, *q2; > > + > > + q1 = ngx_queue_head(queue); > > + q2 = ngx_queue_head(tail); > > > > - do { > > - if (cmp(prev, q) <= 0) { > > - break; > > - } > > + for ( ;; ) { > > + 
if (q1 == ngx_queue_sentinel(queue)) { > > + ngx_queue_add(queue, tail); > > + break; > > + } > > + > > + if (q2 == ngx_queue_sentinel(tail)) { > > + break; > > + } > > > > - prev = ngx_queue_prev(prev); > > + if (cmp(q1, q2) <= 0) { > > + q1 = ngx_queue_next(q1); > > + continue; > > + } > > > > - } while (prev != ngx_queue_sentinel(queue)); > > + ngx_queue_remove(q2); > > + ngx_queue_insert_before(q1, q2); > > > > - ngx_queue_insert_after(prev, q); > > + q2 = ngx_queue_head(tail); > > } > > } > > diff --git a/src/core/ngx_queue.h b/src/core/ngx_queue.h > > --- a/src/core/ngx_queue.h > > +++ b/src/core/ngx_queue.h > > @@ -47,6 +47,9 @@ struct ngx_queue_s { > > (h)->prev = x > > > > > > +#define ngx_queue_insert_before ngx_queue_insert_tail > > + > > + > > #define ngx_queue_head(h) > \ > > (h)->next > > > > > > > > -- > > Maxim Dounin > > http://mdounin.ru/ > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From nojima at ynojima.com Fri Oct 6 02:39:07 2023 From: nojima at ynojima.com (Yusuke Nojima) Date: Fri, 6 Oct 2023 11:39:07 +0900 Subject: [PATCH] Improve performance when starting nginx with a lot of locations In-Reply-To: References: Message-ID: > BASIC auth is known to be CPU consuming as well, hash is calculated on every http request. > what is a ratio of authenticated requests ? do you see high CPU consumption ? First of all, this patch is related to the startup time of nginx and not to the processing time per request. The processing speed per request is sufficiently reasonable for our use case. In our environment, requests with BASIC authentication are very rare. 
Most users prefer the simpler IP address restriction method. 2023年10月5日(木) 15:17 Илья Шипицин : > > > > чт, 5 окт. 2023 г. в 03:51, Yusuke Nojima : >> >> Thank you for your comment! >> >> > Could you please provide some more details about the use case, >> > such as how locations are used, and what is the approximate number >> > of locations being used? >> >> Our team provides development environments to our company's engineers and QA. >> In this environment, engineers and QA can freely create VMs and deploy >> applications on them. >> >> Our nginx has the role of routing requests from the internet to all >> applications deployed in this environment. >> Additionally, it allows setting IP address restrictions, BASIC >> authentication, TLS client authentication, and other configurations >> for each application. > > > BASIC auth is known to be CPU consuming as well, hash is calculated on every http request. > what is a ratio of authenticated requests ? do you see high CPU consumption ? > >> >> >> To implement these requirements, we generate a location for each application. >> Currently, there are approximately 40,000 locations in our environment. >> >> > Further, since each location contains configuration for >> > all modules, such configurations are expected to require a lot of >> > memory >> >> Each of our nginx processes was consuming 5GB of memory in terms of >> resident size. >> This is not a problem as our servers have sufficient memory. >> >> > Rather, I would suggest recursive top-bottom merge sort implementation >> > instead, which is much simpler and uses stack as temporary storage >> > (so it'll naturally die if there will be a queue which requires >> > more space for sorting than we have). >> > >> > Please take a look if it works for you: >> >> I think this implementation is simple and easy to understand. >> Although the number of traversals of the list will increase compared >> to bottom-up, it will not affect the order. 
>> I believe this will provide sufficient optimization in terms of speed. >> >> >> 2023年9月30日(土) 12:38 Maxim Dounin : >> > >> > Hello! >> > >> > On Fri, Sep 22, 2023 at 03:58:41PM +0900, Yusuke Nojima wrote: >> > >> > > # HG changeset patch >> > > # User Yusuke Nojima >> > > # Date 1679555707 -32400 >> > > # Thu Mar 23 16:15:07 2023 +0900 >> > > # Node ID 6aac98fb135e47ca9cf7ad7d780cf4a10e9aa55c >> > > # Parent 8771d35d55d0a2b1cefaab04401d6f837f5a05a2 >> > > Improve performance when starting nginx with a lot of locations >> > > >> > > Our team has a configuration file with a very large number of >> > > locations, and we found that starting nginx with this file takes an >> > > unacceptable amount of time. After investigating the issue, we >> > > discovered that the root cause of the long startup time is the sorting >> > > of the location list. >> > >> > Interesting. >> > >> > Could you please provide some more details about the use case, >> > such as how locations are used, and what is the approximate number >> > of locations being used? >> > >> > In my practice, it is extremely uncommon to use more than 1k-10k >> > prefix locations (and even these numbers are huge for normal >> > setups). Further, since each location contains configuration for >> > all modules, such configurations are expected to require a lot of >> > memory (currently about 5-10KB per empty location, so about >> > 50-100MB per 10k locations, and 0.5-1G per 100k locations). >> > Accordingly, using other approaches such as map (assuming exact >> > match is actually needed) might be beneficial regardless of the >> > sorting costs. >> > >> > Nevertheless, swapping the sorting algorithm to a faster one looks >> > like an obvious improvement. >> > >> > > >> > > Currently, the sorting algorithm used in nginx is insertion sort, >> > > which requires O(n^2) time for n locations. We have modified the >> > > sorting algorithm to use merge sort instead, which has a time >> > > complexity of O(n log n). 
>> > > >> > > We have tested the modified code using micro-benchmarks and confirmed >> > > that the new algorithm improves nginx startup time significantly >> > > (shown below). We believe that this change would be valuable for other >> > > users who are experiencing similar issues. >> > > >> > > >> > > Table: nginx startup time in seconds >> > > >> > > n current patched >> > > 2000 0.033 0.018 >> > > 4000 0.047 0.028 >> > > 6000 0.062 0.038 >> > > 8000 0.079 0.050 >> > > 10000 0.091 0.065 >> > > 12000 0.367 0.081 >> > > 14000 0.683 0.086 >> > > 16000 0.899 0.097 >> > > 18000 1.145 0.110 >> > > 20000 1.449 0.122 >> > > 22000 1.650 0.137 >> > > 24000 2.155 0.151 >> > > 26000 3.096 0.155 >> > > 28000 3.711 0.168 >> > > 30000 3.539 0.184 >> > > 32000 3.980 0.193 >> > > 34000 4.543 0.208 >> > > 36000 4.349 0.217 >> > > 38000 5.021 0.229 >> > > 40000 4.918 0.245 >> > > 42000 4.835 0.256 >> > > 44000 5.159 0.262 >> > > 46000 5.802 0.331 >> > > 48000 6.205 0.295 >> > > 50000 5.701 0.308 >> > > 52000 5.992 0.335 >> > > 54000 6.561 0.323 >> > > 56000 6.856 0.333 >> > > 58000 6.515 0.347 >> > > 60000 7.051 0.359 >> > > 62000 6.956 0.377 >> > > 64000 7.376 0.376 >> > > 66000 7.506 0.404 >> > > 68000 7.292 0.407 >> > > 70000 7.422 0.461 >> > > 72000 10.090 0.443 >> > > 74000 18.505 0.463 >> > > 76000 11.857 0.462 >> > > 78000 9.752 0.470 >> > > 80000 12.485 0.481 >> > > 82000 11.027 0.498 >> > > 84000 9.804 0.523 >> > > 86000 8.482 0.515 >> > > 88000 9.838 0.560 >> > > 90000 12.341 0.546 >> > > 92000 13.881 0.648 >> > > 94000 8.309 0.635 >> > > 96000 8.854 0.650 >> > > 98000 12.871 0.674 >> > > 100000 8.261 0.698 >> > >> > This probably can be reduced to something like 3-5 data points. 
>> > >> > > >> > > diff -r 8771d35d55d0 -r 6aac98fb135e src/core/ngx_queue.c >> > > --- a/src/core/ngx_queue.c Fri Mar 10 07:43:50 2023 +0300 >> > > +++ b/src/core/ngx_queue.c Thu Mar 23 16:15:07 2023 +0900 >> > > @@ -45,36 +45,103 @@ >> > > } >> > > >> > > >> > > -/* the stable insertion sort */ >> > > +/* merge queue2 into queue1. queue2 becomes empty after merge. */ >> > > + >> > > +static void >> > > +ngx_queue_merge(ngx_queue_t *queue1, ngx_queue_t *queue2, >> > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) >> > > +{ >> > > + ngx_queue_t *p1, *p2; >> > >> > Nitpicking: there are various style issues here and in other places. >> > >> > > + >> > > + p1 = ngx_queue_head(queue1); >> > > + p2 = ngx_queue_head(queue2); >> > > + >> > > + while (p1 != ngx_queue_sentinel(queue1) >> > > + && p2 != ngx_queue_sentinel(queue2)) { >> > > + >> > > + if (cmp(p1, p2) > 0) { >> > > + ngx_queue_t *next, *prev; >> > > + >> > > + next = ngx_queue_next(p2); >> > > + ngx_queue_remove(p2); >> > > + prev = ngx_queue_prev(p1); >> > > + ngx_queue_insert_after(prev, p2); >> > > + p2 = next; >> > >> > Nitpicking: there is no need to preserve "next" here, since p2 is >> > always the head of queue2 and, and the next element can be >> > obtained by ngx_queue_head(queue2). >> > >> > Also, instead of obtaining "prev" and using >> > ngx_queue_insert_after() it would be easier to use >> > ngx_queue_insert_before(). It is not currently defined, but it is >> > trivial to define one: it is an alias to ngx_queue_insert_tail(), >> > much like ngx_queue_insert_after() is an alias to >> > ngx_queue_insert_head(). >> > >> > > + } else { >> > > + p1 = ngx_queue_next(p1); >> > > + } >> > > + } >> > > + if (p2 != ngx_queue_sentinel(queue2)) { >> > > + ngx_queue_add(queue1, queue2); >> > > + ngx_queue_init(queue2); >> > > + } >> > > +} >> > > + >> > > + >> > > +/* move all elements from src to dest. dest should be empty before call. 
*/ >> > > + >> > > +static void >> > > +ngx_queue_move(ngx_queue_t *dest, ngx_queue_t *src) >> > > +{ >> > > + *dest = *src; >> > > + ngx_queue_init(src); >> > > + >> > > + if (dest->next == src) { >> > > + dest->next = dest; >> > > + } else { >> > > + dest->next->prev = dest; >> > > + } >> > > + if (dest->prev == src) { >> > > + dest->prev = dest; >> > > + } else { >> > > + dest->prev->next = dest; >> > > + } >> > > +} >> > >> > This function looks strange to me. There is the ngx_queue_add() >> > macro, which probably should be used instead (if needed). >> > >> > > + >> > > + >> > > +/* the stable merge sort */ >> > > >> > > void >> > > ngx_queue_sort(ngx_queue_t *queue, >> > > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) >> > > { >> > > - ngx_queue_t *q, *prev, *next; >> > > + ngx_queue_t merged[64], *p, *last; >> > > >> > > - q = ngx_queue_head(queue); >> > > - >> > > - if (q == ngx_queue_last(queue)) { >> > > + if (ngx_queue_head(queue) == ngx_queue_last(queue)) { >> > > return; >> > > } >> > > >> > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { >> > > + last = merged; >> > > >> > > - prev = ngx_queue_prev(q); >> > > - next = ngx_queue_next(q); >> > > + while (!ngx_queue_empty(queue)) { >> > > + /* >> > > + * Loop invariant: >> > > + * merged[i] must have exactly 0 or 2^i elements in sorted order. >> > > + * For each iteration, we take one element from the given queue and >> > > + * insert it into merged without violating the invariant condition. 
>> > > + */ >> > > >> > > - ngx_queue_remove(q); >> > > + ngx_queue_t carry, *h; >> > > + >> > > + h = ngx_queue_head(queue); >> > > + ngx_queue_remove(h); >> > > + ngx_queue_init(&carry); >> > > + ngx_queue_insert_head(&carry, h); >> > > >> > > - do { >> > > - if (cmp(prev, q) <= 0) { >> > > - break; >> > > - } >> > > + for (p = merged; p != last && !ngx_queue_empty(p); p++) { >> > > + ngx_queue_merge(p, &carry, cmp); >> > > + ngx_queue_move(&carry, p); >> > > + } >> > > + if (p == last) { >> > > + ngx_queue_init(last); >> > > + last++; >> > > + } >> > > + ngx_queue_move(p, &carry); >> > > + } >> > > >> > > - prev = ngx_queue_prev(prev); >> > > - >> > > - } while (prev != ngx_queue_sentinel(queue)); >> > > - >> > > - ngx_queue_insert_after(prev, q); >> > > + /* Merge all queues into one queue */ >> > > + for (p = merged + 1; p != last; p++) { >> > > + ngx_queue_merge(p, p-1, cmp); >> > > } >> > > + ngx_queue_move(queue, last-1); >> > > } >> > >> > While bottom-up merge sort implementation might be more efficient, >> > I find it disturbing to use fixed array of queues without any >> > checks if we are within the array bounds. >> > >> > Rather, I would suggest recursive top-bottom merge sort implementation >> > instead, which is much simpler and uses stack as temporary storage >> > (so it'll naturally die if there will be a queue which requires >> > more space for sorting than we have). 
>> > >> > Please take a look if it works for you: >> > >> > diff --git a/src/core/ngx_queue.c b/src/core/ngx_queue.c >> > --- a/src/core/ngx_queue.c >> > +++ b/src/core/ngx_queue.c >> > @@ -9,6 +9,10 @@ >> > #include >> > >> > >> > +static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, >> > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)); >> > + >> > + >> > /* >> > * find the middle queue element if the queue has odd number of elements >> > * or the first element of the queue's second part otherwise >> > @@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue) >> > } >> > >> > >> > -/* the stable insertion sort */ >> > +/* the stable merge sort */ >> > >> > void >> > ngx_queue_sort(ngx_queue_t *queue, >> > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) >> > { >> > - ngx_queue_t *q, *prev, *next; >> > + ngx_queue_t *q, tail; >> > >> > q = ngx_queue_head(queue); >> > >> > @@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue, >> > return; >> > } >> > >> > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { >> > + q = ngx_queue_middle(queue); >> > + >> > + ngx_queue_split(queue, q, &tail); >> > + >> > + ngx_queue_sort(queue, cmp); >> > + ngx_queue_sort(&tail, cmp); >> > + >> > + ngx_queue_merge(queue, &tail, cmp); >> > +} >> > >> > - prev = ngx_queue_prev(q); >> > - next = ngx_queue_next(q); >> > >> > - ngx_queue_remove(q); >> > +static void >> > +ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, >> > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) >> > +{ >> > + ngx_queue_t *q1, *q2; >> > + >> > + q1 = ngx_queue_head(queue); >> > + q2 = ngx_queue_head(tail); >> > >> > - do { >> > - if (cmp(prev, q) <= 0) { >> > - break; >> > - } >> > + for ( ;; ) { >> > + if (q1 == ngx_queue_sentinel(queue)) { >> > + ngx_queue_add(queue, tail); >> > + break; >> > + } >> > + >> > + if (q2 == ngx_queue_sentinel(tail)) { >> > + break; >> > + } >> > >> > - prev = ngx_queue_prev(prev); >> > + if (cmp(q1, q2) 
<= 0) { >> > + q1 = ngx_queue_next(q1); >> > + continue; >> > + } >> > >> > - } while (prev != ngx_queue_sentinel(queue)); >> > + ngx_queue_remove(q2); >> > + ngx_queue_insert_before(q1, q2); >> > >> > - ngx_queue_insert_after(prev, q); >> > + q2 = ngx_queue_head(tail); >> > } >> > } >> > diff --git a/src/core/ngx_queue.h b/src/core/ngx_queue.h >> > --- a/src/core/ngx_queue.h >> > +++ b/src/core/ngx_queue.h >> > @@ -47,6 +47,9 @@ struct ngx_queue_s { >> > (h)->prev = x >> > >> > >> > +#define ngx_queue_insert_before ngx_queue_insert_tail >> > + >> > + >> > #define ngx_queue_head(h) \ >> > (h)->next >> > >> > >> > >> > -- >> > Maxim Dounin >> > http://mdounin.ru/ >> > _______________________________________________ >> > nginx-devel mailing list >> > nginx-devel at nginx.org >> > https://mailman.nginx.org/mailman/listinfo/nginx-devel >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> https://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From 0x0davood at gmail.com Sat Oct 7 04:09:18 2023 From: 0x0davood at gmail.com (Davood Falahati) Date: Sat, 7 Oct 2023 06:09:18 +0200 Subject: optimize the code for searching file in ngx_conf_open_file Message-ID: # HG changeset patch # User Davood Falahati <0x0davood at gmail.com> # Date 1696647746 -7200 # Sat Oct 07 05:02:26 2023 +0200 # Node ID ab14ea51bbb15331c9f44f14901d0fd378168647 # Parent c073e545e1cdcc736f8869a012a78b2dd836eac9 optimize the code for searching file in ngx_conf_open_file This patch combines two consecutive if statements into one and leveraging short circuits circuiting. In the current code, we check the lengths of file names and if they match, the file names are being compared. 
I see few other places in the code that writing multiple if statements are preferred over short circuit evaluation (e.g. http://hg.nginx.org/nginx/file/tip/src/http/ngx_http_file_cache.c#l1153). I wanted to make sure if it's a matter of community's taste or if it's in line with some performance considerations? diff -r c073e545e1cd -r ab14ea51bbb1 src/core/ngx_conf_file.c --- a/src/core/ngx_conf_file.c Thu May 18 23:42:22 2023 +0200 +++ b/src/core/ngx_conf_file.c Sat Oct 07 05:02:26 2023 +0200 @@ -927,11 +927,7 @@ i = 0; } - if (full.len != file[i].name.len) { - continue; - } - - if (ngx_strcmp(full.data, file[i].name.data) == 0) { + if (full.len == file[i].name.len && ngx_strcmp(full.data, file[i].name.data) == 0) { return &file[i]; } } -------------- next part -------------- An HTML attachment was scrubbed... URL: From xeioex at nginx.com Sat Oct 7 05:49:06 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Sat, 07 Oct 2023 05:49:06 +0000 Subject: [njs] Fixed RegExp.prototype.split(). Message-ID: details: https://hg.nginx.org/njs/rev/c16f64d334f2 branches: changeset: 2217:c16f64d334f2 user: Dmitry Volyntsev date: Fri Oct 06 16:49:59 2023 -0700 description: Fixed RegExp.prototype.split(). The issue was introduced in c0aad58cfadb. 
diffstat: src/njs_regexp.c | 15 +++++++++------ src/test/njs_unit_test.c | 3 +++ 2 files changed, 12 insertions(+), 6 deletions(-) diffs (48 lines): diff -r 632002c161b1 -r c16f64d334f2 src/njs_regexp.c --- a/src/njs_regexp.c Wed Oct 04 15:00:02 2023 -0700 +++ b/src/njs_regexp.c Fri Oct 06 16:49:59 2023 -0700 @@ -1625,7 +1625,7 @@ njs_regexp_prototype_symbol_split(njs_vm njs_value_t r, z, this, s_lvalue, setval, constructor; njs_object_t *object; const u_char *start, *end; - njs_string_prop_t s; + njs_string_prop_t s, sv; njs_value_t arguments[2]; static const njs_value_t string_lindex = njs_string("lastIndex"); @@ -1815,14 +1815,17 @@ njs_regexp_prototype_symbol_split(njs_vm ncaptures = njs_max(ncaptures - 1, 0); for (i = 1; i <= ncaptures; i++) { - value = njs_array_push(vm, array); - if (njs_slow_path(value == NULL)) { + ret = njs_value_property_i64(vm, &z, i, retval); + if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } - ret = njs_value_property_i64(vm, &z, i, value); - if (njs_slow_path(ret == NJS_ERROR)) { - return NJS_ERROR; + (void) njs_string_prop(&sv, retval); + + ret = njs_array_string_add(vm, array, sv.start, sv.size, + sv.length); + if (njs_slow_path(ret != NJS_OK)) { + return ret; } if (array->length == limit) { diff -r 632002c161b1 -r c16f64d334f2 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Wed Oct 04 15:00:02 2023 -0700 +++ b/src/test/njs_unit_test.c Fri Oct 06 16:49:59 2023 -0700 @@ -9734,6 +9734,9 @@ static njs_unit_test_t njs_test[] = { njs_str("'мояВерблюжьяСтрока'.split(/(?=[А-Я])/)"), njs_str("моя,Верблюжья,Строка") }, + { njs_str("`aaaaaaaaaaaaaaaaa`.split(/(.*)/)"), + njs_str(",aaaaaaaaaaaaaaaaa,") }, + { njs_str("'Harry Trump ;Fred Barney; Helen Rigby ; Bill Abel ;Chris Hand '.split( /\\s*(?:;|$)\\s*/)"), njs_str("Harry Trump,Fred Barney,Helen Rigby,Bill Abel,Chris Hand,") }, From xeioex at nginx.com Sat Oct 7 05:49:08 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Sat, 07 Oct 2023 05:49:08 +0000 
Subject: [njs] Fixed RegExp.prototype.replace(). Message-ID: details: https://hg.nginx.org/njs/rev/b67fc7398a83 branches: changeset: 2218:b67fc7398a83 user: Dmitry Volyntsev date: Fri Oct 06 16:51:53 2023 -0700 description: Fixed RegExp.prototype.replace(). The issue was introduced in cf85d0f8640a. diffstat: src/njs_regexp.c | 13 +++++++++---- 1 files changed, 9 insertions(+), 4 deletions(-) diffs (30 lines): diff -r c16f64d334f2 -r b67fc7398a83 src/njs_regexp.c --- a/src/njs_regexp.c Fri Oct 06 16:49:59 2023 -0700 +++ b/src/njs_regexp.c Fri Oct 06 16:51:53 2023 -0700 @@ -1541,10 +1541,6 @@ njs_regexp_prototype_symbol_replace(njs_ arguments, ncaptures, &groups, replace, retval); - if (njs_object_slots(r)) { - njs_regexp_exec_result_free(vm, njs_array(r)); - } - } else { ret = njs_array_expand(vm, array, 0, njs_is_defined(&groups) ? 3 : 2); @@ -1586,6 +1582,15 @@ njs_regexp_prototype_symbol_replace(njs_ next_pos = pos + (int64_t) m.length; } + + if (!func_replace && njs_object_slots(r)) { + /* + * Doing free here ONLY for non-function replace, because + * otherwise we cannot be certain the result of match + * was not stored elsewhere. + */ + njs_regexp_exec_result_free(vm, njs_array(r)); + } } if (next_pos < (int64_t) s.size) { From xeioex at nginx.com Sat Oct 7 05:49:10 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Sat, 07 Oct 2023 05:49:10 +0000 Subject: [njs] Fixed Array.prototype.sort() with --debug=YES and --debug-memory=YES. Message-ID: details: https://hg.nginx.org/njs/rev/b49a98886c02 branches: changeset: 2219:b49a98886c02 user: Dmitry Volyntsev date: Fri Oct 06 16:52:23 2023 -0700 description: Fixed Array.prototype.sort() with --debug=YES and --debug-memory=YES. Previously, --debug-memory=YES activated a different allocation mechanism that was not able to properly handle the 0 size allocation. Specifically, njs_mp_free() failed to find a block to free when the size of the block is 0. 
The fix is to alloc at least 1 byte in the --debug-memory=YES mode. diffstat: src/njs_array.c | 28 +++++++++++++++++++++++----- src/njs_mp.c | 8 ++++++++ 2 files changed, 31 insertions(+), 5 deletions(-) diffs (77 lines): diff -r b67fc7398a83 -r b49a98886c02 src/njs_array.c --- a/src/njs_array.c Fri Oct 06 16:51:53 2023 -0700 +++ b/src/njs_array.c Fri Oct 06 16:52:23 2023 -0700 @@ -2782,6 +2782,8 @@ njs_sort_indexed_properties(njs_vm_t *vm njs_array_sort_ctx_t ctx; njs_array_sort_slot_t *p, *end, *slots, *newslots; + njs_assert(length != 0); + slots = NULL; keys = NULL; ctx.vm = vm; @@ -2993,6 +2995,12 @@ njs_array_prototype_sort(njs_vm_t *vm, n return ret; } + slots = NULL; + + if (length == 0) { + goto done; + } + /* Satisfy gcc -O3 */ nslots = 0; @@ -3027,6 +3035,8 @@ njs_array_prototype_sort(njs_vm_t *vm, n } } +done: + njs_value_assign(retval, this); ret = NJS_OK; @@ -3083,11 +3093,19 @@ njs_array_prototype_to_sorted(njs_vm_t * return NJS_ERROR; } - slots = njs_sort_indexed_properties(vm, this, length, compare, 0, &nslots, - &nunds); - if (njs_slow_path(slots == NULL)) { - ret = NJS_ERROR; - goto exception; + if (length != 0) { + slots = njs_sort_indexed_properties(vm, this, length, compare, 0, + &nslots, &nunds); + if (njs_slow_path(slots == NULL)) { + ret = NJS_ERROR; + goto exception; + } + + } else { + slots = NULL; + length = 0; + nslots = 0; + nunds = 0; } njs_assert(length == (nslots + nunds)); diff -r b67fc7398a83 -r b49a98886c02 src/njs_mp.c --- a/src/njs_mp.c Fri Oct 06 16:51:53 2023 -0700 +++ b/src/njs_mp.c Fri Oct 06 16:52:23 2023 -0700 @@ -592,6 +592,14 @@ njs_mp_alloc_large(njs_mp_t *mp, size_t return NULL; } +#if (NJS_DEBUG) + /* + * Ensure that the size is not zero, otherwise njs_mp_find_block() + * will not be able to find the block. 
+ */ + size += size == 0; +#endif + if (njs_is_power_of_two(size)) { block = njs_malloc(sizeof(njs_mp_block_t)); if (njs_slow_path(block == NULL)) { From mdounin at mdounin.ru Sat Oct 7 17:59:51 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 7 Oct 2023 20:59:51 +0300 Subject: optimize the code for searching file in ngx_conf_open_file In-Reply-To: References: Message-ID: Hello! On Sat, Oct 07, 2023 at 06:09:18AM +0200, Davood Falahati wrote: > # HG changeset patch > # User Davood Falahati <0x0davood at gmail.com> > # Date 1696647746 -7200 > # Sat Oct 07 05:02:26 2023 +0200 > # Node ID ab14ea51bbb15331c9f44f14901d0fd378168647 > # Parent c073e545e1cdcc736f8869a012a78b2dd836eac9 > optimize the code for searching file in ngx_conf_open_file > > This patch combines two consecutive if statements into one and leveraging > short circuits circuiting. In the current code, we check the lengths of > file names and if they match, the file names are being compared. > > I see few other places in the code that writing multiple if statements are > preferred over short circuit evaluation (e.g. > http://hg.nginx.org/nginx/file/tip/src/http/ngx_http_file_cache.c#l1153). I > wanted to make sure if it's a matter of community's taste or if it's in > line with some performance considerations? > > diff -r c073e545e1cd -r ab14ea51bbb1 src/core/ngx_conf_file.c > --- a/src/core/ngx_conf_file.c Thu May 18 23:42:22 2023 +0200 > +++ b/src/core/ngx_conf_file.c Sat Oct 07 05:02:26 2023 +0200 > @@ -927,11 +927,7 @@ > i = 0; > } > > - if (full.len != file[i].name.len) { > - continue; > - } > - > - if (ngx_strcmp(full.data, file[i].name.data) == 0) { > + if (full.len == file[i].name.len && ngx_strcmp(full.data, > file[i].name.data) == 0) { > return &file[i]; > } > } Thanks for your efforts, but we prefer the code as is. There is no difference between these variants from the performance point of view. 
On the other hand, the existing variant with separate length comparison is certainly more readable. Further, the suggested change uses incorrect indentation, which is just wrong. For more information about submitting patches, please see tips here: http://nginx.org/en/docs/contributing_changes.html For more information about nginx coding style, please refer to the code and here: http://nginx.org/en/docs/dev/development_guide.html#code_style Hope this helps. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Oct 10 12:29:02 2023 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Tue, 10 Oct 2023 15:29:02 +0300 Subject: [PATCH] HTTP/2: per-iteration stream handling limit Message-ID: # HG changeset patch # User Maxim Dounin # Date 1696940019 -10800 # Tue Oct 10 15:13:39 2023 +0300 # Node ID cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc # Parent 3db945fda515014d220151046d02f3960bcfca0a HTTP/2: per-iteration stream handling limit. To ensure that attempts to flood servers with many streams are detected early, a limit of no more than 2 * max_concurrent_streams new streams per one event loop iteration was introduced. This limit is applied even if max_concurrent_streams is not yet reached - for example, if corresponding streams are handled synchronously or reset. Further, refused streams are now limited to maximum of max_concurrent_streams and 100, similarly to priority_limit initial value, providing some tolerance to clients trying to open several streams at the connection start, yet low tolerance to flooding attempts. 
diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c +++ b/src/http/v2/ngx_http_v2.c @@ -347,6 +347,7 @@ ngx_http_v2_read_handler(ngx_event_t *re ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http2 read handler"); h2c->blocked = 1; + h2c->new_streams = 0; if (c->close) { c->close = 0; @@ -1284,6 +1285,14 @@ ngx_http_v2_state_headers(ngx_http_v2_co goto rst_stream; } + if (h2c->new_streams++ >= 2 * h2scf->concurrent_streams) { + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, + "client sent too many streams at once"); + + status = NGX_HTTP_V2_REFUSED_STREAM; + goto rst_stream; + } + if (!h2c->settings_ack && !(h2c->state.flags & NGX_HTTP_V2_END_STREAM_FLAG) && h2scf->preread_size < NGX_HTTP_V2_DEFAULT_WINDOW) @@ -1349,6 +1358,12 @@ ngx_http_v2_state_headers(ngx_http_v2_co rst_stream: + if (h2c->refused_streams++ > ngx_max(h2scf->concurrent_streams, 100)) { + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, + "client sent too many refused streams"); + return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_NO_ERROR); + } + if (ngx_http_v2_send_rst_stream(h2c, h2c->state.sid, status) != NGX_OK) { return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); } diff --git a/src/http/v2/ngx_http_v2.h b/src/http/v2/ngx_http_v2.h --- a/src/http/v2/ngx_http_v2.h +++ b/src/http/v2/ngx_http_v2.h @@ -131,6 +131,8 @@ struct ngx_http_v2_connection_s { ngx_uint_t processing; ngx_uint_t frames; ngx_uint_t idle; + ngx_uint_t new_streams; + ngx_uint_t refused_streams; ngx_uint_t priority_limit; size_t send_window; From pluknet at nginx.com Tue Oct 10 12:46:04 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Oct 2023 16:46:04 +0400 Subject: [PATCH] HTTP/2: per-iteration stream handling limit In-Reply-To: References: Message-ID: <723C9C70-E59E-4D3A-9069-A27112577914@nginx.com> > On 10 Oct 2023, at 16:29, Maxim Dounin wrote: > > # HG changeset patch > # User Maxim Dounin > # Date 1696940019 -10800 > # Tue Oct 
10 15:13:39 2023 +0300 > # Node ID cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > # Parent 3db945fda515014d220151046d02f3960bcfca0a > HTTP/2: per-iteration stream handling limit. > > To ensure that attempts to flood servers with many streams are detected > early, a limit of no more than 2 * max_concurrent_streams new streams per one > event loop iteration was introduced. This limit is applied even if > max_concurrent_streams is not yet reached - for example, if corresponding > streams are handled synchronously or reset. > > Further, refused streams are now limited to maximum of max_concurrent_streams > and 100, similarly to priority_limit initial value, providing some tolerance > to clients trying to open several streams at the connection start, yet > low tolerance to flooding attempts. > > diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c > --- a/src/http/v2/ngx_http_v2.c > +++ b/src/http/v2/ngx_http_v2.c > @@ -347,6 +347,7 @@ ngx_http_v2_read_handler(ngx_event_t *re > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http2 read handler"); > > h2c->blocked = 1; > + h2c->new_streams = 0; > > if (c->close) { > c->close = 0; > @@ -1284,6 +1285,14 @@ ngx_http_v2_state_headers(ngx_http_v2_co > goto rst_stream; > } > > + if (h2c->new_streams++ >= 2 * h2scf->concurrent_streams) { > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > + "client sent too many streams at once"); > + > + status = NGX_HTTP_V2_REFUSED_STREAM; > + goto rst_stream; > + } > + > if (!h2c->settings_ack > && !(h2c->state.flags & NGX_HTTP_V2_END_STREAM_FLAG) > && h2scf->preread_size < NGX_HTTP_V2_DEFAULT_WINDOW) > @@ -1349,6 +1358,12 @@ ngx_http_v2_state_headers(ngx_http_v2_co > > rst_stream: > > + if (h2c->refused_streams++ > ngx_max(h2scf->concurrent_streams, 100)) { > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > + "client sent too many refused streams"); > + return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_NO_ERROR); > + } > + > if (ngx_http_v2_send_rst_stream(h2c, 
h2c->state.sid, status) != NGX_OK) { > return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); > } > diff --git a/src/http/v2/ngx_http_v2.h b/src/http/v2/ngx_http_v2.h > --- a/src/http/v2/ngx_http_v2.h > +++ b/src/http/v2/ngx_http_v2.h > @@ -131,6 +131,8 @@ struct ngx_http_v2_connection_s { > ngx_uint_t processing; > ngx_uint_t frames; > ngx_uint_t idle; > + ngx_uint_t new_streams; > + ngx_uint_t refused_streams; > ngx_uint_t priority_limit; > > size_t send_window; Looks good. -- Sergey Kandaurov From mdounin at mdounin.ru Tue Oct 10 12:52:38 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Oct 2023 15:52:38 +0300 Subject: [PATCH] HTTP/2: per-iteration stream handling limit In-Reply-To: References: Message-ID: Hello! On Tue, Oct 10, 2023 at 03:29:02PM +0300, Maxim Dounin wrote: > # HG changeset patch > # User Maxim Dounin > # Date 1696940019 -10800 > # Tue Oct 10 15:13:39 2023 +0300 > # Node ID cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > # Parent 3db945fda515014d220151046d02f3960bcfca0a > HTTP/2: per-iteration stream handling limit. > > To ensure that attempts to flood servers with many streams are detected > early, a limit of no more than 2 * max_concurrent_streams new streams per one > event loop iteration was introduced. This limit is applied even if > max_concurrent_streams is not yet reached - for example, if corresponding > streams are handled synchronously or reset. > > Further, refused streams are now limited to maximum of max_concurrent_streams > and 100, similarly to priority_limit initial value, providing some tolerance > to clients trying to open several streams at the connection start, yet > low tolerance to flooding attempts. To clarify: There is FUD being spread that nginx is vulnerable to CVE-2023-44487. We do not consider nginx to be affected by this issue. 
In the default configuration, nginx is sufficiently protected by the limit of allowed requests per connection (see http://nginx.org/r/keepalive_requests for details), so an attacker will be required to reconnect very often, making the attack obvious and easy to stop at the network level. And it is not possible to circumvent the max concurrent streams limit in nginx, as nginx only allows additional streams when previous streams are completely closed. Further, additional protection can be implemented in nginx by using the "limit_req" directive, which limits the rate of requests and rejects excessive requests. Overall, with the handling as implemented in nginx, impact of streams being reset does not seem to be significantly different from impacts from other workloads with a large number of requests being sent by the client, such as handling of multiple HTTP/2 requests or HTTP/1.x pipelined requests. Nevertheless, we've decided to implement some additional mitigations which will help nginx to detect such attacks and drop connections with misbehaving clients faster. Hence the patch. 
> > diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c > --- a/src/http/v2/ngx_http_v2.c > +++ b/src/http/v2/ngx_http_v2.c > @@ -347,6 +347,7 @@ ngx_http_v2_read_handler(ngx_event_t *re > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http2 read handler"); > > h2c->blocked = 1; > + h2c->new_streams = 0; > > if (c->close) { > c->close = 0; > @@ -1284,6 +1285,14 @@ ngx_http_v2_state_headers(ngx_http_v2_co > goto rst_stream; > } > > + if (h2c->new_streams++ >= 2 * h2scf->concurrent_streams) { > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > + "client sent too many streams at once"); > + > + status = NGX_HTTP_V2_REFUSED_STREAM; > + goto rst_stream; > + } > + > if (!h2c->settings_ack > && !(h2c->state.flags & NGX_HTTP_V2_END_STREAM_FLAG) > && h2scf->preread_size < NGX_HTTP_V2_DEFAULT_WINDOW) > @@ -1349,6 +1358,12 @@ ngx_http_v2_state_headers(ngx_http_v2_co > > rst_stream: > > + if (h2c->refused_streams++ > ngx_max(h2scf->concurrent_streams, 100)) { > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > + "client sent too many refused streams"); > + return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_NO_ERROR); > + } > + > if (ngx_http_v2_send_rst_stream(h2c, h2c->state.sid, status) != NGX_OK) { > return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); > } > diff --git a/src/http/v2/ngx_http_v2.h b/src/http/v2/ngx_http_v2.h > --- a/src/http/v2/ngx_http_v2.h > +++ b/src/http/v2/ngx_http_v2.h > @@ -131,6 +131,8 @@ struct ngx_http_v2_connection_s { > ngx_uint_t processing; > ngx_uint_t frames; > ngx_uint_t idle; > + ngx_uint_t new_streams; > + ngx_uint_t refused_streams; > ngx_uint_t priority_limit; > > size_t send_window; > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Oct 10 12:56:13 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Oct 
2023 15:56:13 +0300 Subject: [PATCH] HTTP/2: per-iteration stream handling limit In-Reply-To: <723C9C70-E59E-4D3A-9069-A27112577914@nginx.com> References: <723C9C70-E59E-4D3A-9069-A27112577914@nginx.com> Message-ID: Hello! On Tue, Oct 10, 2023 at 04:46:04PM +0400, Sergey Kandaurov wrote: > > > On 10 Oct 2023, at 16:29, Maxim Dounin wrote: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1696940019 -10800 > > # Tue Oct 10 15:13:39 2023 +0300 > > # Node ID cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > > # Parent 3db945fda515014d220151046d02f3960bcfca0a > > HTTP/2: per-iteration stream handling limit. > > > > To ensure that attempts to flood servers with many streams are detected > > early, a limit of no more than 2 * max_concurrent_streams new streams per one > > event loop iteration was introduced. This limit is applied even if > > max_concurrent_streams is not yet reached - for example, if corresponding > > streams are handled synchronously or reset. > > > > Further, refused streams are now limited to maximum of max_concurrent_streams > > and 100, similarly to priority_limit initial value, providing some tolerance > > to clients trying to open several streams at the connection start, yet > > low tolerance to flooding attempts. 
> > > > diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c > > --- a/src/http/v2/ngx_http_v2.c > > +++ b/src/http/v2/ngx_http_v2.c > > @@ -347,6 +347,7 @@ ngx_http_v2_read_handler(ngx_event_t *re > > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http2 read handler"); > > > > h2c->blocked = 1; > > + h2c->new_streams = 0; > > > > if (c->close) { > > c->close = 0; > > @@ -1284,6 +1285,14 @@ ngx_http_v2_state_headers(ngx_http_v2_co > > goto rst_stream; > > } > > > > + if (h2c->new_streams++ >= 2 * h2scf->concurrent_streams) { > > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > > + "client sent too many streams at once"); > > + > > + status = NGX_HTTP_V2_REFUSED_STREAM; > > + goto rst_stream; > > + } > > + > > if (!h2c->settings_ack > > && !(h2c->state.flags & NGX_HTTP_V2_END_STREAM_FLAG) > > && h2scf->preread_size < NGX_HTTP_V2_DEFAULT_WINDOW) > > @@ -1349,6 +1358,12 @@ ngx_http_v2_state_headers(ngx_http_v2_co > > > > rst_stream: > > > > + if (h2c->refused_streams++ > ngx_max(h2scf->concurrent_streams, 100)) { > > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > > + "client sent too many refused streams"); > > + return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_NO_ERROR); > > + } > > + > > if (ngx_http_v2_send_rst_stream(h2c, h2c->state.sid, status) != NGX_OK) { > > return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); > > } > > diff --git a/src/http/v2/ngx_http_v2.h b/src/http/v2/ngx_http_v2.h > > --- a/src/http/v2/ngx_http_v2.h > > +++ b/src/http/v2/ngx_http_v2.h > > @@ -131,6 +131,8 @@ struct ngx_http_v2_connection_s { > > ngx_uint_t processing; > > ngx_uint_t frames; > > ngx_uint_t idle; > > + ngx_uint_t new_streams; > > + ngx_uint_t refused_streams; > > ngx_uint_t priority_limit; > > > > size_t send_window; > > Looks good. Pushed to http://mdounin.ru/hg/nginx/, thanks. 
-- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Tue Oct 10 13:05:49 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 10 Oct 2023 13:05:49 +0000 Subject: [nginx] HTTP/2: per-iteration stream handling limit. Message-ID: details: https://hg.nginx.org/nginx/rev/cdda286c0f1b branches: changeset: 9165:cdda286c0f1b user: Maxim Dounin date: Tue Oct 10 15:13:39 2023 +0300 description: HTTP/2: per-iteration stream handling limit. To ensure that attempts to flood servers with many streams are detected early, a limit of no more than 2 * max_concurrent_streams new streams per one event loop iteration was introduced. This limit is applied even if max_concurrent_streams is not yet reached - for example, if corresponding streams are handled synchronously or reset. Further, refused streams are now limited to maximum of max_concurrent_streams and 100, similarly to priority_limit initial value, providing some tolerance to clients trying to open several streams at the connection start, yet low tolerance to flooding attempts. 
diffstat: src/http/v2/ngx_http_v2.c | 15 +++++++++++++++ src/http/v2/ngx_http_v2.h | 2 ++ 2 files changed, 17 insertions(+), 0 deletions(-) diffs (51 lines): diff -r 3db945fda515 -r cdda286c0f1b src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Fri Sep 22 19:23:57 2023 +0400 +++ b/src/http/v2/ngx_http_v2.c Tue Oct 10 15:13:39 2023 +0300 @@ -347,6 +347,7 @@ ngx_http_v2_read_handler(ngx_event_t *re ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http2 read handler"); h2c->blocked = 1; + h2c->new_streams = 0; if (c->close) { c->close = 0; @@ -1284,6 +1285,14 @@ ngx_http_v2_state_headers(ngx_http_v2_co goto rst_stream; } + if (h2c->new_streams++ >= 2 * h2scf->concurrent_streams) { + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, + "client sent too many streams at once"); + + status = NGX_HTTP_V2_REFUSED_STREAM; + goto rst_stream; + } + if (!h2c->settings_ack && !(h2c->state.flags & NGX_HTTP_V2_END_STREAM_FLAG) && h2scf->preread_size < NGX_HTTP_V2_DEFAULT_WINDOW) @@ -1349,6 +1358,12 @@ ngx_http_v2_state_headers(ngx_http_v2_co rst_stream: + if (h2c->refused_streams++ > ngx_max(h2scf->concurrent_streams, 100)) { + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, + "client sent too many refused streams"); + return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_NO_ERROR); + } + if (ngx_http_v2_send_rst_stream(h2c, h2c->state.sid, status) != NGX_OK) { return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); } diff -r 3db945fda515 -r cdda286c0f1b src/http/v2/ngx_http_v2.h --- a/src/http/v2/ngx_http_v2.h Fri Sep 22 19:23:57 2023 +0400 +++ b/src/http/v2/ngx_http_v2.h Tue Oct 10 15:13:39 2023 +0300 @@ -131,6 +131,8 @@ struct ngx_http_v2_connection_s { ngx_uint_t processing; ngx_uint_t frames; ngx_uint_t idle; + ngx_uint_t new_streams; + ngx_uint_t refused_streams; ngx_uint_t priority_limit; size_t send_window; From teward at thomas-ward.net Tue Oct 10 17:57:58 2023 From: teward at thomas-ward.net (Thomas Ward) Date: Tue, 10 Oct 2023 17:57:58 +0000 
Subject: [PATCH] HTTP/2: per-iteration stream handling limit In-Reply-To: References: Message-ID: Maxim: If NGINX is unaffected, then F5 is distributing the FUD. Refer to https://my.f5.com/manage/s/article/K000137106 and the NGINX category, specifically NGINX OSS. Right now, unless this is updated by F5 (who owns NGINX now) there is conflicting information here. May I suggest you go internally to F5 and have them actually *revise* their update, or further indicate that "There are already mitigations in place"? Ultimately, though, I would assume that keepalive at 100 is also still not enough to *fully* protect against this, therefore at the core "NGINX is still vulnerable to this CVE despite mitigations already being in place for default configuration values and options" is the interpretation, which means yes NGINX is in fact affected by this *in non-default configurations*, which means that it *is* still vulnerable to the CVE. Mitigations don't mean "fixed" or "not affected" in the strictest interpretation of languages and what means what. Thomas -----Original Message----- From: nginx-devel On Behalf Of Maxim Dounin Sent: Tuesday, October 10, 2023 8:53 AM To: nginx-devel at nginx.org Subject: Re: [PATCH] HTTP/2: per-iteration stream handling limit Hello! On Tue, Oct 10, 2023 at 03:29:02PM +0300, Maxim Dounin wrote: > # HG changeset patch > # User Maxim Dounin # Date 1696940019 -10800 > # Tue Oct 10 15:13:39 2023 +0300 > # Node ID cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > # Parent 3db945fda515014d220151046d02f3960bcfca0a > HTTP/2: per-iteration stream handling limit. > > To ensure that attempts to flood servers with many streams are > detected early, a limit of no more than 2 * max_concurrent_streams new > streams per one event loop iteration was introduced. This limit is > applied even if max_concurrent_streams is not yet reached - for > example, if corresponding streams are handled synchronously or reset. 
> > Further, refused streams are now limited to maximum of > max_concurrent_streams and 100, similarly to priority_limit initial > value, providing some tolerance to clients trying to open several > streams at the connection start, yet low tolerance to flooding attempts. To clarify: There is FUD being spread that nginx is vulnerable to CVE-2023-44487. We do not consider nginx to be affected by this issue. In the default configuration, nginx is sufficiently protected by the limit of allowed requests per connection (see http://nginx.org/r/keepalive_requests for details), so an attacker will be required to reconnect very often, making the attack obvious and easy to stop at the network level. And it is not possible to circumvent the max concurrent streams limit in nginx, as nginx only allows additional streams when previous streams are completely closed. Further, additional protection can be implemented in nginx by using the "limit_req" directive, which limits the rate of requests and rejects excessive requests. Overall, with the handling as implemented in nginx, impact of streams being reset does not seem to be significantly different from impacts from other workloads with large number of requests being sent by the client, such as handling of multiple HTTP/2 requests or HTTP/1.x pipelined requests. Nevertheless, we've decided to implement some additional mitigations which will help nginx to detect such attacks and drop connections with misbehaving clients faster. Hence the patch. 
> > diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c > --- a/src/http/v2/ngx_http_v2.c > +++ b/src/http/v2/ngx_http_v2.c > @@ -347,6 +347,7 @@ ngx_http_v2_read_handler(ngx_event_t *re > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http2 read > handler"); > > h2c->blocked = 1; > + h2c->new_streams = 0; > > if (c->close) { > c->close = 0; > @@ -1284,6 +1285,14 @@ ngx_http_v2_state_headers(ngx_http_v2_co > goto rst_stream; > } > > + if (h2c->new_streams++ >= 2 * h2scf->concurrent_streams) { > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > + "client sent too many streams at once"); > + > + status = NGX_HTTP_V2_REFUSED_STREAM; > + goto rst_stream; > + } > + > if (!h2c->settings_ack > && !(h2c->state.flags & NGX_HTTP_V2_END_STREAM_FLAG) > && h2scf->preread_size < NGX_HTTP_V2_DEFAULT_WINDOW) @@ > -1349,6 +1358,12 @@ ngx_http_v2_state_headers(ngx_http_v2_co > > rst_stream: > > + if (h2c->refused_streams++ > ngx_max(h2scf->concurrent_streams, 100)) { > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > + "client sent too many refused streams"); > + return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_NO_ERROR); > + } > + > if (ngx_http_v2_send_rst_stream(h2c, h2c->state.sid, status) != NGX_OK) { > return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); > } > diff --git a/src/http/v2/ngx_http_v2.h b/src/http/v2/ngx_http_v2.h > --- a/src/http/v2/ngx_http_v2.h > +++ b/src/http/v2/ngx_http_v2.h > @@ -131,6 +131,8 @@ struct ngx_http_v2_connection_s { > ngx_uint_t processing; > ngx_uint_t frames; > ngx_uint_t idle; > + ngx_uint_t new_streams; > + ngx_uint_t refused_streams; > ngx_uint_t priority_limit; > > size_t send_window; > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Maxim Dounin http://mdounin.ru/ _______________________________________________ nginx-devel mailing list nginx-devel at nginx.org 
https://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Tue Oct 10 18:39:48 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Oct 2023 21:39:48 +0300 Subject: [PATCH] HTTP/2: per-iteration stream handling limit In-Reply-To: References: Message-ID: Hello! On Tue, Oct 10, 2023 at 05:57:58PM +0000, Thomas Ward wrote: > Maxim: > > If NGINX is unaffected, then F5 is distributing the FUD. Yes, they are. > Refer to https://my.f5.com/manage/s/article/K000137106 and the > NGINX category, specifically NGINX OSS. Right now, unless this > is updated by F5 (who owns NGINX now) there is conflicting > information here. May I suggest you go internally to F5 and > have them actually *revise* their update, or further indicate > that "There are already mitigations in place"? I'm not "internally to F5", I'm an independent developer. This was discussed within nginx security team, with other nginx developers, and the conclusion is that nginx is not affected. I have no idea why F5 decided to include nginx as affected in their advisory, who did this, and why. This is incorrect and should be fixed. I've already pinged them, and hopefully they will fix this. > Ultimately, > though, I would assume that keepalive at 100 is also still not > enough to *fully* protect against this, therefore at the core > "NGINX is still vulnerable to this CVE despite mitigations > already being in place for default configuration values and > options" is the interpretation, which means yes NGINX is in fact > affected by this *in non-default configurations*, which means > that it *is* still vulnerable to the CVE. Mitigations don't > mean "fixed" or "not affected" in the strictest interpretation > of languages and what means what. As often the case for DoS vulnerabilities, you cannot be *fully* protected, since there is a work to do anyway. 
As long as nginx is configured to serve HTTP requests, it can be loaded with serving HTTP requests, and maximum possible load depends on the configurations, including various builtin limits, such as keepalive_requests and http2_max_concurrent_streams, and DoS mitigations which needs to be explicitly configured, such as limit_req and limit_conn. As already said, in this case the work nginx is willing to do is no different from other workloads, such as with normal HTTP/2 requests or HTTP/1.x requests. As such, nginx is not considered to be affected by this issue. > > > Thomas > > -----Original Message----- > From: nginx-devel On Behalf Of Maxim Dounin > Sent: Tuesday, October 10, 2023 8:53 AM > To: nginx-devel at nginx.org > Subject: Re: [PATCH] HTTP/2: per-iteration stream handling limit > > Hello! > > On Tue, Oct 10, 2023 at 03:29:02PM +0300, Maxim Dounin wrote: > > > # HG changeset patch > > # User Maxim Dounin # Date 1696940019 -10800 > > # Tue Oct 10 15:13:39 2023 +0300 > > # Node ID cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > > # Parent 3db945fda515014d220151046d02f3960bcfca0a > > HTTP/2: per-iteration stream handling limit. > > > > To ensure that attempts to flood servers with many streams are > > detected early, a limit of no more than 2 * max_concurrent_streams new > > streams per one event loop iteration was introduced. This limit is > > applied even if max_concurrent_streams is not yet reached - for > > example, if corresponding streams are handled synchronously or reset. > > > > Further, refused streams are now limited to maximum of > > max_concurrent_streams and 100, similarly to priority_limit initial > > value, providing some tolerance to clients trying to open several > > streams at the connection start, yet low tolerance to flooding attempts. > > To clarify: > > There is FUD being spread that nginx is vulnerable to CVE-2023-44487. > > We do not consider nginx to be affected by this issue. 
In the default configuration, nginx is sufficiently protected by the limit of allowed requests per connection (see http://nginx.org/r/keepalive_requests for details), so an attacker will be required to reconnect very often, making the attack obvious and easy to stop at the network level. And it is not possible to circumvent the max concurrent streams limit in nginx, as nginx only allows additional streams when previous streams are completely closed. > > Further, additional protection can be implemented in nginx by using the "limit_req" directive, which limits the rate of requests and rejects excessive requests. > > Overall, with the handling as implemented in nginx, impact of streams being reset does not seem to be significantly different from impacts from other workloads with large number of requests being sent by the client, such as handling of multiple HTTP/2 requests or HTTP/1.x pipelined requests. > > Nevertheless, we've decided to implement some additional mitigations which will help nginx to detect such attacks and drop connections with misbehaving clients faster. Hence the patch. 
> > > > > diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c > > --- a/src/http/v2/ngx_http_v2.c > > +++ b/src/http/v2/ngx_http_v2.c > > @@ -347,6 +347,7 @@ ngx_http_v2_read_handler(ngx_event_t *re > > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http2 read > > handler"); > > > > h2c->blocked = 1; > > + h2c->new_streams = 0; > > > > if (c->close) { > > c->close = 0; > > @@ -1284,6 +1285,14 @@ ngx_http_v2_state_headers(ngx_http_v2_co > > goto rst_stream; > > } > > > > + if (h2c->new_streams++ >= 2 * h2scf->concurrent_streams) { > > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > > + "client sent too many streams at once"); > > + > > + status = NGX_HTTP_V2_REFUSED_STREAM; > > + goto rst_stream; > > + } > > + > > if (!h2c->settings_ack > > && !(h2c->state.flags & NGX_HTTP_V2_END_STREAM_FLAG) > > && h2scf->preread_size < NGX_HTTP_V2_DEFAULT_WINDOW) @@ > > -1349,6 +1358,12 @@ ngx_http_v2_state_headers(ngx_http_v2_co > > > > rst_stream: > > > > + if (h2c->refused_streams++ > ngx_max(h2scf->concurrent_streams, 100)) { > > + ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0, > > + "client sent too many refused streams"); > > + return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_NO_ERROR); > > + } > > + > > if (ngx_http_v2_send_rst_stream(h2c, h2c->state.sid, status) != NGX_OK) { > > return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); > > } > > diff --git a/src/http/v2/ngx_http_v2.h b/src/http/v2/ngx_http_v2.h > > --- a/src/http/v2/ngx_http_v2.h > > +++ b/src/http/v2/ngx_http_v2.h > > @@ -131,6 +131,8 @@ struct ngx_http_v2_connection_s { > > ngx_uint_t processing; > > ngx_uint_t frames; > > ngx_uint_t idle; > > + ngx_uint_t new_streams; > > + ngx_uint_t refused_streams; > > ngx_uint_t priority_limit; > > > > size_t send_window; > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > -- > Maxim Dounin > 
http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Oct 10 22:04:04 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 11 Oct 2023 01:04:04 +0300 Subject: Memory Leak Issue in Nginx PCRE2 In-Reply-To: References: Message-ID: Hello! On Wed, Sep 27, 2023 at 01:13:44AM +0800, 上勾拳 wrote: > Dear Nginx Developers, > > I hope this email finds you well. I am reaching out to the mailing list for > the first time to report and discuss an issue I encountered while working > on supporting PCRE2 in OpenResty. If I have made any errors in my reporting > or discussion, please do not hesitate to provide feedback. Your guidance is > greatly appreciated. > > During my recent work, I used the sanitizer to inspect potential issues, > and I identified a small memory leak in the PCRE2 code section of Nginx. > While this issue does not seem to be critical, it could potentially disrupt > memory checking tools. To help you reproduce the problem, I have included a > minimal configuration below. Please note that this issue occurs when Nginx > is configured to use PCRE2, and the version is 1.22.1 or higher. 
> > *Minimal Configuration for Reproduction:* > worker_processes 1; > daemon off; > master_process off; > error_log > /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/error.log > debug; > pid > /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/nginx.pid; > > http { > access_log > /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/access.log; > #access_log off; > default_type text/plain; > keepalive_timeout 68000ms; > server { > listen 1984; > #placeholder > server_name 'localhost'; > > client_max_body_size 30M; > #client_body_buffer_size 4k; > > # Begin preamble config... > > # End preamble config... > > # Begin test case config... > > location ~ '^/[a-d]$' { > return 200; > } > } > } > events { > accept_mutex off; > > worker_connections 64; > } > > *nginx -V :* > nginx version: nginx/1.25.1 (no pool) > built by gcc 11.4.1 20230605 (Red Hat 11.4.1-2) (GCC) > built with OpenSSL 1.1.1u 30 May 2023 > TLS SNI support enabled > configure arguments: > --prefix=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/work/nginx > --with-threads --with-pcre-jit --with-ipv6 > --with-cc-opt='-fno-omit-frame-pointer -fsanitize=address > -DNGX_LUA_USE_ASSERT -I/opt/pcre2/include -I/opt/ssl/include' > --with-http_v2_module --with-http_v3_module --with-http_realip_module > --with-http_ssl_module > --add-module=/home/zhenzhongw/code/pcre_pr/ndk-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/set-misc-nginx-module > --with-ld-opt='-fsanitize=address -L/opt/pcre2/lib -L/opt/ssl/lib > -Wl,-rpath,/opt/pcre2/lib:/opt/drizzle/lib:/opt/ssl/lib' > --without-mail_pop3_module --without-mail_imap_module > --with-http_image_filter_module --without-mail_smtp_module --with-stream > --with-stream_ssl_module --without-http_upstream_ip_hash_module > --without-http_memcached_module --without-http_auth_basic_module > --without-http_userid_module --with-http_auth_request_module > --add-module=/home/zhenzhongw/code/pcre_pr/echo-nginx-module > 
--add-module=/home/zhenzhongw/code/pcre_pr/memc-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/srcache-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-upstream-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/headers-more-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/drizzle-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/rds-json-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/coolkit-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/redis2-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/stream-lua-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-shm-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-delayed-load-module > --with-http_gunzip_module --with-http_dav_module --with-select_module > --with-poll_module --with-debug --with-poll_module --with-cc=gcc > > *The sanitizer tool reported the following error message: * > ================================================================= > ==555798==ERROR: LeakSanitizer: detected memory leaks > > Direct leak of 72 byte(s) in 1 object(s) allocated from: > #0 0x7f502f6b4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07) > #1 0x4a1737 in ngx_alloc src/os/unix/ngx_alloc.c:22 > #2 0x525796 in ngx_regex_malloc src/core/ngx_regex.c:509 > #3 0x7f502f3e745e in _pcre2_memctl_malloc_8 > (/opt/pcre2/lib/libpcre2-8.so.0+0x1145e) > #4 0x5771ad in ngx_http_regex_compile src/http/ngx_http_variables.c:2555 > #5 0x536088 in ngx_http_core_regex_location > src/http/ngx_http_core_module.c:3263 > #6 0x537f94 in ngx_http_core_location > src/http/ngx_http_core_module.c:3115 > #7 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > #8 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > #9 0x5391ec in ngx_http_core_server 
src/http/ngx_http_core_module.c:2991 > #10 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > #11 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > #12 0x528e4c in ngx_http_block src/http/ngx_http.c:239 > #13 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > #14 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > #15 0x463f74 in ngx_init_cycle src/core/ngx_cycle.c:284 > #12 0x528e4c in ngx_http_block src/http/ngx_http.c:239 > #13 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > #14 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > #15 0x463f74 in ngx_init_cycle src/core/ngx_cycle.c:284 > #16 0x4300c7 in main src/core/nginx.c:295 > #17 0x7ff31a43feaf in __libc_start_call_main (/lib64/libc.so.6+0x3feaf) > > SUMMARY: AddressSanitizer: 72 byte(s) leaked in 1 allocation(s). > > *I have created a patch to address this memory leak issue, which I am > sharing below:* > diff --git a/src/core/ngx_regex.c b/src/core/ngx_regex.c > index 91381f499..71f583789 100644 > --- a/src/core/ngx_regex.c > +++ b/src/core/ngx_regex.c > @@ -600,6 +600,8 @@ ngx_regex_cleanup(void *data) > * the new cycle, these will be re-allocated. > */ > > + ngx_regex_malloc_init(NULL); > + > if (ngx_regex_compile_context) { > pcre2_compile_context_free(ngx_regex_compile_context); > ngx_regex_compile_context = NULL; > @@ -611,6 +613,8 @@ ngx_regex_cleanup(void *data) > ngx_regex_match_data_size = 0; > } > > + ngx_regex_malloc_done(); > + > #endif > } > > @@ -706,7 +710,13 @@ ngx_regex_module_init(ngx_cycle_t *cycle) > ngx_regex_malloc_done(); > > ngx_regex_studies = NULL; > + > #if (NGX_PCRE2) > + if (ngx_regex_compile_context) { > + ngx_regex_malloc_init(NULL); > + pcre2_compile_context_free(ngx_regex_compile_context); > + ngx_regex_malloc_done(); > + } > ngx_regex_compile_context = NULL; > #endif > > I kindly request your assistance in reviewing this matter and considering > the patch for inclusion in Nginx. 
If you have any questions or need further > information, please feel free to reach out to me. Your expertise and > feedback are highly valuable in resolving this issue. Thank you for the report. Indeed, this looks like a small leak which manifests itself during reconfiguration when nginx is compiled with PCRE2. The patch looks correct to me, though I think it would be better to don't do anything with ngx_regex_compile_context in ngx_regex_module_init(). Please take a look if the following patch looks good to you: # HG changeset patch # User Maxim Dounin # Date 1696950530 -10800 # Tue Oct 10 18:08:50 2023 +0300 # Node ID 0ceb96f57592b77618fba4200797df977241ec9b # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc Core: fixed memory leak on configuration reload with PCRE2. In ngx_regex_cleanup() allocator wasn't configured when calling pcre2_compile_context_free() and pcre2_match_data_free(), resulting in no ngx_free() call and leaked memory. Fix is ensure that allocator is configured for global allocations, so that ngx_free() is actually called to free memory. Additionally, ngx_regex_compile_context was cleared in ngx_regex_module_init(). It should be either not cleared, so it will be freed by ngx_regex_cleanup(), or properly freed. Fix is to not clear it, so ngx_regex_cleanup() will be able to free it. Reported by ZhenZhong Wu, https://mailman.nginx.org/pipermail/nginx-devel/2023-September/3Z5FIKUDRN2WBSL3JWTZJ7SXDA6YIWPB.html diff --git a/src/core/ngx_regex.c b/src/core/ngx_regex.c --- a/src/core/ngx_regex.c +++ b/src/core/ngx_regex.c @@ -600,6 +600,8 @@ ngx_regex_cleanup(void *data) * the new cycle, these will be re-allocated. 
*/ + ngx_regex_malloc_init(NULL); + if (ngx_regex_compile_context) { pcre2_compile_context_free(ngx_regex_compile_context); ngx_regex_compile_context = NULL; @@ -611,6 +613,8 @@ ngx_regex_cleanup(void *data) ngx_regex_match_data_size = 0; } + ngx_regex_malloc_done(); + #endif } @@ -706,9 +710,6 @@ ngx_regex_module_init(ngx_cycle_t *cycle ngx_regex_malloc_done(); ngx_regex_studies = NULL; -#if (NGX_PCRE2) - ngx_regex_compile_context = NULL; -#endif return NGX_OK; } -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Oct 10 22:56:13 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 11 Oct 2023 01:56:13 +0300 Subject: [PATCH] Improve performance when starting nginx with a lot of locations In-Reply-To: References: Message-ID: Hello! On Thu, Oct 05, 2023 at 10:51:26AM +0900, Yusuke Nojima wrote: > Thank you for your comment! > > > Could you please provide some more details about the use case, > > such as how locations are used, and what is the approximate number > > of locations being used? > > Our team provides development environments to our company's engineers and QA. > In this environment, engineers and QA can freely create VMs and deploy > applications on them. > > Our nginx has the role of routing requests from the internet to all > applications deployed in this environment. > Additionally, it allows setting IP address restrictions, BASIC > authentication, TLS client authentication, and other configurations > for each application. > > To implement these requirements, we generate a location for each application. > Currently, there are approximately 40,000 locations in our environment. Thank you for the details. Such configuration looks somewhat sub-optimal, but understandable for a development / test environment. And certainly 40k locations is a lot for the sorting algorithm currently used. 
> > Further, since each location contains configuration for > > all modules, such configurations are expected to require a lot of > > memory > > Each of our nginx processes was consuming 5GB of memory in terms of > resident size. > This is not a problem as our servers have sufficient memory. > > > Rather, I would suggest recursive top-bottom merge sort implementation > > instead, which is much simpler and uses stack as temporary storage > > (so it'll naturally die if there will be a queue which requires > > more space for sorting than we have). > > > > Please take a look if it works for you: > > I think this implementation is simple and easy to understand. > Although the number of traversals of the list will increase compared > to bottom-up, it will not affect the order. > I believe this will provide sufficient optimization in terms of speed. Thanks for looking. In my limited testing, it is slightly faster than your bottom-up implementation (and significantly faster than the existing insertion sort when many locations are used). Below is the full patch (code unchanged), I'll commit it as soon as some other nginx developer will review it. # HG changeset patch # User Maxim Dounin # Date 1696977468 -10800 # Wed Oct 11 01:37:48 2023 +0300 # Node ID b891840852ee5cc823eee1769d092ab50928919f # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc Core: changed ngx_queue_sort() to use merge sort. This improves nginx startup times significantly when using very large number of locations due computational complexity of the sorting algorithm being used (insertion sort is O(n*n) on average, while merge sort is O(n*log(n))). In particular, in a test configuration with 20k locations total startup time is reduced from 8 seconds to 0.9 seconds. 
Prodded by Yusuke Nojima, https://mailman.nginx.org/pipermail/nginx-devel/2023-September/NUL3Y2FPPFSHMPTFTL65KXSXNTX3NQMK.html diff --git a/src/core/ngx_queue.c b/src/core/ngx_queue.c --- a/src/core/ngx_queue.c +++ b/src/core/ngx_queue.c @@ -9,6 +9,10 @@ #include +static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)); + + /* * find the middle queue element if the queue has odd number of elements * or the first element of the queue's second part otherwise @@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue) } -/* the stable insertion sort */ +/* the stable merge sort */ void ngx_queue_sort(ngx_queue_t *queue, ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) { - ngx_queue_t *q, *prev, *next; + ngx_queue_t *q, tail; q = ngx_queue_head(queue); @@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue, return; } - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { + q = ngx_queue_middle(queue); + + ngx_queue_split(queue, q, &tail); + + ngx_queue_sort(queue, cmp); + ngx_queue_sort(&tail, cmp); + + ngx_queue_merge(queue, &tail, cmp); +} - prev = ngx_queue_prev(q); - next = ngx_queue_next(q); - ngx_queue_remove(q); +static void +ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) +{ + ngx_queue_t *q1, *q2; + + q1 = ngx_queue_head(queue); + q2 = ngx_queue_head(tail); - do { - if (cmp(prev, q) <= 0) { - break; - } + for ( ;; ) { + if (q1 == ngx_queue_sentinel(queue)) { + ngx_queue_add(queue, tail); + break; + } + + if (q2 == ngx_queue_sentinel(tail)) { + break; + } - prev = ngx_queue_prev(prev); + if (cmp(q1, q2) <= 0) { + q1 = ngx_queue_next(q1); + continue; + } - } while (prev != ngx_queue_sentinel(queue)); + ngx_queue_remove(q2); + ngx_queue_insert_before(q1, q2); - ngx_queue_insert_after(prev, q); + q2 = ngx_queue_head(tail); } } diff --git a/src/core/ngx_queue.h b/src/core/ngx_queue.h --- 
a/src/core/ngx_queue.h +++ b/src/core/ngx_queue.h @@ -47,6 +47,9 @@ struct ngx_queue_s { (h)->prev = x +#define ngx_queue_insert_before ngx_queue_insert_tail + + #define ngx_queue_head(h) \ (h)->next > > > 2023年9月30日(土) 12:38 Maxim Dounin : > > > > Hello! > > > > On Fri, Sep 22, 2023 at 03:58:41PM +0900, Yusuke Nojima wrote: > > > > > # HG changeset patch > > > # User Yusuke Nojima > > > # Date 1679555707 -32400 > > > # Thu Mar 23 16:15:07 2023 +0900 > > > # Node ID 6aac98fb135e47ca9cf7ad7d780cf4a10e9aa55c > > > # Parent 8771d35d55d0a2b1cefaab04401d6f837f5a05a2 > > > Improve performance when starting nginx with a lot of locations > > > > > > Our team has a configuration file with a very large number of > > > locations, and we found that starting nginx with this file takes an > > > unacceptable amount of time. After investigating the issue, we > > > discovered that the root cause of the long startup time is the sorting > > > of the location list. > > > > Interesting. > > > > Could you please provide some more details about the use case, > > such as how locations are used, and what is the approximate number > > of locations being used? > > > > In my practice, it is extremely uncommon to use more than 1k-10k > > prefix locations (and even these numbers are huge for normal > > setups). Further, since each location contains configuration for > > all modules, such configurations are expected to require a lot of > > memory (currently about 5-10KB per empty location, so about > > 50-100MB per 10k locations, and 0.5-1G per 100k locations). > > Accordingly, using other approaches such as map (assuming exact > > match is actually needed) might be beneficial regardless of the > > sorting costs. > > > > Nevertheless, swapping the sorting algorithm to a faster one looks > > like an obvious improvement. > > > > > > > > Currently, the sorting algorithm used in nginx is insertion sort, > > > which requires O(n^2) time for n locations. 
We have modified the > > > sorting algorithm to use merge sort instead, which has a time > > > complexity of O(n log n). > > > > > > We have tested the modified code using micro-benchmarks and confirmed > > > that the new algorithm improves nginx startup time significantly > > > (shown below). We believe that this change would be valuable for other > > > users who are experiencing similar issues. > > > > > > > > > Table: nginx startup time in seconds > > > > > > n current patched > > > 2000 0.033 0.018 > > > 4000 0.047 0.028 > > > 6000 0.062 0.038 > > > 8000 0.079 0.050 > > > 10000 0.091 0.065 > > > 12000 0.367 0.081 > > > 14000 0.683 0.086 > > > 16000 0.899 0.097 > > > 18000 1.145 0.110 > > > 20000 1.449 0.122 > > > 22000 1.650 0.137 > > > 24000 2.155 0.151 > > > 26000 3.096 0.155 > > > 28000 3.711 0.168 > > > 30000 3.539 0.184 > > > 32000 3.980 0.193 > > > 34000 4.543 0.208 > > > 36000 4.349 0.217 > > > 38000 5.021 0.229 > > > 40000 4.918 0.245 > > > 42000 4.835 0.256 > > > 44000 5.159 0.262 > > > 46000 5.802 0.331 > > > 48000 6.205 0.295 > > > 50000 5.701 0.308 > > > 52000 5.992 0.335 > > > 54000 6.561 0.323 > > > 56000 6.856 0.333 > > > 58000 6.515 0.347 > > > 60000 7.051 0.359 > > > 62000 6.956 0.377 > > > 64000 7.376 0.376 > > > 66000 7.506 0.404 > > > 68000 7.292 0.407 > > > 70000 7.422 0.461 > > > 72000 10.090 0.443 > > > 74000 18.505 0.463 > > > 76000 11.857 0.462 > > > 78000 9.752 0.470 > > > 80000 12.485 0.481 > > > 82000 11.027 0.498 > > > 84000 9.804 0.523 > > > 86000 8.482 0.515 > > > 88000 9.838 0.560 > > > 90000 12.341 0.546 > > > 92000 13.881 0.648 > > > 94000 8.309 0.635 > > > 96000 8.854 0.650 > > > 98000 12.871 0.674 > > > 100000 8.261 0.698 > > > > This probably can be reduced to something like 3-5 data points. 
> > > > > > > > diff -r 8771d35d55d0 -r 6aac98fb135e src/core/ngx_queue.c > > > --- a/src/core/ngx_queue.c Fri Mar 10 07:43:50 2023 +0300 > > > +++ b/src/core/ngx_queue.c Thu Mar 23 16:15:07 2023 +0900 > > > @@ -45,36 +45,103 @@ > > > } > > > > > > > > > -/* the stable insertion sort */ > > > +/* merge queue2 into queue1. queue2 becomes empty after merge. */ > > > + > > > +static void > > > +ngx_queue_merge(ngx_queue_t *queue1, ngx_queue_t *queue2, > > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > > +{ > > > + ngx_queue_t *p1, *p2; > > > > Nitpicking: there are various style issues here and in other places. > > > > > + > > > + p1 = ngx_queue_head(queue1); > > > + p2 = ngx_queue_head(queue2); > > > + > > > + while (p1 != ngx_queue_sentinel(queue1) > > > + && p2 != ngx_queue_sentinel(queue2)) { > > > + > > > + if (cmp(p1, p2) > 0) { > > > + ngx_queue_t *next, *prev; > > > + > > > + next = ngx_queue_next(p2); > > > + ngx_queue_remove(p2); > > > + prev = ngx_queue_prev(p1); > > > + ngx_queue_insert_after(prev, p2); > > > + p2 = next; > > > > Nitpicking: there is no need to preserve "next" here, since p2 is > > always the head of queue2 and, and the next element can be > > obtained by ngx_queue_head(queue2). > > > > Also, instead of obtaining "prev" and using > > ngx_queue_insert_after() it would be easier to use > > ngx_queue_insert_before(). It is not currently defined, but it is > > trivial to define one: it is an alias to ngx_queue_insert_tail(), > > much like ngx_queue_insert_after() is an alias to > > ngx_queue_insert_head(). > > > > > + } else { > > > + p1 = ngx_queue_next(p1); > > > + } > > > + } > > > + if (p2 != ngx_queue_sentinel(queue2)) { > > > + ngx_queue_add(queue1, queue2); > > > + ngx_queue_init(queue2); > > > + } > > > +} > > > + > > > + > > > +/* move all elements from src to dest. dest should be empty before call. 
*/ > > > + > > > +static void > > > +ngx_queue_move(ngx_queue_t *dest, ngx_queue_t *src) > > > +{ > > > + *dest = *src; > > > + ngx_queue_init(src); > > > + > > > + if (dest->next == src) { > > > + dest->next = dest; > > > + } else { > > > + dest->next->prev = dest; > > > + } > > > + if (dest->prev == src) { > > > + dest->prev = dest; > > > + } else { > > > + dest->prev->next = dest; > > > + } > > > +} > > > > This function looks strange to me. There is the ngx_queue_add() > > macro, which probably should be used instead (if needed). > > > > > + > > > + > > > +/* the stable merge sort */ > > > > > > void > > > ngx_queue_sort(ngx_queue_t *queue, > > > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > > { > > > - ngx_queue_t *q, *prev, *next; > > > + ngx_queue_t merged[64], *p, *last; > > > > > > - q = ngx_queue_head(queue); > > > - > > > - if (q == ngx_queue_last(queue)) { > > > + if (ngx_queue_head(queue) == ngx_queue_last(queue)) { > > > return; > > > } > > > > > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { > > > + last = merged; > > > > > > - prev = ngx_queue_prev(q); > > > - next = ngx_queue_next(q); > > > + while (!ngx_queue_empty(queue)) { > > > + /* > > > + * Loop invariant: > > > + * merged[i] must have exactly 0 or 2^i elements in sorted order. > > > + * For each iteration, we take one element from the given queue and > > > + * insert it into merged without violating the invariant condition. 
> > > + */ > > > > > > - ngx_queue_remove(q); > > > + ngx_queue_t carry, *h; > > > + > > > + h = ngx_queue_head(queue); > > > + ngx_queue_remove(h); > > > + ngx_queue_init(&carry); > > > + ngx_queue_insert_head(&carry, h); > > > > > > - do { > > > - if (cmp(prev, q) <= 0) { > > > - break; > > > - } > > > + for (p = merged; p != last && !ngx_queue_empty(p); p++) { > > > + ngx_queue_merge(p, &carry, cmp); > > > + ngx_queue_move(&carry, p); > > > + } > > > + if (p == last) { > > > + ngx_queue_init(last); > > > + last++; > > > + } > > > + ngx_queue_move(p, &carry); > > > + } > > > > > > - prev = ngx_queue_prev(prev); > > > - > > > - } while (prev != ngx_queue_sentinel(queue)); > > > - > > > - ngx_queue_insert_after(prev, q); > > > + /* Merge all queues into one queue */ > > > + for (p = merged + 1; p != last; p++) { > > > + ngx_queue_merge(p, p-1, cmp); > > > } > > > + ngx_queue_move(queue, last-1); > > > } > > > > While bottom-up merge sort implementation might be more efficient, > > I find it disturbing to use fixed array of queues without any > > checks if we are within the array bounds. > > > > Rather, I would suggest recursive top-bottom merge sort implementation > > instead, which is much simpler and uses stack as temporary storage > > (so it'll naturally die if there will be a queue which requires > > more space for sorting than we have). 
> > > > Please take a look if it works for you: > > > > diff --git a/src/core/ngx_queue.c b/src/core/ngx_queue.c > > --- a/src/core/ngx_queue.c > > +++ b/src/core/ngx_queue.c > > @@ -9,6 +9,10 @@ > > #include > > > > > > +static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)); > > + > > + > > /* > > * find the middle queue element if the queue has odd number of elements > > * or the first element of the queue's second part otherwise > > @@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue) > > } > > > > > > -/* the stable insertion sort */ > > +/* the stable merge sort */ > > > > void > > ngx_queue_sort(ngx_queue_t *queue, > > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > { > > - ngx_queue_t *q, *prev, *next; > > + ngx_queue_t *q, tail; > > > > q = ngx_queue_head(queue); > > > > @@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue, > > return; > > } > > > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { > > + q = ngx_queue_middle(queue); > > + > > + ngx_queue_split(queue, q, &tail); > > + > > + ngx_queue_sort(queue, cmp); > > + ngx_queue_sort(&tail, cmp); > > + > > + ngx_queue_merge(queue, &tail, cmp); > > +} > > > > - prev = ngx_queue_prev(q); > > - next = ngx_queue_next(q); > > > > - ngx_queue_remove(q); > > +static void > > +ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > +{ > > + ngx_queue_t *q1, *q2; > > + > > + q1 = ngx_queue_head(queue); > > + q2 = ngx_queue_head(tail); > > > > - do { > > - if (cmp(prev, q) <= 0) { > > - break; > > - } > > + for ( ;; ) { > > + if (q1 == ngx_queue_sentinel(queue)) { > > + ngx_queue_add(queue, tail); > > + break; > > + } > > + > > + if (q2 == ngx_queue_sentinel(tail)) { > > + break; > > + } > > > > - prev = ngx_queue_prev(prev); > > + if (cmp(q1, q2) <= 0) { > > + q1 = ngx_queue_next(q1); > > + continue; > > + } > > > > - } 
while (prev != ngx_queue_sentinel(queue)); > > + ngx_queue_remove(q2); > > + ngx_queue_insert_before(q1, q2); > > > > - ngx_queue_insert_after(prev, q); > > + q2 = ngx_queue_head(tail); > > } > > } > > diff --git a/src/core/ngx_queue.h b/src/core/ngx_queue.h > > --- a/src/core/ngx_queue.h > > +++ b/src/core/ngx_queue.h > > @@ -47,6 +47,9 @@ struct ngx_queue_s { > > (h)->prev = x > > > > > > +#define ngx_queue_insert_before ngx_queue_insert_tail > > + > > + > > #define ngx_queue_head(h) \ > > (h)->next > > > > > > > > -- > > Maxim Dounin > > http://mdounin.ru/ > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Maxim Dounin http://mdounin.ru/ From nojima at ynojima.com Wed Oct 11 10:33:01 2023 From: nojima at ynojima.com (Yusuke Nojima) Date: Wed, 11 Oct 2023 19:33:01 +0900 Subject: [PATCH] Improve performance when starting nginx with a lot of locations In-Reply-To: References: Message-ID: Thank you for your comments and implementation! I am looking forward to the acceleration of nginx startup with this patch. 2023年10月11日(水) 7:56 Maxim Dounin : > > Hello! > > On Thu, Oct 05, 2023 at 10:51:26AM +0900, Yusuke Nojima wrote: > > > Thank you for your comment! > > > > > Could you please provide some more details about the use case, > > > such as how locations are used, and what is the approximate number > > > of locations being used? > > > > Our team provides development environments to our company's engineers and QA. > > In this environment, engineers and QA can freely create VMs and deploy > > applications on them. > > > > Our nginx has the role of routing requests from the internet to all > > applications deployed in this environment. 
> > Additionally, it allows setting IP address restrictions, BASIC > > authentication, TLS client authentication, and other configurations > > for each application. > > > > To implement these requirements, we generate a location for each application. > > Currently, there are approximately 40,000 locations in our environment. > > Thank you for the details. Such configuration looks somewhat > sub-optimal, but understandable for a development / test > environment. And certainly 40k locations is a lot for the sorting > algorithm currently used. > > > > Further, since each location contains configuration for > > > all modules, such configurations are expected to require a lot of > > > memory > > > > Each of our nginx processes was consuming 5GB of memory in terms of > > resident size. > > This is not a problem as our servers have sufficient memory. > > > > > Rather, I would suggest recursive top-bottom merge sort implementation > > > instead, which is much simpler and uses stack as temporary storage > > > (so it'll naturally die if there will be a queue which requires > > > more space for sorting than we have). > > > > > > Please take a look if it works for you: > > > > I think this implementation is simple and easy to understand. > > Although the number of traversals of the list will increase compared > > to bottom-up, it will not affect the order. > > I believe this will provide sufficient optimization in terms of speed. > > Thanks for looking. In my limited testing, it is slightly faster > than your bottom-up implementation (and significantly faster than > the existing insertion sort when many locations are used). > > Below is the full patch (code unchanged), I'll commit it as soon > as some other nginx developer will review it. 
> > # HG changeset patch > # User Maxim Dounin > # Date 1696977468 -10800 > # Wed Oct 11 01:37:48 2023 +0300 > # Node ID b891840852ee5cc823eee1769d092ab50928919f > # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > Core: changed ngx_queue_sort() to use merge sort. > > This improves nginx startup times significantly when using very large number > of locations due computational complexity of the sorting algorithm being > used (insertion sort is O(n*n) on average, while merge sort is O(n*log(n))). > In particular, in a test configuration with 20k locations total startup > time is reduced from 8 seconds to 0.9 seconds. > > Prodded by Yusuke Nojima, > https://mailman.nginx.org/pipermail/nginx-devel/2023-September/NUL3Y2FPPFSHMPTFTL65KXSXNTX3NQMK.html > > diff --git a/src/core/ngx_queue.c b/src/core/ngx_queue.c > --- a/src/core/ngx_queue.c > +++ b/src/core/ngx_queue.c > @@ -9,6 +9,10 @@ > #include > > > +static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)); > + > + > /* > * find the middle queue element if the queue has odd number of elements > * or the first element of the queue's second part otherwise > @@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue) > } > > > -/* the stable insertion sort */ > +/* the stable merge sort */ > > void > ngx_queue_sort(ngx_queue_t *queue, > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > { > - ngx_queue_t *q, *prev, *next; > + ngx_queue_t *q, tail; > > q = ngx_queue_head(queue); > > @@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue, > return; > } > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { > + q = ngx_queue_middle(queue); > + > + ngx_queue_split(queue, q, &tail); > + > + ngx_queue_sort(queue, cmp); > + ngx_queue_sort(&tail, cmp); > + > + ngx_queue_merge(queue, &tail, cmp); > +} > > - prev = ngx_queue_prev(q); > - next = ngx_queue_next(q); > > - ngx_queue_remove(q); > +static void > 
+ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > +{ > + ngx_queue_t *q1, *q2; > + > + q1 = ngx_queue_head(queue); > + q2 = ngx_queue_head(tail); > > - do { > - if (cmp(prev, q) <= 0) { > - break; > - } > + for ( ;; ) { > + if (q1 == ngx_queue_sentinel(queue)) { > + ngx_queue_add(queue, tail); > + break; > + } > + > + if (q2 == ngx_queue_sentinel(tail)) { > + break; > + } > > - prev = ngx_queue_prev(prev); > + if (cmp(q1, q2) <= 0) { > + q1 = ngx_queue_next(q1); > + continue; > + } > > - } while (prev != ngx_queue_sentinel(queue)); > + ngx_queue_remove(q2); > + ngx_queue_insert_before(q1, q2); > > - ngx_queue_insert_after(prev, q); > + q2 = ngx_queue_head(tail); > } > } > diff --git a/src/core/ngx_queue.h b/src/core/ngx_queue.h > --- a/src/core/ngx_queue.h > +++ b/src/core/ngx_queue.h > @@ -47,6 +47,9 @@ struct ngx_queue_s { > (h)->prev = x > > > +#define ngx_queue_insert_before ngx_queue_insert_tail > + > + > #define ngx_queue_head(h) \ > (h)->next > > > > > > > > > > 2023年9月30日(土) 12:38 Maxim Dounin : > > > > > > Hello! > > > > > > On Fri, Sep 22, 2023 at 03:58:41PM +0900, Yusuke Nojima wrote: > > > > > > > # HG changeset patch > > > > # User Yusuke Nojima > > > > # Date 1679555707 -32400 > > > > # Thu Mar 23 16:15:07 2023 +0900 > > > > # Node ID 6aac98fb135e47ca9cf7ad7d780cf4a10e9aa55c > > > > # Parent 8771d35d55d0a2b1cefaab04401d6f837f5a05a2 > > > > Improve performance when starting nginx with a lot of locations > > > > > > > > Our team has a configuration file with a very large number of > > > > locations, and we found that starting nginx with this file takes an > > > > unacceptable amount of time. After investigating the issue, we > > > > discovered that the root cause of the long startup time is the sorting > > > > of the location list. > > > > > > Interesting. 
> > > > > > Could you please provide some more details about the use case, > > > such as how locations are used, and what is the approximate number > > > of locations being used? > > > > > > In my practice, it is extremely uncommon to use more than 1k-10k > > > prefix locations (and even these numbers are huge for normal > > > setups). Further, since each location contains configuration for > > > all modules, such configurations are expected to require a lot of > > > memory (currently about 5-10KB per empty location, so about > > > 50-100MB per 10k locations, and 0.5-1G per 100k locations). > > > Accordingly, using other approaches such as map (assuming exact > > > match is actually needed) might be beneficial regardless of the > > > sorting costs. > > > > > > Nevertheless, swapping the sorting algorithm to a faster one looks > > > like an obvious improvement. > > > > > > > > > > > Currently, the sorting algorithm used in nginx is insertion sort, > > > > which requires O(n^2) time for n locations. We have modified the > > > > sorting algorithm to use merge sort instead, which has a time > > > > complexity of O(n log n). > > > > > > > > We have tested the modified code using micro-benchmarks and confirmed > > > > that the new algorithm improves nginx startup time significantly > > > > (shown below). We believe that this change would be valuable for other > > > > users who are experiencing similar issues. 
> > > > > > > > > > > > Table: nginx startup time in seconds > > > > > > > > n current patched > > > > 2000 0.033 0.018 > > > > 4000 0.047 0.028 > > > > 6000 0.062 0.038 > > > > 8000 0.079 0.050 > > > > 10000 0.091 0.065 > > > > 12000 0.367 0.081 > > > > 14000 0.683 0.086 > > > > 16000 0.899 0.097 > > > > 18000 1.145 0.110 > > > > 20000 1.449 0.122 > > > > 22000 1.650 0.137 > > > > 24000 2.155 0.151 > > > > 26000 3.096 0.155 > > > > 28000 3.711 0.168 > > > > 30000 3.539 0.184 > > > > 32000 3.980 0.193 > > > > 34000 4.543 0.208 > > > > 36000 4.349 0.217 > > > > 38000 5.021 0.229 > > > > 40000 4.918 0.245 > > > > 42000 4.835 0.256 > > > > 44000 5.159 0.262 > > > > 46000 5.802 0.331 > > > > 48000 6.205 0.295 > > > > 50000 5.701 0.308 > > > > 52000 5.992 0.335 > > > > 54000 6.561 0.323 > > > > 56000 6.856 0.333 > > > > 58000 6.515 0.347 > > > > 60000 7.051 0.359 > > > > 62000 6.956 0.377 > > > > 64000 7.376 0.376 > > > > 66000 7.506 0.404 > > > > 68000 7.292 0.407 > > > > 70000 7.422 0.461 > > > > 72000 10.090 0.443 > > > > 74000 18.505 0.463 > > > > 76000 11.857 0.462 > > > > 78000 9.752 0.470 > > > > 80000 12.485 0.481 > > > > 82000 11.027 0.498 > > > > 84000 9.804 0.523 > > > > 86000 8.482 0.515 > > > > 88000 9.838 0.560 > > > > 90000 12.341 0.546 > > > > 92000 13.881 0.648 > > > > 94000 8.309 0.635 > > > > 96000 8.854 0.650 > > > > 98000 12.871 0.674 > > > > 100000 8.261 0.698 > > > > > > This probably can be reduced to something like 3-5 data points. > > > > > > > > > > > diff -r 8771d35d55d0 -r 6aac98fb135e src/core/ngx_queue.c > > > > --- a/src/core/ngx_queue.c Fri Mar 10 07:43:50 2023 +0300 > > > > +++ b/src/core/ngx_queue.c Thu Mar 23 16:15:07 2023 +0900 > > > > @@ -45,36 +45,103 @@ > > > > } > > > > > > > > > > > > -/* the stable insertion sort */ > > > > +/* merge queue2 into queue1. queue2 becomes empty after merge. 
*/ > > > > + > > > > +static void > > > > +ngx_queue_merge(ngx_queue_t *queue1, ngx_queue_t *queue2, > > > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > > > +{ > > > > + ngx_queue_t *p1, *p2; > > > > > > Nitpicking: there are various style issues here and in other places. > > > > > > > + > > > > + p1 = ngx_queue_head(queue1); > > > > + p2 = ngx_queue_head(queue2); > > > > + > > > > + while (p1 != ngx_queue_sentinel(queue1) > > > > + && p2 != ngx_queue_sentinel(queue2)) { > > > > + > > > > + if (cmp(p1, p2) > 0) { > > > > + ngx_queue_t *next, *prev; > > > > + > > > > + next = ngx_queue_next(p2); > > > > + ngx_queue_remove(p2); > > > > + prev = ngx_queue_prev(p1); > > > > + ngx_queue_insert_after(prev, p2); > > > > + p2 = next; > > > > > > Nitpicking: there is no need to preserve "next" here, since p2 is > > > always the head of queue2 and, and the next element can be > > > obtained by ngx_queue_head(queue2). > > > > > > Also, instead of obtaining "prev" and using > > > ngx_queue_insert_after() it would be easier to use > > > ngx_queue_insert_before(). It is not currently defined, but it is > > > trivial to define one: it is an alias to ngx_queue_insert_tail(), > > > much like ngx_queue_insert_after() is an alias to > > > ngx_queue_insert_head(). > > > > > > > + } else { > > > > + p1 = ngx_queue_next(p1); > > > > + } > > > > + } > > > > + if (p2 != ngx_queue_sentinel(queue2)) { > > > > + ngx_queue_add(queue1, queue2); > > > > + ngx_queue_init(queue2); > > > > + } > > > > +} > > > > + > > > > + > > > > +/* move all elements from src to dest. dest should be empty before call. 
*/ > > > > + > > > > +static void > > > > +ngx_queue_move(ngx_queue_t *dest, ngx_queue_t *src) > > > > +{ > > > > + *dest = *src; > > > > + ngx_queue_init(src); > > > > + > > > > + if (dest->next == src) { > > > > + dest->next = dest; > > > > + } else { > > > > + dest->next->prev = dest; > > > > + } > > > > + if (dest->prev == src) { > > > > + dest->prev = dest; > > > > + } else { > > > > + dest->prev->next = dest; > > > > + } > > > > +} > > > > > > This function looks strange to me. There is the ngx_queue_add() > > > macro, which probably should be used instead (if needed). > > > > > > > + > > > > + > > > > +/* the stable merge sort */ > > > > > > > > void > > > > ngx_queue_sort(ngx_queue_t *queue, > > > > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > > > { > > > > - ngx_queue_t *q, *prev, *next; > > > > + ngx_queue_t merged[64], *p, *last; > > > > > > > > - q = ngx_queue_head(queue); > > > > - > > > > - if (q == ngx_queue_last(queue)) { > > > > + if (ngx_queue_head(queue) == ngx_queue_last(queue)) { > > > > return; > > > > } > > > > > > > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { > > > > + last = merged; > > > > > > > > - prev = ngx_queue_prev(q); > > > > - next = ngx_queue_next(q); > > > > + while (!ngx_queue_empty(queue)) { > > > > + /* > > > > + * Loop invariant: > > > > + * merged[i] must have exactly 0 or 2^i elements in sorted order. > > > > + * For each iteration, we take one element from the given queue and > > > > + * insert it into merged without violating the invariant condition. 
> > > > + */ > > > > > > > > - ngx_queue_remove(q); > > > > + ngx_queue_t carry, *h; > > > > + > > > > + h = ngx_queue_head(queue); > > > > + ngx_queue_remove(h); > > > > + ngx_queue_init(&carry); > > > > + ngx_queue_insert_head(&carry, h); > > > > > > > > - do { > > > > - if (cmp(prev, q) <= 0) { > > > > - break; > > > > - } > > > > + for (p = merged; p != last && !ngx_queue_empty(p); p++) { > > > > + ngx_queue_merge(p, &carry, cmp); > > > > + ngx_queue_move(&carry, p); > > > > + } > > > > + if (p == last) { > > > > + ngx_queue_init(last); > > > > + last++; > > > > + } > > > > + ngx_queue_move(p, &carry); > > > > + } > > > > > > > > - prev = ngx_queue_prev(prev); > > > > - > > > > - } while (prev != ngx_queue_sentinel(queue)); > > > > - > > > > - ngx_queue_insert_after(prev, q); > > > > + /* Merge all queues into one queue */ > > > > + for (p = merged + 1; p != last; p++) { > > > > + ngx_queue_merge(p, p-1, cmp); > > > > } > > > > + ngx_queue_move(queue, last-1); > > > > } > > > > > > While bottom-up merge sort implementation might be more efficient, > > > I find it disturbing to use fixed array of queues without any > > > checks if we are within the array bounds. > > > > > > Rather, I would suggest recursive top-bottom merge sort implementation > > > instead, which is much simpler and uses stack as temporary storage > > > (so it'll naturally die if there will be a queue which requires > > > more space for sorting than we have). 
> > > > > > Please take a look if it works for you: > > > > > > diff --git a/src/core/ngx_queue.c b/src/core/ngx_queue.c > > > --- a/src/core/ngx_queue.c > > > +++ b/src/core/ngx_queue.c > > > @@ -9,6 +9,10 @@ > > > #include > > > > > > > > > +static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)); > > > + > > > + > > > /* > > > * find the middle queue element if the queue has odd number of elements > > > * or the first element of the queue's second part otherwise > > > @@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue) > > > } > > > > > > > > > -/* the stable insertion sort */ > > > +/* the stable merge sort */ > > > > > > void > > > ngx_queue_sort(ngx_queue_t *queue, > > > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > > { > > > - ngx_queue_t *q, *prev, *next; > > > + ngx_queue_t *q, tail; > > > > > > q = ngx_queue_head(queue); > > > > > > @@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue, > > > return; > > > } > > > > > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { > > > + q = ngx_queue_middle(queue); > > > + > > > + ngx_queue_split(queue, q, &tail); > > > + > > > + ngx_queue_sort(queue, cmp); > > > + ngx_queue_sort(&tail, cmp); > > > + > > > + ngx_queue_merge(queue, &tail, cmp); > > > +} > > > > > > - prev = ngx_queue_prev(q); > > > - next = ngx_queue_next(q); > > > > > > - ngx_queue_remove(q); > > > +static void > > > +ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > > > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > > > +{ > > > + ngx_queue_t *q1, *q2; > > > + > > > + q1 = ngx_queue_head(queue); > > > + q2 = ngx_queue_head(tail); > > > > > > - do { > > > - if (cmp(prev, q) <= 0) { > > > - break; > > > - } > > > + for ( ;; ) { > > > + if (q1 == ngx_queue_sentinel(queue)) { > > > + ngx_queue_add(queue, tail); > > > + break; > > > + } > > > + > > > + if (q2 == ngx_queue_sentinel(tail)) { > > > + break; 
> > > + } > > > > > > - prev = ngx_queue_prev(prev); > > > + if (cmp(q1, q2) <= 0) { > > > + q1 = ngx_queue_next(q1); > > > + continue; > > > + } > > > > > > - } while (prev != ngx_queue_sentinel(queue)); > > > + ngx_queue_remove(q2); > > > + ngx_queue_insert_before(q1, q2); > > > > > > - ngx_queue_insert_after(prev, q); > > > + q2 = ngx_queue_head(tail); > > > } > > > } > > > diff --git a/src/core/ngx_queue.h b/src/core/ngx_queue.h > > > --- a/src/core/ngx_queue.h > > > +++ b/src/core/ngx_queue.h > > > @@ -47,6 +47,9 @@ struct ngx_queue_s { > > > (h)->prev = x > > > > > > > > > +#define ngx_queue_insert_before ngx_queue_insert_tail > > > + > > > + > > > #define ngx_queue_head(h) \ > > > (h)->next > > > > > > > > > > > > -- > > > Maxim Dounin > > > http://mdounin.ru/ > > > _______________________________________________ > > > nginx-devel mailing list > > > nginx-devel at nginx.org > > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From vl at inspert.ru Wed Oct 11 13:58:47 2023 From: vl at inspert.ru (Vladimir Homutov) Date: Wed, 11 Oct 2023 16:58:47 +0300 Subject: [patch] quic PTO counter fixes Message-ID: Hello, a couple of patches in the quic code: first patch improves a bit debugging, and the second patch contains fixes for PTO counter calculation - see commit log for details. This helps with some clients in interop handshakeloss/handshakecorruption testcases -------------- next part -------------- A non-text attachment was scrubbed... 
Name: frames_debug.diff Type: text/x-diff Size: 3605 bytes Desc: not available URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: pto.diff Type: text/x-diff Size: 4526 bytes Desc: not available URL: From jt26wzz at gmail.com Thu Oct 12 15:02:16 2023 From: jt26wzz at gmail.com (=?UTF-8?B?5LiK5Yu+5ouz?=) Date: Thu, 12 Oct 2023 23:02:16 +0800 Subject: Memory Leak Issue in Nginx PCRE2 In-Reply-To: References: Message-ID: Dear Maxim, Thanks for your response. I have tested your patch, and it worked well in my case. Absolutely, it should not handle ngx_regex_compile_context in ngx_regex_module_init(), which is more elegant. Thank you for your attention once again. Best regards, Zhenzhong 上勾拳 于2023年9月27日周三 01:13写道: > Dear Nginx Developers, > > I hope this email finds you well. I am reaching out to the mailing list > for the first time to report and discuss an issue I encountered while > working on supporting PCRE2 in OpenResty. If I have made any errors in my > reporting or discussion, please do not hesitate to provide feedback. Your > guidance is greatly appreciated. > > During my recent work, I used the sanitizer to inspect potential issues, > and I identified a small memory leak in the PCRE2 code section of Nginx. > While this issue does not seem to be critical, it could potentially disrupt > memory checking tools. To help you reproduce the problem, I have included a > minimal configuration below. Please note that this issue occurs when Nginx > is configured to use PCRE2, and the version is 1.22.1 or higher. 
> > *Minimal Configuration for Reproduction:* > worker_processes 1; > daemon off; > master_process off; > error_log > /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/error.log > debug; > pid > /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/nginx.pid; > > http { > access_log > /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/access.log; > #access_log off; > default_type text/plain; > keepalive_timeout 68000ms; > server { > listen 1984; > #placeholder > server_name 'localhost'; > > client_max_body_size 30M; > #client_body_buffer_size 4k; > > # Begin preamble config... > > # End preamble config... > > # Begin test case config... > > location ~ '^/[a-d]$' { > return 200; > } > } > } > events { > accept_mutex off; > > worker_connections 64; > } > > *nginx -V :* > nginx version: nginx/1.25.1 (no pool) > built by gcc 11.4.1 20230605 (Red Hat 11.4.1-2) (GCC) > built with OpenSSL 1.1.1u 30 May 2023 > TLS SNI support enabled > configure arguments: > --prefix=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/work/nginx > --with-threads --with-pcre-jit --with-ipv6 > --with-cc-opt='-fno-omit-frame-pointer -fsanitize=address > -DNGX_LUA_USE_ASSERT -I/opt/pcre2/include -I/opt/ssl/include' > --with-http_v2_module --with-http_v3_module --with-http_realip_module > --with-http_ssl_module > --add-module=/home/zhenzhongw/code/pcre_pr/ndk-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/set-misc-nginx-module > --with-ld-opt='-fsanitize=address -L/opt/pcre2/lib -L/opt/ssl/lib > -Wl,-rpath,/opt/pcre2/lib:/opt/drizzle/lib:/opt/ssl/lib' > --without-mail_pop3_module --without-mail_imap_module > --with-http_image_filter_module --without-mail_smtp_module --with-stream > --with-stream_ssl_module --without-http_upstream_ip_hash_module > --without-http_memcached_module --without-http_auth_basic_module > --without-http_userid_module --with-http_auth_request_module > --add-module=/home/zhenzhongw/code/pcre_pr/echo-nginx-module > 
--add-module=/home/zhenzhongw/code/pcre_pr/memc-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/srcache-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-upstream-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/headers-more-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/drizzle-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/rds-json-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/coolkit-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/redis2-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/stream-lua-nginx-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-shm-module > --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-delayed-load-module > --with-http_gunzip_module --with-http_dav_module --with-select_module > --with-poll_module --with-debug --with-poll_module --with-cc=gcc > > *The sanitizer tool reported the following error message: * > ================================================================= > ==555798==ERROR: LeakSanitizer: detected memory leaks > > Direct leak of 72 byte(s) in 1 object(s) allocated from: > #0 0x7f502f6b4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07) > #1 0x4a1737 in ngx_alloc src/os/unix/ngx_alloc.c:22 > #2 0x525796 in ngx_regex_malloc src/core/ngx_regex.c:509 > #3 0x7f502f3e745e in _pcre2_memctl_malloc_8 > (/opt/pcre2/lib/libpcre2-8.so.0+0x1145e) > #4 0x5771ad in ngx_http_regex_compile > src/http/ngx_http_variables.c:2555 > #5 0x536088 in ngx_http_core_regex_location > src/http/ngx_http_core_module.c:3263 > #6 0x537f94 in ngx_http_core_location > src/http/ngx_http_core_module.c:3115 > #7 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > #8 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > #9 0x5391ec in ngx_http_core_server > 
src/http/ngx_http_core_module.c:2991 > #10 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > #11 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > #12 0x528e4c in ngx_http_block src/http/ngx_http.c:239 > #13 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > #14 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > #15 0x463f74 in ngx_init_cycle src/core/ngx_cycle.c:284 > #12 0x528e4c in ngx_http_block src/http/ngx_http.c:239 > #13 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > #14 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > #15 0x463f74 in ngx_init_cycle src/core/ngx_cycle.c:284 > #16 0x4300c7 in main src/core/nginx.c:295 > #17 0x7ff31a43feaf in __libc_start_call_main (/lib64/libc.so.6+0x3feaf) > > SUMMARY: AddressSanitizer: 72 byte(s) leaked in 1 allocation(s). > > *I have created a patch to address this memory leak issue, which I am > sharing below:* > diff --git a/src/core/ngx_regex.c b/src/core/ngx_regex.c > index 91381f499..71f583789 100644 > --- a/src/core/ngx_regex.c > +++ b/src/core/ngx_regex.c > @@ -600,6 +600,8 @@ ngx_regex_cleanup(void *data) > * the new cycle, these will be re-allocated. > */ > > + ngx_regex_malloc_init(NULL); > + > if (ngx_regex_compile_context) { > pcre2_compile_context_free(ngx_regex_compile_context); > ngx_regex_compile_context = NULL; > @@ -611,6 +613,8 @@ ngx_regex_cleanup(void *data) > ngx_regex_match_data_size = 0; > } > > + ngx_regex_malloc_done(); > + > #endif > } > > @@ -706,7 +710,13 @@ ngx_regex_module_init(ngx_cycle_t *cycle) > ngx_regex_malloc_done(); > > ngx_regex_studies = NULL; > + > #if (NGX_PCRE2) > + if (ngx_regex_compile_context) { > + ngx_regex_malloc_init(NULL); > + pcre2_compile_context_free(ngx_regex_compile_context); > + ngx_regex_malloc_done(); > + } > ngx_regex_compile_context = NULL; > #endif > > I kindly request your assistance in reviewing this matter and considering > the patch for inclusion in Nginx. 
If you have any questions or need further > information, please feel free to reach out to me. Your expertise and > feedback are highly valuable in resolving this issue. > > Thank you for your time and attention to this matter. > > Best regards, > ZhenZhong > -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Fri Oct 13 15:09:10 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 13 Oct 2023 19:09:10 +0400 Subject: [PATCH 2 of 8] QUIC: added check to prevent packet output with discarded keys In-Reply-To: <20230918070842.s4a6o6ti52xx4fcu@N00W24XTQX> References: <02c86eac80c907adb779.1694099634@enoparse.local> <20230918070842.s4a6o6ti52xx4fcu@N00W24XTQX> Message-ID: <6F6A945C-72BD-40B7-96A7-E68CB19C3FDD@nginx.com> > On 18 Sep 2023, at 11:08, Roman Arutyunyan wrote: > > Hi, > > On Thu, Sep 07, 2023 at 07:13:54PM +0400, Sergey Kandaurov wrote: >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1694041352 -14400 >> # Thu Sep 07 03:02:32 2023 +0400 >> # Node ID 02c86eac80c907adb7790c79ac6892afabcee5f4 >> # Parent be1862a28fd8575a88475215ccfce995e392dfab >> QUIC: added check to prevent packet output with discarded keys. >> >> In addition to triggering an alert, it ensures that such packets won't be sent. >> >> With the previous change that marks server keys as discarded by zeroing the >> key length, it is now an error to send packets with discarded keys. OpenSSL >> based stacks tolerate such behaviour because key length isn't used in packet >> protection, but BoringSSL will raise the UNSUPPORTED_KEY_SIZE cipher error. >> It won't be possible to use discarded keys with reused crypto contexts. 
>> >> diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c >> --- a/src/event/quic/ngx_event_quic_output.c >> +++ b/src/event/quic/ngx_event_quic_output.c >> @@ -519,6 +519,21 @@ ngx_quic_output_packet(ngx_connection_t >> >> qc = ngx_quic_get_connection(c); >> >> + if (!ngx_quic_keys_available(qc->keys, ctx->level, 1)) { >> + ngx_log_error(NGX_LOG_ALERT, c->log, 0, "quic %s write keys discarded", >> + ngx_quic_level_name(ctx->level)); >> + >> + while (!ngx_queue_empty(&ctx->frames)) { >> + q = ngx_queue_head(&ctx->frames); >> + ngx_queue_remove(q); >> + >> + f = ngx_queue_data(q, ngx_quic_frame_t, queue); >> + ngx_quic_free_frame(c, f); >> + } >> + >> + return 0; >> + } >> + >> ngx_quic_init_packet(c, ctx, &pkt, qc->path); >> >> min_payload = ngx_quic_payload_size(&pkt, min); > > In ngx_quic_create_datagrams(), before calling ngx_quic_output_packet(), > ngx_quic_generate_ack() generates ACK frames for the current level. > Maybe it's better to add this block before generating ACKs? Otherwise we > generate an ACK and then immediately remove it. This patch is a catch all measure against using non-existing keys, which should normally not happen, hence the alert log level. Frames are removed, because there's no more sense to keep them in the queue (and to avoid repetitive alerts). The actual fix is the next patch in the series. > This technically would require > a similar block in ngx_quic_create_segments(), This creates complexity for no reason, I would rather not. > although the application level > should normally never be in a discarded key situation. Agree. Although discarding all available keys is added in this series as part of connection tear down, it should not affect ngx_quic_create_segments(). > > It's true however that the next patch partially solves the issue by not > generating the last handshake ACK. Indeed, that patch appears incomplete, I will remove it. 
What you suggest about ngx_quic_generate_ack() seems to be the right direction, see below. > That however does not seem to be a complete > solution since there could be previous ACKs. There should not be previous ACKs sitting in ctx->frames because Initial and Handshake packets involved in establishing TLS handshake are acknowledged immediately. Though, I see other ways to generate ACKs, which my patch didn't address. For example, client may send out of order packets, or multiple packets with holes in packet numbers, resulting in too many ACK ranges. In both cases, ACKs are generated directly in ngx_quic_ack_packet(), without posting a push event. So, I added keys availability check to ngx_quic_ack_packet() as a common place. # HG changeset patch # User Sergey Kandaurov # Date 1697110950 -14400 # Thu Oct 12 15:42:30 2023 +0400 # Node ID 4586b12e6a73811dc60fa78e6c0a82af36ea2957 # Parent 96af5e3a2a3a4e6b11f0dc084c9a60d836de70c4 QUIC: prevented generating ACK frames with discarded keys. Previously it was possible to generate ACK frames using formally discarded protection keys, in particular, when acknowledging a client Handshake packet used to complete the TLS handshake and to discard handshake protection keys. As it happens late in packet processing, it could be possible to generate ACK frames after the keys were already discarded. ACK frames are generated from ngx_quic_ack_packet(), either using a posted push event, which involves ngx_quic_generate_ack() as a part of the final packet assembling, or directly in ngx_quic_ack_packet(), such as when there is no room to add a new ACK range or when the received packet is out of order. The added keys availability check is used to avoid generating late ACK frames in both cases. 
diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -907,6 +907,10 @@ ngx_quic_ack_packet(ngx_connection_t *c, " nranges:%ui", pkt->pn, (int64_t) ctx->largest_range, ctx->first_range, ctx->nranges); + if (!ngx_quic_keys_available(qc->keys, ctx->level, 1)) { + return NGX_OK; + } + prev_pending = ctx->pending_ack; if (pkt->need_ack) { -- Sergey Kandaurov From pluknet at nginx.com Fri Oct 13 15:13:00 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 13 Oct 2023 19:13:00 +0400 Subject: [PATCH 5 of 8] QUIC: reusing crypto contexts for packet protection In-Reply-To: <20230919135343.gyryhjjh5igd6xzl@N00W24XTQX> References: <28f7491bc79771f9cfa8.1694099637@enoparse.local> <20230919135343.gyryhjjh5igd6xzl@N00W24XTQX> Message-ID: > On 19 Sep 2023, at 17:53, Roman Arutyunyan wrote: > > Hi, > > On Thu, Sep 07, 2023 at 07:13:57PM +0400, Sergey Kandaurov wrote: >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1694099424 -14400 >> # Thu Sep 07 19:10:24 2023 +0400 >> # Node ID 28f7491bc79771f9cfa882b1b5584fa48ea42e6b >> # Parent 24e5d652ecc861f0c68607d20941abbf3726fdf1 >> QUIC: reusing crypto contexts for packet protection. 
>> >> diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c >> --- a/src/event/quic/ngx_event_quic.c >> +++ b/src/event/quic/ngx_event_quic.c >> @@ -225,6 +225,7 @@ ngx_quic_new_connection(ngx_connection_t >> { >> ngx_uint_t i; >> ngx_quic_tp_t *ctp; >> + ngx_pool_cleanup_t *cln; >> ngx_quic_connection_t *qc; >> >> qc = ngx_pcalloc(c->pool, sizeof(ngx_quic_connection_t)); >> @@ -237,6 +238,14 @@ ngx_quic_new_connection(ngx_connection_t >> return NULL; >> } >> >> + cln = ngx_pool_cleanup_add(c->pool, 0); >> + if (cln == NULL) { >> + return NULL; >> + } >> + >> + cln->handler = ngx_quic_keys_cleanup; >> + cln->data = qc->keys; > > I think it's better to cleanup keys in ngx_quic_close_connection(). > We do the same with sockets by calling ngx_quic_close_sockets(). > We just have to carefully handle the errors later in this function and cleanup > keys when ngx_quic_open_sockets() fails. While this may look error prone compared with the cleanup handler, you convinced me to remove it because of ngx_quic_send_early_cc(). 
To be merged: diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -227,7 +227,6 @@ ngx_quic_new_connection(ngx_connection_t { ngx_uint_t i; ngx_quic_tp_t *ctp; - ngx_pool_cleanup_t *cln; ngx_quic_connection_t *qc; qc = ngx_pcalloc(c->pool, sizeof(ngx_quic_connection_t)); @@ -240,14 +239,6 @@ ngx_quic_new_connection(ngx_connection_t return NULL; } - cln = ngx_pool_cleanup_add(c->pool, 0); - if (cln == NULL) { - return NULL; - } - - cln->handler = ngx_quic_keys_cleanup; - cln->data = qc->keys; - qc->version = pkt->version; ngx_rbtree_init(&qc->streams.tree, &qc->streams.sentinel, @@ -344,6 +335,7 @@ ngx_quic_new_connection(ngx_connection_t qc->validated = pkt->validated; if (ngx_quic_open_sockets(c, qc, pkt) != NGX_OK) { + ngx_quic_keys_cleanup(qc->keys); return NULL; } @@ -594,6 +586,8 @@ ngx_quic_close_connection(ngx_connection ngx_quic_close_sockets(c); + ngx_quic_keys_cleanup(qc->keys); + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic close completed"); /* may be tested from SSL callback during SSL shutdown */ diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -941,13 +941,17 @@ ngx_quic_send_early_cc(ngx_connection_t res.data = dst; if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { + ngx_quic_keys_cleanup(pkt.keys); return NGX_ERROR; } if (ngx_quic_send(c, res.data, res.len, c->sockaddr, c->socklen) < 0) { + ngx_quic_keys_cleanup(pkt.keys); return NGX_ERROR; } + ngx_quic_keys_cleanup(pkt.keys); + return NGX_DONE; } diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -189,10 +189,16 @@ ngx_quic_keys_set_initial_secret(ngx_qui } if (ngx_quic_crypto_init(ciphers.c, server, 1, 
log) == NGX_ERROR) { - return NGX_ERROR; + goto failed; } return NGX_OK; + +failed: + + ngx_quic_keys_cleanup(keys); + + return NGX_ERROR; } @@ -793,10 +799,8 @@ failed: void -ngx_quic_keys_cleanup(void *data) +ngx_quic_keys_cleanup(ngx_quic_keys_t *keys) { - ngx_quic_keys_t *keys = data; - size_t i; ngx_quic_secrets_t *secrets; diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -103,7 +103,7 @@ void ngx_quic_keys_discard(ngx_quic_keys enum ssl_encryption_level_t level); void ngx_quic_keys_switch(ngx_connection_t *c, ngx_quic_keys_t *keys); void ngx_quic_keys_update(ngx_event_t *ev); -void ngx_quic_keys_cleanup(void *data); +void ngx_quic_keys_cleanup(ngx_quic_keys_t *keys); ngx_int_t ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_str_t *res); ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn); void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); > >> qc->version = pkt->version; >> >> ngx_rbtree_init(&qc->streams.tree, &qc->streams.sentinel, >> diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c >> --- a/src/event/quic/ngx_event_quic_openssl_compat.c >> +++ b/src/event/quic/ngx_event_quic_openssl_compat.c >> @@ -54,9 +54,10 @@ struct ngx_quic_compat_s { >> >> >> static void ngx_quic_compat_keylog_callback(const SSL *ssl, const char *line); >> -static ngx_int_t ngx_quic_compat_set_encryption_secret(ngx_log_t *log, >> +static ngx_int_t ngx_quic_compat_set_encryption_secret(ngx_connection_t *c, >> ngx_quic_compat_keys_t *keys, enum ssl_encryption_level_t level, >> const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); >> +static void ngx_quic_compat_cleanup_encryption_secret(void *data); >> static int ngx_quic_compat_add_transport_params_callback(SSL *ssl, >> unsigned int ext_type, unsigned int context, const 
unsigned char **out, >> size_t *outlen, X509 *x, size_t chainidx, int *al, void *add_arg); >> @@ -214,14 +215,14 @@ ngx_quic_compat_keylog_callback(const SS >> com->method->set_read_secret((SSL *) ssl, level, cipher, secret, n); >> com->read_record = 0; >> >> - (void) ngx_quic_compat_set_encryption_secret(c->log, &com->keys, level, >> + (void) ngx_quic_compat_set_encryption_secret(c, &com->keys, level, >> cipher, secret, n); >> } >> } >> >> >> static ngx_int_t >> -ngx_quic_compat_set_encryption_secret(ngx_log_t *log, >> +ngx_quic_compat_set_encryption_secret(ngx_connection_t *c, >> ngx_quic_compat_keys_t *keys, enum ssl_encryption_level_t level, >> const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len) >> { >> @@ -231,6 +232,7 @@ ngx_quic_compat_set_encryption_secret(ng >> ngx_quic_hkdf_t seq[2]; >> ngx_quic_secret_t *peer_secret; >> ngx_quic_ciphers_t ciphers; >> + ngx_pool_cleanup_t *cln; >> >> peer_secret = &keys->secret; >> >> @@ -239,12 +241,12 @@ ngx_quic_compat_set_encryption_secret(ng >> key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); >> >> if (key_len == NGX_ERROR) { >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "unexpected cipher"); >> + ngx_ssl_error(NGX_LOG_INFO, c->log, 0, "unexpected cipher"); >> return NGX_ERROR; >> } >> >> if (sizeof(peer_secret->secret.data) < secret_len) { >> - ngx_log_error(NGX_LOG_ALERT, log, 0, >> + ngx_log_error(NGX_LOG_ALERT, c->log, 0, >> "unexpected secret len: %uz", secret_len); >> return NGX_ERROR; >> } >> @@ -262,15 +264,42 @@ ngx_quic_compat_set_encryption_secret(ng >> ngx_quic_hkdf_set(&seq[1], "tls13 iv", &peer_secret->iv, &secret_str); >> >> for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { >> - if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, log) != NGX_OK) { >> + if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, c->log) != NGX_OK) { >> return NGX_ERROR; >> } >> } >> >> + ngx_quic_crypto_cleanup(peer_secret); >> + >> + if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { >> + 
return NGX_ERROR; >> + } >> + >> + /* register cleanup handler once */ >> + >> + if (level == ssl_encryption_handshake) { > > Does not look perfect, but I don't see a simpler and better solution. I don't see either, without introducing some state (see below). > >> + cln = ngx_pool_cleanup_add(c->pool, 0); >> + if (cln == NULL) { > > Cleanup peer_secret here? > > Alternatively, move this block up. Fixed, thanks. With introducing keys->cleanup: diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -25,6 +25,7 @@ typedef struct { ngx_quic_secret_t secret; ngx_uint_t cipher; + ngx_uint_t cleanup; /* unsigned cleanup:1 */ } ngx_quic_compat_keys_t; @@ -269,15 +270,11 @@ ngx_quic_compat_set_encryption_secret(ng } } - ngx_quic_crypto_cleanup(peer_secret); - - if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { - return NGX_ERROR; - } - /* register cleanup handler once */ - if (level == ssl_encryption_handshake) { + if (!keys->cleanup) { + keys->cleanup = 1; + cln = ngx_pool_cleanup_add(c->pool, 0); if (cln == NULL) { return NGX_ERROR; @@ -287,6 +284,12 @@ ngx_quic_compat_set_encryption_secret(ng cln->data = peer_secret; } + ngx_quic_crypto_cleanup(peer_secret); + + if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { + return NGX_ERROR; + } + return NGX_OK; } > >> + return NGX_ERROR; >> + } >> + >> + cln->handler = ngx_quic_compat_cleanup_encryption_secret; >> + cln->data = peer_secret; >> + } >> + >> return NGX_OK; >> } >> >> >> +static void >> +ngx_quic_compat_cleanup_encryption_secret(void *data) >> +{ >> + ngx_quic_secret_t *secret = data; >> + >> + ngx_quic_crypto_cleanup(secret); >> +} >> + >> + >> static int >> ngx_quic_compat_add_transport_params_callback(SSL *ssl, unsigned int ext_type, >> unsigned int context, const unsigned char **out, size_t *outlen, 
X509 *x, >> @@ -568,8 +597,7 @@ ngx_quic_compat_create_record(ngx_quic_c >> ngx_memcpy(nonce, secret->iv.data, secret->iv.len); >> ngx_quic_compute_nonce(nonce, sizeof(nonce), rec->number); >> >> - if (ngx_quic_crypto_seal(ciphers.c, secret, &out, >> - nonce, &rec->payload, &ad, rec->log) >> + if (ngx_quic_crypto_seal(secret, &out, nonce, &rec->payload, &ad, rec->log) >> != NGX_OK) >> { >> return NGX_ERROR; >> diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c >> --- a/src/event/quic/ngx_event_quic_protection.c >> +++ b/src/event/quic/ngx_event_quic_protection.c >> @@ -26,9 +26,8 @@ static ngx_int_t ngx_hkdf_extract(u_char >> static uint64_t ngx_quic_parse_pn(u_char **pos, ngx_int_t len, u_char *mask, >> uint64_t *largest_pn); >> >> -static ngx_int_t ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, >> - ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, >> - ngx_str_t *ad, ngx_log_t *log); >> +static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, >> + u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); >> static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, >> ngx_quic_secret_t *s, u_char *out, u_char *in); >> >> @@ -108,13 +107,14 @@ ngx_int_t >> ngx_quic_keys_set_initial_secret(ngx_quic_keys_t *keys, ngx_str_t *secret, >> ngx_log_t *log) >> { >> - size_t is_len; >> - uint8_t is[SHA256_DIGEST_LENGTH]; >> - ngx_str_t iss; >> - ngx_uint_t i; >> - const EVP_MD *digest; >> - ngx_quic_hkdf_t seq[8]; >> - ngx_quic_secret_t *client, *server; >> + size_t is_len; >> + uint8_t is[SHA256_DIGEST_LENGTH]; >> + ngx_str_t iss; >> + ngx_uint_t i; >> + const EVP_MD *digest; >> + ngx_quic_hkdf_t seq[8]; >> + ngx_quic_secret_t *client, *server; >> + ngx_quic_ciphers_t ciphers; >> >> static const uint8_t salt[20] = >> "\x38\x76\x2c\xf7\xf5\x59\x34\xb3\x4d\x17" >> @@ -180,6 +180,18 @@ ngx_quic_keys_set_initial_secret(ngx_qui >> } >> } >> >> + if 
(ngx_quic_ciphers(0, &ciphers, ssl_encryption_initial) == NGX_ERROR) { >> + return NGX_ERROR; >> + } >> + >> + if (ngx_quic_crypto_init(ciphers.c, client, 0, log) == NGX_ERROR) { >> + return NGX_ERROR; >> + } >> + >> + if (ngx_quic_crypto_init(ciphers.c, server, 1, log) == NGX_ERROR) { > > Call ngx_quic_crypto_cleanup() for client here? Yes, it is needed for ngx_quic_send_early_cc(). Added. > This function is called from ngx_quic_send_early_cc(), which has no keys > cleanup handler (and I propose we remove it from regular QUIC connections as > well). See the above patch. > >> + return NGX_ERROR; >> + } >> + >> return NGX_OK; >> } >> >> @@ -343,9 +355,9 @@ failed: >> } >> >> >> -static ngx_int_t >> -ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, >> - ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) >> +ngx_int_t >> +ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, >> + ngx_int_t enc, ngx_log_t *log) >> { >> >> #ifdef OPENSSL_IS_BORINGSSL >> @@ -357,19 +369,7 @@ ngx_quic_crypto_open(const ngx_quic_ciph >> ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_new() failed"); >> return NGX_ERROR; >> } >> - >> - if (EVP_AEAD_CTX_open(ctx, out->data, &out->len, out->len, nonce, s->iv.len, >> - in->data, in->len, ad->data, ad->len) >> - != 1) >> - { >> - EVP_AEAD_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_open() failed"); >> - return NGX_ERROR; >> - } >> - >> - EVP_AEAD_CTX_free(ctx); >> #else >> - int len; >> EVP_CIPHER_CTX *ctx; >> >> ctx = EVP_CIPHER_CTX_new(); >> @@ -378,114 +378,9 @@ ngx_quic_crypto_open(const ngx_quic_ciph >> return NGX_ERROR; >> } >> >> - if (EVP_DecryptInit_ex(ctx, cipher, NULL, NULL, NULL) != 1) { >> - EVP_CIPHER_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); >> - return NGX_ERROR; >> - } >> - >> - in->len -= NGX_QUIC_TAG_LEN; >> - >> - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, 
NGX_QUIC_TAG_LEN, >> - in->data + in->len) >> - == 0) >> - { >> - EVP_CIPHER_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, >> - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); >> - return NGX_ERROR; >> - } >> - >> - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, s->iv.len, NULL) >> - == 0) >> - { >> - EVP_CIPHER_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, >> - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_IVLEN) failed"); >> - return NGX_ERROR; >> - } >> - >> - if (EVP_DecryptInit_ex(ctx, NULL, NULL, s->key.data, nonce) != 1) { >> - EVP_CIPHER_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); >> - return NGX_ERROR; >> - } >> - >> - if (EVP_CIPHER_mode(cipher) == EVP_CIPH_CCM_MODE >> - && EVP_DecryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) >> - { >> - EVP_CIPHER_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); >> - return NGX_ERROR; >> - } >> - >> - if (EVP_DecryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { >> - EVP_CIPHER_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); >> - return NGX_ERROR; >> - } >> - >> - if (EVP_DecryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { >> + if (EVP_CipherInit_ex(ctx, cipher, NULL, NULL, NULL, enc) != 1) { >> EVP_CIPHER_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); >> - return NGX_ERROR; >> - } >> - >> - out->len = len; >> - >> - if (EVP_DecryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { >> - EVP_CIPHER_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptFinal_ex failed"); >> - return NGX_ERROR; >> - } >> - >> - out->len += len; >> - >> - EVP_CIPHER_CTX_free(ctx); >> -#endif >> - >> - return NGX_OK; >> -} >> - >> - >> -ngx_int_t >> -ngx_quic_crypto_seal(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, >> - ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) >> -{ >> - >> -#ifdef 
OPENSSL_IS_BORINGSSL >> - EVP_AEAD_CTX *ctx; >> - >> - ctx = EVP_AEAD_CTX_new(cipher, s->key.data, s->key.len, >> - EVP_AEAD_DEFAULT_TAG_LENGTH); >> - if (ctx == NULL) { >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_new() failed"); >> - return NGX_ERROR; >> - } >> - >> - if (EVP_AEAD_CTX_seal(ctx, out->data, &out->len, out->len, nonce, s->iv.len, >> - in->data, in->len, ad->data, ad->len) >> - != 1) >> - { >> - EVP_AEAD_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_seal() failed"); >> - return NGX_ERROR; >> - } >> - >> - EVP_AEAD_CTX_free(ctx); >> -#else >> - int len; >> - EVP_CIPHER_CTX *ctx; >> - >> - ctx = EVP_CIPHER_CTX_new(); >> - if (ctx == NULL) { >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CIPHER_CTX_new() failed"); >> - return NGX_ERROR; >> - } >> - >> - if (EVP_EncryptInit_ex(ctx, cipher, NULL, NULL, NULL) != 1) { >> - EVP_CIPHER_CTX_free(ctx); >> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); >> return NGX_ERROR; >> } >> >> @@ -509,28 +404,121 @@ ngx_quic_crypto_seal(const ngx_quic_ciph >> return NGX_ERROR; >> } >> >> - if (EVP_EncryptInit_ex(ctx, NULL, NULL, s->key.data, nonce) != 1) { >> + if (EVP_CipherInit_ex(ctx, NULL, NULL, s->key.data, NULL, enc) != 1) { >> EVP_CIPHER_CTX_free(ctx); >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); >> + return NGX_ERROR; >> + } >> +#endif >> + >> + s->ctx = ctx; >> + return NGX_OK; >> +} >> + >> + >> +static ngx_int_t >> +ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, >> + ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) >> +{ >> + ngx_quic_crypto_ctx_t *ctx; >> + >> + ctx = s->ctx; >> + >> +#ifdef OPENSSL_IS_BORINGSSL >> + if (EVP_AEAD_CTX_open(ctx, out->data, &out->len, out->len, nonce, s->iv.len, >> + in->data, in->len, ad->data, ad->len) >> + != 1) >> + { >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_open() failed"); >> + return 
NGX_ERROR; >> + } >> +#else >> + int len; >> + >> + if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); >> + return NGX_ERROR; >> + } >> + >> + in->len -= NGX_QUIC_TAG_LEN; >> + >> + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, >> + in->data + in->len) >> + == 0) >> + { >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, >> + "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); >> + return NGX_ERROR; >> + } >> + >> + if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE >> + && EVP_DecryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) >> + { >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); >> + return NGX_ERROR; >> + } >> + >> + if (EVP_DecryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); >> + return NGX_ERROR; >> + } >> + >> + if (EVP_DecryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); >> + return NGX_ERROR; >> + } >> + >> + out->len = len; >> + >> + if (EVP_DecryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptFinal_ex failed"); >> + return NGX_ERROR; >> + } >> + >> + out->len += len; >> +#endif >> + >> + return NGX_OK; >> +} >> + >> + >> +ngx_int_t >> +ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, >> + ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) >> +{ >> + ngx_quic_crypto_ctx_t *ctx; >> + >> + ctx = s->ctx; >> + >> +#ifdef OPENSSL_IS_BORINGSSL >> + if (EVP_AEAD_CTX_seal(ctx, out->data, &out->len, out->len, nonce, s->iv.len, >> + in->data, in->len, ad->data, ad->len) >> + != 1) >> + { >> + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_seal() failed"); >> + return NGX_ERROR; >> + } >> +#else >> + int len; >> + >> + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { >> 
ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); >> return NGX_ERROR; >> } >> >> - if (EVP_CIPHER_mode(cipher) == EVP_CIPH_CCM_MODE >> + if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE >> && EVP_EncryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) >> { >> - EVP_CIPHER_CTX_free(ctx); >> ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); >> return NGX_ERROR; >> } >> >> if (EVP_EncryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { >> - EVP_CIPHER_CTX_free(ctx); >> ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); >> return NGX_ERROR; >> } >> >> if (EVP_EncryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { >> - EVP_CIPHER_CTX_free(ctx); >> ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); >> return NGX_ERROR; >> } >> @@ -538,7 +526,6 @@ ngx_quic_crypto_seal(const ngx_quic_ciph >> out->len = len; >> >> if (EVP_EncryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { >> - EVP_CIPHER_CTX_free(ctx); >> ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptFinal_ex failed"); >> return NGX_ERROR; >> } >> @@ -549,21 +536,30 @@ ngx_quic_crypto_seal(const ngx_quic_ciph >> out->data + out->len) >> == 0) >> { >> - EVP_CIPHER_CTX_free(ctx); >> ngx_ssl_error(NGX_LOG_INFO, log, 0, >> "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed"); >> return NGX_ERROR; >> } >> >> out->len += NGX_QUIC_TAG_LEN; >> - >> - EVP_CIPHER_CTX_free(ctx); >> #endif >> >> return NGX_OK; >> } > > Now that we have universal ngx_quic_crypto_open() which receives "enc", it's > tempting more than ever to combine ngx_quic_crypto_seal() and > ngx_quic_crypto_open() in a single function with "enc". Not a part of this > work though. I'm a little concerned this can impair code readability and slightly decrease efficiency due to additional branching (though it's compensated with branching removal in other places, the net effect should be negligible). 
Other than that I don't feel why it can't be combined at least for the OpenSSL part. # HG changeset patch # User Sergey Kandaurov # Date 1697131103 -14400 # Thu Oct 12 21:18:23 2023 +0400 # Node ID d6dc53fe48b3522954cc9ec6ac949354d0aae512 # Parent 7055f7a2992f9440a27952f1b60ed1f606aea77e QUIC: common code for crypto open and seal operations. diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -28,6 +28,10 @@ static uint64_t ngx_quic_parse_pn(u_char static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); +#ifndef OPENSSL_IS_BORINGSSL +static ngx_int_t ngx_quic_crypto_common(ngx_quic_secret_t *s, ngx_str_t *out, + u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); +#endif static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in); @@ -420,65 +424,19 @@ static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { - ngx_quic_crypto_ctx_t *ctx; - - ctx = s->ctx; - #ifdef OPENSSL_IS_BORINGSSL - if (EVP_AEAD_CTX_open(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) + if (EVP_AEAD_CTX_open(s->ctx, out->data, &out->len, out->len, nonce, + s->iv.len, in->data, in->len, ad->data, ad->len) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_open() failed"); return NGX_ERROR; } -#else - int len; - - if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); - return NGX_ERROR; - } - - in->len -= NGX_QUIC_TAG_LEN; - - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, - in->data + in->len) - == 0) - { - ngx_ssl_error(NGX_LOG_INFO, log, 0, - 
"EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); - return NGX_ERROR; - } - - if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE - && EVP_DecryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) - { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - out->len = len; - - if (EVP_DecryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptFinal_ex failed"); - return NGX_ERROR; - } - - out->len += len; -#endif return NGX_OK; +#else + return ngx_quic_crypto_common(s, out, nonce, in, ad, log); +#endif } @@ -486,67 +444,96 @@ ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { - ngx_quic_crypto_ctx_t *ctx; - - ctx = s->ctx; - #ifdef OPENSSL_IS_BORINGSSL - if (EVP_AEAD_CTX_seal(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) + if (EVP_AEAD_CTX_seal(s->ctx, out->data, &out->len, out->len, nonce, + s->iv.len, in->data, in->len, ad->data, ad->len) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_seal() failed"); return NGX_ERROR; } + + return NGX_OK; #else - int len; + return ngx_quic_crypto_common(s, out, nonce, in, ad, log); +#endif +} + + +#ifndef OPENSSL_IS_BORINGSSL + +static ngx_int_t +ngx_quic_crypto_common(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, + ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) +{ + int len, enc; + ngx_quic_crypto_ctx_t *ctx; - if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); + ctx = s->ctx; + 
enc = EVP_CIPHER_CTX_encrypting(ctx); + + if (EVP_CipherInit_ex(ctx, NULL, NULL, NULL, nonce, enc) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); return NGX_ERROR; } + if (enc == 0) { + in->len -= NGX_QUIC_TAG_LEN; + + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, + in->data + in->len) + == 0) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, + "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); + return NGX_ERROR; + } + } + if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE - && EVP_EncryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) + && EVP_CipherUpdate(ctx, NULL, &len, NULL, in->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherUpdate() failed"); return NGX_ERROR; } - if (EVP_EncryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); + if (EVP_CipherUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherUpdate() failed"); return NGX_ERROR; } - if (EVP_EncryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); + if (EVP_CipherUpdate(ctx, out->data, &len, in->data, in->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherUpdate() failed"); return NGX_ERROR; } out->len = len; - if (EVP_EncryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptFinal_ex failed"); + if (EVP_CipherFinal_ex(ctx, out->data + out->len, &len) <= 0) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherFinal_ex failed"); return NGX_ERROR; } out->len += len; - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, NGX_QUIC_TAG_LEN, - out->data + out->len) - == 0) - { - ngx_ssl_error(NGX_LOG_INFO, log, 0, - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed"); - return NGX_ERROR; + if (enc == 1) { + if 
(EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, NGX_QUIC_TAG_LEN, + out->data + out->len) + == 0) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, + "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed"); + return NGX_ERROR; + } + + out->len += NGX_QUIC_TAG_LEN; } - out->len += NGX_QUIC_TAG_LEN; -#endif - return NGX_OK; } +#endif + void ngx_quic_crypto_cleanup(ngx_quic_secret_t *s) > >> +void >> +ngx_quic_crypto_cleanup(ngx_quic_secret_t *s) >> +{ > > Although we know these functions ignore a NULL argument, I think the code > would still look better under "if (s->ctx) {}". > Indeed, it's made with the knowledge that both API have a check for NULL. Added the check, I've no strict preference on this. >> +#ifdef OPENSSL_IS_BORINGSSL >> + EVP_AEAD_CTX_free(s->ctx); >> +#else >> + EVP_CIPHER_CTX_free(s->ctx); >> +#endif >> + s->ctx = NULL; >> +} >> + >> + >> static ngx_int_t >> ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, >> ngx_quic_secret_t *s, u_char *out, u_char *in) >> @@ -666,6 +662,12 @@ ngx_quic_keys_set_encryption_secret(ngx_ >> } >> } >> >> + if (ngx_quic_crypto_init(ciphers.c, peer_secret, is_write, log) >> + == NGX_ERROR) >> + { >> + return NGX_ERROR; >> + } >> + >> return NGX_OK; >> } >> >> @@ -675,10 +677,10 @@ ngx_quic_keys_available(ngx_quic_keys_t >> enum ssl_encryption_level_t level, ngx_uint_t is_write) >> { >> if (is_write == 0) { >> - return keys->secrets[level].client.key.len != 0; >> + return keys->secrets[level].client.ctx != NULL; >> } >> >> - return keys->secrets[level].server.key.len != 0; >> + return keys->secrets[level].server.ctx != 0; > > Maybe != NULL ? Fixed, tnx. 
> >> } >> >> >> @@ -686,8 +688,13 @@ void >> ngx_quic_keys_discard(ngx_quic_keys_t *keys, >> enum ssl_encryption_level_t level) >> { >> - keys->secrets[level].client.key.len = 0; >> - keys->secrets[level].server.key.len = 0; >> + ngx_quic_secret_t *client, *server; >> + >> + client = &keys->secrets[level].client; >> + server = &keys->secrets[level].server; >> + >> + ngx_quic_crypto_cleanup(client); >> + ngx_quic_crypto_cleanup(server); >> } >> >> >> @@ -699,6 +706,9 @@ ngx_quic_keys_switch(ngx_connection_t *c >> current = &keys->secrets[ssl_encryption_application]; >> next = &keys->next_key; >> >> + ngx_quic_crypto_cleanup(¤t->client); >> + ngx_quic_crypto_cleanup(¤t->server); >> + >> tmp = *current; >> *current = *next; >> *next = tmp; >> @@ -762,6 +772,16 @@ ngx_quic_keys_update(ngx_event_t *ev) >> } >> } >> >> + if (ngx_quic_crypto_init(ciphers.c, &next->client, 0, c->log) == NGX_ERROR) >> + { >> + goto failed; >> + } >> + >> + if (ngx_quic_crypto_init(ciphers.c, &next->server, 1, c->log) == NGX_ERROR) >> + { >> + goto failed; >> + } >> + >> return; >> >> failed: >> @@ -770,6 +790,26 @@ failed: >> } >> >> >> +void >> +ngx_quic_keys_cleanup(void *data) >> +{ >> + ngx_quic_keys_t *keys = data; >> + >> + size_t i; >> + ngx_quic_secrets_t *secrets; >> + >> + for (i = 0; i < NGX_QUIC_ENCRYPTION_LAST; i++) { >> + secrets = &keys->secrets[i]; >> + ngx_quic_crypto_cleanup(&secrets->client); >> + ngx_quic_crypto_cleanup(&secrets->server); >> + } >> + >> + secrets = &keys->next_key; >> + ngx_quic_crypto_cleanup(&secrets->client); >> + ngx_quic_crypto_cleanup(&secrets->server); >> +} >> + >> + >> static ngx_int_t >> ngx_quic_create_packet(ngx_quic_header_t *pkt, ngx_str_t *res) >> { >> @@ -801,8 +841,7 @@ ngx_quic_create_packet(ngx_quic_header_t >> ngx_memcpy(nonce, secret->iv.data, secret->iv.len); >> ngx_quic_compute_nonce(nonce, sizeof(nonce), pkt->number); >> >> - if (ngx_quic_crypto_seal(ciphers.c, secret, &out, >> - nonce, &pkt->payload, &ad, pkt->log) >> + if 
(ngx_quic_crypto_seal(secret, &out, nonce, &pkt->payload, &ad, pkt->log) >> != NGX_OK) >> { >> return NGX_ERROR; >> @@ -862,13 +901,18 @@ ngx_quic_create_retry_packet(ngx_quic_he >> ngx_memcpy(secret.key.data, key, sizeof(key)); >> secret.iv.len = NGX_QUIC_IV_LEN; >> >> - if (ngx_quic_crypto_seal(ciphers.c, &secret, &itag, nonce, &in, &ad, >> - pkt->log) >> + if (ngx_quic_crypto_init(ciphers.c, &secret, 1, pkt->log) == NGX_ERROR) { >> + return NGX_ERROR; >> + } >> + >> + if (ngx_quic_crypto_seal(&secret, &itag, nonce, &in, &ad, pkt->log) >> != NGX_OK) >> { > > Need to call ngx_quic_crypto_cleanup() here. Fixed, tnx. I was pondering on reusing a static crypto context to make generating Retry packets more lightweight. Known fixed values for key and nonce make it possible to create a single context and reuse it over all Retry packets. Note that the context memory is kept for reuse after the first retry, it will be freed eventually on process exit, the operating system will take care of it. Not sure though this is a good solution. diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -872,7 +872,6 @@ ngx_quic_create_retry_packet(ngx_quic_he { u_char *start; ngx_str_t ad, itag; - ngx_quic_secret_t secret; ngx_quic_ciphers_t ciphers; /* 5.8. 
Retry Packet Integrity */ @@ -882,6 +881,8 @@ ngx_quic_create_retry_packet(ngx_quic_he "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"; static ngx_str_t in = ngx_string(""); + static ngx_quic_secret_t secret; + ad.data = res->data; ad.len = ngx_quic_create_retry_itag(pkt, ad.data, &start); @@ -893,6 +894,10 @@ ngx_quic_create_retry_packet(ngx_quic_he "quic retry itag len:%uz %xV", ad.len, &ad); #endif + if (secret.ctx) { + goto seal; + } + if (ngx_quic_ciphers(0, &ciphers, pkt->level) == NGX_ERROR) { return NGX_ERROR; } @@ -905,14 +910,14 @@ ngx_quic_create_retry_packet(ngx_quic_he return NGX_ERROR; } +seal: + if (ngx_quic_crypto_seal(&secret, &itag, nonce, &in, &ad, pkt->log) != NGX_OK) { return NGX_ERROR; } - ngx_quic_crypto_cleanup(&secret); - res->len = itag.data + itag.len - start; res->data = start; > >> return NGX_ERROR; >> } >> >> + ngx_quic_crypto_cleanup(&secret); >> + >> res->len = itag.data + itag.len - start; >> res->data = start; >> >> @@ -999,7 +1043,7 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, >> u_char *p, *sample; >> size_t len; >> uint64_t pn, lpn; >> - ngx_int_t pnl, rc; >> + ngx_int_t pnl; >> ngx_str_t in, ad; >> ngx_uint_t key_phase; >> ngx_quic_secret_t *secret; >> @@ -1088,9 +1132,9 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, >> pkt->payload.len = in.len - NGX_QUIC_TAG_LEN; >> pkt->payload.data = pkt->plaintext + ad.len; >> >> - rc = ngx_quic_crypto_open(ciphers.c, secret, &pkt->payload, >> - nonce, &in, &ad, pkt->log); >> - if (rc != NGX_OK) { >> + if (ngx_quic_crypto_open(secret, &pkt->payload, nonce, &in, &ad, pkt->log) >> + != NGX_OK) >> + { >> return NGX_DECLINED; >> } >> >> diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h >> --- a/src/event/quic/ngx_event_quic_protection.h >> +++ b/src/event/quic/ngx_event_quic_protection.h >> @@ -26,8 +26,10 @@ >> >> #ifdef OPENSSL_IS_BORINGSSL >> #define ngx_quic_cipher_t EVP_AEAD >> +#define ngx_quic_crypto_ctx_t EVP_AEAD_CTX >> #else >> 
#define ngx_quic_cipher_t EVP_CIPHER >> +#define ngx_quic_crypto_ctx_t EVP_CIPHER_CTX >> #endif >> >> >> @@ -48,6 +50,7 @@ typedef struct { >> ngx_quic_md_t key; >> ngx_quic_iv_t iv; >> ngx_quic_md_t hp; >> + ngx_quic_crypto_ctx_t *ctx; >> } ngx_quic_secret_t; >> >> >> @@ -100,14 +103,17 @@ void ngx_quic_keys_discard(ngx_quic_keys >> enum ssl_encryption_level_t level); >> void ngx_quic_keys_switch(ngx_connection_t *c, ngx_quic_keys_t *keys); >> void ngx_quic_keys_update(ngx_event_t *ev); >> +void ngx_quic_keys_cleanup(void *data); >> ngx_int_t ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_str_t *res); >> ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn); >> void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); >> ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, >> enum ssl_encryption_level_t level); >> -ngx_int_t ngx_quic_crypto_seal(const ngx_quic_cipher_t *cipher, >> - ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, >> - ngx_str_t *ad, ngx_log_t *log); >> +ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, >> + ngx_quic_secret_t *s, ngx_int_t enc, ngx_log_t *log); >> +ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, >> + u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); >> +void ngx_quic_crypto_cleanup(ngx_quic_secret_t *s); >> ngx_int_t ngx_quic_hkdf_expand(ngx_quic_hkdf_t *hkdf, const EVP_MD *digest, >> ngx_log_t *log); -- Sergey Kandaurov From pluknet at nginx.com Fri Oct 13 15:13:55 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 13 Oct 2023 19:13:55 +0400 Subject: [PATCH 6 of 8] QUIC: reusing crypto contexts for header protection In-Reply-To: <20230920121216.kpwwd2qo2xdzhmf4@N00W24XTQX> References: <20230920121216.kpwwd2qo2xdzhmf4@N00W24XTQX> Message-ID: > On 20 Sep 2023, at 16:12, Roman Arutyunyan wrote: > > Hi, > > On Thu, Sep 07, 2023 at 07:13:58PM +0400, Sergey Kandaurov wrote: >> # HG changeset patch >> # User Sergey 
Kandaurov >> # Date 1694099424 -14400 >> # Thu Sep 07 19:10:24 2023 +0400 >> # Node ID cdc5b59309dbdc234c71e53fca142502884e6177 >> # Parent 28f7491bc79771f9cfa882b1b5584fa48ea42e6b >> QUIC: reusing crypto contexts for header protection. >> >> diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c >> --- a/src/event/quic/ngx_event_quic_protection.c >> +++ b/src/event/quic/ngx_event_quic_protection.c >> @@ -28,8 +28,12 @@ static uint64_t ngx_quic_parse_pn(u_char >> >> static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, >> u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); >> -static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, >> - ngx_quic_secret_t *s, u_char *out, u_char *in); >> + >> +static ngx_int_t ngx_quic_crypto_hp_init(const EVP_CIPHER *cipher, >> + ngx_quic_secret_t *s, ngx_log_t *log); >> +static ngx_int_t ngx_quic_crypto_hp(ngx_quic_secret_t *s, >> + u_char *out, u_char *in, ngx_log_t *log); >> +static void ngx_quic_crypto_hp_cleanup(ngx_quic_secret_t *s); >> >> static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, >> ngx_str_t *res); >> @@ -192,6 +196,14 @@ ngx_quic_keys_set_initial_secret(ngx_qui >> return NGX_ERROR; >> } >> >> + if (ngx_quic_crypto_hp_init(ciphers.hp, client, log) == NGX_ERROR) { >> + return NGX_ERROR; >> + } >> + >> + if (ngx_quic_crypto_hp_init(ciphers.hp, server, log) == NGX_ERROR) { >> + return NGX_ERROR; >> + } > > Again, as before, in case of errors all ctx's created here should be freed, > since we don't always have a cleanup handler for them, see > ngx_quic_send_early_cc(). > > Also, in ngx_quic_send_early_cc() there's no cleanup at all, and ctx's will > leak in case of successful creation. Converted error path to "goto failed" (based on the updated change in the previous patch thread). 
> >> return NGX_OK; >> } >> >> @@ -561,53 +573,88 @@ ngx_quic_crypto_cleanup(ngx_quic_secret_ >> >> >> static ngx_int_t >> -ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, >> - ngx_quic_secret_t *s, u_char *out, u_char *in) >> +ngx_quic_crypto_hp_init(const EVP_CIPHER *cipher, ngx_quic_secret_t *s, >> + ngx_log_t *log) >> { >> - int outlen; >> EVP_CIPHER_CTX *ctx; >> - u_char zero[NGX_QUIC_HP_LEN] = {0}; >> >> #ifdef OPENSSL_IS_BORINGSSL >> - uint32_t cnt; >> - >> - ngx_memcpy(&cnt, in, sizeof(uint32_t)); >> - >> - if (cipher == (const EVP_CIPHER *) EVP_aead_chacha20_poly1305()) { >> - CRYPTO_chacha_20(out, zero, NGX_QUIC_HP_LEN, s->hp.data, &in[4], cnt); >> + if (cipher == (EVP_CIPHER *) EVP_aead_chacha20_poly1305()) { >> + /* some bogus value to distinguish ChaCha20 cipher */ >> + s->hp_ctx = (EVP_CIPHER_CTX *) cipher; > > What if we use NULL as the special value? Agree, it looks more clean. Hopefully, TLS won't evolve more ciphers lacking EVP in BoringSSL. # HG changeset patch # User Sergey Kandaurov # Date 1697199167 -14400 # Fri Oct 13 16:12:47 2023 +0400 # Node ID 226da28965e4ed95b200516ed9aa4d5b4804ae24 # Parent 26f1c0864c67f5aac529f06592d915d7de9adb6e QUIC: reusing crypto contexts for header protection. 
diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -32,8 +32,12 @@ static ngx_int_t ngx_quic_crypto_open(ng static ngx_int_t ngx_quic_crypto_common(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); #endif -static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, - ngx_quic_secret_t *s, u_char *out, u_char *in); + +static ngx_int_t ngx_quic_crypto_hp_init(const EVP_CIPHER *cipher, + ngx_quic_secret_t *s, ngx_log_t *log); +static ngx_int_t ngx_quic_crypto_hp(ngx_quic_secret_t *s, + u_char *out, u_char *in, ngx_log_t *log); +static void ngx_quic_crypto_hp_cleanup(ngx_quic_secret_t *s); static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, ngx_str_t *res); @@ -196,6 +200,14 @@ ngx_quic_keys_set_initial_secret(ngx_qui goto failed; } + if (ngx_quic_crypto_hp_init(ciphers.hp, client, log) == NGX_ERROR) { + goto failed; + } + + if (ngx_quic_crypto_hp_init(ciphers.hp, server, log) == NGX_ERROR) { + goto failed; + } + return NGX_OK; failed: @@ -556,53 +568,82 @@ ngx_quic_crypto_cleanup(ngx_quic_secret_ static ngx_int_t -ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, - ngx_quic_secret_t *s, u_char *out, u_char *in) +ngx_quic_crypto_hp_init(const EVP_CIPHER *cipher, ngx_quic_secret_t *s, + ngx_log_t *log) { - int outlen; EVP_CIPHER_CTX *ctx; - u_char zero[NGX_QUIC_HP_LEN] = {0}; #ifdef OPENSSL_IS_BORINGSSL - uint32_t cnt; - - ngx_memcpy(&cnt, in, sizeof(uint32_t)); - - if (cipher == (const EVP_CIPHER *) EVP_aead_chacha20_poly1305()) { - CRYPTO_chacha_20(out, zero, NGX_QUIC_HP_LEN, s->hp.data, &in[4], cnt); + if (cipher == (EVP_CIPHER *) EVP_aead_chacha20_poly1305()) { + /* no EVP interface */ + s->hp_ctx = NULL; return NGX_OK; } #endif ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, 
"EVP_CIPHER_CTX_new() failed"); + return NGX_ERROR; + } + + if (EVP_EncryptInit_ex(ctx, cipher, NULL, s->hp.data, NULL) != 1) { + EVP_CIPHER_CTX_free(ctx); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); return NGX_ERROR; } - if (EVP_EncryptInit_ex(ctx, cipher, NULL, s->hp.data, in) != 1) { + s->hp_ctx = ctx; + return NGX_OK; +} + + +static ngx_int_t +ngx_quic_crypto_hp(ngx_quic_secret_t *s, u_char *out, u_char *in, + ngx_log_t *log) +{ + int outlen; + EVP_CIPHER_CTX *ctx; + u_char zero[NGX_QUIC_HP_LEN] = {0}; + + ctx = s->hp_ctx; + +#ifdef OPENSSL_IS_BORINGSSL + uint32_t cnt; + + if (ctx == NULL) { + ngx_memcpy(&cnt, in, sizeof(uint32_t)); + CRYPTO_chacha_20(out, zero, NGX_QUIC_HP_LEN, s->hp.data, &in[4], cnt); + return NGX_OK; + } +#endif + + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, in) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); - goto failed; + return NGX_ERROR; } if (!EVP_EncryptUpdate(ctx, out, &outlen, zero, NGX_QUIC_HP_LEN)) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); - goto failed; + return NGX_ERROR; } if (!EVP_EncryptFinal_ex(ctx, out + NGX_QUIC_HP_LEN, &outlen)) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptFinal_Ex() failed"); - goto failed; + return NGX_ERROR; } - EVP_CIPHER_CTX_free(ctx); - return NGX_OK; +} -failed: - EVP_CIPHER_CTX_free(ctx); - - return NGX_ERROR; +static void +ngx_quic_crypto_hp_cleanup(ngx_quic_secret_t *s) +{ + if (s->hp_ctx) { + EVP_CIPHER_CTX_free(s->hp_ctx); + s->hp_ctx = NULL; + } } @@ -663,6 +704,10 @@ ngx_quic_keys_set_encryption_secret(ngx_ return NGX_ERROR; } + if (ngx_quic_crypto_hp_init(ciphers.hp, peer_secret, log) == NGX_ERROR) { + return NGX_ERROR; + } + return NGX_OK; } @@ -690,6 +735,9 @@ ngx_quic_keys_discard(ngx_quic_keys_t *k ngx_quic_crypto_cleanup(client); ngx_quic_crypto_cleanup(server); + + ngx_quic_crypto_hp_cleanup(client); + ngx_quic_crypto_hp_cleanup(server); } @@ -742,11 +790,13 @@ ngx_quic_keys_update(ngx_event_t 
*ev) next->client.key.len = current->client.key.len; next->client.iv.len = NGX_QUIC_IV_LEN; next->client.hp = current->client.hp; + next->client.hp_ctx = current->client.hp_ctx; next->server.secret.len = current->server.secret.len; next->server.key.len = current->server.key.len; next->server.iv.len = NGX_QUIC_IV_LEN; next->server.hp = current->server.hp; + next->server.hp_ctx = current->server.hp_ctx; ngx_quic_hkdf_set(&seq[0], "tls13 quic ku", &next->client.secret, ¤t->client.secret); @@ -795,6 +845,9 @@ ngx_quic_keys_cleanup(ngx_quic_keys_t *k secrets = &keys->secrets[i]; ngx_quic_crypto_cleanup(&secrets->client); ngx_quic_crypto_cleanup(&secrets->server); + + ngx_quic_crypto_hp_cleanup(&secrets->client); + ngx_quic_crypto_hp_cleanup(&secrets->server); } secrets = &keys->next_key; @@ -841,9 +894,7 @@ ngx_quic_create_packet(ngx_quic_header_t } sample = &out.data[4 - pkt->num_len]; - if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) - != NGX_OK) - { + if (ngx_quic_crypto_hp(secret, mask, sample, pkt->log) != NGX_OK) { return NGX_ERROR; } @@ -1075,9 +1126,7 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, /* header protection */ - if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) - != NGX_OK) - { + if (ngx_quic_crypto_hp(secret, mask, sample, pkt->log) != NGX_OK) { return NGX_DECLINED; } diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -51,6 +51,7 @@ typedef struct { ngx_quic_iv_t iv; ngx_quic_md_t hp; ngx_quic_crypto_ctx_t *ctx; + EVP_CIPHER_CTX *hp_ctx; } ngx_quic_secret_t; -- Sergey Kandaurov From pluknet at nginx.com Fri Oct 13 15:14:07 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 13 Oct 2023 19:14:07 +0400 Subject: [PATCH 7 of 8] QUIC: cleaned up now unused ngx_quic_ciphers() calls In-Reply-To: <8bd0104b7e6b658a1696.1694099639@enoparse.local> References: 
<8bd0104b7e6b658a1696.1694099639@enoparse.local> Message-ID: > On 7 Sep 2023, at 19:13, Sergey Kandaurov wrote: > > # HG changeset patch > # User Sergey Kandaurov > # Date 1694099425 -14400 > # Thu Sep 07 19:10:25 2023 +0400 > # Node ID 8bd0104b7e6b658a1696fe7f3e2f1868ac2ae1f9 > # Parent cdc5b59309dbdc234c71e53fca142502884e6177 > QUIC: cleaned up now unused ngx_quic_ciphers() calls. > [..] Additionally, as per the discussion with Roman, below is the patch to simplify ngx_quic_ciphers() API. # HG changeset patch # User Sergey Kandaurov # Date 1697208935 -14400 # Fri Oct 13 18:55:35 2023 +0400 # Node ID eb5d6ed379d112a7360991be298852f978ebdb01 # Parent 6b078bc722fffcf69437f8716c2cd610706cc776 QUIC: simplified ngx_quic_ciphers() API. After conversion to reusable crypto ctx, now there's enough caller context to remove the "level" argument from ngx_quic_ciphers(). diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -239,7 +239,7 @@ ngx_quic_compat_set_encryption_secret(ng keys->cipher = SSL_CIPHER_get_id(cipher); - key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); + key_len = ngx_quic_ciphers(keys->cipher, &ciphers); if (key_len == NGX_ERROR) { ngx_ssl_error(NGX_LOG_INFO, c->log, 0, "unexpected cipher"); diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -15,6 +15,8 @@ #define NGX_QUIC_AES_128_KEY_LEN 16 +#define NGX_QUIC_INITIAL_CIPHER TLS1_3_CK_AES_128_GCM_SHA256 + static ngx_int_t ngx_hkdf_expand(u_char *out_key, size_t out_len, const EVP_MD *digest, const u_char *prk, size_t prk_len, @@ -46,15 +48,10 @@ static ngx_int_t ngx_quic_create_retry_p ngx_int_t -ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, - enum ssl_encryption_level_t 
level) +ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers) { ngx_int_t len; - if (level == ssl_encryption_initial) { - id = TLS1_3_CK_AES_128_GCM_SHA256; - } - switch (id) { case TLS1_3_CK_AES_128_GCM_SHA256: @@ -188,7 +185,7 @@ ngx_quic_keys_set_initial_secret(ngx_qui } } - if (ngx_quic_ciphers(0, &ciphers, ssl_encryption_initial) == NGX_ERROR) { + if (ngx_quic_ciphers(NGX_QUIC_INITIAL_CIPHER, &ciphers) == NGX_ERROR) { return NGX_ERROR; } @@ -664,7 +661,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ keys->cipher = SSL_CIPHER_get_id(cipher); - key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); + key_len = ngx_quic_ciphers(keys->cipher, &ciphers); if (key_len == NGX_ERROR) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "unexpected cipher"); @@ -780,9 +777,7 @@ ngx_quic_keys_update(ngx_event_t *ev) c->log->action = "updating keys"; - if (ngx_quic_ciphers(keys->cipher, &ciphers, ssl_encryption_application) - == NGX_ERROR) - { + if (ngx_quic_ciphers(keys->cipher, &ciphers) == NGX_ERROR) { goto failed; } @@ -936,7 +931,7 @@ ngx_quic_create_retry_packet(ngx_quic_he goto seal; } - if (ngx_quic_ciphers(0, &ciphers, pkt->level) == NGX_ERROR) { + if (ngx_quic_ciphers(NGX_QUIC_INITIAL_CIPHER, &ciphers) == NGX_ERROR) { return NGX_ERROR; } diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -108,8 +108,7 @@ void ngx_quic_keys_cleanup(ngx_quic_keys ngx_int_t ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_str_t *res); ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn); void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); -ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, - enum ssl_encryption_level_t level); +ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers); ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, 
ngx_int_t enc, ngx_log_t *log); ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, -- Sergey Kandaurov From pluknet at nginx.com Fri Oct 13 15:15:09 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 13 Oct 2023 19:15:09 +0400 Subject: [PATCH 8 of 8] QUIC: explicitly zero out unused keying material In-Reply-To: <20230921132930.6m2dmffrzywhe5ba@N00W24XTQX> References: <813128cee322830435a9.1694099640@enoparse.local> <20230921132930.6m2dmffrzywhe5ba@N00W24XTQX> Message-ID: <91BE3821-6828-4804-AAD7-0959D9D6DCBB@nginx.com> > On 21 Sep 2023, at 17:29, Roman Arutyunyan wrote: > > Hi, > > On Thu, Sep 07, 2023 at 07:14:00PM +0400, Sergey Kandaurov wrote: >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1694099425 -14400 >> # Thu Sep 07 19:10:25 2023 +0400 >> # Node ID 813128cee322830435a95903993b17fb24683da7 >> # Parent 8bd0104b7e6b658a1696fe7f3e2f1868ac2ae1f9 >> QUIC: explicitly zero out unused keying material. >> >> diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c >> --- a/src/event/quic/ngx_event_quic_openssl_compat.c >> +++ b/src/event/quic/ngx_event_quic_openssl_compat.c >> @@ -245,15 +245,6 @@ ngx_quic_compat_set_encryption_secret(ng >> return NGX_ERROR; >> } >> >> - if (sizeof(peer_secret->secret.data) < secret_len) { >> - ngx_log_error(NGX_LOG_ALERT, c->log, 0, >> - "unexpected secret len: %uz", secret_len); >> - return NGX_ERROR; >> - } >> - >> - peer_secret->secret.len = secret_len; >> - ngx_memcpy(peer_secret->secret.data, secret, secret_len); >> - >> peer_secret->key.len = key_len; >> peer_secret->iv.len = NGX_QUIC_IV_LEN; >> >> @@ -275,6 +266,9 @@ ngx_quic_compat_set_encryption_secret(ng >> return NGX_ERROR; >> } >> >> + ngx_explicit_memzero(secret_str.data, secret_str.len); >> + ngx_explicit_memzero(peer_secret->key.data, peer_secret->key.len); >> + >> /* register cleanup handler once */ >> >> if (level == ssl_encryption_handshake) { >> diff --git 
a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c >> --- a/src/event/quic/ngx_event_quic_protection.c >> +++ b/src/event/quic/ngx_event_quic_protection.c >> @@ -719,6 +719,8 @@ ngx_quic_keys_set_encryption_secret(ngx_ >> return NGX_ERROR; >> } >> >> + ngx_explicit_memzero(peer_secret->key.data, peer_secret->key.len); >> + >> return NGX_OK; >> } >> >> @@ -749,6 +751,12 @@ ngx_quic_keys_discard(ngx_quic_keys_t *k >> >> ngx_quic_crypto_hp_cleanup(client); >> ngx_quic_crypto_hp_cleanup(server); >> + >> + ngx_explicit_memzero(client->secret.data, client->secret.len); >> + ngx_explicit_memzero(client->key.data, client->key.len); >> + >> + ngx_explicit_memzero(server->secret.data, server->secret.len); >> + ngx_explicit_memzero(server->key.data, server->key.len); >> } >> >> >> @@ -838,6 +846,14 @@ ngx_quic_keys_update(ngx_event_t *ev) >> goto failed; >> } >> >> + ngx_explicit_memzero(current->client.secret.data, >> + current->client.secret.len); >> + ngx_explicit_memzero(current->server.secret.data, >> + current->server.secret.len); >> + >> + ngx_explicit_memzero(next->client.key.data, next->client.key.len); >> + ngx_explicit_memzero(next->server.key.data, next->server.key.len); >> + >> return; >> >> failed: >> @@ -866,6 +882,12 @@ ngx_quic_keys_cleanup(void *data) >> secrets = &keys->next_key; >> ngx_quic_crypto_cleanup(&secrets->client); >> ngx_quic_crypto_cleanup(&secrets->server); >> + >> + ngx_explicit_memzero(secrets->client.secret.data, >> + secrets->client.secret.len); >> + >> + ngx_explicit_memzero(secrets->server.secret.data, >> + secrets->server.secret.len); >> } >> >> > > Maybe also we need to zero out the secret in ngx_quic_compat_keylog_callback()? > Yes, OpenSSL behaviour should be preserved. 
It was just done the wrong way, patch on top: diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -219,6 +219,8 @@ ngx_quic_compat_keylog_callback(const SS (void) ngx_quic_compat_set_encryption_secret(c, &com->keys, level, cipher, secret, n); } + + ngx_explicit_memzero(secret, n); } @@ -281,7 +283,6 @@ ngx_quic_compat_set_encryption_secret(ng return NGX_ERROR; } - ngx_explicit_memzero(secret_str.data, secret_str.len); ngx_explicit_memzero(peer_secret->key.data, peer_secret->key.len); return NGX_OK; > Also, this patch made me think about removing key and hp from ngx_quic_secret_t. > Since we have ctx/hp_ctx now, we only need them when creating these contexts. > This will reduce the amount of sensitive data we permanently store in memory. > This was addressed, we can additionally reduce sizeof(ngx_quic_secret_t), now that it became possible for "key". > Attached is my effort towards making key and hp local. "hp" is needed for BoringSSL, but we can through away "key". Anyway, "key" is the key part of cryptography, keeping "hp" should be ok. # HG changeset patch # User Sergey Kandaurov # Date 1697209278 -14400 # Fri Oct 13 19:01:18 2023 +0400 # Node ID 1316dd35650b2a95d6454515100d889d44b7fa8b # Parent 16bf91cd32ca6667967b5321232f076a42e7200e QUIC: removed key field from ngx_quic_secret_t. It is made local as it is only needed now when creating crypto context. BoringSSL lacks EVP interface for ChaCha20, providing instead a function for one-shot encryption, thus hp is still preserved. Based on a patch by Roman Arutyunyan. 
diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -232,6 +232,7 @@ ngx_quic_compat_set_encryption_secret(ng ngx_int_t key_len; ngx_str_t secret_str; ngx_uint_t i; + ngx_quic_md_t key; ngx_quic_hkdf_t seq[2]; ngx_quic_secret_t *peer_secret; ngx_quic_ciphers_t ciphers; @@ -248,13 +249,14 @@ ngx_quic_compat_set_encryption_secret(ng return NGX_ERROR; } - peer_secret->key.len = key_len; + key.len = key_len; + peer_secret->iv.len = NGX_QUIC_IV_LEN; secret_str.len = secret_len; secret_str.data = (u_char *) secret; - ngx_quic_hkdf_set(&seq[0], "tls13 key", &peer_secret->key, &secret_str); + ngx_quic_hkdf_set(&seq[0], "tls13 key", &key, &secret_str); ngx_quic_hkdf_set(&seq[1], "tls13 iv", &peer_secret->iv, &secret_str); for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { @@ -279,11 +281,13 @@ ngx_quic_compat_set_encryption_secret(ng ngx_quic_crypto_cleanup(peer_secret); - if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, peer_secret, &key, 1, c->log) + == NGX_ERROR) + { return NGX_ERROR; } - ngx_explicit_memzero(peer_secret->key.data, peer_secret->key.len); + ngx_explicit_memzero(key.data, key.len); return NGX_OK; } diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -117,6 +117,7 @@ ngx_quic_keys_set_initial_secret(ngx_qui ngx_str_t iss; ngx_uint_t i; const EVP_MD *digest; + ngx_quic_md_t client_key, server_key; ngx_quic_hkdf_t seq[8]; ngx_quic_secret_t *client, *server; ngx_quic_ciphers_t ciphers; @@ -160,8 +161,8 @@ ngx_quic_keys_set_initial_secret(ngx_qui client->secret.len = SHA256_DIGEST_LENGTH; server->secret.len = SHA256_DIGEST_LENGTH; - client->key.len = NGX_QUIC_AES_128_KEY_LEN; - 
server->key.len = NGX_QUIC_AES_128_KEY_LEN; + client_key.len = NGX_QUIC_AES_128_KEY_LEN; + server_key.len = NGX_QUIC_AES_128_KEY_LEN; client->hp.len = NGX_QUIC_AES_128_KEY_LEN; server->hp.len = NGX_QUIC_AES_128_KEY_LEN; @@ -171,11 +172,11 @@ ngx_quic_keys_set_initial_secret(ngx_qui /* labels per RFC 9001, 5.1. Packet Protection Keys */ ngx_quic_hkdf_set(&seq[0], "tls13 client in", &client->secret, &iss); - ngx_quic_hkdf_set(&seq[1], "tls13 quic key", &client->key, &client->secret); + ngx_quic_hkdf_set(&seq[1], "tls13 quic key", &client_key, &client->secret); ngx_quic_hkdf_set(&seq[2], "tls13 quic iv", &client->iv, &client->secret); ngx_quic_hkdf_set(&seq[3], "tls13 quic hp", &client->hp, &client->secret); ngx_quic_hkdf_set(&seq[4], "tls13 server in", &server->secret, &iss); - ngx_quic_hkdf_set(&seq[5], "tls13 quic key", &server->key, &server->secret); + ngx_quic_hkdf_set(&seq[5], "tls13 quic key", &server_key, &server->secret); ngx_quic_hkdf_set(&seq[6], "tls13 quic iv", &server->iv, &server->secret); ngx_quic_hkdf_set(&seq[7], "tls13 quic hp", &server->hp, &server->secret); @@ -189,11 +190,15 @@ ngx_quic_keys_set_initial_secret(ngx_qui return NGX_ERROR; } - if (ngx_quic_crypto_init(ciphers.c, client, 0, log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, client, &client_key, 0, log) + == NGX_ERROR) + { return NGX_ERROR; } - if (ngx_quic_crypto_init(ciphers.c, server, 1, log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, server, &server_key, 1, log) + == NGX_ERROR) + { goto failed; } @@ -376,13 +381,13 @@ failed: ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, - ngx_int_t enc, ngx_log_t *log) + ngx_quic_md_t *key, ngx_int_t enc, ngx_log_t *log) { #ifdef OPENSSL_IS_BORINGSSL EVP_AEAD_CTX *ctx; - ctx = EVP_AEAD_CTX_new(cipher, s->key.data, s->key.len, + ctx = EVP_AEAD_CTX_new(cipher, key->data, key->len, EVP_AEAD_DEFAULT_TAG_LENGTH); if (ctx == NULL) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_new() 
failed"); @@ -423,7 +428,7 @@ ngx_quic_crypto_init(const ngx_quic_ciph return NGX_ERROR; } - if (EVP_CipherInit_ex(ctx, NULL, NULL, s->key.data, NULL, enc) != 1) { + if (EVP_CipherInit_ex(ctx, NULL, NULL, key->data, NULL, enc) != 1) { EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); return NGX_ERROR; @@ -652,6 +657,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ ngx_int_t key_len; ngx_str_t secret_str; ngx_uint_t i; + ngx_quic_md_t key; ngx_quic_hkdf_t seq[3]; ngx_quic_secret_t *peer_secret; ngx_quic_ciphers_t ciphers; @@ -677,15 +683,14 @@ ngx_quic_keys_set_encryption_secret(ngx_ peer_secret->secret.len = secret_len; ngx_memcpy(peer_secret->secret.data, secret, secret_len); - peer_secret->key.len = key_len; + key.len = key_len; peer_secret->iv.len = NGX_QUIC_IV_LEN; peer_secret->hp.len = key_len; secret_str.len = secret_len; secret_str.data = (u_char *) secret; - ngx_quic_hkdf_set(&seq[0], "tls13 quic key", - &peer_secret->key, &secret_str); + ngx_quic_hkdf_set(&seq[0], "tls13 quic key", &key, &secret_str); ngx_quic_hkdf_set(&seq[1], "tls13 quic iv", &peer_secret->iv, &secret_str); ngx_quic_hkdf_set(&seq[2], "tls13 quic hp", &peer_secret->hp, &secret_str); @@ -695,7 +700,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ } } - if (ngx_quic_crypto_init(ciphers.c, peer_secret, is_write, log) + if (ngx_quic_crypto_init(ciphers.c, peer_secret, &key, is_write, log) == NGX_ERROR) { return NGX_ERROR; @@ -705,7 +710,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ return NGX_ERROR; } - ngx_explicit_memzero(peer_secret->key.data, peer_secret->key.len); + ngx_explicit_memzero(key.data, key.len); return NGX_OK; } @@ -739,10 +744,7 @@ ngx_quic_keys_discard(ngx_quic_keys_t *k ngx_quic_crypto_hp_cleanup(server); ngx_explicit_memzero(client->secret.data, client->secret.len); - ngx_explicit_memzero(client->key.data, client->key.len); - ngx_explicit_memzero(server->secret.data, server->secret.len); - ngx_explicit_memzero(server->key.data, 
server->key.len); } @@ -766,7 +768,9 @@ ngx_quic_keys_switch(ngx_connection_t *c void ngx_quic_keys_update(ngx_event_t *ev) { + ngx_int_t key_len; ngx_uint_t i; + ngx_quic_md_t client_key, server_key; ngx_quic_hkdf_t seq[6]; ngx_quic_keys_t *keys; ngx_connection_t *c; @@ -785,18 +789,21 @@ ngx_quic_keys_update(ngx_event_t *ev) c->log->action = "updating keys"; - if (ngx_quic_ciphers(keys->cipher, &ciphers) == NGX_ERROR) { + key_len = ngx_quic_ciphers(keys->cipher, &ciphers); + + if (key_len == NGX_ERROR) { goto failed; } + client_key.len = key_len; + server_key.len = key_len; + next->client.secret.len = current->client.secret.len; - next->client.key.len = current->client.key.len; next->client.iv.len = NGX_QUIC_IV_LEN; next->client.hp = current->client.hp; next->client.hp_ctx = current->client.hp_ctx; next->server.secret.len = current->server.secret.len; - next->server.key.len = current->server.key.len; next->server.iv.len = NGX_QUIC_IV_LEN; next->server.hp = current->server.hp; next->server.hp_ctx = current->server.hp_ctx; @@ -804,13 +811,13 @@ ngx_quic_keys_update(ngx_event_t *ev) ngx_quic_hkdf_set(&seq[0], "tls13 quic ku", &next->client.secret, ¤t->client.secret); ngx_quic_hkdf_set(&seq[1], "tls13 quic key", - &next->client.key, &next->client.secret); + &client_key, &next->client.secret); ngx_quic_hkdf_set(&seq[2], "tls13 quic iv", &next->client.iv, &next->client.secret); ngx_quic_hkdf_set(&seq[3], "tls13 quic ku", &next->server.secret, ¤t->server.secret); ngx_quic_hkdf_set(&seq[4], "tls13 quic key", - &next->server.key, &next->server.secret); + &server_key, &next->server.secret); ngx_quic_hkdf_set(&seq[5], "tls13 quic iv", &next->server.iv, &next->server.secret); @@ -820,12 +827,14 @@ ngx_quic_keys_update(ngx_event_t *ev) } } - if (ngx_quic_crypto_init(ciphers.c, &next->client, 0, c->log) == NGX_ERROR) + if (ngx_quic_crypto_init(ciphers.c, &next->client, &client_key, 0, c->log) + == NGX_ERROR) { goto failed; } - if (ngx_quic_crypto_init(ciphers.c, &next->server, 
1, c->log) == NGX_ERROR) + if (ngx_quic_crypto_init(ciphers.c, &next->server, &server_key, 1, c->log) + == NGX_ERROR) { goto failed; } @@ -835,8 +844,8 @@ ngx_quic_keys_update(ngx_event_t *ev) ngx_explicit_memzero(current->server.secret.data, current->server.secret.len); - ngx_explicit_memzero(next->client.key.data, next->client.key.len); - ngx_explicit_memzero(next->server.key.data, next->server.key.len); + ngx_explicit_memzero(client_key.data, client_key.len); + ngx_explicit_memzero(server_key.data, server_key.len); return; @@ -927,10 +936,11 @@ ngx_quic_create_retry_packet(ngx_quic_he { u_char *start; ngx_str_t ad, itag; + ngx_quic_md_t key; ngx_quic_ciphers_t ciphers; /* 5.8. Retry Packet Integrity */ - static u_char key[16] = + static u_char key_data[16] = "\xbe\x0c\x69\x0b\x9f\x66\x57\x5a\x1d\x76\x6b\x54\xe3\x68\xc8\x4e"; static u_char nonce[NGX_QUIC_IV_LEN] = "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"; @@ -957,11 +967,13 @@ ngx_quic_create_retry_packet(ngx_quic_he return NGX_ERROR; } - secret.key.len = sizeof(key); - ngx_memcpy(secret.key.data, key, sizeof(key)); + key.len = sizeof(key_data); + ngx_memcpy(key.data, key_data, sizeof(key_data)); secret.iv.len = NGX_QUIC_IV_LEN; - if (ngx_quic_crypto_init(ciphers.c, &secret, 1, pkt->log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, &secret, &key, 1, pkt->log) + == NGX_ERROR) + { return NGX_ERROR; } diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -47,7 +47,6 @@ typedef struct { typedef struct { ngx_quic_md_t secret; - ngx_quic_md_t key; ngx_quic_iv_t iv; ngx_quic_md_t hp; ngx_quic_crypto_ctx_t *ctx; @@ -110,7 +109,7 @@ ngx_int_t ngx_quic_decrypt(ngx_quic_head void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers); ngx_int_t ngx_quic_crypto_init(const 
ngx_quic_cipher_t *cipher, - ngx_quic_secret_t *s, ngx_int_t enc, ngx_log_t *log); + ngx_quic_secret_t *s, ngx_quic_md_t *key, ngx_int_t enc, ngx_log_t *log); ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); void ngx_quic_crypto_cleanup(ngx_quic_secret_t *s); -- Sergey Kandaurov From thorvaldur.thorvaldsson at gmail.com Sun Oct 15 19:47:51 2023 From: thorvaldur.thorvaldsson at gmail.com (Thorvaldur Thorvaldsson) Date: Sun, 15 Oct 2023 19:47:51 +0000 Subject: [patch] quic PTO counter fixes In-Reply-To: References: Message-ID: Unsubscribe On Wed, 11 Oct 2023 at 13:59, Vladimir Homutov via nginx-devel < nginx-devel at nginx.org> wrote: > Hello, > > a couple of patches in the quic code: > > first patch improves a bit debugging, and the second patch contains > fixes for PTO counter calculation - see commit log for details. > > This helps with some clients in interop handshakeloss/handshakecorruption > testcases > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Mon Oct 16 14:19:48 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 16 Oct 2023 18:19:48 +0400 Subject: Memory Leak Issue in Nginx PCRE2 In-Reply-To: References: Message-ID: > On 11 Oct 2023, at 02:04, Maxim Dounin wrote: > > Hello! > > On Wed, Sep 27, 2023 at 01:13:44AM +0800, 上勾拳 wrote: > >> Dear Nginx Developers, >> >> I hope this email finds you well. I am reaching out to the mailing list for >> the first time to report and discuss an issue I encountered while working >> on supporting PCRE2 in OpenResty. If I have made any errors in my reporting >> or discussion, please do not hesitate to provide feedback. Your guidance is >> greatly appreciated. 
>> >> During my recent work, I used the sanitizer to inspect potential issues, >> and I identified a small memory leak in the PCRE2 code section of Nginx. >> While this issue does not seem to be critical, it could potentially disrupt >> memory checking tools. To help you reproduce the problem, I have included a >> minimal configuration below. Please note that this issue occurs when Nginx >> is configured to use PCRE2, and the version is 1.22.1 or higher. >> >> *Minimal Configuration for Reproduction:* >> worker_processes 1; >> daemon off; >> master_process off; >> error_log >> /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/error.log >> debug; >> pid >> /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/nginx.pid; >> >> http { >> access_log >> /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/access.log; >> #access_log off; >> default_type text/plain; >> keepalive_timeout 68000ms; >> server { >> listen 1984; >> #placeholder >> server_name 'localhost'; >> >> client_max_body_size 30M; >> #client_body_buffer_size 4k; >> >> # Begin preamble config... >> >> # End preamble config... >> >> # Begin test case config... 
>> >> location ~ '^/[a-d]$' { >> return 200; >> } >> } >> } >> events { >> accept_mutex off; >> >> worker_connections 64; >> } >> >> *nginx -V :* >> nginx version: nginx/1.25.1 (no pool) >> built by gcc 11.4.1 20230605 (Red Hat 11.4.1-2) (GCC) >> built with OpenSSL 1.1.1u 30 May 2023 >> TLS SNI support enabled >> configure arguments: >> --prefix=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/work/nginx >> --with-threads --with-pcre-jit --with-ipv6 >> --with-cc-opt='-fno-omit-frame-pointer -fsanitize=address >> -DNGX_LUA_USE_ASSERT -I/opt/pcre2/include -I/opt/ssl/include' >> --with-http_v2_module --with-http_v3_module --with-http_realip_module >> --with-http_ssl_module >> --add-module=/home/zhenzhongw/code/pcre_pr/ndk-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/set-misc-nginx-module >> --with-ld-opt='-fsanitize=address -L/opt/pcre2/lib -L/opt/ssl/lib >> -Wl,-rpath,/opt/pcre2/lib:/opt/drizzle/lib:/opt/ssl/lib' >> --without-mail_pop3_module --without-mail_imap_module >> --with-http_image_filter_module --without-mail_smtp_module --with-stream >> --with-stream_ssl_module --without-http_upstream_ip_hash_module >> --without-http_memcached_module --without-http_auth_basic_module >> --without-http_userid_module --with-http_auth_request_module >> --add-module=/home/zhenzhongw/code/pcre_pr/echo-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/memc-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/srcache-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-upstream-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/headers-more-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/drizzle-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/rds-json-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/coolkit-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/redis2-nginx-module >> 
--add-module=/home/zhenzhongw/code/pcre_pr/stream-lua-nginx-module >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-module >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-shm-module >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-delayed-load-module >> --with-http_gunzip_module --with-http_dav_module --with-select_module >> --with-poll_module --with-debug --with-poll_module --with-cc=gcc >> >> *The sanitizer tool reported the following error message: * >> ================================================================= >> ==555798==ERROR: LeakSanitizer: detected memory leaks >> >> Direct leak of 72 byte(s) in 1 object(s) allocated from: >> #0 0x7f502f6b4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07) >> #1 0x4a1737 in ngx_alloc src/os/unix/ngx_alloc.c:22 >> #2 0x525796 in ngx_regex_malloc src/core/ngx_regex.c:509 >> #3 0x7f502f3e745e in _pcre2_memctl_malloc_8 >> (/opt/pcre2/lib/libpcre2-8.so.0+0x1145e) >> #4 0x5771ad in ngx_http_regex_compile src/http/ngx_http_variables.c:2555 >> #5 0x536088 in ngx_http_core_regex_location >> src/http/ngx_http_core_module.c:3263 >> #6 0x537f94 in ngx_http_core_location >> src/http/ngx_http_core_module.c:3115 >> #7 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 >> #8 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 >> #9 0x5391ec in ngx_http_core_server src/http/ngx_http_core_module.c:2991 >> #10 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 >> #11 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 >> #12 0x528e4c in ngx_http_block src/http/ngx_http.c:239 >> #13 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 >> #14 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 >> #15 0x463f74 in ngx_init_cycle src/core/ngx_cycle.c:284 >> #12 0x528e4c in ngx_http_block src/http/ngx_http.c:239 >> #13 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 >> #14 0x46ba0a in ngx_conf_parse 
src/core/ngx_conf_file.c:319 >> #15 0x463f74 in ngx_init_cycle src/core/ngx_cycle.c:284 >> #16 0x4300c7 in main src/core/nginx.c:295 >> #17 0x7ff31a43feaf in __libc_start_call_main (/lib64/libc.so.6+0x3feaf) >> >> SUMMARY: AddressSanitizer: 72 byte(s) leaked in 1 allocation(s). >> >> *I have created a patch to address this memory leak issue, which I am >> sharing below:* >> diff --git a/src/core/ngx_regex.c b/src/core/ngx_regex.c >> index 91381f499..71f583789 100644 >> --- a/src/core/ngx_regex.c >> +++ b/src/core/ngx_regex.c >> @@ -600,6 +600,8 @@ ngx_regex_cleanup(void *data) >> * the new cycle, these will be re-allocated. >> */ >> >> + ngx_regex_malloc_init(NULL); >> + >> if (ngx_regex_compile_context) { >> pcre2_compile_context_free(ngx_regex_compile_context); >> ngx_regex_compile_context = NULL; >> @@ -611,6 +613,8 @@ ngx_regex_cleanup(void *data) >> ngx_regex_match_data_size = 0; >> } >> >> + ngx_regex_malloc_done(); >> + >> #endif >> } >> >> @@ -706,7 +710,13 @@ ngx_regex_module_init(ngx_cycle_t *cycle) >> ngx_regex_malloc_done(); >> >> ngx_regex_studies = NULL; >> + >> #if (NGX_PCRE2) >> + if (ngx_regex_compile_context) { >> + ngx_regex_malloc_init(NULL); >> + pcre2_compile_context_free(ngx_regex_compile_context); >> + ngx_regex_malloc_done(); >> + } >> ngx_regex_compile_context = NULL; >> #endif >> >> I kindly request your assistance in reviewing this matter and considering >> the patch for inclusion in Nginx. If you have any questions or need further >> information, please feel free to reach out to me. Your expertise and >> feedback are highly valuable in resolving this issue. > > Thank you for the report. > > Indeed, this looks like a small leak which manifests itself during > reconfiguration when nginx is compiled with PCRE2. > > The patch looks correct to me, though I think it would be better > to don't do anything with ngx_regex_compile_context in > ngx_regex_module_init(). 
Please take a look if the following > patch looks good to you: > > # HG changeset patch > # User Maxim Dounin > # Date 1696950530 -10800 > # Tue Oct 10 18:08:50 2023 +0300 > # Node ID 0ceb96f57592b77618fba4200797df977241ec9b > # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > Core: fixed memory leak on configuration reload with PCRE2. > > In ngx_regex_cleanup() allocator wasn't configured when calling > pcre2_compile_context_free() and pcre2_match_data_free(), resulting > in no ngx_free() call and leaked memory. Fix is ensure that allocator > is configured for global allocations, so that ngx_free() is actually > called to free memory. > > Additionally, ngx_regex_compile_context was cleared in > ngx_regex_module_init(). It should be either not cleared, so it will > be freed by ngx_regex_cleanup(), or properly freed. Fix is to > not clear it, so ngx_regex_cleanup() will be able to free it. > > Reported by ZhenZhong Wu, > https://mailman.nginx.org/pipermail/nginx-devel/2023-September/3Z5FIKUDRN2WBSL3JWTZJ7SXDA6YIWPB.html > > diff --git a/src/core/ngx_regex.c b/src/core/ngx_regex.c > --- a/src/core/ngx_regex.c > +++ b/src/core/ngx_regex.c > @@ -600,6 +600,8 @@ ngx_regex_cleanup(void *data) > * the new cycle, these will be re-allocated. > */ > > + ngx_regex_malloc_init(NULL); > + > if (ngx_regex_compile_context) { > pcre2_compile_context_free(ngx_regex_compile_context); > ngx_regex_compile_context = NULL; > @@ -611,6 +613,8 @@ ngx_regex_cleanup(void *data) > ngx_regex_match_data_size = 0; > } > > + ngx_regex_malloc_done(); > + > #endif > } > > @@ -706,9 +710,6 @@ ngx_regex_module_init(ngx_cycle_t *cycle > ngx_regex_malloc_done(); > > ngx_regex_studies = NULL; > -#if (NGX_PCRE2) > - ngx_regex_compile_context = NULL; > -#endif > > return NGX_OK; > } > Note that direct leaks are also reported when pcre2_compile_context_create() is called in runtime from a worker process, such as in ngx_http_ssi_regex_match(). The patch stands correct in this regard though. 
Anyway, leaking a compile context in a worker process doesn't seem to be harmful as they do not accumulate: it's only possible to allocate up to two contexts in a worker process: the 1st borrowed from the master process, and the 2nd allocated in runtime (replacing the COW'ed one). Currently, nginx configuration doesn't seem to provide a way to alter the default values in the compile context (per man pcre2api). So the proposed change does look correct. But potentially, if we'd want to change this behaviour, the patch has a subtle downside, which may prevent to apply such configuration changes: - on the 1st configuration reload, ngx_regex_compile() will be called with the compile context borrowed from a previous cycle (if any), so any compile context changes won't be applied to the new configuration - on subsequent reloads, this is not an issue, because a previous cycle cleanup will free and reset ngx_regex_compile_context, such that a current cycle runs will ngx_regex_compile_context == NULL, and it will cause to allocate a new context in the next cycle. To illustrate: compile[gen#0]: ngx_regex_compile_context 0x0 compile: ngx_regex_compile_context = 0x607000003580 init[gen#0]: ngx_regex_compile_context 0x607000003580 reload#1 compile[gen#1]: ngx_regex_compile_context 0x607000003580 init[gen#1]: ngx_regex_compile_context 0x607000003580 cleanup[gen#0]: ngx_regex_compile_context 0x607000003580 reload#2 compile[gen#2]: ngx_regex_compile_context 0x0 compile: ngx_regex_compile_context = 0x60700000a510 init[gen#2]: ngx_regex_compile_context 0x60700000a510 cleanup[gen#1]: ngx_regex_compile_context 0x60700000a510 Other than that, it looks good. -- Sergey Kandaurov From xeioex at nginx.com Tue Oct 17 01:10:42 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 17 Oct 2023 01:10:42 +0000 Subject: [njs] Modules: fixed items() method of a shared dictionary. 
Message-ID: details: https://hg.nginx.org/njs/rev/714fae197d83 branches: changeset: 2220:714fae197d83 user: Dmitry Volyntsev date: Mon Oct 16 18:09:37 2023 -0700 description: Modules: fixed items() method of a shared dictionary. Previously, under a memory pressure the method might return prematurely while still holding the mutex for the shared memory. This fixes #676 issue on Github. diffstat: nginx/ngx_js_shared_dict.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r b49a98886c02 -r 714fae197d83 nginx/ngx_js_shared_dict.c --- a/nginx/ngx_js_shared_dict.c Fri Oct 06 16:52:23 2023 -0700 +++ b/nginx/ngx_js_shared_dict.c Mon Oct 16 18:09:37 2023 -0700 @@ -819,7 +819,7 @@ njs_js_ext_shared_dict_items(njs_vm_t *v rc = njs_vm_array_alloc(vm, kv, 2); if (rc != NJS_OK) { - return NJS_ERROR; + goto fail; } value = njs_vm_array_push(vm, kv); From mdounin at mdounin.ru Tue Oct 17 04:01:30 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 17 Oct 2023 07:01:30 +0300 Subject: Memory Leak Issue in Nginx PCRE2 In-Reply-To: References: Message-ID: Hello! On Mon, Oct 16, 2023 at 06:19:48PM +0400, Sergey Kandaurov wrote: > > > On 11 Oct 2023, at 02:04, Maxim Dounin wrote: > > > > Hello! > > > > On Wed, Sep 27, 2023 at 01:13:44AM +0800, 上勾拳 wrote: > > > >> Dear Nginx Developers, > >> > >> I hope this email finds you well. I am reaching out to the mailing list for > >> the first time to report and discuss an issue I encountered while working > >> on supporting PCRE2 in OpenResty. If I have made any errors in my reporting > >> or discussion, please do not hesitate to provide feedback. Your guidance is > >> greatly appreciated. > >> > >> During my recent work, I used the sanitizer to inspect potential issues, > >> and I identified a small memory leak in the PCRE2 code section of Nginx. > >> While this issue does not seem to be critical, it could potentially disrupt > >> memory checking tools. 
To help you reproduce the problem, I have included a > >> minimal configuration below. Please note that this issue occurs when Nginx > >> is configured to use PCRE2, and the version is 1.22.1 or higher. > >> > >> *Minimal Configuration for Reproduction:* > >> worker_processes 1; > >> daemon off; > >> master_process off; > >> error_log > >> /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/error.log > >> debug; > >> pid > >> /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/nginx.pid; > >> > >> http { > >> access_log > >> /home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/servroot/logs/access.log; > >> #access_log off; > >> default_type text/plain; > >> keepalive_timeout 68000ms; > >> server { > >> listen 1984; > >> #placeholder > >> server_name 'localhost'; > >> > >> client_max_body_size 30M; > >> #client_body_buffer_size 4k; > >> > >> # Begin preamble config... > >> > >> # End preamble config... > >> > >> # Begin test case config... > >> > >> location ~ '^/[a-d]$' { > >> return 200; > >> } > >> } > >> } > >> events { > >> accept_mutex off; > >> > >> worker_connections 64; > >> } > >> > >> *nginx -V :* > >> nginx version: nginx/1.25.1 (no pool) > >> built by gcc 11.4.1 20230605 (Red Hat 11.4.1-2) (GCC) > >> built with OpenSSL 1.1.1u 30 May 2023 > >> TLS SNI support enabled > >> configure arguments: > >> --prefix=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/work/nginx > >> --with-threads --with-pcre-jit --with-ipv6 > >> --with-cc-opt='-fno-omit-frame-pointer -fsanitize=address > >> -DNGX_LUA_USE_ASSERT -I/opt/pcre2/include -I/opt/ssl/include' > >> --with-http_v2_module --with-http_v3_module --with-http_realip_module > >> --with-http_ssl_module > >> --add-module=/home/zhenzhongw/code/pcre_pr/ndk-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/set-misc-nginx-module > >> --with-ld-opt='-fsanitize=address -L/opt/pcre2/lib -L/opt/ssl/lib > >> -Wl,-rpath,/opt/pcre2/lib:/opt/drizzle/lib:/opt/ssl/lib' > >> --without-mail_pop3_module 
--without-mail_imap_module > >> --with-http_image_filter_module --without-mail_smtp_module --with-stream > >> --with-stream_ssl_module --without-http_upstream_ip_hash_module > >> --without-http_memcached_module --without-http_auth_basic_module > >> --without-http_userid_module --with-http_auth_request_module > >> --add-module=/home/zhenzhongw/code/pcre_pr/echo-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/memc-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/srcache-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-upstream-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/headers-more-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/drizzle-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/rds-json-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/coolkit-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/redis2-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/stream-lua-nginx-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-shm-module > >> --add-module=/home/zhenzhongw/code/pcre_pr/lua-nginx-module/t/data/fake-delayed-load-module > >> --with-http_gunzip_module --with-http_dav_module --with-select_module > >> --with-poll_module --with-debug --with-poll_module --with-cc=gcc > >> > >> *The sanitizer tool reported the following error message: * > >> ================================================================= > >> ==555798==ERROR: LeakSanitizer: detected memory leaks > >> > >> Direct leak of 72 byte(s) in 1 object(s) allocated from: > >> #0 0x7f502f6b4a07 in __interceptor_malloc (/lib64/libasan.so.6+0xb4a07) > >> #1 0x4a1737 in ngx_alloc src/os/unix/ngx_alloc.c:22 > >> #2 0x525796 in ngx_regex_malloc src/core/ngx_regex.c:509 > >> #3 0x7f502f3e745e in _pcre2_memctl_malloc_8 > 
>> (/opt/pcre2/lib/libpcre2-8.so.0+0x1145e) > >> #4 0x5771ad in ngx_http_regex_compile src/http/ngx_http_variables.c:2555 > >> #5 0x536088 in ngx_http_core_regex_location > >> src/http/ngx_http_core_module.c:3263 > >> #6 0x537f94 in ngx_http_core_location > >> src/http/ngx_http_core_module.c:3115 > >> #7 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > >> #8 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > >> #9 0x5391ec in ngx_http_core_server src/http/ngx_http_core_module.c:2991 > >> #10 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > >> #11 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > >> #12 0x528e4c in ngx_http_block src/http/ngx_http.c:239 > >> #13 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > >> #14 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > >> #15 0x463f74 in ngx_init_cycle src/core/ngx_cycle.c:284 > >> #12 0x528e4c in ngx_http_block src/http/ngx_http.c:239 > >> #13 0x46ba0a in ngx_conf_handler src/core/ngx_conf_file.c:463 > >> #14 0x46ba0a in ngx_conf_parse src/core/ngx_conf_file.c:319 > >> #15 0x463f74 in ngx_init_cycle src/core/ngx_cycle.c:284 > >> #16 0x4300c7 in main src/core/nginx.c:295 > >> #17 0x7ff31a43feaf in __libc_start_call_main (/lib64/libc.so.6+0x3feaf) > >> > >> SUMMARY: AddressSanitizer: 72 byte(s) leaked in 1 allocation(s). > >> > >> *I have created a patch to address this memory leak issue, which I am > >> sharing below:* > >> diff --git a/src/core/ngx_regex.c b/src/core/ngx_regex.c > >> index 91381f499..71f583789 100644 > >> --- a/src/core/ngx_regex.c > >> +++ b/src/core/ngx_regex.c > >> @@ -600,6 +600,8 @@ ngx_regex_cleanup(void *data) > >> * the new cycle, these will be re-allocated. 
> >> */ > >> > >> + ngx_regex_malloc_init(NULL); > >> + > >> if (ngx_regex_compile_context) { > >> pcre2_compile_context_free(ngx_regex_compile_context); > >> ngx_regex_compile_context = NULL; > >> @@ -611,6 +613,8 @@ ngx_regex_cleanup(void *data) > >> ngx_regex_match_data_size = 0; > >> } > >> > >> + ngx_regex_malloc_done(); > >> + > >> #endif > >> } > >> > >> @@ -706,7 +710,13 @@ ngx_regex_module_init(ngx_cycle_t *cycle) > >> ngx_regex_malloc_done(); > >> > >> ngx_regex_studies = NULL; > >> + > >> #if (NGX_PCRE2) > >> + if (ngx_regex_compile_context) { > >> + ngx_regex_malloc_init(NULL); > >> + pcre2_compile_context_free(ngx_regex_compile_context); > >> + ngx_regex_malloc_done(); > >> + } > >> ngx_regex_compile_context = NULL; > >> #endif > >> > >> I kindly request your assistance in reviewing this matter and considering > >> the patch for inclusion in Nginx. If you have any questions or need further > >> information, please feel free to reach out to me. Your expertise and > >> feedback are highly valuable in resolving this issue. > > > > Thank you for the report. > > > > Indeed, this looks like a small leak which manifests itself during > > reconfiguration when nginx is compiled with PCRE2. > > > > The patch looks correct to me, though I think it would be better > > to don't do anything with ngx_regex_compile_context in > > ngx_regex_module_init(). Please take a look if the following > > patch looks good to you: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1696950530 -10800 > > # Tue Oct 10 18:08:50 2023 +0300 > > # Node ID 0ceb96f57592b77618fba4200797df977241ec9b > > # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > > Core: fixed memory leak on configuration reload with PCRE2. > > > > In ngx_regex_cleanup() allocator wasn't configured when calling > > pcre2_compile_context_free() and pcre2_match_data_free(), resulting > > in no ngx_free() call and leaked memory. 
Fix is ensure that allocator > > is configured for global allocations, so that ngx_free() is actually > > called to free memory. > > > > Additionally, ngx_regex_compile_context was cleared in > > ngx_regex_module_init(). It should be either not cleared, so it will > > be freed by ngx_regex_cleanup(), or properly freed. Fix is to > > not clear it, so ngx_regex_cleanup() will be able to free it. > > > > Reported by ZhenZhong Wu, > > https://mailman.nginx.org/pipermail/nginx-devel/2023-September/3Z5FIKUDRN2WBSL3JWTZJ7SXDA6YIWPB.html > > > > diff --git a/src/core/ngx_regex.c b/src/core/ngx_regex.c > > --- a/src/core/ngx_regex.c > > +++ b/src/core/ngx_regex.c > > @@ -600,6 +600,8 @@ ngx_regex_cleanup(void *data) > > * the new cycle, these will be re-allocated. > > */ > > > > + ngx_regex_malloc_init(NULL); > > + > > if (ngx_regex_compile_context) { > > pcre2_compile_context_free(ngx_regex_compile_context); > > ngx_regex_compile_context = NULL; > > @@ -611,6 +613,8 @@ ngx_regex_cleanup(void *data) > > ngx_regex_match_data_size = 0; > > } > > > > + ngx_regex_malloc_done(); > > + > > #endif > > } > > > > @@ -706,9 +710,6 @@ ngx_regex_module_init(ngx_cycle_t *cycle > > ngx_regex_malloc_done(); > > > > ngx_regex_studies = NULL; > > -#if (NGX_PCRE2) > > - ngx_regex_compile_context = NULL; > > -#endif > > > > return NGX_OK; > > } > > > > Note that direct leaks are also reported when > pcre2_compile_context_create() is called in runtime from a worker > process, such as in ngx_http_ssi_regex_match(). The patch stands > correct in this regard though. Anyway, leaking a compile context > in a worker process doesn't seem to be harmful as they do not > accumulate: it's only possible to allocate up to two contexts > in a worker process: the 1st borrowed from the master process, > and the 2nd allocated in runtime (replacing the COW'ed one). 
In ngx_http_ssi_regex_match(), compile context is only allocated (via ngx_regex_compile()) if ngx_regex_compile_context is NULL, that is, no additional contexts will be allocated if COW'ed one is available. And with the patch, if there is an allocated compile context, it will be properly freed on cycle destruction in ngx_regex_cleanup(). > Currently, nginx configuration doesn't seem to provide a way to > alter the default values in the compile context (per man pcre2api). > So the proposed change does look correct. But potentially, if we'd > want to change this behaviour, the patch has a subtle downside, > which may prevent to apply such configuration changes: > - on the 1st configuration reload, ngx_regex_compile() will be called > with the compile context borrowed from a previous cycle (if any), so > any compile context changes won't be applied to the new configuration > - on subsequent reloads, this is not an issue, because a previous > cycle cleanup will free and reset ngx_regex_compile_context, such > that a current cycle runs will ngx_regex_compile_context == NULL, > and it will cause to allocate a new context in the next cycle. Sure. Compile contexts are expected to be equivalent regardless of when a particular context allocation happens, and the existing code relies on this. If we'll consider changing this, it would be required to somehow provide access from ngx_regex_compile() to the relevant cycle (that is, either the cycle being created during configuration parsing, or ngx_cycle for runtime compilation). And with such access, the ngx_regex_compile_context global variable and direct allocation from heap won't be needed, as it would be easier to simply allocate compile context from the cycle pool. > Other than that, it looks good. 
Thanks for looking, pushed to http://mdounin.ru/hg/nginx/ -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Tue Oct 17 10:38:23 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 17 Oct 2023 14:38:23 +0400 Subject: [PATCH 5 of 8] QUIC: reusing crypto contexts for packet protection In-Reply-To: References: <28f7491bc79771f9cfa8.1694099637@enoparse.local> <20230919135343.gyryhjjh5igd6xzl@N00W24XTQX> Message-ID: > On 13 Oct 2023, at 19:13, Sergey Kandaurov wrote: > > >> On 19 Sep 2023, at 17:53, Roman Arutyunyan wrote: >> >> Hi, >> >> On Thu, Sep 07, 2023 at 07:13:57PM +0400, Sergey Kandaurov wrote: >>> # HG changeset patch >>> # User Sergey Kandaurov >>> # Date 1694099424 -14400 >>> # Thu Sep 07 19:10:24 2023 +0400 >>> # Node ID 28f7491bc79771f9cfa882b1b5584fa48ea42e6b >>> # Parent 24e5d652ecc861f0c68607d20941abbf3726fdf1 >>> QUIC: reusing crypto contexts for packet protection. >>> >>> diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c >>> --- a/src/event/quic/ngx_event_quic.c >>> +++ b/src/event/quic/ngx_event_quic.c >>> @@ -225,6 +225,7 @@ ngx_quic_new_connection(ngx_connection_t >>> { >>> ngx_uint_t i; >>> ngx_quic_tp_t *ctp; >>> + ngx_pool_cleanup_t *cln; >>> ngx_quic_connection_t *qc; >>> >>> qc = ngx_pcalloc(c->pool, sizeof(ngx_quic_connection_t)); >>> @@ -237,6 +238,14 @@ ngx_quic_new_connection(ngx_connection_t >>> return NULL; >>> } >>> >>> + cln = ngx_pool_cleanup_add(c->pool, 0); >>> + if (cln == NULL) { >>> + return NULL; >>> + } >>> + >>> + cln->handler = ngx_quic_keys_cleanup; >>> + cln->data = qc->keys; >> >> I think it's better to cleanup keys in ngx_quic_close_connection(). >> We do the same with sockets by calling ngx_quic_close_sockets(). >> We just have to carefully handle the errors later in this function and cleanup >> keys when ngx_quic_open_sockets() fails. > > While this may look error prone compared with the cleanup handler, > you convinced me to remove it because of ngx_quic_send_early_cc(). 
> To be merged: > > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > --- a/src/event/quic/ngx_event_quic.c > +++ b/src/event/quic/ngx_event_quic.c > @@ -227,7 +227,6 @@ ngx_quic_new_connection(ngx_connection_t > { > ngx_uint_t i; > ngx_quic_tp_t *ctp; > - ngx_pool_cleanup_t *cln; > ngx_quic_connection_t *qc; > > qc = ngx_pcalloc(c->pool, sizeof(ngx_quic_connection_t)); > @@ -240,14 +239,6 @@ ngx_quic_new_connection(ngx_connection_t > return NULL; > } > > - cln = ngx_pool_cleanup_add(c->pool, 0); > - if (cln == NULL) { > - return NULL; > - } > - > - cln->handler = ngx_quic_keys_cleanup; > - cln->data = qc->keys; > - > qc->version = pkt->version; > > ngx_rbtree_init(&qc->streams.tree, &qc->streams.sentinel, > @@ -344,6 +335,7 @@ ngx_quic_new_connection(ngx_connection_t > qc->validated = pkt->validated; > > if (ngx_quic_open_sockets(c, qc, pkt) != NGX_OK) { > + ngx_quic_keys_cleanup(qc->keys); > return NULL; > } > > @@ -594,6 +586,8 @@ ngx_quic_close_connection(ngx_connection > > ngx_quic_close_sockets(c); > > + ngx_quic_keys_cleanup(qc->keys); > + > ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic close completed"); > > /* may be tested from SSL callback during SSL shutdown */ > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > --- a/src/event/quic/ngx_event_quic_output.c > +++ b/src/event/quic/ngx_event_quic_output.c > @@ -941,13 +941,17 @@ ngx_quic_send_early_cc(ngx_connection_t > res.data = dst; > > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > + ngx_quic_keys_cleanup(pkt.keys); > return NGX_ERROR; > } > > if (ngx_quic_send(c, res.data, res.len, c->sockaddr, c->socklen) < 0) { > + ngx_quic_keys_cleanup(pkt.keys); > return NGX_ERROR; > } > > + ngx_quic_keys_cleanup(pkt.keys); > + > return NGX_DONE; > } > > diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c > --- a/src/event/quic/ngx_event_quic_protection.c > +++ 
b/src/event/quic/ngx_event_quic_protection.c > @@ -189,10 +189,16 @@ ngx_quic_keys_set_initial_secret(ngx_qui > } > > if (ngx_quic_crypto_init(ciphers.c, server, 1, log) == NGX_ERROR) { > - return NGX_ERROR; > + goto failed; > } > > return NGX_OK; > + > +failed: > + > + ngx_quic_keys_cleanup(keys); > + > + return NGX_ERROR; > } > > > @@ -793,10 +799,8 @@ failed: > > > void > -ngx_quic_keys_cleanup(void *data) > +ngx_quic_keys_cleanup(ngx_quic_keys_t *keys) > { > - ngx_quic_keys_t *keys = data; > - > size_t i; > ngx_quic_secrets_t *secrets; > > diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h > --- a/src/event/quic/ngx_event_quic_protection.h > +++ b/src/event/quic/ngx_event_quic_protection.h > @@ -103,7 +103,7 @@ void ngx_quic_keys_discard(ngx_quic_keys > enum ssl_encryption_level_t level); > void ngx_quic_keys_switch(ngx_connection_t *c, ngx_quic_keys_t *keys); > void ngx_quic_keys_update(ngx_event_t *ev); > -void ngx_quic_keys_cleanup(void *data); > +void ngx_quic_keys_cleanup(ngx_quic_keys_t *keys); > ngx_int_t ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_str_t *res); > ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn); > void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); > > >> >>> qc->version = pkt->version; >>> >>> ngx_rbtree_init(&qc->streams.tree, &qc->streams.sentinel, >>> diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c >>> --- a/src/event/quic/ngx_event_quic_openssl_compat.c >>> +++ b/src/event/quic/ngx_event_quic_openssl_compat.c >>> @@ -54,9 +54,10 @@ struct ngx_quic_compat_s { >>> >>> >>> static void ngx_quic_compat_keylog_callback(const SSL *ssl, const char *line); >>> -static ngx_int_t ngx_quic_compat_set_encryption_secret(ngx_log_t *log, >>> +static ngx_int_t ngx_quic_compat_set_encryption_secret(ngx_connection_t *c, >>> ngx_quic_compat_keys_t *keys, enum ssl_encryption_level_t level, >>> const 
SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); >>> +static void ngx_quic_compat_cleanup_encryption_secret(void *data); >>> static int ngx_quic_compat_add_transport_params_callback(SSL *ssl, >>> unsigned int ext_type, unsigned int context, const unsigned char **out, >>> size_t *outlen, X509 *x, size_t chainidx, int *al, void *add_arg); >>> @@ -214,14 +215,14 @@ ngx_quic_compat_keylog_callback(const SS >>> com->method->set_read_secret((SSL *) ssl, level, cipher, secret, n); >>> com->read_record = 0; >>> >>> - (void) ngx_quic_compat_set_encryption_secret(c->log, &com->keys, level, >>> + (void) ngx_quic_compat_set_encryption_secret(c, &com->keys, level, >>> cipher, secret, n); >>> } >>> } >>> >>> >>> static ngx_int_t >>> -ngx_quic_compat_set_encryption_secret(ngx_log_t *log, >>> +ngx_quic_compat_set_encryption_secret(ngx_connection_t *c, >>> ngx_quic_compat_keys_t *keys, enum ssl_encryption_level_t level, >>> const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len) >>> { >>> @@ -231,6 +232,7 @@ ngx_quic_compat_set_encryption_secret(ng >>> ngx_quic_hkdf_t seq[2]; >>> ngx_quic_secret_t *peer_secret; >>> ngx_quic_ciphers_t ciphers; >>> + ngx_pool_cleanup_t *cln; >>> >>> peer_secret = &keys->secret; >>> >>> @@ -239,12 +241,12 @@ ngx_quic_compat_set_encryption_secret(ng >>> key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); >>> >>> if (key_len == NGX_ERROR) { >>> - ngx_ssl_error(NGX_LOG_INFO, log, 0, "unexpected cipher"); >>> + ngx_ssl_error(NGX_LOG_INFO, c->log, 0, "unexpected cipher"); >>> return NGX_ERROR; >>> } >>> >>> if (sizeof(peer_secret->secret.data) < secret_len) { >>> - ngx_log_error(NGX_LOG_ALERT, log, 0, >>> + ngx_log_error(NGX_LOG_ALERT, c->log, 0, >>> "unexpected secret len: %uz", secret_len); >>> return NGX_ERROR; >>> } >>> @@ -262,15 +264,42 @@ ngx_quic_compat_set_encryption_secret(ng >>> ngx_quic_hkdf_set(&seq[1], "tls13 iv", &peer_secret->iv, &secret_str); >>> >>> for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { 
>>> - if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, log) != NGX_OK) { >>> + if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, c->log) != NGX_OK) { >>> return NGX_ERROR; >>> } >>> } >>> >>> + ngx_quic_crypto_cleanup(peer_secret); >>> + >>> + if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { >>> + return NGX_ERROR; >>> + } >>> + >>> + /* register cleanup handler once */ >>> + >>> + if (level == ssl_encryption_handshake) { >> >> Does not look perfect, but I don't see a simpler and better solution. > > I don't see either, without introducing some state (see below). > >> >>> + cln = ngx_pool_cleanup_add(c->pool, 0); >>> + if (cln == NULL) { >> >> Cleanup peer_secret here? >> >> Alternatively, move this block up. > > Fixed, thanks. > > With introducing keys->cleanup: > > diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c > --- a/src/event/quic/ngx_event_quic_openssl_compat.c > +++ b/src/event/quic/ngx_event_quic_openssl_compat.c > @@ -25,6 +25,7 @@ > typedef struct { > ngx_quic_secret_t secret; > ngx_uint_t cipher; > + ngx_uint_t cleanup; /* unsigned cleanup:1 */ > } ngx_quic_compat_keys_t; > > > @@ -269,15 +270,11 @@ ngx_quic_compat_set_encryption_secret(ng > } > } > > - ngx_quic_crypto_cleanup(peer_secret); > - > - if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { > - return NGX_ERROR; > - } > - > /* register cleanup handler once */ > > - if (level == ssl_encryption_handshake) { > + if (!keys->cleanup) { > + keys->cleanup = 1; > + > cln = ngx_pool_cleanup_add(c->pool, 0); > if (cln == NULL) { > return NGX_ERROR; > @@ -287,6 +284,12 @@ ngx_quic_compat_set_encryption_secret(ng > cln->data = peer_secret; > } > > + ngx_quic_crypto_cleanup(peer_secret); > + > + if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { > + return NGX_ERROR; > + } > + > return NGX_OK; > } > Version without a cleanup field (to be merged). 
While it may look less readable (and I like it less), it allows to save ngx_uint_t per connection. diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -269,15 +269,12 @@ ngx_quic_compat_set_encryption_secret(ng } } - ngx_quic_crypto_cleanup(peer_secret); - - if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { - return NGX_ERROR; - } - /* register cleanup handler once */ - if (level == ssl_encryption_handshake) { + if (peer_secret->ctx) { + ngx_quic_crypto_cleanup(peer_secret); + + } else { cln = ngx_pool_cleanup_add(c->pool, 0); if (cln == NULL) { return NGX_ERROR; @@ -287,6 +284,10 @@ ngx_quic_compat_set_encryption_secret(ng cln->data = peer_secret; } + if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { + return NGX_ERROR; + } + return NGX_OK; } > >> >>> + return NGX_ERROR; >>> + } >>> + >>> + cln->handler = ngx_quic_compat_cleanup_encryption_secret; >>> + cln->data = peer_secret; >>> + } >>> + >>> return NGX_OK; >>> } >>> >>> >>> +static void >>> +ngx_quic_compat_cleanup_encryption_secret(void *data) >>> +{ >>> + ngx_quic_secret_t *secret = data; >>> + >>> + ngx_quic_crypto_cleanup(secret); >>> +} >>> + >>> + [..] -- Sergey Kandaurov From pluknet at nginx.com Tue Oct 17 11:25:41 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 17 Oct 2023 15:25:41 +0400 Subject: [PATCH] Improve performance when starting nginx with a lot of locations In-Reply-To: References: Message-ID: <6335624B-292F-4068-B99C-E9DABC64922B@nginx.com> > On 11 Oct 2023, at 02:56, Maxim Dounin wrote: > > Hello! > > On Thu, Oct 05, 2023 at 10:51:26AM +0900, Yusuke Nojima wrote: > >> Thank you for your comment! >> >>> Could you please provide some more details about the use case, >>> such as how locations are used, and what is the approximate number >>> of locations being used? 
>> >> Our team provides development environments to our company's engineers and QA. >> In this environment, engineers and QA can freely create VMs and deploy >> applications on them. >> >> Our nginx has the role of routing requests from the internet to all >> applications deployed in this environment. >> Additionally, it allows setting IP address restrictions, BASIC >> authentication, TLS client authentication, and other configurations >> for each application. >> >> To implement these requirements, we generate a location for each application. >> Currently, there are approximately 40,000 locations in our environment. > > Thank you for the details. Such configuration looks somewhat > sub-optimal, but understandable for a development / test > environment. And certainly 40k locations is a lot for the sorting > algorithm currently used. > >>> Further, since each location contains configuration for >>> all modules, such configurations are expected to require a lot of >>> memory >> >> Each of our nginx processes was consuming 5GB of memory in terms of >> resident size. >> This is not a problem as our servers have sufficient memory. >> >>> Rather, I would suggest recursive top-bottom merge sort implementation >>> instead, which is much simpler and uses stack as temporary storage >>> (so it'll naturally die if there will be a queue which requires >>> more space for sorting than we have). For the record, in my tests on M1 sorting 26^3 locations fit into 32k stack size (16k stack size renders the environment unusable). Judging by this (unscientific) test, running out of stack should not be a practicable issue. >>> >>> Please take a look if it works for you: >> >> I think this implementation is simple and easy to understand. >> Although the number of traversals of the list will increase compared >> to bottom-up, it will not affect the order. >> I believe this will provide sufficient optimization in terms of speed. > > Thanks for looking. 
In my limited testing, it is slightly faster > than your bottom-up implementation (and significantly faster than > the existing insertion sort when many locations are used). > > Below is the full patch (code unchanged), I'll commit it as soon > as some other nginx developer will review it. > > # HG changeset patch > # User Maxim Dounin > # Date 1696977468 -10800 > # Wed Oct 11 01:37:48 2023 +0300 > # Node ID b891840852ee5cc823eee1769d092ab50928919f > # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > Core: changed ngx_queue_sort() to use merge sort. > > This improves nginx startup times significantly when using very large number > of locations due computational complexity of the sorting algorithm being due to > used (insertion sort is O(n*n) on average, while merge sort is O(n*log(n))). nitpicking: E2MANYPARENS Using a colon might looks better (I don't mind though): used: insertion sort is O(n*n) on average, while merge sort is O(n*log(n)). > In particular, in a test configuration with 20k locations total startup > time is reduced from 8 seconds to 0.9 seconds. > > Prodded by Yusuke Nojima, > https://mailman.nginx.org/pipermail/nginx-devel/2023-September/NUL3Y2FPPFSHMPTFTL65KXSXNTX3NQMK.html I like the change, please commit. The thing to keep in mind is that it pessimizes the best case of sorted locations, which is O(n) with the insertion sort. Though given that both old and new algorithms give relatively good numbers for the best case (and it is hard to get a noticeable startup delay with very large number of locations in practice using merge sort), especially compared to the worst case of sorting perfectly reverse sorted locations with the insertion sort, I believe this is acceptable. 
> > diff --git a/src/core/ngx_queue.c b/src/core/ngx_queue.c > --- a/src/core/ngx_queue.c > +++ b/src/core/ngx_queue.c > @@ -9,6 +9,10 @@ > #include > > > +static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)); > + > + > /* > * find the middle queue element if the queue has odd number of elements > * or the first element of the queue's second part otherwise > @@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue) > } > > > -/* the stable insertion sort */ > +/* the stable merge sort */ > > void > ngx_queue_sort(ngx_queue_t *queue, > ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > { > - ngx_queue_t *q, *prev, *next; > + ngx_queue_t *q, tail; > > q = ngx_queue_head(queue); > > @@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue, > return; > } > > - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { > + q = ngx_queue_middle(queue); > + > + ngx_queue_split(queue, q, &tail); > + > + ngx_queue_sort(queue, cmp); > + ngx_queue_sort(&tail, cmp); > + > + ngx_queue_merge(queue, &tail, cmp); > +} > > - prev = ngx_queue_prev(q); > - next = ngx_queue_next(q); > > - ngx_queue_remove(q); > +static void > +ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, > + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) > +{ > + ngx_queue_t *q1, *q2; > + > + q1 = ngx_queue_head(queue); > + q2 = ngx_queue_head(tail); > > - do { > - if (cmp(prev, q) <= 0) { > - break; > - } > + for ( ;; ) { > + if (q1 == ngx_queue_sentinel(queue)) { > + ngx_queue_add(queue, tail); > + break; > + } > + > + if (q2 == ngx_queue_sentinel(tail)) { > + break; > + } > > - prev = ngx_queue_prev(prev); > + if (cmp(q1, q2) <= 0) { > + q1 = ngx_queue_next(q1); > + continue; > + } > > - } while (prev != ngx_queue_sentinel(queue)); > + ngx_queue_remove(q2); > + ngx_queue_insert_before(q1, q2); > > - ngx_queue_insert_after(prev, q); > + q2 = ngx_queue_head(tail); > } > } > diff --git 
a/src/core/ngx_queue.h b/src/core/ngx_queue.h > --- a/src/core/ngx_queue.h > +++ b/src/core/ngx_queue.h > @@ -47,6 +47,9 @@ struct ngx_queue_s { > (h)->prev = x > > > +#define ngx_queue_insert_before ngx_queue_insert_tail > + > + > #define ngx_queue_head(h) \ > (h)->next > > [..] -- Sergey Kandaurov From pluknet at nginx.com Tue Oct 17 16:22:19 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 17 Oct 2023 16:22:19 +0000 Subject: [nginx] Core: fixed memory leak on configuration reload with PCRE2. Message-ID: details: https://hg.nginx.org/nginx/rev/533bc2336df4 branches: changeset: 9166:533bc2336df4 user: Maxim Dounin date: Tue Oct 17 02:39:38 2023 +0300 description: Core: fixed memory leak on configuration reload with PCRE2. In ngx_regex_cleanup() allocator wasn't configured when calling pcre2_compile_context_free() and pcre2_match_data_free(), resulting in no ngx_free() call and leaked memory. Fix is ensure that allocator is configured for global allocations, so that ngx_free() is actually called to free memory. Additionally, ngx_regex_compile_context was cleared in ngx_regex_module_init(). It should be either not cleared, so it will be freed by ngx_regex_cleanup(), or properly freed. Fix is to not clear it, so ngx_regex_cleanup() will be able to free it. Reported by ZhenZhong Wu, https://mailman.nginx.org/pipermail/nginx-devel/2023-September/3Z5FIKUDRN2WBSL3JWTZJ7SXDA6YIWPB.html diffstat: src/core/ngx_regex.c | 7 ++++--- 1 files changed, 4 insertions(+), 3 deletions(-) diffs (31 lines): diff -r cdda286c0f1b -r 533bc2336df4 src/core/ngx_regex.c --- a/src/core/ngx_regex.c Tue Oct 10 15:13:39 2023 +0300 +++ b/src/core/ngx_regex.c Tue Oct 17 02:39:38 2023 +0300 @@ -600,6 +600,8 @@ ngx_regex_cleanup(void *data) * the new cycle, these will be re-allocated. 
*/ + ngx_regex_malloc_init(NULL); + if (ngx_regex_compile_context) { pcre2_compile_context_free(ngx_regex_compile_context); ngx_regex_compile_context = NULL; @@ -611,6 +613,8 @@ ngx_regex_cleanup(void *data) ngx_regex_match_data_size = 0; } + ngx_regex_malloc_done(); + #endif } @@ -706,9 +710,6 @@ ngx_regex_module_init(ngx_cycle_t *cycle ngx_regex_malloc_done(); ngx_regex_studies = NULL; -#if (NGX_PCRE2) - ngx_regex_compile_context = NULL; -#endif return NGX_OK; } From xeioex at nginx.com Wed Oct 18 00:52:30 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Wed, 18 Oct 2023 00:52:30 +0000 Subject: [njs] Fixed RegExp.prototype.exec() with global regexp and unicode input. Message-ID: details: https://hg.nginx.org/njs/rev/c0ff44d66ffb branches: changeset: 2221:c0ff44d66ffb user: Dmitry Volyntsev date: Tue Oct 17 17:51:39 2023 -0700 description: Fixed RegExp.prototype.exec() with global regexp and unicode input. Previously, when exactly 32 characters unicode string was provided and the "lastIndex" value of "this" regexp was equal to 32 too, the njs_string_utf8_offset() was called with invalid index argument (longer than a size of the string). As a result njs_string_utf8_offset() returned garbage values. This was manifested in the following ways: 1) InternalError: pcre2_match() failed: bad offset value 2) Very slow replace calls with global regexps, for example in expressions like: str.replace(//g). This fixes #677 on Github. 
diffstat: src/njs_regexp.c | 11 ++++++++--- src/test/njs_unit_test.c | 6 ++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diffs (37 lines): diff -r 714fae197d83 -r c0ff44d66ffb src/njs_regexp.c --- a/src/njs_regexp.c Mon Oct 16 18:09:37 2023 -0700 +++ b/src/njs_regexp.c Tue Oct 17 17:51:39 2023 -0700 @@ -936,9 +936,14 @@ njs_regexp_builtin_exec(njs_vm_t *vm, nj offset = last_index; } else { - offset = njs_string_utf8_offset(string.start, - string.start + string.size, last_index) - - string.start; + if ((size_t) last_index < string.length) { + offset = njs_string_utf8_offset(string.start, + string.start + string.size, + last_index) + - string.start; + } else { + offset = string.size; + } } ret = njs_regexp_match(vm, &pattern->regex[type], string.start, offset, diff -r 714fae197d83 -r c0ff44d66ffb src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Mon Oct 16 18:09:37 2023 -0700 +++ b/src/test/njs_unit_test.c Tue Oct 17 17:51:39 2023 -0700 @@ -9261,6 +9261,12 @@ static njs_unit_test_t njs_test[] = { njs_str("'abc'.replaceAll(/^/g, '|$&|')"), njs_str("||abc") }, + { njs_str("('α'.repeat(30) + 'aa').replace(/a/g, '#')"), + njs_str("αααααααααααααααααααααααααααααα##") }, + + { njs_str("('α'.repeat(30) + 'aa').replaceAll(/a/g, '#')"), + njs_str("αααααααααααααααααααααααααααααα##") }, + { njs_str("var uri ='/u/v1/Aa/bB?type=m3u8&mt=42';" "uri.replace(/^\\/u\\/v1\\/[^/]*\\/([^\?]*)\\?.*(mt=[^&]*).*$/, '$1|$2')"), njs_str("bB|mt=42") }, From mdounin at mdounin.ru Wed Oct 18 02:02:32 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 18 Oct 2023 05:02:32 +0300 Subject: [PATCH] Improve performance when starting nginx with a lot of locations In-Reply-To: <6335624B-292F-4068-B99C-E9DABC64922B@nginx.com> References: <6335624B-292F-4068-B99C-E9DABC64922B@nginx.com> Message-ID: Hello! On Tue, Oct 17, 2023 at 03:25:41PM +0400, Sergey Kandaurov wrote: > > On 11 Oct 2023, at 02:56, Maxim Dounin wrote: > > > > Hello! 
> > > > On Thu, Oct 05, 2023 at 10:51:26AM +0900, Yusuke Nojima wrote: > > > >> Thank you for your comment! > >> > >>> Could you please provide some more details about the use case, > >>> such as how locations are used, and what is the approximate number > >>> of locations being used? > >> > >> Our team provides development environments to our company's engineers and QA. > >> In this environment, engineers and QA can freely create VMs and deploy > >> applications on them. > >> > >> Our nginx has the role of routing requests from the internet to all > >> applications deployed in this environment. > >> Additionally, it allows setting IP address restrictions, BASIC > >> authentication, TLS client authentication, and other configurations > >> for each application. > >> > >> To implement these requirements, we generate a location for each application. > >> Currently, there are approximately 40,000 locations in our environment. > > > > Thank you for the details. Such configuration looks somewhat > > sub-optimal, but understandable for a development / test > > environment. And certainly 40k locations is a lot for the sorting > > algorithm currently used. > > > >>> Further, since each location contains configuration for > >>> all modules, such configurations are expected to require a lot of > >>> memory > >> > >> Each of our nginx processes was consuming 5GB of memory in terms of > >> resident size. > >> This is not a problem as our servers have sufficient memory. > >> > >>> Rather, I would suggest recursive top-bottom merge sort implementation > >>> instead, which is much simpler and uses stack as temporary storage > >>> (so it'll naturally die if there will be a queue which requires > >>> more space for sorting than we have). > > For the record, in my tests on M1 sorting 26^3 locations fit into > 32k stack size (16k stack size renders the environment unusable). > Judging by this (unscientific) test, running out of stack should > not be a practicable issue. 
The recursive function uses ngx_queue_t *q, tail; on stack, that is, 3 pointers, so it would need more than 1000 recursive calls to overflow 32k stack, which is not expected to happen unless there are more than 2^1000 locations (and it is certainly not possible to have more than 2^64 locations on 64-bit platforms). For 26^3 locations, sorting would use maximum recursion depth of 15, so would require something like 360 bytes of stack (which is certainly less than required by other nginx functions). > >>> > >>> Please take a look if it works for you: > >> > >> I think this implementation is simple and easy to understand. > >> Although the number of traversals of the list will increase compared > >> to bottom-up, it will not affect the order. > >> I believe this will provide sufficient optimization in terms of speed. > > > > Thanks for looking. In my limited testing, it is slightly faster > > than your bottom-up implementation (and significantly faster than > > the existing insertion sort when many locations are used). > > > > Below is the full patch (code unchanged), I'll commit it as soon > > as some other nginx developer will review it. > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1696977468 -10800 > > # Wed Oct 11 01:37:48 2023 +0300 > > # Node ID b891840852ee5cc823eee1769d092ab50928919f > > # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > > Core: changed ngx_queue_sort() to use merge sort. > > > > This improves nginx startup times significantly when using very large number > > of locations due computational complexity of the sorting algorithm being > > due to Fixed, thnx. > > used (insertion sort is O(n*n) on average, while merge sort is O(n*log(n))). > > nitpicking: E2MANYPARENS > > Using a colon might looks better (I don't mind though): > used: insertion sort is O(n*n) on average, while merge sort is O(n*log(n)). Thanks, changed. 
> > In particular, in a test configuration with 20k locations total startup > > time is reduced from 8 seconds to 0.9 seconds. > > > > Prodded by Yusuke Nojima, > > https://mailman.nginx.org/pipermail/nginx-devel/2023-September/NUL3Y2FPPFSHMPTFTL65KXSXNTX3NQMK.html > > I like the change, please commit. > > The thing to keep in mind is that it pessimizes the best case > of sorted locations, which is O(n) with the insertion sort. > Though given that both old and new algorithms give relatively > good numbers for the best case (and it is hard to get a > noticeable startup delay with very large number of locations > in practice using merge sort), especially compared to the worst > case of sorting perfectly reverse sorted locations with the > insertion sort, I believe this is acceptable. Sure, theoretically insertion sort's best case is slightly better, but I don't think it matters: if at all, the difference is negligible and unlikely can be measured, while noticeable startup delays with large number of non-sorted locations are easily observed with insertion sort. 
For the record, here are "nginx -t" times with 20k pre-sorted locations, with insertion sort (before the patch): 0.64 real 0.49 user 0.14 sys 0.59 real 0.51 user 0.07 sys 0.61 real 0.48 user 0.12 sys 0.60 real 0.49 user 0.11 sys 0.61 real 0.51 user 0.09 sys And with merge sort (after the patch): 0.65 real 0.50 user 0.14 sys 0.63 real 0.49 user 0.14 sys 0.63 real 0.52 user 0.10 sys 0.62 real 0.49 user 0.12 sys 0.67 real 0.55 user 0.11 sys And ministat(1) for the user times: $ ministat insert merge x insert + merge +------------------------------------------------------------------------------+ | * x | |x * + x + +| | |_|_____M______A___M_________|A___________________________| | +------------------------------------------------------------------------------+ N Min Max Median Avg Stddev x 5 0.48 0.51 0.49 0.496 0.013416408 + 5 0.49 0.55 0.5 0.51 0.025495098 No difference proven at 95.0% confidence Thanks for looking, pushed to http://mdounin.ru/hg/nginx/ -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Wed Oct 18 10:38:47 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 10:38:47 +0000 Subject: [nginx] Core: changed ngx_queue_sort() to use merge sort. Message-ID: details: https://hg.nginx.org/nginx/rev/3038bd4d7816 branches: changeset: 9167:3038bd4d7816 user: Maxim Dounin date: Wed Oct 18 04:30:11 2023 +0300 description: Core: changed ngx_queue_sort() to use merge sort. This improves nginx startup times significantly when using very large number of locations due to computational complexity of the sorting algorithm being used: insertion sort is O(n*n) on average, while merge sort is O(n*log(n)). In particular, in a test configuration with 20k locations total startup time is reduced from 8 seconds to 0.9 seconds. 
Prodded by Yusuke Nojima, https://mailman.nginx.org/pipermail/nginx-devel/2023-September/NUL3Y2FPPFSHMPTFTL65KXSXNTX3NQMK.html diffstat: src/core/ngx_queue.c | 52 +++++++++++++++++++++++++++++++++++++++------------- src/core/ngx_queue.h | 3 +++ 2 files changed, 42 insertions(+), 13 deletions(-) diffs (99 lines): diff -r 533bc2336df4 -r 3038bd4d7816 src/core/ngx_queue.c --- a/src/core/ngx_queue.c Tue Oct 17 02:39:38 2023 +0300 +++ b/src/core/ngx_queue.c Wed Oct 18 04:30:11 2023 +0300 @@ -9,6 +9,10 @@ #include +static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)); + + /* * find the middle queue element if the queue has odd number of elements * or the first element of the queue's second part otherwise @@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue) } -/* the stable insertion sort */ +/* the stable merge sort */ void ngx_queue_sort(ngx_queue_t *queue, ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) { - ngx_queue_t *q, *prev, *next; + ngx_queue_t *q, tail; q = ngx_queue_head(queue); @@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue, return; } - for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) { + q = ngx_queue_middle(queue); + + ngx_queue_split(queue, q, &tail); + + ngx_queue_sort(queue, cmp); + ngx_queue_sort(&tail, cmp); + + ngx_queue_merge(queue, &tail, cmp); +} - prev = ngx_queue_prev(q); - next = ngx_queue_next(q); - ngx_queue_remove(q); +static void +ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail, + ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *)) +{ + ngx_queue_t *q1, *q2; + + q1 = ngx_queue_head(queue); + q2 = ngx_queue_head(tail); - do { - if (cmp(prev, q) <= 0) { - break; - } + for ( ;; ) { + if (q1 == ngx_queue_sentinel(queue)) { + ngx_queue_add(queue, tail); + break; + } + + if (q2 == ngx_queue_sentinel(tail)) { + break; + } - prev = ngx_queue_prev(prev); + if (cmp(q1, q2) <= 0) { + q1 = ngx_queue_next(q1); + continue; + 
} - } while (prev != ngx_queue_sentinel(queue)); + ngx_queue_remove(q2); + ngx_queue_insert_before(q1, q2); - ngx_queue_insert_after(prev, q); + q2 = ngx_queue_head(tail); } } diff -r 533bc2336df4 -r 3038bd4d7816 src/core/ngx_queue.h --- a/src/core/ngx_queue.h Tue Oct 17 02:39:38 2023 +0300 +++ b/src/core/ngx_queue.h Wed Oct 18 04:30:11 2023 +0300 @@ -47,6 +47,9 @@ struct ngx_queue_s { (h)->prev = x +#define ngx_queue_insert_before ngx_queue_insert_tail + + #define ngx_queue_head(h) \ (h)->next From pluknet at nginx.com Wed Oct 18 15:26:42 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:42 +0400 Subject: [PATCH 00 of 11] [quic] reusing crypto contexts, and more #2 In-Reply-To: References: Message-ID: Updated series to address arut@ comments: - patches #1, #2, #4 unchanged - patch #3 replaced with keys check in ngx_quic_ack_packet - #5 updates ngx_quic_keys_cleanup and ngx_quic_compat_set_encryption_secret - factored out common code for ngx_quic_crypto_open/seal - ngx_quic_crypto_hp cleanup for CRYPTO_chacha_20 - new change to simpify ngx_quic_ciphers() API. - new change to remove key field from ngx_quic_secret_t - assorted fixes in using ngx_explicit_memzero From pluknet at nginx.com Wed Oct 18 15:26:43 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:43 +0400 Subject: [PATCH 01 of 11] QUIC: split keys availability checks to read and write sides In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1693497250 -14400 # Thu Aug 31 19:54:10 2023 +0400 # Node ID ff98ae7d261e1a7f58963ac91eac9caecc9d6aee # Parent 3038bd4d78169a5e8a2624d79cf76f45f0805ddc QUIC: split keys availability checks to read and write sides. Keys may be released by TLS stack in different times, so it makes sense to check this independently as well. This allows to fine-tune what key direction is used when checking keys availability. 
When discarding, server keys are now marked in addition to client keys. diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -530,7 +530,7 @@ ngx_quic_close_connection(ngx_connection for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { ctx = &qc->send_ctx[i]; - if (!ngx_quic_keys_available(qc->keys, ctx->level)) { + if (!ngx_quic_keys_available(qc->keys, ctx->level, 1)) { continue; } @@ -959,7 +959,7 @@ ngx_quic_handle_payload(ngx_connection_t c->log->action = "decrypting packet"; - if (!ngx_quic_keys_available(qc->keys, pkt->level)) { + if (!ngx_quic_keys_available(qc->keys, pkt->level, 0)) { ngx_log_error(NGX_LOG_INFO, c->log, 0, "quic no %s keys, ignoring packet", ngx_quic_level_name(pkt->level)); @@ -1082,7 +1082,9 @@ ngx_quic_discard_ctx(ngx_connection_t *c qc = ngx_quic_get_connection(c); - if (!ngx_quic_keys_available(qc->keys, level)) { + if (!ngx_quic_keys_available(qc->keys, level, 0) + && !ngx_quic_keys_available(qc->keys, level, 1)) + { return; } diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -672,9 +672,13 @@ ngx_quic_keys_set_encryption_secret(ngx_ ngx_uint_t ngx_quic_keys_available(ngx_quic_keys_t *keys, - enum ssl_encryption_level_t level) + enum ssl_encryption_level_t level, ngx_uint_t is_write) { - return keys->secrets[level].client.key.len != 0; + if (is_write == 0) { + return keys->secrets[level].client.key.len != 0; + } + + return keys->secrets[level].server.key.len != 0; } @@ -683,6 +687,7 @@ ngx_quic_keys_discard(ngx_quic_keys_t *k enum ssl_encryption_level_t level) { keys->secrets[level].client.key.len = 0; + keys->secrets[level].server.key.len = 0; } diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- 
a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -95,7 +95,7 @@ ngx_int_t ngx_quic_keys_set_encryption_s enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); ngx_uint_t ngx_quic_keys_available(ngx_quic_keys_t *keys, - enum ssl_encryption_level_t level); + enum ssl_encryption_level_t level, ngx_uint_t is_write); void ngx_quic_keys_discard(ngx_quic_keys_t *keys, enum ssl_encryption_level_t level); void ngx_quic_keys_switch(ngx_connection_t *c, ngx_quic_keys_t *keys); diff --git a/src/event/quic/ngx_event_quic_ssl.c b/src/event/quic/ngx_event_quic_ssl.c --- a/src/event/quic/ngx_event_quic_ssl.c +++ b/src/event/quic/ngx_event_quic_ssl.c @@ -434,7 +434,7 @@ ngx_quic_crypto_input(ngx_connection_t * } if (n <= 0 || SSL_in_init(ssl_conn)) { - if (ngx_quic_keys_available(qc->keys, ssl_encryption_early_data) + if (ngx_quic_keys_available(qc->keys, ssl_encryption_early_data, 0) && qc->client_tp_done) { if (ngx_quic_init_streams(c) != NGX_OK) { From pluknet at nginx.com Wed Oct 18 15:26:44 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:44 +0400 Subject: [PATCH 02 of 11] QUIC: added safety belt to prevent using discarded keys In-Reply-To: References: Message-ID: <0ff1706a4affd40d847f.1697642804@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1697633594 -14400 # Wed Oct 18 16:53:14 2023 +0400 # Node ID 0ff1706a4affd40d847fd39d7c3c98f459cae6f1 # Parent ff98ae7d261e1a7f58963ac91eac9caecc9d6aee QUIC: added safety belt to prevent using discarded keys. In addition to triggering alert, it ensures that such packets won't be sent. With the previous change that marks server keys as discarded by zeroing the key length, it is now an error to send packets with discarded keys. 
OpenSSL based stacks tolerate such behaviour because key length isn't used in packet protection, but BoringSSL will raise the UNSUPPORTED_KEY_SIZE cipher error. It won't be possible to use discarded keys with reused crypto contexts as it happens in subsequent changes. diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -519,6 +519,21 @@ ngx_quic_output_packet(ngx_connection_t qc = ngx_quic_get_connection(c); + if (!ngx_quic_keys_available(qc->keys, ctx->level, 1)) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, "quic %s write keys discarded", + ngx_quic_level_name(ctx->level)); + + while (!ngx_queue_empty(&ctx->frames)) { + q = ngx_queue_head(&ctx->frames); + ngx_queue_remove(q); + + f = ngx_queue_data(q, ngx_quic_frame_t, queue); + ngx_quic_free_frame(c, f); + } + + return 0; + } + ngx_quic_init_packet(c, ctx, &pkt, qc->path); min_payload = ngx_quic_payload_size(&pkt, min); From pluknet at nginx.com Wed Oct 18 15:26:45 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:45 +0400 Subject: [PATCH 03 of 11] QUIC: prevented generating ACK frames with discarded keys In-Reply-To: References: Message-ID: <156fe168964961d3496e.1697642805@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1697633595 -14400 # Wed Oct 18 16:53:15 2023 +0400 # Node ID 156fe168964961d3496e6fe0567b375bdb42c784 # Parent 0ff1706a4affd40d847fd39d7c3c98f459cae6f1 QUIC: prevented generating ACK frames with discarded keys. Previously it was possible to generate ACK frames using formally discarded protection keys, in particular, when acknowledging a client Handshake packet used to complete the TLS handshake and to discard handshake protection keys. As it happens late in packet processing, it could be possible to generate ACK frames after the keys were already discarded. 
ACK frames are generated from ngx_quic_ack_packet(), either using a posted push event, which involves ngx_quic_generate_ack() as a part of the final packet assembling, or directly in ngx_quic_ack_packet(), such as when there is no room to add a new ACK range or when the received packet is out of order. The added keys availability check is used to avoid generating late ACK frames in both cases. diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -907,6 +907,10 @@ ngx_quic_ack_packet(ngx_connection_t *c, " nranges:%ui", pkt->pn, (int64_t) ctx->largest_range, ctx->first_range, ctx->nranges); + if (!ngx_quic_keys_available(qc->keys, ctx->level, 1)) { + return NGX_OK; + } + prev_pending = ctx->pending_ack; if (pkt->need_ack) { From pluknet at nginx.com Wed Oct 18 15:26:46 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:46 +0400 Subject: [PATCH 04 of 11] QUIC: renamed protection functions In-Reply-To: References: Message-ID: <823ff4d0e09a2406b535.1697642806@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1697633598 -14400 # Wed Oct 18 16:53:18 2023 +0400 # Node ID 823ff4d0e09a2406b53556ceba0b7537d2c14feb # Parent 156fe168964961d3496e6fe0567b375bdb42c784 QUIC: renamed protection functions. 
Now these functions have names ngx_quic_crypto_XXX(): - ngx_quic_tls_open() -> ngx_quic_crypto_open() - ngx_quic_tls_seal() -> ngx_quic_crypto_seal() - ngx_quic_tls_hp() -> ngx_quic_crypto_hp() diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -578,8 +578,8 @@ ngx_quic_compat_create_record(ngx_quic_c ngx_memcpy(nonce, secret->iv.data, secret->iv.len); ngx_quic_compute_nonce(nonce, sizeof(nonce), rec->number); - if (ngx_quic_tls_seal(ciphers.c, secret, &out, - nonce, &rec->payload, &ad, rec->log) + if (ngx_quic_crypto_seal(ciphers.c, secret, &out, + nonce, &rec->payload, &ad, rec->log) != NGX_OK) { return NGX_ERROR; diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -26,10 +26,10 @@ static ngx_int_t ngx_hkdf_extract(u_char static uint64_t ngx_quic_parse_pn(u_char **pos, ngx_int_t len, u_char *mask, uint64_t *largest_pn); -static ngx_int_t ngx_quic_tls_open(const ngx_quic_cipher_t *cipher, +static ngx_int_t ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); -static ngx_int_t ngx_quic_tls_hp(ngx_log_t *log, const EVP_CIPHER *cipher, +static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in); static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, @@ -344,7 +344,7 @@ failed: static ngx_int_t -ngx_quic_tls_open(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, +ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { @@ -449,7 +449,7 @@ ngx_quic_tls_open(const ngx_quic_cipher_ 
ngx_int_t -ngx_quic_tls_seal(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, +ngx_quic_crypto_seal(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { @@ -565,7 +565,7 @@ ngx_quic_tls_seal(const ngx_quic_cipher_ static ngx_int_t -ngx_quic_tls_hp(ngx_log_t *log, const EVP_CIPHER *cipher, +ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in) { int outlen; @@ -801,15 +801,15 @@ ngx_quic_create_packet(ngx_quic_header_t ngx_memcpy(nonce, secret->iv.data, secret->iv.len); ngx_quic_compute_nonce(nonce, sizeof(nonce), pkt->number); - if (ngx_quic_tls_seal(ciphers.c, secret, &out, - nonce, &pkt->payload, &ad, pkt->log) + if (ngx_quic_crypto_seal(ciphers.c, secret, &out, + nonce, &pkt->payload, &ad, pkt->log) != NGX_OK) { return NGX_ERROR; } sample = &out.data[4 - pkt->num_len]; - if (ngx_quic_tls_hp(pkt->log, ciphers.hp, secret, mask, sample) + if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) != NGX_OK) { return NGX_ERROR; @@ -862,7 +862,8 @@ ngx_quic_create_retry_packet(ngx_quic_he ngx_memcpy(secret.key.data, key, sizeof(key)); secret.iv.len = NGX_QUIC_IV_LEN; - if (ngx_quic_tls_seal(ciphers.c, &secret, &itag, nonce, &in, &ad, pkt->log) + if (ngx_quic_crypto_seal(ciphers.c, &secret, &itag, nonce, &in, &ad, + pkt->log) != NGX_OK) { return NGX_ERROR; @@ -1032,7 +1033,7 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, /* header protection */ - if (ngx_quic_tls_hp(pkt->log, ciphers.hp, secret, mask, sample) + if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) != NGX_OK) { return NGX_DECLINED; @@ -1087,8 +1088,8 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, pkt->payload.len = in.len - NGX_QUIC_TAG_LEN; pkt->payload.data = pkt->plaintext + ad.len; - rc = ngx_quic_tls_open(ciphers.c, secret, &pkt->payload, - nonce, &in, &ad, pkt->log); + rc = ngx_quic_crypto_open(ciphers.c, secret, &pkt->payload, + nonce, &in, 
&ad, pkt->log); if (rc != NGX_OK) { return NGX_DECLINED; } diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -105,7 +105,7 @@ ngx_int_t ngx_quic_decrypt(ngx_quic_head void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, enum ssl_encryption_level_t level); -ngx_int_t ngx_quic_tls_seal(const ngx_quic_cipher_t *cipher, +ngx_int_t ngx_quic_crypto_seal(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); ngx_int_t ngx_quic_hkdf_expand(ngx_quic_hkdf_t *hkdf, const EVP_MD *digest, From pluknet at nginx.com Wed Oct 18 15:26:47 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:47 +0400 Subject: [PATCH 05 of 11] QUIC: reusing crypto contexts for packet protection In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1697642584 -14400 # Wed Oct 18 19:23:04 2023 +0400 # Node ID a634242fbb8c40fbadcb43c1d3183996ff422728 # Parent 823ff4d0e09a2406b53556ceba0b7537d2c14feb QUIC: reusing crypto contexts for packet protection. 
diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -335,6 +335,7 @@ ngx_quic_new_connection(ngx_connection_t qc->validated = pkt->validated; if (ngx_quic_open_sockets(c, qc, pkt) != NGX_OK) { + ngx_quic_keys_cleanup(qc->keys); return NULL; } @@ -585,6 +586,8 @@ ngx_quic_close_connection(ngx_connection ngx_quic_close_sockets(c); + ngx_quic_keys_cleanup(qc->keys); + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic close completed"); /* may be tested from SSL callback during SSL shutdown */ diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -54,9 +54,10 @@ struct ngx_quic_compat_s { static void ngx_quic_compat_keylog_callback(const SSL *ssl, const char *line); -static ngx_int_t ngx_quic_compat_set_encryption_secret(ngx_log_t *log, +static ngx_int_t ngx_quic_compat_set_encryption_secret(ngx_connection_t *c, ngx_quic_compat_keys_t *keys, enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); +static void ngx_quic_compat_cleanup_encryption_secret(void *data); static int ngx_quic_compat_add_transport_params_callback(SSL *ssl, unsigned int ext_type, unsigned int context, const unsigned char **out, size_t *outlen, X509 *x, size_t chainidx, int *al, void *add_arg); @@ -214,14 +215,14 @@ ngx_quic_compat_keylog_callback(const SS com->method->set_read_secret((SSL *) ssl, level, cipher, secret, n); com->read_record = 0; - (void) ngx_quic_compat_set_encryption_secret(c->log, &com->keys, level, + (void) ngx_quic_compat_set_encryption_secret(c, &com->keys, level, cipher, secret, n); } } static ngx_int_t -ngx_quic_compat_set_encryption_secret(ngx_log_t *log, +ngx_quic_compat_set_encryption_secret(ngx_connection_t *c, ngx_quic_compat_keys_t *keys, enum 
ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len) { @@ -231,6 +232,7 @@ ngx_quic_compat_set_encryption_secret(ng ngx_quic_hkdf_t seq[2]; ngx_quic_secret_t *peer_secret; ngx_quic_ciphers_t ciphers; + ngx_pool_cleanup_t *cln; peer_secret = &keys->secret; @@ -239,12 +241,12 @@ ngx_quic_compat_set_encryption_secret(ng key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); if (key_len == NGX_ERROR) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "unexpected cipher"); + ngx_ssl_error(NGX_LOG_INFO, c->log, 0, "unexpected cipher"); return NGX_ERROR; } if (sizeof(peer_secret->secret.data) < secret_len) { - ngx_log_error(NGX_LOG_ALERT, log, 0, + ngx_log_error(NGX_LOG_ALERT, c->log, 0, "unexpected secret len: %uz", secret_len); return NGX_ERROR; } @@ -262,15 +264,43 @@ ngx_quic_compat_set_encryption_secret(ng ngx_quic_hkdf_set(&seq[1], "tls13 iv", &peer_secret->iv, &secret_str); for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { - if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, log) != NGX_OK) { + if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, c->log) != NGX_OK) { return NGX_ERROR; } } + /* register cleanup handler once */ + + if (peer_secret->ctx) { + ngx_quic_crypto_cleanup(peer_secret); + + } else { + cln = ngx_pool_cleanup_add(c->pool, 0); + if (cln == NULL) { + return NGX_ERROR; + } + + cln->handler = ngx_quic_compat_cleanup_encryption_secret; + cln->data = peer_secret; + } + + if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { + return NGX_ERROR; + } + return NGX_OK; } +static void +ngx_quic_compat_cleanup_encryption_secret(void *data) +{ + ngx_quic_secret_t *secret = data; + + ngx_quic_crypto_cleanup(secret); +} + + static int ngx_quic_compat_add_transport_params_callback(SSL *ssl, unsigned int ext_type, unsigned int context, const unsigned char **out, size_t *outlen, X509 *x, @@ -578,8 +608,7 @@ ngx_quic_compat_create_record(ngx_quic_c ngx_memcpy(nonce, secret->iv.data, secret->iv.len); 
ngx_quic_compute_nonce(nonce, sizeof(nonce), rec->number); - if (ngx_quic_crypto_seal(ciphers.c, secret, &out, - nonce, &rec->payload, &ad, rec->log) + if (ngx_quic_crypto_seal(secret, &out, nonce, &rec->payload, &ad, rec->log) != NGX_OK) { return NGX_ERROR; diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -941,13 +941,17 @@ ngx_quic_send_early_cc(ngx_connection_t res.data = dst; if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { + ngx_quic_keys_cleanup(pkt.keys); return NGX_ERROR; } if (ngx_quic_send(c, res.data, res.len, c->sockaddr, c->socklen) < 0) { + ngx_quic_keys_cleanup(pkt.keys); return NGX_ERROR; } + ngx_quic_keys_cleanup(pkt.keys); + return NGX_DONE; } diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -26,9 +26,8 @@ static ngx_int_t ngx_hkdf_extract(u_char static uint64_t ngx_quic_parse_pn(u_char **pos, ngx_int_t len, u_char *mask, uint64_t *largest_pn); -static ngx_int_t ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, - ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, - ngx_str_t *ad, ngx_log_t *log); +static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, + u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in); @@ -108,13 +107,14 @@ ngx_int_t ngx_quic_keys_set_initial_secret(ngx_quic_keys_t *keys, ngx_str_t *secret, ngx_log_t *log) { - size_t is_len; - uint8_t is[SHA256_DIGEST_LENGTH]; - ngx_str_t iss; - ngx_uint_t i; - const EVP_MD *digest; - ngx_quic_hkdf_t seq[8]; - ngx_quic_secret_t *client, *server; + size_t is_len; + uint8_t is[SHA256_DIGEST_LENGTH]; + ngx_str_t iss; + ngx_uint_t i; + const 
EVP_MD *digest; + ngx_quic_hkdf_t seq[8]; + ngx_quic_secret_t *client, *server; + ngx_quic_ciphers_t ciphers; static const uint8_t salt[20] = "\x38\x76\x2c\xf7\xf5\x59\x34\xb3\x4d\x17" @@ -180,7 +180,25 @@ ngx_quic_keys_set_initial_secret(ngx_qui } } + if (ngx_quic_ciphers(0, &ciphers, ssl_encryption_initial) == NGX_ERROR) { + return NGX_ERROR; + } + + if (ngx_quic_crypto_init(ciphers.c, client, 0, log) == NGX_ERROR) { + return NGX_ERROR; + } + + if (ngx_quic_crypto_init(ciphers.c, server, 1, log) == NGX_ERROR) { + goto failed; + } + return NGX_OK; + +failed: + + ngx_quic_keys_cleanup(keys); + + return NGX_ERROR; } @@ -343,9 +361,9 @@ failed: } -static ngx_int_t -ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, - ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) +ngx_int_t +ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, + ngx_int_t enc, ngx_log_t *log) { #ifdef OPENSSL_IS_BORINGSSL @@ -357,19 +375,7 @@ ngx_quic_crypto_open(const ngx_quic_ciph ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_new() failed"); return NGX_ERROR; } - - if (EVP_AEAD_CTX_open(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) - != 1) - { - EVP_AEAD_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_open() failed"); - return NGX_ERROR; - } - - EVP_AEAD_CTX_free(ctx); #else - int len; EVP_CIPHER_CTX *ctx; ctx = EVP_CIPHER_CTX_new(); @@ -378,114 +384,9 @@ ngx_quic_crypto_open(const ngx_quic_ciph return NGX_ERROR; } - if (EVP_DecryptInit_ex(ctx, cipher, NULL, NULL, NULL) != 1) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); - return NGX_ERROR; - } - - in->len -= NGX_QUIC_TAG_LEN; - - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, - in->data + in->len) - == 0) - { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) 
failed"); - return NGX_ERROR; - } - - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, s->iv.len, NULL) - == 0) - { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_IVLEN) failed"); - return NGX_ERROR; - } - - if (EVP_DecryptInit_ex(ctx, NULL, NULL, s->key.data, nonce) != 1) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); - return NGX_ERROR; - } - - if (EVP_CIPHER_mode(cipher) == EVP_CIPH_CCM_MODE - && EVP_DecryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) - { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { + if (EVP_CipherInit_ex(ctx, cipher, NULL, NULL, NULL, enc) != 1) { EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - out->len = len; - - if (EVP_DecryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptFinal_ex failed"); - return NGX_ERROR; - } - - out->len += len; - - EVP_CIPHER_CTX_free(ctx); -#endif - - return NGX_OK; -} - - -ngx_int_t -ngx_quic_crypto_seal(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, - ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) -{ - -#ifdef OPENSSL_IS_BORINGSSL - EVP_AEAD_CTX *ctx; - - ctx = EVP_AEAD_CTX_new(cipher, s->key.data, s->key.len, - EVP_AEAD_DEFAULT_TAG_LENGTH); - if (ctx == NULL) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_new() failed"); - return NGX_ERROR; - } - - if (EVP_AEAD_CTX_seal(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) - 
!= 1) - { - EVP_AEAD_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_seal() failed"); - return NGX_ERROR; - } - - EVP_AEAD_CTX_free(ctx); -#else - int len; - EVP_CIPHER_CTX *ctx; - - ctx = EVP_CIPHER_CTX_new(); - if (ctx == NULL) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CIPHER_CTX_new() failed"); - return NGX_ERROR; - } - - if (EVP_EncryptInit_ex(ctx, cipher, NULL, NULL, NULL) != 1) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); return NGX_ERROR; } @@ -509,28 +410,121 @@ ngx_quic_crypto_seal(const ngx_quic_ciph return NGX_ERROR; } - if (EVP_EncryptInit_ex(ctx, NULL, NULL, s->key.data, nonce) != 1) { + if (EVP_CipherInit_ex(ctx, NULL, NULL, s->key.data, NULL, enc) != 1) { EVP_CIPHER_CTX_free(ctx); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); + return NGX_ERROR; + } +#endif + + s->ctx = ctx; + return NGX_OK; +} + + +static ngx_int_t +ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, + ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) +{ + ngx_quic_crypto_ctx_t *ctx; + + ctx = s->ctx; + +#ifdef OPENSSL_IS_BORINGSSL + if (EVP_AEAD_CTX_open(ctx, out->data, &out->len, out->len, nonce, s->iv.len, + in->data, in->len, ad->data, ad->len) + != 1) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_open() failed"); + return NGX_ERROR; + } +#else + int len; + + if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); + return NGX_ERROR; + } + + in->len -= NGX_QUIC_TAG_LEN; + + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, + in->data + in->len) + == 0) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, + "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); + return NGX_ERROR; + } + + if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE + && EVP_DecryptUpdate(ctx, NULL, &len, NULL, 
in->len) != 1) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); + return NGX_ERROR; + } + + if (EVP_DecryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); + return NGX_ERROR; + } + + if (EVP_DecryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); + return NGX_ERROR; + } + + out->len = len; + + if (EVP_DecryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptFinal_ex failed"); + return NGX_ERROR; + } + + out->len += len; +#endif + + return NGX_OK; +} + + +ngx_int_t +ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, + ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) +{ + ngx_quic_crypto_ctx_t *ctx; + + ctx = s->ctx; + +#ifdef OPENSSL_IS_BORINGSSL + if (EVP_AEAD_CTX_seal(ctx, out->data, &out->len, out->len, nonce, s->iv.len, + in->data, in->len, ad->data, ad->len) + != 1) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_seal() failed"); + return NGX_ERROR; + } +#else + int len; + + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); return NGX_ERROR; } - if (EVP_CIPHER_mode(cipher) == EVP_CIPH_CCM_MODE + if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE && EVP_EncryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); return NGX_ERROR; } if (EVP_EncryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); return NGX_ERROR; } if (EVP_EncryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); return NGX_ERROR; } @@ -538,7 +532,6 @@ 
ngx_quic_crypto_seal(const ngx_quic_ciph out->len = len; if (EVP_EncryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptFinal_ex failed"); return NGX_ERROR; } @@ -549,21 +542,32 @@ ngx_quic_crypto_seal(const ngx_quic_ciph out->data + out->len) == 0) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed"); return NGX_ERROR; } out->len += NGX_QUIC_TAG_LEN; - - EVP_CIPHER_CTX_free(ctx); #endif return NGX_OK; } +void +ngx_quic_crypto_cleanup(ngx_quic_secret_t *s) +{ + if (s->ctx) { +#ifdef OPENSSL_IS_BORINGSSL + EVP_AEAD_CTX_free(s->ctx); +#else + EVP_CIPHER_CTX_free(s->ctx); +#endif + s->ctx = NULL; + } +} + + static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in) @@ -666,6 +670,12 @@ ngx_quic_keys_set_encryption_secret(ngx_ } } + if (ngx_quic_crypto_init(ciphers.c, peer_secret, is_write, log) + == NGX_ERROR) + { + return NGX_ERROR; + } + return NGX_OK; } @@ -675,10 +685,10 @@ ngx_quic_keys_available(ngx_quic_keys_t enum ssl_encryption_level_t level, ngx_uint_t is_write) { if (is_write == 0) { - return keys->secrets[level].client.key.len != 0; + return keys->secrets[level].client.ctx != NULL; } - return keys->secrets[level].server.key.len != 0; + return keys->secrets[level].server.ctx != NULL; } @@ -686,8 +696,13 @@ void ngx_quic_keys_discard(ngx_quic_keys_t *keys, enum ssl_encryption_level_t level) { - keys->secrets[level].client.key.len = 0; - keys->secrets[level].server.key.len = 0; + ngx_quic_secret_t *client, *server; + + client = &keys->secrets[level].client; + server = &keys->secrets[level].server; + + ngx_quic_crypto_cleanup(client); + ngx_quic_crypto_cleanup(server); } @@ -699,6 +714,9 @@ ngx_quic_keys_switch(ngx_connection_t *c current = &keys->secrets[ssl_encryption_application]; next = &keys->next_key; + ngx_quic_crypto_cleanup(¤t->client); + 
ngx_quic_crypto_cleanup(¤t->server); + tmp = *current; *current = *next; *next = tmp; @@ -762,6 +780,16 @@ ngx_quic_keys_update(ngx_event_t *ev) } } + if (ngx_quic_crypto_init(ciphers.c, &next->client, 0, c->log) == NGX_ERROR) + { + goto failed; + } + + if (ngx_quic_crypto_init(ciphers.c, &next->server, 1, c->log) == NGX_ERROR) + { + goto failed; + } + return; failed: @@ -770,6 +798,23 @@ failed: } +void +ngx_quic_keys_cleanup(ngx_quic_keys_t *keys) +{ + ngx_uint_t i; + ngx_quic_secrets_t *next; + + for (i = 0; i < NGX_QUIC_ENCRYPTION_LAST; i++) { + ngx_quic_keys_discard(keys, i); + } + + next = &keys->next_key; + + ngx_quic_crypto_cleanup(&next->client); + ngx_quic_crypto_cleanup(&next->server); +} + + static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, ngx_str_t *res) { @@ -801,8 +846,7 @@ ngx_quic_create_packet(ngx_quic_header_t ngx_memcpy(nonce, secret->iv.data, secret->iv.len); ngx_quic_compute_nonce(nonce, sizeof(nonce), pkt->number); - if (ngx_quic_crypto_seal(ciphers.c, secret, &out, - nonce, &pkt->payload, &ad, pkt->log) + if (ngx_quic_crypto_seal(secret, &out, nonce, &pkt->payload, &ad, pkt->log) != NGX_OK) { return NGX_ERROR; @@ -862,13 +906,19 @@ ngx_quic_create_retry_packet(ngx_quic_he ngx_memcpy(secret.key.data, key, sizeof(key)); secret.iv.len = NGX_QUIC_IV_LEN; - if (ngx_quic_crypto_seal(ciphers.c, &secret, &itag, nonce, &in, &ad, - pkt->log) + if (ngx_quic_crypto_init(ciphers.c, &secret, 1, pkt->log) == NGX_ERROR) { + return NGX_ERROR; + } + + if (ngx_quic_crypto_seal(&secret, &itag, nonce, &in, &ad, pkt->log) != NGX_OK) { + ngx_quic_crypto_cleanup(&secret); return NGX_ERROR; } + ngx_quic_crypto_cleanup(&secret); + res->len = itag.data + itag.len - start; res->data = start; @@ -999,7 +1049,7 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, u_char *p, *sample; size_t len; uint64_t pn, lpn; - ngx_int_t pnl, rc; + ngx_int_t pnl; ngx_str_t in, ad; ngx_uint_t key_phase; ngx_quic_secret_t *secret; @@ -1088,9 +1138,9 @@ 
ngx_quic_decrypt(ngx_quic_header_t *pkt, pkt->payload.len = in.len - NGX_QUIC_TAG_LEN; pkt->payload.data = pkt->plaintext + ad.len; - rc = ngx_quic_crypto_open(ciphers.c, secret, &pkt->payload, - nonce, &in, &ad, pkt->log); - if (rc != NGX_OK) { + if (ngx_quic_crypto_open(secret, &pkt->payload, nonce, &in, &ad, pkt->log) + != NGX_OK) + { return NGX_DECLINED; } diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -26,8 +26,10 @@ #ifdef OPENSSL_IS_BORINGSSL #define ngx_quic_cipher_t EVP_AEAD +#define ngx_quic_crypto_ctx_t EVP_AEAD_CTX #else #define ngx_quic_cipher_t EVP_CIPHER +#define ngx_quic_crypto_ctx_t EVP_CIPHER_CTX #endif @@ -48,6 +50,7 @@ typedef struct { ngx_quic_md_t key; ngx_quic_iv_t iv; ngx_quic_md_t hp; + ngx_quic_crypto_ctx_t *ctx; } ngx_quic_secret_t; @@ -100,14 +103,17 @@ void ngx_quic_keys_discard(ngx_quic_keys enum ssl_encryption_level_t level); void ngx_quic_keys_switch(ngx_connection_t *c, ngx_quic_keys_t *keys); void ngx_quic_keys_update(ngx_event_t *ev); +void ngx_quic_keys_cleanup(ngx_quic_keys_t *keys); ngx_int_t ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_str_t *res); ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn); void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, enum ssl_encryption_level_t level); -ngx_int_t ngx_quic_crypto_seal(const ngx_quic_cipher_t *cipher, - ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, - ngx_str_t *ad, ngx_log_t *log); +ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, + ngx_quic_secret_t *s, ngx_int_t enc, ngx_log_t *log); +ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, + u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); +void ngx_quic_crypto_cleanup(ngx_quic_secret_t *s); ngx_int_t 
ngx_quic_hkdf_expand(ngx_quic_hkdf_t *hkdf, const EVP_MD *digest, ngx_log_t *log); From pluknet at nginx.com Wed Oct 18 15:26:48 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:48 +0400 Subject: [PATCH 06 of 11] QUIC: common code for crypto open and seal operations In-Reply-To: References: Message-ID: <2c6b0e0650c76a246549.1697642808@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1697642585 -14400 # Wed Oct 18 19:23:05 2023 +0400 # Node ID 2c6b0e0650c76a246549ae973233ac108b68f760 # Parent a634242fbb8c40fbadcb43c1d3183996ff422728 QUIC: common code for crypto open and seal operations. diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -28,6 +28,10 @@ static uint64_t ngx_quic_parse_pn(u_char static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); +#ifndef OPENSSL_IS_BORINGSSL +static ngx_int_t ngx_quic_crypto_common(ngx_quic_secret_t *s, ngx_str_t *out, + u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); +#endif static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in); @@ -426,65 +430,19 @@ static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { - ngx_quic_crypto_ctx_t *ctx; - - ctx = s->ctx; - #ifdef OPENSSL_IS_BORINGSSL - if (EVP_AEAD_CTX_open(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) + if (EVP_AEAD_CTX_open(s->ctx, out->data, &out->len, out->len, nonce, + s->iv.len, in->data, in->len, ad->data, ad->len) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_open() failed"); return NGX_ERROR; } -#else - int len; - - if (EVP_DecryptInit_ex(ctx, NULL, 
NULL, NULL, nonce) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); - return NGX_ERROR; - } - - in->len -= NGX_QUIC_TAG_LEN; - - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, - in->data + in->len) - == 0) - { - ngx_ssl_error(NGX_LOG_INFO, log, 0, - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); - return NGX_ERROR; - } - - if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE - && EVP_DecryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) - { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - out->len = len; - - if (EVP_DecryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptFinal_ex failed"); - return NGX_ERROR; - } - - out->len += len; -#endif return NGX_OK; +#else + return ngx_quic_crypto_common(s, out, nonce, in, ad, log); +#endif } @@ -492,67 +450,96 @@ ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { - ngx_quic_crypto_ctx_t *ctx; - - ctx = s->ctx; - #ifdef OPENSSL_IS_BORINGSSL - if (EVP_AEAD_CTX_seal(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) + if (EVP_AEAD_CTX_seal(s->ctx, out->data, &out->len, out->len, nonce, + s->iv.len, in->data, in->len, ad->data, ad->len) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_seal() failed"); return NGX_ERROR; } + + return NGX_OK; #else - int len; + return ngx_quic_crypto_common(s, out, nonce, in, ad, log); +#endif +} + + +#ifndef OPENSSL_IS_BORINGSSL + +static ngx_int_t 
+ngx_quic_crypto_common(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, + ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) +{ + int len, enc; + ngx_quic_crypto_ctx_t *ctx; - if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); + ctx = s->ctx; + enc = EVP_CIPHER_CTX_encrypting(ctx); + + if (EVP_CipherInit_ex(ctx, NULL, NULL, NULL, nonce, enc) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); return NGX_ERROR; } + if (enc == 0) { + in->len -= NGX_QUIC_TAG_LEN; + + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, + in->data + in->len) + == 0) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, + "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); + return NGX_ERROR; + } + } + if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE - && EVP_EncryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) + && EVP_CipherUpdate(ctx, NULL, &len, NULL, in->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherUpdate() failed"); return NGX_ERROR; } - if (EVP_EncryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); + if (EVP_CipherUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherUpdate() failed"); return NGX_ERROR; } - if (EVP_EncryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); + if (EVP_CipherUpdate(ctx, out->data, &len, in->data, in->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherUpdate() failed"); return NGX_ERROR; } out->len = len; - if (EVP_EncryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptFinal_ex failed"); + if (EVP_CipherFinal_ex(ctx, out->data + out->len, &len) <= 0) { + ngx_ssl_error(NGX_LOG_INFO, log, 
0, "EVP_CipherFinal_ex failed"); return NGX_ERROR; } out->len += len; - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, NGX_QUIC_TAG_LEN, - out->data + out->len) - == 0) - { - ngx_ssl_error(NGX_LOG_INFO, log, 0, - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed"); - return NGX_ERROR; + if (enc == 1) { + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, NGX_QUIC_TAG_LEN, + out->data + out->len) + == 0) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, + "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed"); + return NGX_ERROR; + } + + out->len += NGX_QUIC_TAG_LEN; } - out->len += NGX_QUIC_TAG_LEN; -#endif - return NGX_OK; } +#endif + void ngx_quic_crypto_cleanup(ngx_quic_secret_t *s) From pluknet at nginx.com Wed Oct 18 15:26:49 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:49 +0400 Subject: [PATCH 07 of 11] QUIC: reusing crypto contexts for header protection In-Reply-To: References: Message-ID: <72e780dcbd73aa331847.1697642809@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1697642586 -14400 # Wed Oct 18 19:23:06 2023 +0400 # Node ID 72e780dcbd73aa331847f86b4115a9d33378c8ef # Parent 2c6b0e0650c76a246549ae973233ac108b68f760 QUIC: reusing crypto contexts for header protection. 
diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -32,8 +32,12 @@ static ngx_int_t ngx_quic_crypto_open(ng static ngx_int_t ngx_quic_crypto_common(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); #endif -static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, - ngx_quic_secret_t *s, u_char *out, u_char *in); + +static ngx_int_t ngx_quic_crypto_hp_init(const EVP_CIPHER *cipher, + ngx_quic_secret_t *s, ngx_log_t *log); +static ngx_int_t ngx_quic_crypto_hp(ngx_quic_secret_t *s, + u_char *out, u_char *in, ngx_log_t *log); +static void ngx_quic_crypto_hp_cleanup(ngx_quic_secret_t *s); static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, ngx_str_t *res); @@ -196,6 +200,14 @@ ngx_quic_keys_set_initial_secret(ngx_qui goto failed; } + if (ngx_quic_crypto_hp_init(ciphers.hp, client, log) == NGX_ERROR) { + goto failed; + } + + if (ngx_quic_crypto_hp_init(ciphers.hp, server, log) == NGX_ERROR) { + goto failed; + } + return NGX_OK; failed: @@ -556,53 +568,82 @@ ngx_quic_crypto_cleanup(ngx_quic_secret_ static ngx_int_t -ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, - ngx_quic_secret_t *s, u_char *out, u_char *in) +ngx_quic_crypto_hp_init(const EVP_CIPHER *cipher, ngx_quic_secret_t *s, + ngx_log_t *log) { - int outlen; EVP_CIPHER_CTX *ctx; - u_char zero[NGX_QUIC_HP_LEN] = {0}; #ifdef OPENSSL_IS_BORINGSSL - uint32_t cnt; - - ngx_memcpy(&cnt, in, sizeof(uint32_t)); - - if (cipher == (const EVP_CIPHER *) EVP_aead_chacha20_poly1305()) { - CRYPTO_chacha_20(out, zero, NGX_QUIC_HP_LEN, s->hp.data, &in[4], cnt); + if (cipher == (EVP_CIPHER *) EVP_aead_chacha20_poly1305()) { + /* no EVP interface */ + s->hp_ctx = NULL; return NGX_OK; } #endif ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, 
"EVP_CIPHER_CTX_new() failed"); + return NGX_ERROR; + } + + if (EVP_EncryptInit_ex(ctx, cipher, NULL, s->hp.data, NULL) != 1) { + EVP_CIPHER_CTX_free(ctx); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); return NGX_ERROR; } - if (EVP_EncryptInit_ex(ctx, cipher, NULL, s->hp.data, in) != 1) { + s->hp_ctx = ctx; + return NGX_OK; +} + + +static ngx_int_t +ngx_quic_crypto_hp(ngx_quic_secret_t *s, u_char *out, u_char *in, + ngx_log_t *log) +{ + int outlen; + EVP_CIPHER_CTX *ctx; + u_char zero[NGX_QUIC_HP_LEN] = {0}; + + ctx = s->hp_ctx; + +#ifdef OPENSSL_IS_BORINGSSL + uint32_t cnt; + + if (ctx == NULL) { + ngx_memcpy(&cnt, in, sizeof(uint32_t)); + CRYPTO_chacha_20(out, zero, NGX_QUIC_HP_LEN, s->hp.data, &in[4], cnt); + return NGX_OK; + } +#endif + + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, in) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); - goto failed; + return NGX_ERROR; } if (!EVP_EncryptUpdate(ctx, out, &outlen, zero, NGX_QUIC_HP_LEN)) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); - goto failed; + return NGX_ERROR; } if (!EVP_EncryptFinal_ex(ctx, out + NGX_QUIC_HP_LEN, &outlen)) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptFinal_Ex() failed"); - goto failed; + return NGX_ERROR; } - EVP_CIPHER_CTX_free(ctx); - return NGX_OK; +} -failed: - EVP_CIPHER_CTX_free(ctx); - - return NGX_ERROR; +static void +ngx_quic_crypto_hp_cleanup(ngx_quic_secret_t *s) +{ + if (s->hp_ctx) { + EVP_CIPHER_CTX_free(s->hp_ctx); + s->hp_ctx = NULL; + } } @@ -663,6 +704,10 @@ ngx_quic_keys_set_encryption_secret(ngx_ return NGX_ERROR; } + if (ngx_quic_crypto_hp_init(ciphers.hp, peer_secret, log) == NGX_ERROR) { + return NGX_ERROR; + } + return NGX_OK; } @@ -690,6 +735,9 @@ ngx_quic_keys_discard(ngx_quic_keys_t *k ngx_quic_crypto_cleanup(client); ngx_quic_crypto_cleanup(server); + + ngx_quic_crypto_hp_cleanup(client); + ngx_quic_crypto_hp_cleanup(server); } @@ -742,11 +790,13 @@ ngx_quic_keys_update(ngx_event_t 
*ev) next->client.key.len = current->client.key.len; next->client.iv.len = NGX_QUIC_IV_LEN; next->client.hp = current->client.hp; + next->client.hp_ctx = current->client.hp_ctx; next->server.secret.len = current->server.secret.len; next->server.key.len = current->server.key.len; next->server.iv.len = NGX_QUIC_IV_LEN; next->server.hp = current->server.hp; + next->server.hp_ctx = current->server.hp_ctx; ngx_quic_hkdf_set(&seq[0], "tls13 quic ku", &next->client.secret, ¤t->client.secret); @@ -840,9 +890,7 @@ ngx_quic_create_packet(ngx_quic_header_t } sample = &out.data[4 - pkt->num_len]; - if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) - != NGX_OK) - { + if (ngx_quic_crypto_hp(secret, mask, sample, pkt->log) != NGX_OK) { return NGX_ERROR; } @@ -1070,9 +1118,7 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, /* header protection */ - if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) - != NGX_OK) - { + if (ngx_quic_crypto_hp(secret, mask, sample, pkt->log) != NGX_OK) { return NGX_DECLINED; } diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -51,6 +51,7 @@ typedef struct { ngx_quic_iv_t iv; ngx_quic_md_t hp; ngx_quic_crypto_ctx_t *ctx; + EVP_CIPHER_CTX *hp_ctx; } ngx_quic_secret_t; From pluknet at nginx.com Wed Oct 18 15:26:50 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:50 +0400 Subject: [PATCH 08 of 11] QUIC: cleaned up now unused ngx_quic_ciphers() calls In-Reply-To: References: Message-ID: <4cab026f67a8873c6198.1697642810@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1697642587 -14400 # Wed Oct 18 19:23:07 2023 +0400 # Node ID 4cab026f67a8873c61983d56e46502ce42830bff # Parent 72e780dcbd73aa331847f86b4115a9d33378c8ef QUIC: cleaned up now unused ngx_quic_ciphers() calls. 
diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -582,10 +582,9 @@ ngx_quic_compat_create_header(ngx_quic_c static ngx_int_t ngx_quic_compat_create_record(ngx_quic_compat_record_t *rec, ngx_str_t *res) { - ngx_str_t ad, out; - ngx_quic_secret_t *secret; - ngx_quic_ciphers_t ciphers; - u_char nonce[NGX_QUIC_IV_LEN]; + ngx_str_t ad, out; + ngx_quic_secret_t *secret; + u_char nonce[NGX_QUIC_IV_LEN]; ad.data = res->data; ad.len = ngx_quic_compat_create_header(rec, ad.data, 0); @@ -598,11 +597,6 @@ ngx_quic_compat_create_record(ngx_quic_c "quic compat ad len:%uz %xV", ad.len, &ad); #endif - if (ngx_quic_ciphers(rec->keys->cipher, &ciphers, rec->level) == NGX_ERROR) - { - return NGX_ERROR; - } - secret = &rec->keys->secret; ngx_memcpy(nonce, secret->iv.data, secret->iv.len); diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -855,12 +855,11 @@ ngx_quic_keys_cleanup(ngx_quic_keys_t *k static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, ngx_str_t *res) { - u_char *pnp, *sample; - ngx_str_t ad, out; - ngx_uint_t i; - ngx_quic_secret_t *secret; - ngx_quic_ciphers_t ciphers; - u_char nonce[NGX_QUIC_IV_LEN], mask[NGX_QUIC_HP_LEN]; + u_char *pnp, *sample; + ngx_str_t ad, out; + ngx_uint_t i; + ngx_quic_secret_t *secret; + u_char nonce[NGX_QUIC_IV_LEN], mask[NGX_QUIC_HP_LEN]; ad.data = res->data; ad.len = ngx_quic_create_header(pkt, ad.data, &pnp); @@ -873,11 +872,6 @@ ngx_quic_create_packet(ngx_quic_header_t "quic ad len:%uz %xV", ad.len, &ad); #endif - if (ngx_quic_ciphers(pkt->keys->cipher, &ciphers, pkt->level) == NGX_ERROR) - { - return NGX_ERROR; - } - secret = &pkt->keys->secrets[pkt->level].server; ngx_memcpy(nonce, secret->iv.data, 
secret->iv.len); @@ -1081,20 +1075,14 @@ ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn) { - u_char *p, *sample; - size_t len; - uint64_t pn, lpn; - ngx_int_t pnl; - ngx_str_t in, ad; - ngx_uint_t key_phase; - ngx_quic_secret_t *secret; - ngx_quic_ciphers_t ciphers; - uint8_t nonce[NGX_QUIC_IV_LEN], mask[NGX_QUIC_HP_LEN]; - - if (ngx_quic_ciphers(pkt->keys->cipher, &ciphers, pkt->level) == NGX_ERROR) - { - return NGX_ERROR; - } + u_char *p, *sample; + size_t len; + uint64_t pn, lpn; + ngx_int_t pnl; + ngx_str_t in, ad; + ngx_uint_t key_phase; + ngx_quic_secret_t *secret; + uint8_t nonce[NGX_QUIC_IV_LEN], mask[NGX_QUIC_HP_LEN]; secret = &pkt->keys->secrets[pkt->level].client; From pluknet at nginx.com Wed Oct 18 15:26:51 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:51 +0400 Subject: [PATCH 09 of 11] QUIC: simplified ngx_quic_ciphers() API In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1697642588 -14400 # Wed Oct 18 19:23:08 2023 +0400 # Node ID c4ff3592cf17c02c397eb0827e682b14dc775d3d # Parent 4cab026f67a8873c61983d56e46502ce42830bff QUIC: simplified ngx_quic_ciphers() API. After conversion to reusable crypto ctx, now there's enough caller context to remove the "level" argument from ngx_quic_ciphers(). 
diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -238,7 +238,7 @@ ngx_quic_compat_set_encryption_secret(ng keys->cipher = SSL_CIPHER_get_id(cipher); - key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); + key_len = ngx_quic_ciphers(keys->cipher, &ciphers); if (key_len == NGX_ERROR) { ngx_ssl_error(NGX_LOG_INFO, c->log, 0, "unexpected cipher"); diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -15,6 +15,8 @@ #define NGX_QUIC_AES_128_KEY_LEN 16 +#define NGX_QUIC_INITIAL_CIPHER TLS1_3_CK_AES_128_GCM_SHA256 + static ngx_int_t ngx_hkdf_expand(u_char *out_key, size_t out_len, const EVP_MD *digest, const u_char *prk, size_t prk_len, @@ -46,15 +48,10 @@ static ngx_int_t ngx_quic_create_retry_p ngx_int_t -ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, - enum ssl_encryption_level_t level) +ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers) { ngx_int_t len; - if (level == ssl_encryption_initial) { - id = TLS1_3_CK_AES_128_GCM_SHA256; - } - switch (id) { case TLS1_3_CK_AES_128_GCM_SHA256: @@ -188,7 +185,7 @@ ngx_quic_keys_set_initial_secret(ngx_qui } } - if (ngx_quic_ciphers(0, &ciphers, ssl_encryption_initial) == NGX_ERROR) { + if (ngx_quic_ciphers(NGX_QUIC_INITIAL_CIPHER, &ciphers) == NGX_ERROR) { return NGX_ERROR; } @@ -664,7 +661,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ keys->cipher = SSL_CIPHER_get_id(cipher); - key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); + key_len = ngx_quic_ciphers(keys->cipher, &ciphers); if (key_len == NGX_ERROR) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "unexpected cipher"); @@ -780,9 +777,7 @@ ngx_quic_keys_update(ngx_event_t *ev) c->log->action = "updating keys"; - if 
(ngx_quic_ciphers(keys->cipher, &ciphers, ssl_encryption_application) - == NGX_ERROR) - { + if (ngx_quic_ciphers(keys->cipher, &ciphers) == NGX_ERROR) { goto failed; } @@ -927,7 +922,7 @@ ngx_quic_create_retry_packet(ngx_quic_he "quic retry itag len:%uz %xV", ad.len, &ad); #endif - if (ngx_quic_ciphers(0, &ciphers, pkt->level) == NGX_ERROR) { + if (ngx_quic_ciphers(NGX_QUIC_INITIAL_CIPHER, &ciphers) == NGX_ERROR) { return NGX_ERROR; } diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -108,8 +108,7 @@ void ngx_quic_keys_cleanup(ngx_quic_keys ngx_int_t ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_str_t *res); ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn); void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); -ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, - enum ssl_encryption_level_t level); +ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers); ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_int_t enc, ngx_log_t *log); ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, From pluknet at nginx.com Wed Oct 18 15:26:52 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:52 +0400 Subject: [PATCH 10 of 11] QUIC: removed key field from ngx_quic_secret_t In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1697642588 -14400 # Wed Oct 18 19:23:08 2023 +0400 # Node ID bf17c61cac3616078466bbdd809d84071689715d # Parent c4ff3592cf17c02c397eb0827e682b14dc775d3d QUIC: removed key field from ngx_quic_secret_t. It is made local as it is only needed now when creating crypto context. BoringSSL lacks EVP interface for ChaCha20, providing instead a function for one-shot encryption, thus hp is still preserved. 
Based on a patch by Roman Arutyunyan. diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -229,6 +229,7 @@ ngx_quic_compat_set_encryption_secret(ng ngx_int_t key_len; ngx_str_t secret_str; ngx_uint_t i; + ngx_quic_md_t key; ngx_quic_hkdf_t seq[2]; ngx_quic_secret_t *peer_secret; ngx_quic_ciphers_t ciphers; @@ -254,13 +255,14 @@ ngx_quic_compat_set_encryption_secret(ng peer_secret->secret.len = secret_len; ngx_memcpy(peer_secret->secret.data, secret, secret_len); - peer_secret->key.len = key_len; + key.len = key_len; + peer_secret->iv.len = NGX_QUIC_IV_LEN; secret_str.len = secret_len; secret_str.data = (u_char *) secret; - ngx_quic_hkdf_set(&seq[0], "tls13 key", &peer_secret->key, &secret_str); + ngx_quic_hkdf_set(&seq[0], "tls13 key", &key, &secret_str); ngx_quic_hkdf_set(&seq[1], "tls13 iv", &peer_secret->iv, &secret_str); for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { @@ -284,7 +286,9 @@ ngx_quic_compat_set_encryption_secret(ng cln->data = peer_secret; } - if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, peer_secret, &key, 1, c->log) + == NGX_ERROR) + { return NGX_ERROR; } diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -117,6 +117,7 @@ ngx_quic_keys_set_initial_secret(ngx_qui ngx_str_t iss; ngx_uint_t i; const EVP_MD *digest; + ngx_quic_md_t client_key, server_key; ngx_quic_hkdf_t seq[8]; ngx_quic_secret_t *client, *server; ngx_quic_ciphers_t ciphers; @@ -160,8 +161,8 @@ ngx_quic_keys_set_initial_secret(ngx_qui client->secret.len = SHA256_DIGEST_LENGTH; server->secret.len = SHA256_DIGEST_LENGTH; - client->key.len = NGX_QUIC_AES_128_KEY_LEN; - server->key.len = 
NGX_QUIC_AES_128_KEY_LEN; + client_key.len = NGX_QUIC_AES_128_KEY_LEN; + server_key.len = NGX_QUIC_AES_128_KEY_LEN; client->hp.len = NGX_QUIC_AES_128_KEY_LEN; server->hp.len = NGX_QUIC_AES_128_KEY_LEN; @@ -171,11 +172,11 @@ ngx_quic_keys_set_initial_secret(ngx_qui /* labels per RFC 9001, 5.1. Packet Protection Keys */ ngx_quic_hkdf_set(&seq[0], "tls13 client in", &client->secret, &iss); - ngx_quic_hkdf_set(&seq[1], "tls13 quic key", &client->key, &client->secret); + ngx_quic_hkdf_set(&seq[1], "tls13 quic key", &client_key, &client->secret); ngx_quic_hkdf_set(&seq[2], "tls13 quic iv", &client->iv, &client->secret); ngx_quic_hkdf_set(&seq[3], "tls13 quic hp", &client->hp, &client->secret); ngx_quic_hkdf_set(&seq[4], "tls13 server in", &server->secret, &iss); - ngx_quic_hkdf_set(&seq[5], "tls13 quic key", &server->key, &server->secret); + ngx_quic_hkdf_set(&seq[5], "tls13 quic key", &server_key, &server->secret); ngx_quic_hkdf_set(&seq[6], "tls13 quic iv", &server->iv, &server->secret); ngx_quic_hkdf_set(&seq[7], "tls13 quic hp", &server->hp, &server->secret); @@ -189,11 +190,15 @@ ngx_quic_keys_set_initial_secret(ngx_qui return NGX_ERROR; } - if (ngx_quic_crypto_init(ciphers.c, client, 0, log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, client, &client_key, 0, log) + == NGX_ERROR) + { return NGX_ERROR; } - if (ngx_quic_crypto_init(ciphers.c, server, 1, log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, server, &server_key, 1, log) + == NGX_ERROR) + { goto failed; } @@ -376,13 +381,13 @@ failed: ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, - ngx_int_t enc, ngx_log_t *log) + ngx_quic_md_t *key, ngx_int_t enc, ngx_log_t *log) { #ifdef OPENSSL_IS_BORINGSSL EVP_AEAD_CTX *ctx; - ctx = EVP_AEAD_CTX_new(cipher, s->key.data, s->key.len, + ctx = EVP_AEAD_CTX_new(cipher, key->data, key->len, EVP_AEAD_DEFAULT_TAG_LENGTH); if (ctx == NULL) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_new() failed"); @@ -423,7 
+428,7 @@ ngx_quic_crypto_init(const ngx_quic_ciph return NGX_ERROR; } - if (EVP_CipherInit_ex(ctx, NULL, NULL, s->key.data, NULL, enc) != 1) { + if (EVP_CipherInit_ex(ctx, NULL, NULL, key->data, NULL, enc) != 1) { EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); return NGX_ERROR; @@ -652,6 +657,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ ngx_int_t key_len; ngx_str_t secret_str; ngx_uint_t i; + ngx_quic_md_t key; ngx_quic_hkdf_t seq[3]; ngx_quic_secret_t *peer_secret; ngx_quic_ciphers_t ciphers; @@ -677,15 +683,14 @@ ngx_quic_keys_set_encryption_secret(ngx_ peer_secret->secret.len = secret_len; ngx_memcpy(peer_secret->secret.data, secret, secret_len); - peer_secret->key.len = key_len; + key.len = key_len; peer_secret->iv.len = NGX_QUIC_IV_LEN; peer_secret->hp.len = key_len; secret_str.len = secret_len; secret_str.data = (u_char *) secret; - ngx_quic_hkdf_set(&seq[0], "tls13 quic key", - &peer_secret->key, &secret_str); + ngx_quic_hkdf_set(&seq[0], "tls13 quic key", &key, &secret_str); ngx_quic_hkdf_set(&seq[1], "tls13 quic iv", &peer_secret->iv, &secret_str); ngx_quic_hkdf_set(&seq[2], "tls13 quic hp", &peer_secret->hp, &secret_str); @@ -695,7 +700,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ } } - if (ngx_quic_crypto_init(ciphers.c, peer_secret, is_write, log) + if (ngx_quic_crypto_init(ciphers.c, peer_secret, &key, is_write, log) == NGX_ERROR) { return NGX_ERROR; @@ -758,7 +763,9 @@ ngx_quic_keys_switch(ngx_connection_t *c void ngx_quic_keys_update(ngx_event_t *ev) { + ngx_int_t key_len; ngx_uint_t i; + ngx_quic_md_t client_key, server_key; ngx_quic_hkdf_t seq[6]; ngx_quic_keys_t *keys; ngx_connection_t *c; @@ -777,18 +784,21 @@ ngx_quic_keys_update(ngx_event_t *ev) c->log->action = "updating keys"; - if (ngx_quic_ciphers(keys->cipher, &ciphers) == NGX_ERROR) { + key_len = ngx_quic_ciphers(keys->cipher, &ciphers); + + if (key_len == NGX_ERROR) { goto failed; } + client_key.len = key_len; + server_key.len = key_len; + 
next->client.secret.len = current->client.secret.len; - next->client.key.len = current->client.key.len; next->client.iv.len = NGX_QUIC_IV_LEN; next->client.hp = current->client.hp; next->client.hp_ctx = current->client.hp_ctx; next->server.secret.len = current->server.secret.len; - next->server.key.len = current->server.key.len; next->server.iv.len = NGX_QUIC_IV_LEN; next->server.hp = current->server.hp; next->server.hp_ctx = current->server.hp_ctx; @@ -796,13 +806,13 @@ ngx_quic_keys_update(ngx_event_t *ev) ngx_quic_hkdf_set(&seq[0], "tls13 quic ku", &next->client.secret, ¤t->client.secret); ngx_quic_hkdf_set(&seq[1], "tls13 quic key", - &next->client.key, &next->client.secret); + &client_key, &next->client.secret); ngx_quic_hkdf_set(&seq[2], "tls13 quic iv", &next->client.iv, &next->client.secret); ngx_quic_hkdf_set(&seq[3], "tls13 quic ku", &next->server.secret, ¤t->server.secret); ngx_quic_hkdf_set(&seq[4], "tls13 quic key", - &next->server.key, &next->server.secret); + &server_key, &next->server.secret); ngx_quic_hkdf_set(&seq[5], "tls13 quic iv", &next->server.iv, &next->server.secret); @@ -812,12 +822,14 @@ ngx_quic_keys_update(ngx_event_t *ev) } } - if (ngx_quic_crypto_init(ciphers.c, &next->client, 0, c->log) == NGX_ERROR) + if (ngx_quic_crypto_init(ciphers.c, &next->client, &client_key, 0, c->log) + == NGX_ERROR) { goto failed; } - if (ngx_quic_crypto_init(ciphers.c, &next->server, 1, c->log) == NGX_ERROR) + if (ngx_quic_crypto_init(ciphers.c, &next->server, &server_key, 1, c->log) + == NGX_ERROR) { goto failed; } @@ -901,11 +913,12 @@ ngx_quic_create_retry_packet(ngx_quic_he { u_char *start; ngx_str_t ad, itag; + ngx_quic_md_t key; ngx_quic_secret_t secret; ngx_quic_ciphers_t ciphers; /* 5.8. 
Retry Packet Integrity */ - static u_char key[16] = + static u_char key_data[16] = "\xbe\x0c\x69\x0b\x9f\x66\x57\x5a\x1d\x76\x6b\x54\xe3\x68\xc8\x4e"; static u_char nonce[NGX_QUIC_IV_LEN] = "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"; @@ -926,11 +939,13 @@ ngx_quic_create_retry_packet(ngx_quic_he return NGX_ERROR; } - secret.key.len = sizeof(key); - ngx_memcpy(secret.key.data, key, sizeof(key)); + key.len = sizeof(key_data); + ngx_memcpy(key.data, key_data, sizeof(key_data)); secret.iv.len = NGX_QUIC_IV_LEN; - if (ngx_quic_crypto_init(ciphers.c, &secret, 1, pkt->log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, &secret, &key, 1, pkt->log) + == NGX_ERROR) + { return NGX_ERROR; } diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -47,7 +47,6 @@ typedef struct { typedef struct { ngx_quic_md_t secret; - ngx_quic_md_t key; ngx_quic_iv_t iv; ngx_quic_md_t hp; ngx_quic_crypto_ctx_t *ctx; @@ -110,7 +109,7 @@ ngx_int_t ngx_quic_decrypt(ngx_quic_head void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers); ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, - ngx_quic_secret_t *s, ngx_int_t enc, ngx_log_t *log); + ngx_quic_secret_t *s, ngx_quic_md_t *key, ngx_int_t enc, ngx_log_t *log); ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); void ngx_quic_crypto_cleanup(ngx_quic_secret_t *s); From pluknet at nginx.com Wed Oct 18 15:26:53 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Wed, 18 Oct 2023 19:26:53 +0400 Subject: [PATCH 11 of 11] QUIC: explicitly zero out unused keying material In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1697642617 -14400 # Wed Oct 18 19:23:37 
2023 +0400 # Node ID dda4eeff4bc526cd9df92a753c8fd3e8de37b5d7 # Parent bf17c61cac3616078466bbdd809d84071689715d QUIC: explicitly zero out unused keying material. diff --git a/src/event/quic/ngx_event_quic_openssl_compat.c b/src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c +++ b/src/event/quic/ngx_event_quic_openssl_compat.c @@ -218,6 +218,8 @@ ngx_quic_compat_keylog_callback(const SS (void) ngx_quic_compat_set_encryption_secret(c, &com->keys, level, cipher, secret, n); } + + ngx_explicit_memzero(secret, n); } @@ -246,15 +248,6 @@ ngx_quic_compat_set_encryption_secret(ng return NGX_ERROR; } - if (sizeof(peer_secret->secret.data) < secret_len) { - ngx_log_error(NGX_LOG_ALERT, c->log, 0, - "unexpected secret len: %uz", secret_len); - return NGX_ERROR; - } - - peer_secret->secret.len = secret_len; - ngx_memcpy(peer_secret->secret.data, secret, secret_len); - key.len = key_len; peer_secret->iv.len = NGX_QUIC_IV_LEN; @@ -292,6 +285,8 @@ ngx_quic_compat_set_encryption_secret(ng return NGX_ERROR; } + ngx_explicit_memzero(key.data, key.len); + return NGX_OK; } diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -710,6 +710,8 @@ ngx_quic_keys_set_encryption_secret(ngx_ return NGX_ERROR; } + ngx_explicit_memzero(key.data, key.len); + return NGX_OK; } @@ -740,6 +742,9 @@ ngx_quic_keys_discard(ngx_quic_keys_t *k ngx_quic_crypto_hp_cleanup(client); ngx_quic_crypto_hp_cleanup(server); + + ngx_explicit_memzero(client->secret.data, client->secret.len); + ngx_explicit_memzero(server->secret.data, server->secret.len); } @@ -834,6 +839,14 @@ ngx_quic_keys_update(ngx_event_t *ev) goto failed; } + ngx_explicit_memzero(current->client.secret.data, + current->client.secret.len); + ngx_explicit_memzero(current->server.secret.data, + current->server.secret.len); + + 
ngx_explicit_memzero(client_key.data, client_key.len); + ngx_explicit_memzero(server_key.data, server_key.len); + return; failed: @@ -856,6 +869,11 @@ ngx_quic_keys_cleanup(ngx_quic_keys_t *k ngx_quic_crypto_cleanup(&next->client); ngx_quic_crypto_cleanup(&next->server); + + ngx_explicit_memzero(next->client.secret.data, + next->client.secret.len); + ngx_explicit_memzero(next->server.secret.data, + next->server.secret.len); } From teotyrov at gmail.com Wed Oct 18 18:38:24 2023 From: teotyrov at gmail.com (Teo Tyrov) Date: Wed, 18 Oct 2023 21:38:24 +0300 Subject: [PATCH] http option for server identification removal Message-ID: # HG changeset patch # User Theodoros Tyrovouzis # Date 1697653906 -10800 # Wed Oct 18 21:31:46 2023 +0300 # Node ID 112e223511c087fac000065c7eb99dd88e66b174 # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc Add "server_identification" http option that hides server information disclosure in responses In its responses, nginx by default sends a "Server" header which contains "nginx" and the nginx version. Most production systems would want this information hidden, as it is technical information disclosure ( https://portswigger.net/web-security/information-disclosure). nginx does provide the option "server_tokens off;" which hides the version, but in order to get rid of the header, nginx needs to be compiled with the headers_more module, for the option "more_clear_headers". This patch provides an http option for hiding that information, which also hides the server information from the default error responses. An alternative would be to add a new option to server_tokens, e.g. "incognito". 
diff -r cdda286c0f1b -r 112e223511c0 src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c Tue Oct 10 15:13:39 2023 +0300 +++ b/src/http/ngx_http_core_module.c Wed Oct 18 21:31:46 2023 +0300 @@ -129,6 +129,13 @@ }; +static ngx_conf_enum_t ngx_http_core_server_identification[] = { + { ngx_string("off"), NGX_HTTP_SERVER_IDENTIFICATION_OFF }, + { ngx_string("on"), NGX_HTTP_SERVER_IDENTIFICATION_ON }, + { ngx_null_string, 0 } +}; + + static ngx_conf_enum_t ngx_http_core_if_modified_since[] = { { ngx_string("off"), NGX_HTTP_IMS_OFF }, { ngx_string("exact"), NGX_HTTP_IMS_EXACT }, @@ -635,6 +642,13 @@ offsetof(ngx_http_core_loc_conf_t, server_tokens), &ngx_http_core_server_tokens }, + { ngx_string("server_identification"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, + ngx_conf_set_enum_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_core_loc_conf_t, server_identification), + &ngx_http_core_server_identification }, + { ngx_string("if_modified_since"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, ngx_conf_set_enum_slot, @@ -3623,6 +3637,7 @@ clcf->chunked_transfer_encoding = NGX_CONF_UNSET; clcf->etag = NGX_CONF_UNSET; clcf->server_tokens = NGX_CONF_UNSET_UINT; + clcf->server_identification = NGX_CONF_UNSET_UINT; clcf->types_hash_max_size = NGX_CONF_UNSET_UINT; clcf->types_hash_bucket_size = NGX_CONF_UNSET_UINT; @@ -3901,6 +3916,9 @@ ngx_conf_merge_uint_value(conf->server_tokens, prev->server_tokens, NGX_HTTP_SERVER_TOKENS_ON); + ngx_conf_merge_uint_value(conf->server_identification, prev->server_identification, + NGX_HTTP_SERVER_IDENTIFICATION_ON); + ngx_conf_merge_ptr_value(conf->open_file_cache, prev->open_file_cache, NULL); diff -r cdda286c0f1b -r 112e223511c0 src/http/ngx_http_core_module.h --- a/src/http/ngx_http_core_module.h Tue Oct 10 15:13:39 2023 +0300 +++ b/src/http/ngx_http_core_module.h Wed Oct 18 21:31:46 2023 +0300 @@ -55,6 +55,10 @@ #define NGX_HTTP_KEEPALIVE_DISABLE_SAFARI 0x0008 
+#define NGX_HTTP_SERVER_IDENTIFICATION_OFF 0 +#define NGX_HTTP_SERVER_IDENTIFICATION_ON 1 + + #define NGX_HTTP_SERVER_TOKENS_OFF 0 #define NGX_HTTP_SERVER_TOKENS_ON 1 #define NGX_HTTP_SERVER_TOKENS_BUILD 2 @@ -405,6 +409,7 @@ ngx_flag_t log_subrequest; /* log_subrequest */ ngx_flag_t recursive_error_pages; /* recursive_error_pages */ ngx_uint_t server_tokens; /* server_tokens */ + ngx_uint_t server_identification; /* server_identification */ ngx_flag_t chunked_transfer_encoding; /* chunked_transfer_encoding */ ngx_flag_t etag; /* etag */ diff -r cdda286c0f1b -r 112e223511c0 src/http/ngx_http_header_filter_module.c --- a/src/http/ngx_http_header_filter_module.c Tue Oct 10 15:13:39 2023 +0300 +++ b/src/http/ngx_http_header_filter_module.c Wed Oct 18 21:31:46 2023 +0300 @@ -283,7 +283,7 @@ clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); - if (r->headers_out.server == NULL) { + if (r->headers_out.server == NULL && clcf->server_identification == NGX_HTTP_SERVER_IDENTIFICATION_ON) { if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { len += sizeof(ngx_http_server_full_string) - 1; @@ -452,7 +452,7 @@ } *b->last++ = CR; *b->last++ = LF; - if (r->headers_out.server == NULL) { + if (r->headers_out.server == NULL && clcf->server_identification == NGX_HTTP_SERVER_IDENTIFICATION_ON) { if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { p = ngx_http_server_full_string; len = sizeof(ngx_http_server_full_string) - 1; diff -r cdda286c0f1b -r 112e223511c0 src/http/ngx_http_special_response.c --- a/src/http/ngx_http_special_response.c Tue Oct 10 15:13:39 2023 +0300 +++ b/src/http/ngx_http_special_response.c Wed Oct 18 21:31:46 2023 +0300 @@ -39,6 +39,12 @@ ; +static u_char ngx_http_error_tail_minimal[] = +"" CRLF +"" CRLF +; + + static u_char ngx_http_msie_padding[] = "" CRLF "" CRLF @@ -680,17 +686,22 @@ ngx_uint_t msie_padding; ngx_chain_t out[3]; - if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { - len = sizeof(ngx_http_error_full_tail) - 1; - tail = 
ngx_http_error_full_tail; + if (clcf->server_identification == NGX_HTTP_SERVER_IDENTIFICATION_ON) { + if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { + len = sizeof(ngx_http_error_full_tail) - 1; + tail = ngx_http_error_full_tail; - } else if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_BUILD) { - len = sizeof(ngx_http_error_build_tail) - 1; - tail = ngx_http_error_build_tail; + } else if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_BUILD) { + len = sizeof(ngx_http_error_build_tail) - 1; + tail = ngx_http_error_build_tail; + } else { + len = sizeof(ngx_http_error_tail) - 1; + tail = ngx_http_error_tail; + } } else { - len = sizeof(ngx_http_error_tail) - 1; - tail = ngx_http_error_tail; + len = sizeof(ngx_http_error_tail_minimal) - 1; + tail = ngx_http_error_tail_minimal; } msie_padding = 0; diff -r cdda286c0f1b -r 112e223511c0 src/http/v2/ngx_http_v2_filter_module.c --- a/src/http/v2/ngx_http_v2_filter_module.c Tue Oct 10 15:13:39 2023 +0300 +++ b/src/http/v2/ngx_http_v2_filter_module.c Wed Oct 18 21:31:46 2023 +0300 @@ -217,7 +217,7 @@ clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); - if (r->headers_out.server == NULL) { + if (r->headers_out.server == NULL && clcf->server_identification == NGX_HTTP_SERVER_IDENTIFICATION_ON) { if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { len += 1 + nginx_ver_len; @@ -421,7 +421,7 @@ pos = ngx_sprintf(pos, "%03ui", r->headers_out.status); } - if (r->headers_out.server == NULL) { + if (r->headers_out.server == NULL && clcf->server_identification == NGX_HTTP_SERVER_IDENTIFICATION_ON) { if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, diff -r cdda286c0f1b -r 112e223511c0 src/http/v3/ngx_http_v3_filter_module.c --- a/src/http/v3/ngx_http_v3_filter_module.c Tue Oct 10 15:13:39 2023 +0300 +++ b/src/http/v3/ngx_http_v3_filter_module.c Wed Oct 18 21:31:46 2023 +0300 @@ -158,7 +158,7 @@ clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); - if 
(r->headers_out.server == NULL) { + if (r->headers_out.server == NULL && clcf->server_identification == NGX_HTTP_SERVER_IDENTIFICATION_ON) { if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { n = sizeof(NGINX_VER) - 1; @@ -339,7 +339,7 @@ b->last = ngx_sprintf(b->last, "%03ui", r->headers_out.status); } - if (r->headers_out.server == NULL) { + if (r->headers_out.server == NULL && clcf->server_identification == NGX_HTTP_SERVER_IDENTIFICATION_ON) { if (clcf->server_tokens == NGX_HTTP_SERVER_TOKENS_ON) { p = (u_char *) NGINX_VER; n = sizeof(NGINX_VER) - 1; -------------- next part -------------- An HTML attachment was scrubbed... URL: From al-nginx at none.at Wed Oct 18 19:05:41 2023 From: al-nginx at none.at (Aleksandar Lazic) Date: Wed, 18 Oct 2023 21:05:41 +0200 Subject: [PATCH] http option for server identification removal In-Reply-To: References: Message-ID: <5a86dd68-873d-4153-98ac-45827b888df9@none.at> Hi Teo. On 2023-10-18 (Mi.) 20:38, Teo Tyrov wrote: > # HG changeset patch > # User Theodoros Tyrovouzis > > # Date 1697653906 -10800 > #      Wed Oct 18 21:31:46 2023 +0300 > # Node ID 112e223511c087fac000065c7eb99dd88e66b174 > # Parent  cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > Add "server_identification" http option that hides server information > disclosure in responses > > In its responses, nginx by default sends a "Server" header which > contains "nginx" and the nginx version. Most production systems would > want this information hidden, as it is technical information disclosure > (https://portswigger.net/web-security/information-disclosure). nginx > does provide the option "server_tokens off;" which hides the version, > but in order to get rid of the header, nginx needs to be compiled with > the headers_more module, for the option "more_clear_headers". This patch > provides an http option for hiding that information, which also hides > the server information from the default error responses. 
> > An alternative would be to add a new option to server_tokens, e.g. > "incognito". What's wrong with this directive? http://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens [snipp] Regards Alex From xeioex at nginx.com Thu Oct 19 01:50:44 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Thu, 19 Oct 2023 01:50:44 +0000 Subject: [njs] XML: fixed compilation with certain GCC versions. Message-ID: details: https://hg.nginx.org/njs/rev/3a7526c8694c branches: changeset: 2222:3a7526c8694c user: Dmitry Volyntsev date: Wed Oct 18 18:36:00 2023 -0700 description: XML: fixed compilation with certain GCC versions. external/njs_xml_module.c:541:16: error: 'name.length' may be used uninitialized [-Werror=maybe-uninitialized] 541 | if (name.length != njs_strlen(node->name). diffstat: external/njs_xml_module.c | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (15 lines): diff -r c0ff44d66ffb -r 3a7526c8694c external/njs_xml_module.c --- a/external/njs_xml_module.c Tue Oct 17 17:51:39 2023 -0700 +++ b/external/njs_xml_module.c Wed Oct 18 18:36:00 2023 -0700 @@ -527,6 +527,11 @@ njs_xml_doc_ext_root(njs_vm_t *vm, njs_o njs_value_undefined_set(retval); return NJS_DECLINED; } + + } else { + /* To suppress warning. */ + name.length = 0; + name.start = NULL; } for (node = xmlDocGetRootElement(tree->doc); From teotyrov at gmail.com Thu Oct 19 13:16:14 2023 From: teotyrov at gmail.com (Teo Tyrov) Date: Thu, 19 Oct 2023 16:16:14 +0300 Subject: [PATCH] http option for server identification removal In-Reply-To: <36a780b1-b65c-4a4c-91ab-6a944ecba6b4@none.at> References: <5a86dd68-873d-4153-98ac-45827b888df9@none.at> <36a780b1-b65c-4a4c-91ab-6a944ecba6b4@none.at> Message-ID: Sorry, I forgot to add the mailing list to the recipients Best, Thodoris On Wed, Oct 18, 2023 at 11:17 PM Aleksandar Lazic wrote: > Hi Teo. > > On 2023-10-18 (Mi.) 
21:18, Teo Tyrov wrote: > > Hello Alex, > > > > This directive removes only the version, so it is still disclosed that > > the nginx server is used. I would be asked to remove the entire header > > in my previous company, which as far as I know, is not possible without > > external modules. > > got it. > > > On Wed, Oct 18, 2023 at 10:05 PM Aleksandar Lazic > > wrote: > > > > Hi Teo. > > > > On 2023-10-18 (Mi.) 20:38, Teo Tyrov wrote: > > > # HG changeset patch > > > # User Theodoros Tyrovouzis > > >> > > > # Date 1697653906 -10800 > > > # Wed Oct 18 21:31:46 2023 +0300 > > > # Node ID 112e223511c087fac000065c7eb99dd88e66b174 > > > # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > > > Add "server_identification" http option that hides server > > information > > > disclosure in responses > > > > > > In its responses, nginx by default sends a "Server" header which > > > contains "nginx" and the nginx version. Most production systems > > would > > > want this information hidden, as it is technical information > > disclosure > > > (https://portswigger.net/web-security/information-disclosure > > ). > nginx > > > does provide the option "server_tokens off;" which hides the > > version, > > > but in order to get rid of the header, nginx needs to be compiled > > with > > > the headers_more module, for the option "more_clear_headers". > > This patch > > > provides an http option for hiding that information, which also > > hides > > > the server information from the default error responses. > > > > > > An alternative would be to add a new option to server_tokens, e.g. > > > "incognito". > > > > What's wrong with this directive? > > > http://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens < > http://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens> > > > > [snipp] > > > > Regards > > Alex > > > > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From antoine.bonavita at gmail.com Thu Oct 19 13:49:03 2023 From: antoine.bonavita at gmail.com (Antoine Bonavita) Date: Thu, 19 Oct 2023 15:49:03 +0200 Subject: [PATCH] http option for server identification removal In-Reply-To: References: <5a86dd68-873d-4153-98ac-45827b888df9@none.at> <36a780b1-b65c-4a4c-91ab-6a944ecba6b4@none.at> Message-ID: Teo, You might want to have a look at: https://trac.nginx.org/nginx/ticket/936 If my understanding is correct, this feature is already offered as part of Nginx Plus. Hope this helps, A. On Thu, Oct 19, 2023 at 3:16 PM Teo Tyrov wrote: > Sorry, I forgot to add the mailing list to the recipients > > Best, > Thodoris > > On Wed, Oct 18, 2023 at 11:17 PM Aleksandar Lazic > wrote: > >> Hi Teo. >> >> On 2023-10-18 (Mi.) 21:18, Teo Tyrov wrote: >> > Hello Alex, >> > >> > This directive removes only the version, so it is still disclosed that >> > the nginx server is used. I would be asked to remove the entire header >> > in my previous company, which as far as I know, is not possible without >> > external modules. >> >> got it. >> >> > On Wed, Oct 18, 2023 at 10:05 PM Aleksandar Lazic > > > wrote: >> > >> > Hi Teo. >> > >> > On 2023-10-18 (Mi.) 20:38, Teo Tyrov wrote: >> > > # HG changeset patch >> > > # User Theodoros Tyrovouzis > > > > >> >> > > # Date 1697653906 -10800 >> > > # Wed Oct 18 21:31:46 2023 +0300 >> > > # Node ID 112e223511c087fac000065c7eb99dd88e66b174 >> > > # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc >> > > Add "server_identification" http option that hides server >> > information >> > > disclosure in responses >> > > >> > > In its responses, nginx by default sends a "Server" header which >> > > contains "nginx" and the nginx version. Most production systems >> > would >> > > want this information hidden, as it is technical information >> > disclosure >> > > (https://portswigger.net/web-security/information-disclosure >> > ). 
>> nginx >> > > does provide the option "server_tokens off;" which hides the >> > version, >> > > but in order to get rid of the header, nginx needs to be compiled >> > with >> > > the headers_more module, for the option "more_clear_headers". >> > This patch >> > > provides an http option for hiding that information, which also >> > hides >> > > the server information from the default error responses. >> > > >> > > An alternative would be to add a new option to server_tokens, >> e.g. >> > > "incognito". >> > >> > What's wrong with this directive? >> > >> http://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens < >> http://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens> >> > >> > [snipp] >> > >> > Regards >> > Alex >> > >> >> _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From artem.konev at nginx.com Thu Oct 19 15:52:28 2023 From: artem.konev at nginx.com (=?iso-8859-1?q?Artem_Konev?=) Date: Thu, 19 Oct 2023 16:52:28 +0100 Subject: [PATCH] Added info about the Unit 1.31.1 release Message-ID: xml/index.xml | 7 +++++++ 1 files changed, 7 insertions(+), 0 deletions(-) # HG changeset patch # User Artem Konev # Date 1697730616 -3600 # Thu Oct 19 16:50:16 2023 +0100 # Node ID c8d0acb26a459a706ec4482ca96b02cbba7dccdc # Parent 58f0d9d7fe1dae6b771db1bf4de6f6f9f35ffe84 Added info about the Unit 1.31.1 release. diff --git a/xml/index.xml b/xml/index.xml --- a/xml/index.xml +++ b/xml/index.xml @@ -7,6 +7,13 @@ + + +unit-1.31.1 maintenance version has been +released. 
+ + + njs-0.8.1 From pluknet at nginx.com Thu Oct 19 16:13:29 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 19 Oct 2023 20:13:29 +0400 Subject: [PATCH] Added info about the Unit 1.31.1 release In-Reply-To: References: Message-ID: > On 19 Oct 2023, at 19:52, Artem Konev wrote: > > xml/index.xml | 7 +++++++ > 1 files changed, 7 insertions(+), 0 deletions(-) > > > # HG changeset patch > # User Artem Konev > # Date 1697730616 -3600 > # Thu Oct 19 16:50:16 2023 +0100 > # Node ID c8d0acb26a459a706ec4482ca96b02cbba7dccdc > # Parent 58f0d9d7fe1dae6b771db1bf4de6f6f9f35ffe84 > Added info about the Unit 1.31.1 release. > > diff --git a/xml/index.xml b/xml/index.xml > --- a/xml/index.xml > +++ b/xml/index.xml > @@ -7,6 +7,13 @@ > > > > + > + > +unit-1.31.1 maintenance version has been > +released. > + > + > + > > > njs-0.8.1 I'd rewrap long lines, otherwise looks good. -- Sergey Kandaurov From yar at nginx.com Thu Oct 19 16:16:41 2023 From: yar at nginx.com (Yaroslav Zhuravlev) Date: Thu, 19 Oct 2023 17:16:41 +0100 Subject: [PATCH] Added info about the Unit 1.31.1 release In-Reply-To: References: Message-ID: <1C545592-5F1F-433B-8502-1EAC09E1B7F7@nginx.com> Hi Artem, > On 19 Oct 2023, at 16:52, Artem Konev wrote: > > xml/index.xml | 7 +++++++ > 1 files changed, 7 insertions(+), 0 deletions(-) > > > # HG changeset patch > # User Artem Konev > # Date 1697730616 -3600 > # Thu Oct 19 16:50:16 2023 +0100 > # Node ID c8d0acb26a459a706ec4482ca96b02cbba7dccdc > # Parent 58f0d9d7fe1dae6b771db1bf4de6f6f9f35ffe84 > Added info about the Unit 1.31.1 release. > > diff --git a/xml/index.xml b/xml/index.xml > --- a/xml/index.xml > +++ b/xml/index.xml > @@ -7,6 +7,13 @@ > > > > + > + > +unit-1.31.1 maintenance version has been Perhaps a line break after "version" would be good as the line is longer than 80 symbols, otherwise all good. > +released. 
> + > + > + > > > njs-0.8.1 > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From arut at nginx.com Fri Oct 20 07:27:22 2023 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 20 Oct 2023 11:27:22 +0400 Subject: [PATCH 00 of 11] [quic] reusing crypto contexts, and more #2 In-Reply-To: References: Message-ID: <20231020072722.o7lk2rzjhwwl5exm@N00W24XTQX> Hi, On Wed, Oct 18, 2023 at 07:26:42PM +0400, Sergey Kandaurov wrote: > Updated series to address arut@ comments: > - patches #1, #2, #4 unchanged > - patch #3 replaced with keys check in ngx_quic_ack_packet > - #5 updates ngx_quic_keys_cleanup and ngx_quic_compat_set_encryption_secret > - factored out common code for ngx_quic_crypto_open/seal > - ngx_quic_crypto_hp cleanup for CRYPTO_chacha_20 > - new change to simpify ngx_quic_ciphers() API. > - new change to remove key field from ngx_quic_secret_t > - assorted fixes in using ngx_explicit_memzero The series looks ok. -- Roman From pluknet at nginx.com Fri Oct 20 13:23:45 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 17:23:45 +0400 Subject: [PATCH] HTTP/2: fixed buffer management with HTTP/2 auto-detection Message-ID: <318c8ace6aa24506004b.1697808225@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1697808142 -14400 # Fri Oct 20 17:22:22 2023 +0400 # Node ID 318c8ace6aa24506004bfbb7d52674f61a3716a5 # Parent 3038bd4d78169a5e8a2624d79cf76f45f0805ddc HTTP/2: fixed buffer management with HTTP/2 auto-detection. As part of normal HTTP/2 processing, incomplete frames are saved in the control state using a fixed size memcpy of NGX_HTTP_V2_STATE_BUFFER_SIZE. For this matter, two state buffers are reserved in the HTTP/2 recv buffer. 
As part of HTTP/2 auto-detection on plain TCP connections, initial data is first read into a buffer specified by the client_header_buffer_size directive that doesn't have state reservation. Previously, this made it possible to over-read the buffer as part of saving the state. The fix is to read the available buffer size rather than a fixed size. Although memcpy of a fixed size can produce a better optimized code, handling of incomplete frames isn't a common execution path, so it was sacrificed for the sake of simplicity of the fix. diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c +++ b/src/http/v2/ngx_http_v2.c @@ -386,13 +386,11 @@ ngx_http_v2_read_handler(ngx_event_t *re h2mcf = ngx_http_get_module_main_conf(h2c->http_connection->conf_ctx, ngx_http_v2_module); - available = h2mcf->recv_buffer_size - 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE; + available = h2mcf->recv_buffer_size - NGX_HTTP_V2_STATE_BUFFER_SIZE; do { p = h2mcf->recv_buffer; - - ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE); - end = p + h2c->state.buffer_used; + end = ngx_cpymem(p, h2c->state.buffer, h2c->state.buffer_used); n = c->recv(c, end, available); @@ -2592,7 +2590,7 @@ ngx_http_v2_state_save(ngx_http_v2_conne return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); } - ngx_memcpy(h2c->state.buffer, pos, NGX_HTTP_V2_STATE_BUFFER_SIZE); + ngx_memcpy(h2c->state.buffer, pos, size); h2c->state.buffer_used = size; h2c->state.handler = handler; diff --git a/src/http/v2/ngx_http_v2_module.c b/src/http/v2/ngx_http_v2_module.c --- a/src/http/v2/ngx_http_v2_module.c +++ b/src/http/v2/ngx_http_v2_module.c @@ -388,7 +388,7 @@ ngx_http_v2_recv_buffer_size(ngx_conf_t { size_t *sp = data; - if (*sp <= 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE) { + if (*sp <= NGX_HTTP_V2_STATE_BUFFER_SIZE) { return "value is too small"; } From pluknet at nginx.com Fri Oct 20 14:04:32 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 20 Oct 2023 
18:04:32 +0400 Subject: [PATCH] HTTP/2: fixed buffer management with HTTP/2 auto-detection In-Reply-To: <318c8ace6aa24506004b.1697808225@enoparse.local> References: <318c8ace6aa24506004b.1697808225@enoparse.local> Message-ID: <71BC1B29-32B5-4A61-8D80-EDCE0495D34A@nginx.com> > On 20 Oct 2023, at 17:23, Sergey Kandaurov wrote: > > # HG changeset patch > # User Sergey Kandaurov > # Date 1697808142 -14400 > # Fri Oct 20 17:22:22 2023 +0400 > # Node ID 318c8ace6aa24506004bfbb7d52674f61a3716a5 > # Parent 3038bd4d78169a5e8a2624d79cf76f45f0805ddc > HTTP/2: fixed buffer management with HTTP/2 auto-detection. > > As part of normal HTTP/2 processing, incomplete frames are saved in the > control state using a fixed size memcpy of NGX_HTTP_V2_STATE_BUFFER_SIZE. > For this matter, two state buffers are reserved in the HTTP/2 recv buffer. > > As part of HTTP/2 auto-detection on plain TCP connections, initial data > is first read into a buffer specified by the client_header_buffer_size > directive that doesn't have state reservation. Previously, this made it > possible to over-read the buffer as part of saving the state. > > The fix is to read the available buffer size rather than a fixed size. > Although memcpy of a fixed size can produce a better optimized code, From my limited testing, replacing a fixed size with an available size degrades "-O" optimized memcpy from SSE instructions over XMM registers to simple MOVs. > handling of incomplete frames isn't a common execution path, so it was > sacrificed for the sake of simplicity of the fix. Another approach is to displace initial data into the recv buffer for subsequent processing, which would require additional handling in ngx_http_v2_init(). After some pondering I declined it due to added complexity without a good reason. 
> > diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c > --- a/src/http/v2/ngx_http_v2.c > +++ b/src/http/v2/ngx_http_v2.c > @@ -386,13 +386,11 @@ ngx_http_v2_read_handler(ngx_event_t *re > h2mcf = ngx_http_get_module_main_conf(h2c->http_connection->conf_ctx, > ngx_http_v2_module); > > - available = h2mcf->recv_buffer_size - 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE; > + available = h2mcf->recv_buffer_size - NGX_HTTP_V2_STATE_BUFFER_SIZE; > > do { > p = h2mcf->recv_buffer; > - > - ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE); > - end = p + h2c->state.buffer_used; > + end = ngx_cpymem(p, h2c->state.buffer, h2c->state.buffer_used); > > n = c->recv(c, end, available); > > @@ -2592,7 +2590,7 @@ ngx_http_v2_state_save(ngx_http_v2_conne > return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); > } > > - ngx_memcpy(h2c->state.buffer, pos, NGX_HTTP_V2_STATE_BUFFER_SIZE); > + ngx_memcpy(h2c->state.buffer, pos, size); > > h2c->state.buffer_used = size; > h2c->state.handler = handler; > diff --git a/src/http/v2/ngx_http_v2_module.c b/src/http/v2/ngx_http_v2_module.c > --- a/src/http/v2/ngx_http_v2_module.c > +++ b/src/http/v2/ngx_http_v2_module.c > @@ -388,7 +388,7 @@ ngx_http_v2_recv_buffer_size(ngx_conf_t > { > size_t *sp = data; > > - if (*sp <= 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE) { > + if (*sp <= NGX_HTTP_V2_STATE_BUFFER_SIZE) { > return "value is too small"; > } > -- Sergey Kandaurov From pluknet at nginx.com Fri Oct 20 14:42:12 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:12 +0000 Subject: [nginx] QUIC: split keys availability checks to read and write sides. Message-ID: details: https://hg.nginx.org/nginx/rev/ff98ae7d261e branches: changeset: 9168:ff98ae7d261e user: Sergey Kandaurov date: Thu Aug 31 19:54:10 2023 +0400 description: QUIC: split keys availability checks to read and write sides. 
Keys may be released by TLS stack in different times, so it makes sense to check this independently as well. This allows to fine-tune what key direction is used when checking keys availability. When discarding, server keys are now marked in addition to client keys. diffstat: src/event/quic/ngx_event_quic.c | 8 +++++--- src/event/quic/ngx_event_quic_protection.c | 9 +++++++-- src/event/quic/ngx_event_quic_protection.h | 2 +- src/event/quic/ngx_event_quic_ssl.c | 2 +- 4 files changed, 14 insertions(+), 7 deletions(-) diffs (83 lines): diff -r 3038bd4d7816 -r ff98ae7d261e src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c Wed Oct 18 04:30:11 2023 +0300 +++ b/src/event/quic/ngx_event_quic.c Thu Aug 31 19:54:10 2023 +0400 @@ -530,7 +530,7 @@ ngx_quic_close_connection(ngx_connection for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { ctx = &qc->send_ctx[i]; - if (!ngx_quic_keys_available(qc->keys, ctx->level)) { + if (!ngx_quic_keys_available(qc->keys, ctx->level, 1)) { continue; } @@ -959,7 +959,7 @@ ngx_quic_handle_payload(ngx_connection_t c->log->action = "decrypting packet"; - if (!ngx_quic_keys_available(qc->keys, pkt->level)) { + if (!ngx_quic_keys_available(qc->keys, pkt->level, 0)) { ngx_log_error(NGX_LOG_INFO, c->log, 0, "quic no %s keys, ignoring packet", ngx_quic_level_name(pkt->level)); @@ -1082,7 +1082,9 @@ ngx_quic_discard_ctx(ngx_connection_t *c qc = ngx_quic_get_connection(c); - if (!ngx_quic_keys_available(qc->keys, level)) { + if (!ngx_quic_keys_available(qc->keys, level, 0) + && !ngx_quic_keys_available(qc->keys, level, 1)) + { return; } diff -r 3038bd4d7816 -r ff98ae7d261e src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c Wed Oct 18 04:30:11 2023 +0300 +++ b/src/event/quic/ngx_event_quic_protection.c Thu Aug 31 19:54:10 2023 +0400 @@ -672,9 +672,13 @@ ngx_quic_keys_set_encryption_secret(ngx_ ngx_uint_t ngx_quic_keys_available(ngx_quic_keys_t *keys, - enum ssl_encryption_level_t level) + enum 
ssl_encryption_level_t level, ngx_uint_t is_write) { - return keys->secrets[level].client.key.len != 0; + if (is_write == 0) { + return keys->secrets[level].client.key.len != 0; + } + + return keys->secrets[level].server.key.len != 0; } @@ -683,6 +687,7 @@ ngx_quic_keys_discard(ngx_quic_keys_t *k enum ssl_encryption_level_t level) { keys->secrets[level].client.key.len = 0; + keys->secrets[level].server.key.len = 0; } diff -r 3038bd4d7816 -r ff98ae7d261e src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h Wed Oct 18 04:30:11 2023 +0300 +++ b/src/event/quic/ngx_event_quic_protection.h Thu Aug 31 19:54:10 2023 +0400 @@ -95,7 +95,7 @@ ngx_int_t ngx_quic_keys_set_encryption_s enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); ngx_uint_t ngx_quic_keys_available(ngx_quic_keys_t *keys, - enum ssl_encryption_level_t level); + enum ssl_encryption_level_t level, ngx_uint_t is_write); void ngx_quic_keys_discard(ngx_quic_keys_t *keys, enum ssl_encryption_level_t level); void ngx_quic_keys_switch(ngx_connection_t *c, ngx_quic_keys_t *keys); diff -r 3038bd4d7816 -r ff98ae7d261e src/event/quic/ngx_event_quic_ssl.c --- a/src/event/quic/ngx_event_quic_ssl.c Wed Oct 18 04:30:11 2023 +0300 +++ b/src/event/quic/ngx_event_quic_ssl.c Thu Aug 31 19:54:10 2023 +0400 @@ -434,7 +434,7 @@ ngx_quic_crypto_input(ngx_connection_t * } if (n <= 0 || SSL_in_init(ssl_conn)) { - if (ngx_quic_keys_available(qc->keys, ssl_encryption_early_data) + if (ngx_quic_keys_available(qc->keys, ssl_encryption_early_data, 0) && qc->client_tp_done) { if (ngx_quic_init_streams(c) != NGX_OK) { From pluknet at nginx.com Fri Oct 20 14:42:15 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:15 +0000 Subject: [nginx] QUIC: added safety belt to prevent using discarded keys. 
Message-ID: details: https://hg.nginx.org/nginx/rev/60c4e8d3151f branches: changeset: 9169:60c4e8d3151f user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: added safety belt to prevent using discarded keys. In addition to triggering alert, it ensures that such packets won't be sent. With the previous change that marks server keys as discarded by zeroing the key lengh, it is now an error to send packets with discarded keys. OpenSSL based stacks tolerate such behaviour because key length isn't used in packet protection, but BoringSSL will raise the UNSUPPORTED_KEY_SIZE cipher error. It won't be possible to use discarded keys with reused crypto contexts as it happens in subsequent changes. diffstat: src/event/quic/ngx_event_quic_output.c | 15 +++++++++++++++ 1 files changed, 15 insertions(+), 0 deletions(-) diffs (25 lines): diff -r ff98ae7d261e -r 60c4e8d3151f src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c Thu Aug 31 19:54:10 2023 +0400 +++ b/src/event/quic/ngx_event_quic_output.c Fri Oct 20 18:05:07 2023 +0400 @@ -519,6 +519,21 @@ ngx_quic_output_packet(ngx_connection_t qc = ngx_quic_get_connection(c); + if (!ngx_quic_keys_available(qc->keys, ctx->level, 1)) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, "quic %s write keys discarded", + ngx_quic_level_name(ctx->level)); + + while (!ngx_queue_empty(&ctx->frames)) { + q = ngx_queue_head(&ctx->frames); + ngx_queue_remove(q); + + f = ngx_queue_data(q, ngx_quic_frame_t, queue); + ngx_quic_free_frame(c, f); + } + + return 0; + } + ngx_quic_init_packet(c, ctx, &pkt, qc->path); min_payload = ngx_quic_payload_size(&pkt, min); From pluknet at nginx.com Fri Oct 20 14:42:18 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:18 +0000 Subject: [nginx] QUIC: prevented generating ACK frames with discarded keys. 
Message-ID: details: https://hg.nginx.org/nginx/rev/c80d111340dc branches: changeset: 9170:c80d111340dc user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: prevented generating ACK frames with discarded keys. Previously it was possible to generate ACK frames using formally discarded protection keys, in particular, when acknowledging a client Handshake packet used to complete the TLS handshake and to discard handshake protection keys. As it happens late in packet processing, it could be possible to generate ACK frames after the keys were already discarded. ACK frames are generated from ngx_quic_ack_packet(), either using a posted push event, which envolves ngx_quic_generate_ack() as a part of the final packet assembling, or directly in ngx_quic_ack_packet(), such as when there is no room to add a new ACK range or when the received packet is out of order. The added keys availability check is used to avoid generating late ACK frames in both cases. diffstat: src/event/quic/ngx_event_quic_ack.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diffs (14 lines): diff -r 60c4e8d3151f -r c80d111340dc src/event/quic/ngx_event_quic_ack.c --- a/src/event/quic/ngx_event_quic_ack.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_ack.c Fri Oct 20 18:05:07 2023 +0400 @@ -907,6 +907,10 @@ ngx_quic_ack_packet(ngx_connection_t *c, " nranges:%ui", pkt->pn, (int64_t) ctx->largest_range, ctx->first_range, ctx->nranges); + if (!ngx_quic_keys_available(qc->keys, ctx->level, 1)) { + return NGX_OK; + } + prev_pending = ctx->pending_ack; if (pkt->need_ack) { From pluknet at nginx.com Fri Oct 20 14:42:21 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:21 +0000 Subject: [nginx] QUIC: renamed protection functions. 
Message-ID: details: https://hg.nginx.org/nginx/rev/f98636db77ef branches: changeset: 9171:f98636db77ef user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: renamed protection functions. Now these functions have names ngx_quic_crypto_XXX(): - ngx_quic_tls_open() -> ngx_quic_crypto_open() - ngx_quic_tls_seal() -> ngx_quic_crypto_seal() - ngx_quic_tls_hp() -> ngx_quic_crypto_hp() diffstat: src/event/quic/ngx_event_quic_openssl_compat.c | 4 ++-- src/event/quic/ngx_event_quic_protection.c | 25 +++++++++++++------------ src/event/quic/ngx_event_quic_protection.h | 2 +- 3 files changed, 16 insertions(+), 15 deletions(-) diffs (118 lines): diff -r c80d111340dc -r f98636db77ef src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 @@ -578,8 +578,8 @@ ngx_quic_compat_create_record(ngx_quic_c ngx_memcpy(nonce, secret->iv.data, secret->iv.len); ngx_quic_compute_nonce(nonce, sizeof(nonce), rec->number); - if (ngx_quic_tls_seal(ciphers.c, secret, &out, - nonce, &rec->payload, &ad, rec->log) + if (ngx_quic_crypto_seal(ciphers.c, secret, &out, + nonce, &rec->payload, &ad, rec->log) != NGX_OK) { return NGX_ERROR; diff -r c80d111340dc -r f98636db77ef src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 @@ -26,10 +26,10 @@ static ngx_int_t ngx_hkdf_extract(u_char static uint64_t ngx_quic_parse_pn(u_char **pos, ngx_int_t len, u_char *mask, uint64_t *largest_pn); -static ngx_int_t ngx_quic_tls_open(const ngx_quic_cipher_t *cipher, +static ngx_int_t ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); -static ngx_int_t ngx_quic_tls_hp(ngx_log_t *log, const 
EVP_CIPHER *cipher, +static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in); static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, @@ -344,7 +344,7 @@ failed: static ngx_int_t -ngx_quic_tls_open(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, +ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { @@ -449,7 +449,7 @@ ngx_quic_tls_open(const ngx_quic_cipher_ ngx_int_t -ngx_quic_tls_seal(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, +ngx_quic_crypto_seal(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { @@ -565,7 +565,7 @@ ngx_quic_tls_seal(const ngx_quic_cipher_ static ngx_int_t -ngx_quic_tls_hp(ngx_log_t *log, const EVP_CIPHER *cipher, +ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in) { int outlen; @@ -801,15 +801,15 @@ ngx_quic_create_packet(ngx_quic_header_t ngx_memcpy(nonce, secret->iv.data, secret->iv.len); ngx_quic_compute_nonce(nonce, sizeof(nonce), pkt->number); - if (ngx_quic_tls_seal(ciphers.c, secret, &out, - nonce, &pkt->payload, &ad, pkt->log) + if (ngx_quic_crypto_seal(ciphers.c, secret, &out, + nonce, &pkt->payload, &ad, pkt->log) != NGX_OK) { return NGX_ERROR; } sample = &out.data[4 - pkt->num_len]; - if (ngx_quic_tls_hp(pkt->log, ciphers.hp, secret, mask, sample) + if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) != NGX_OK) { return NGX_ERROR; @@ -862,7 +862,8 @@ ngx_quic_create_retry_packet(ngx_quic_he ngx_memcpy(secret.key.data, key, sizeof(key)); secret.iv.len = NGX_QUIC_IV_LEN; - if (ngx_quic_tls_seal(ciphers.c, &secret, &itag, nonce, &in, &ad, pkt->log) + if (ngx_quic_crypto_seal(ciphers.c, &secret, &itag, nonce, &in, &ad, + pkt->log) != NGX_OK) { return NGX_ERROR; @@ -1032,7 +1033,7 @@ 
ngx_quic_decrypt(ngx_quic_header_t *pkt, /* header protection */ - if (ngx_quic_tls_hp(pkt->log, ciphers.hp, secret, mask, sample) + if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) != NGX_OK) { return NGX_DECLINED; @@ -1087,8 +1088,8 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, pkt->payload.len = in.len - NGX_QUIC_TAG_LEN; pkt->payload.data = pkt->plaintext + ad.len; - rc = ngx_quic_tls_open(ciphers.c, secret, &pkt->payload, - nonce, &in, &ad, pkt->log); + rc = ngx_quic_crypto_open(ciphers.c, secret, &pkt->payload, + nonce, &in, &ad, pkt->log); if (rc != NGX_OK) { return NGX_DECLINED; } diff -r c80d111340dc -r f98636db77ef src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 @@ -105,7 +105,7 @@ ngx_int_t ngx_quic_decrypt(ngx_quic_head void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, enum ssl_encryption_level_t level); -ngx_int_t ngx_quic_tls_seal(const ngx_quic_cipher_t *cipher, +ngx_int_t ngx_quic_crypto_seal(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); ngx_int_t ngx_quic_hkdf_expand(ngx_quic_hkdf_t *hkdf, const EVP_MD *digest, From pluknet at nginx.com Fri Oct 20 14:42:24 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:24 +0000 Subject: [nginx] QUIC: reusing crypto contexts for packet protection. Message-ID: details: https://hg.nginx.org/nginx/rev/4ccb0d973206 branches: changeset: 9172:4ccb0d973206 user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: reusing crypto contexts for packet protection. 
diffstat: src/event/quic/ngx_event_quic.c | 3 + src/event/quic/ngx_event_quic_openssl_compat.c | 45 ++- src/event/quic/ngx_event_quic_output.c | 4 + src/event/quic/ngx_event_quic_protection.c | 356 ++++++++++++++---------- src/event/quic/ngx_event_quic_protection.h | 12 +- 5 files changed, 256 insertions(+), 164 deletions(-) diffs (725 lines): diff -r f98636db77ef -r 4ccb0d973206 src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic.c Fri Oct 20 18:05:07 2023 +0400 @@ -335,6 +335,7 @@ ngx_quic_new_connection(ngx_connection_t qc->validated = pkt->validated; if (ngx_quic_open_sockets(c, qc, pkt) != NGX_OK) { + ngx_quic_keys_cleanup(qc->keys); return NULL; } @@ -585,6 +586,8 @@ ngx_quic_close_connection(ngx_connection ngx_quic_close_sockets(c); + ngx_quic_keys_cleanup(qc->keys); + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic close completed"); /* may be tested from SSL callback during SSL shutdown */ diff -r f98636db77ef -r 4ccb0d973206 src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 @@ -54,9 +54,10 @@ struct ngx_quic_compat_s { static void ngx_quic_compat_keylog_callback(const SSL *ssl, const char *line); -static ngx_int_t ngx_quic_compat_set_encryption_secret(ngx_log_t *log, +static ngx_int_t ngx_quic_compat_set_encryption_secret(ngx_connection_t *c, ngx_quic_compat_keys_t *keys, enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len); +static void ngx_quic_compat_cleanup_encryption_secret(void *data); static int ngx_quic_compat_add_transport_params_callback(SSL *ssl, unsigned int ext_type, unsigned int context, const unsigned char **out, size_t *outlen, X509 *x, size_t chainidx, int *al, void *add_arg); @@ -214,14 +215,14 @@ ngx_quic_compat_keylog_callback(const SS 
com->method->set_read_secret((SSL *) ssl, level, cipher, secret, n); com->read_record = 0; - (void) ngx_quic_compat_set_encryption_secret(c->log, &com->keys, level, + (void) ngx_quic_compat_set_encryption_secret(c, &com->keys, level, cipher, secret, n); } } static ngx_int_t -ngx_quic_compat_set_encryption_secret(ngx_log_t *log, +ngx_quic_compat_set_encryption_secret(ngx_connection_t *c, ngx_quic_compat_keys_t *keys, enum ssl_encryption_level_t level, const SSL_CIPHER *cipher, const uint8_t *secret, size_t secret_len) { @@ -231,6 +232,7 @@ ngx_quic_compat_set_encryption_secret(ng ngx_quic_hkdf_t seq[2]; ngx_quic_secret_t *peer_secret; ngx_quic_ciphers_t ciphers; + ngx_pool_cleanup_t *cln; peer_secret = &keys->secret; @@ -239,12 +241,12 @@ ngx_quic_compat_set_encryption_secret(ng key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); if (key_len == NGX_ERROR) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "unexpected cipher"); + ngx_ssl_error(NGX_LOG_INFO, c->log, 0, "unexpected cipher"); return NGX_ERROR; } if (sizeof(peer_secret->secret.data) < secret_len) { - ngx_log_error(NGX_LOG_ALERT, log, 0, + ngx_log_error(NGX_LOG_ALERT, c->log, 0, "unexpected secret len: %uz", secret_len); return NGX_ERROR; } @@ -262,15 +264,43 @@ ngx_quic_compat_set_encryption_secret(ng ngx_quic_hkdf_set(&seq[1], "tls13 iv", &peer_secret->iv, &secret_str); for (i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { - if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, log) != NGX_OK) { + if (ngx_quic_hkdf_expand(&seq[i], ciphers.d, c->log) != NGX_OK) { return NGX_ERROR; } } + /* register cleanup handler once */ + + if (peer_secret->ctx) { + ngx_quic_crypto_cleanup(peer_secret); + + } else { + cln = ngx_pool_cleanup_add(c->pool, 0); + if (cln == NULL) { + return NGX_ERROR; + } + + cln->handler = ngx_quic_compat_cleanup_encryption_secret; + cln->data = peer_secret; + } + + if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { + return NGX_ERROR; + } + return NGX_OK; } +static void 
+ngx_quic_compat_cleanup_encryption_secret(void *data) +{ + ngx_quic_secret_t *secret = data; + + ngx_quic_crypto_cleanup(secret); +} + + static int ngx_quic_compat_add_transport_params_callback(SSL *ssl, unsigned int ext_type, unsigned int context, const unsigned char **out, size_t *outlen, X509 *x, @@ -578,8 +608,7 @@ ngx_quic_compat_create_record(ngx_quic_c ngx_memcpy(nonce, secret->iv.data, secret->iv.len); ngx_quic_compute_nonce(nonce, sizeof(nonce), rec->number); - if (ngx_quic_crypto_seal(ciphers.c, secret, &out, - nonce, &rec->payload, &ad, rec->log) + if (ngx_quic_crypto_seal(secret, &out, nonce, &rec->payload, &ad, rec->log) != NGX_OK) { return NGX_ERROR; diff -r f98636db77ef -r 4ccb0d973206 src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_output.c Fri Oct 20 18:05:07 2023 +0400 @@ -941,13 +941,17 @@ ngx_quic_send_early_cc(ngx_connection_t res.data = dst; if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { + ngx_quic_keys_cleanup(pkt.keys); return NGX_ERROR; } if (ngx_quic_send(c, res.data, res.len, c->sockaddr, c->socklen) < 0) { + ngx_quic_keys_cleanup(pkt.keys); return NGX_ERROR; } + ngx_quic_keys_cleanup(pkt.keys); + return NGX_DONE; } diff -r f98636db77ef -r 4ccb0d973206 src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 @@ -26,9 +26,8 @@ static ngx_int_t ngx_hkdf_extract(u_char static uint64_t ngx_quic_parse_pn(u_char **pos, ngx_int_t len, u_char *mask, uint64_t *largest_pn); -static ngx_int_t ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, - ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, - ngx_str_t *ad, ngx_log_t *log); +static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, + u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); static ngx_int_t 
ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in); @@ -108,13 +107,14 @@ ngx_int_t ngx_quic_keys_set_initial_secret(ngx_quic_keys_t *keys, ngx_str_t *secret, ngx_log_t *log) { - size_t is_len; - uint8_t is[SHA256_DIGEST_LENGTH]; - ngx_str_t iss; - ngx_uint_t i; - const EVP_MD *digest; - ngx_quic_hkdf_t seq[8]; - ngx_quic_secret_t *client, *server; + size_t is_len; + uint8_t is[SHA256_DIGEST_LENGTH]; + ngx_str_t iss; + ngx_uint_t i; + const EVP_MD *digest; + ngx_quic_hkdf_t seq[8]; + ngx_quic_secret_t *client, *server; + ngx_quic_ciphers_t ciphers; static const uint8_t salt[20] = "\x38\x76\x2c\xf7\xf5\x59\x34\xb3\x4d\x17" @@ -180,7 +180,25 @@ ngx_quic_keys_set_initial_secret(ngx_qui } } + if (ngx_quic_ciphers(0, &ciphers, ssl_encryption_initial) == NGX_ERROR) { + return NGX_ERROR; + } + + if (ngx_quic_crypto_init(ciphers.c, client, 0, log) == NGX_ERROR) { + return NGX_ERROR; + } + + if (ngx_quic_crypto_init(ciphers.c, server, 1, log) == NGX_ERROR) { + goto failed; + } + return NGX_OK; + +failed: + + ngx_quic_keys_cleanup(keys); + + return NGX_ERROR; } @@ -343,9 +361,9 @@ failed: } -static ngx_int_t -ngx_quic_crypto_open(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, - ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) +ngx_int_t +ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, + ngx_int_t enc, ngx_log_t *log) { #ifdef OPENSSL_IS_BORINGSSL @@ -357,19 +375,7 @@ ngx_quic_crypto_open(const ngx_quic_ciph ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_new() failed"); return NGX_ERROR; } - - if (EVP_AEAD_CTX_open(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) - != 1) - { - EVP_AEAD_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_open() failed"); - return NGX_ERROR; - } - - EVP_AEAD_CTX_free(ctx); #else - int len; EVP_CIPHER_CTX *ctx; ctx = EVP_CIPHER_CTX_new(); @@ -378,114 +384,9 @@ 
ngx_quic_crypto_open(const ngx_quic_ciph return NGX_ERROR; } - if (EVP_DecryptInit_ex(ctx, cipher, NULL, NULL, NULL) != 1) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); - return NGX_ERROR; - } - - in->len -= NGX_QUIC_TAG_LEN; - - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, - in->data + in->len) - == 0) - { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); - return NGX_ERROR; - } - - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, s->iv.len, NULL) - == 0) - { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_IVLEN) failed"); - return NGX_ERROR; - } - - if (EVP_DecryptInit_ex(ctx, NULL, NULL, s->key.data, nonce) != 1) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); - return NGX_ERROR; - } - - if (EVP_CIPHER_mode(cipher) == EVP_CIPH_CCM_MODE - && EVP_DecryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) - { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { + if (EVP_CipherInit_ex(ctx, cipher, NULL, NULL, NULL, enc) != 1) { EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - out->len = len; - - if (EVP_DecryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptFinal_ex failed"); - return NGX_ERROR; - } - - out->len += len; - - EVP_CIPHER_CTX_free(ctx); -#endif - - return NGX_OK; -} - - -ngx_int_t -ngx_quic_crypto_seal(const 
ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, - ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) -{ - -#ifdef OPENSSL_IS_BORINGSSL - EVP_AEAD_CTX *ctx; - - ctx = EVP_AEAD_CTX_new(cipher, s->key.data, s->key.len, - EVP_AEAD_DEFAULT_TAG_LENGTH); - if (ctx == NULL) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_new() failed"); - return NGX_ERROR; - } - - if (EVP_AEAD_CTX_seal(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) - != 1) - { - EVP_AEAD_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_seal() failed"); - return NGX_ERROR; - } - - EVP_AEAD_CTX_free(ctx); -#else - int len; - EVP_CIPHER_CTX *ctx; - - ctx = EVP_CIPHER_CTX_new(); - if (ctx == NULL) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CIPHER_CTX_new() failed"); - return NGX_ERROR; - } - - if (EVP_EncryptInit_ex(ctx, cipher, NULL, NULL, NULL) != 1) { - EVP_CIPHER_CTX_free(ctx); - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); return NGX_ERROR; } @@ -509,28 +410,121 @@ ngx_quic_crypto_seal(const ngx_quic_ciph return NGX_ERROR; } - if (EVP_EncryptInit_ex(ctx, NULL, NULL, s->key.data, nonce) != 1) { + if (EVP_CipherInit_ex(ctx, NULL, NULL, s->key.data, NULL, enc) != 1) { EVP_CIPHER_CTX_free(ctx); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() failed"); + return NGX_ERROR; + } +#endif + + s->ctx = ctx; + return NGX_OK; +} + + +static ngx_int_t +ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, + ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) +{ + ngx_quic_crypto_ctx_t *ctx; + + ctx = s->ctx; + +#ifdef OPENSSL_IS_BORINGSSL + if (EVP_AEAD_CTX_open(ctx, out->data, &out->len, out->len, nonce, s->iv.len, + in->data, in->len, ad->data, ad->len) + != 1) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_open() failed"); + return NGX_ERROR; + } +#else + int len; + + if 
(EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); + return NGX_ERROR; + } + + in->len -= NGX_QUIC_TAG_LEN; + + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, + in->data + in->len) + == 0) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, + "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); + return NGX_ERROR; + } + + if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE + && EVP_DecryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); + return NGX_ERROR; + } + + if (EVP_DecryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); + return NGX_ERROR; + } + + if (EVP_DecryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); + return NGX_ERROR; + } + + out->len = len; + + if (EVP_DecryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptFinal_ex failed"); + return NGX_ERROR; + } + + out->len += len; +#endif + + return NGX_OK; +} + + +ngx_int_t +ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, + ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) +{ + ngx_quic_crypto_ctx_t *ctx; + + ctx = s->ctx; + +#ifdef OPENSSL_IS_BORINGSSL + if (EVP_AEAD_CTX_seal(ctx, out->data, &out->len, out->len, nonce, s->iv.len, + in->data, in->len, ad->data, ad->len) + != 1) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_seal() failed"); + return NGX_ERROR; + } +#else + int len; + + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); return NGX_ERROR; } - if (EVP_CIPHER_mode(cipher) == EVP_CIPH_CCM_MODE + if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE && EVP_EncryptUpdate(ctx, NULL, &len, NULL, in->len) != 
1) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); return NGX_ERROR; } if (EVP_EncryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); return NGX_ERROR; } if (EVP_EncryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); return NGX_ERROR; } @@ -538,7 +532,6 @@ ngx_quic_crypto_seal(const ngx_quic_ciph out->len = len; if (EVP_EncryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptFinal_ex failed"); return NGX_ERROR; } @@ -549,21 +542,32 @@ ngx_quic_crypto_seal(const ngx_quic_ciph out->data + out->len) == 0) { - EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed"); return NGX_ERROR; } out->len += NGX_QUIC_TAG_LEN; - - EVP_CIPHER_CTX_free(ctx); #endif return NGX_OK; } +void +ngx_quic_crypto_cleanup(ngx_quic_secret_t *s) +{ + if (s->ctx) { +#ifdef OPENSSL_IS_BORINGSSL + EVP_AEAD_CTX_free(s->ctx); +#else + EVP_CIPHER_CTX_free(s->ctx); +#endif + s->ctx = NULL; + } +} + + static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in) @@ -666,6 +670,12 @@ ngx_quic_keys_set_encryption_secret(ngx_ } } + if (ngx_quic_crypto_init(ciphers.c, peer_secret, is_write, log) + == NGX_ERROR) + { + return NGX_ERROR; + } + return NGX_OK; } @@ -675,10 +685,10 @@ ngx_quic_keys_available(ngx_quic_keys_t enum ssl_encryption_level_t level, ngx_uint_t is_write) { if (is_write == 0) { - return keys->secrets[level].client.key.len != 0; + return keys->secrets[level].client.ctx != NULL; } - return keys->secrets[level].server.key.len != 0; + return keys->secrets[level].server.ctx != NULL; } @@ -686,8 +696,13 @@ void ngx_quic_keys_discard(ngx_quic_keys_t 
*keys, enum ssl_encryption_level_t level) { - keys->secrets[level].client.key.len = 0; - keys->secrets[level].server.key.len = 0; + ngx_quic_secret_t *client, *server; + + client = &keys->secrets[level].client; + server = &keys->secrets[level].server; + + ngx_quic_crypto_cleanup(client); + ngx_quic_crypto_cleanup(server); } @@ -699,6 +714,9 @@ ngx_quic_keys_switch(ngx_connection_t *c current = &keys->secrets[ssl_encryption_application]; next = &keys->next_key; + ngx_quic_crypto_cleanup(¤t->client); + ngx_quic_crypto_cleanup(¤t->server); + tmp = *current; *current = *next; *next = tmp; @@ -762,6 +780,16 @@ ngx_quic_keys_update(ngx_event_t *ev) } } + if (ngx_quic_crypto_init(ciphers.c, &next->client, 0, c->log) == NGX_ERROR) + { + goto failed; + } + + if (ngx_quic_crypto_init(ciphers.c, &next->server, 1, c->log) == NGX_ERROR) + { + goto failed; + } + return; failed: @@ -770,6 +798,23 @@ failed: } +void +ngx_quic_keys_cleanup(ngx_quic_keys_t *keys) +{ + ngx_uint_t i; + ngx_quic_secrets_t *next; + + for (i = 0; i < NGX_QUIC_ENCRYPTION_LAST; i++) { + ngx_quic_keys_discard(keys, i); + } + + next = &keys->next_key; + + ngx_quic_crypto_cleanup(&next->client); + ngx_quic_crypto_cleanup(&next->server); +} + + static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, ngx_str_t *res) { @@ -801,8 +846,7 @@ ngx_quic_create_packet(ngx_quic_header_t ngx_memcpy(nonce, secret->iv.data, secret->iv.len); ngx_quic_compute_nonce(nonce, sizeof(nonce), pkt->number); - if (ngx_quic_crypto_seal(ciphers.c, secret, &out, - nonce, &pkt->payload, &ad, pkt->log) + if (ngx_quic_crypto_seal(secret, &out, nonce, &pkt->payload, &ad, pkt->log) != NGX_OK) { return NGX_ERROR; @@ -862,13 +906,19 @@ ngx_quic_create_retry_packet(ngx_quic_he ngx_memcpy(secret.key.data, key, sizeof(key)); secret.iv.len = NGX_QUIC_IV_LEN; - if (ngx_quic_crypto_seal(ciphers.c, &secret, &itag, nonce, &in, &ad, - pkt->log) + if (ngx_quic_crypto_init(ciphers.c, &secret, 1, pkt->log) == NGX_ERROR) { + return NGX_ERROR; + } 
+ + if (ngx_quic_crypto_seal(&secret, &itag, nonce, &in, &ad, pkt->log) != NGX_OK) { + ngx_quic_crypto_cleanup(&secret); return NGX_ERROR; } + ngx_quic_crypto_cleanup(&secret); + res->len = itag.data + itag.len - start; res->data = start; @@ -999,7 +1049,7 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, u_char *p, *sample; size_t len; uint64_t pn, lpn; - ngx_int_t pnl, rc; + ngx_int_t pnl; ngx_str_t in, ad; ngx_uint_t key_phase; ngx_quic_secret_t *secret; @@ -1088,9 +1138,9 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, pkt->payload.len = in.len - NGX_QUIC_TAG_LEN; pkt->payload.data = pkt->plaintext + ad.len; - rc = ngx_quic_crypto_open(ciphers.c, secret, &pkt->payload, - nonce, &in, &ad, pkt->log); - if (rc != NGX_OK) { + if (ngx_quic_crypto_open(secret, &pkt->payload, nonce, &in, &ad, pkt->log) + != NGX_OK) + { return NGX_DECLINED; } diff -r f98636db77ef -r 4ccb0d973206 src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 @@ -26,8 +26,10 @@ #ifdef OPENSSL_IS_BORINGSSL #define ngx_quic_cipher_t EVP_AEAD +#define ngx_quic_crypto_ctx_t EVP_AEAD_CTX #else #define ngx_quic_cipher_t EVP_CIPHER +#define ngx_quic_crypto_ctx_t EVP_CIPHER_CTX #endif @@ -48,6 +50,7 @@ typedef struct { ngx_quic_md_t key; ngx_quic_iv_t iv; ngx_quic_md_t hp; + ngx_quic_crypto_ctx_t *ctx; } ngx_quic_secret_t; @@ -100,14 +103,17 @@ void ngx_quic_keys_discard(ngx_quic_keys enum ssl_encryption_level_t level); void ngx_quic_keys_switch(ngx_connection_t *c, ngx_quic_keys_t *keys); void ngx_quic_keys_update(ngx_event_t *ev); +void ngx_quic_keys_cleanup(ngx_quic_keys_t *keys); ngx_int_t ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_str_t *res); ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn); void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, enum 
ssl_encryption_level_t level); -ngx_int_t ngx_quic_crypto_seal(const ngx_quic_cipher_t *cipher, - ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, - ngx_str_t *ad, ngx_log_t *log); +ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, + ngx_quic_secret_t *s, ngx_int_t enc, ngx_log_t *log); +ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, + u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); +void ngx_quic_crypto_cleanup(ngx_quic_secret_t *s); ngx_int_t ngx_quic_hkdf_expand(ngx_quic_hkdf_t *hkdf, const EVP_MD *digest, ngx_log_t *log); From pluknet at nginx.com Fri Oct 20 14:42:27 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:27 +0000 Subject: [nginx] QUIC: common code for crypto open and seal operations. Message-ID: details: https://hg.nginx.org/nginx/rev/904a54092d5b branches: changeset: 9173:904a54092d5b user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: common code for crypto open and seal operations. 
diffstat: src/event/quic/ngx_event_quic_protection.c | 143 +++++++++++++--------------- 1 files changed, 65 insertions(+), 78 deletions(-) diffs (209 lines): diff -r 4ccb0d973206 -r 904a54092d5b src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 @@ -28,6 +28,10 @@ static uint64_t ngx_quic_parse_pn(u_char static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); +#ifndef OPENSSL_IS_BORINGSSL +static ngx_int_t ngx_quic_crypto_common(ngx_quic_secret_t *s, ngx_str_t *out, + u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); +#endif static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, ngx_quic_secret_t *s, u_char *out, u_char *in); @@ -426,65 +430,19 @@ static ngx_int_t ngx_quic_crypto_open(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { - ngx_quic_crypto_ctx_t *ctx; - - ctx = s->ctx; - #ifdef OPENSSL_IS_BORINGSSL - if (EVP_AEAD_CTX_open(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) + if (EVP_AEAD_CTX_open(s->ctx, out->data, &out->len, out->len, nonce, + s->iv.len, in->data, in->len, ad->data, ad->len) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_open() failed"); return NGX_ERROR; } -#else - int len; - - if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptInit_ex() failed"); - return NGX_ERROR; - } - - in->len -= NGX_QUIC_TAG_LEN; - - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, - in->data + in->len) - == 0) - { - ngx_ssl_error(NGX_LOG_INFO, log, 0, - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); - return NGX_ERROR; - } - - if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE - && 
EVP_DecryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) - { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - if (EVP_DecryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptUpdate() failed"); - return NGX_ERROR; - } - - out->len = len; - - if (EVP_DecryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_DecryptFinal_ex failed"); - return NGX_ERROR; - } - - out->len += len; -#endif return NGX_OK; +#else + return ngx_quic_crypto_common(s, out, nonce, in, ad, log); +#endif } @@ -492,67 +450,96 @@ ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) { - ngx_quic_crypto_ctx_t *ctx; - - ctx = s->ctx; - #ifdef OPENSSL_IS_BORINGSSL - if (EVP_AEAD_CTX_seal(ctx, out->data, &out->len, out->len, nonce, s->iv.len, - in->data, in->len, ad->data, ad->len) + if (EVP_AEAD_CTX_seal(s->ctx, out->data, &out->len, out->len, nonce, + s->iv.len, in->data, in->len, ad->data, ad->len) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_seal() failed"); return NGX_ERROR; } + + return NGX_OK; #else - int len; + return ngx_quic_crypto_common(s, out, nonce, in, ad, log); +#endif +} + + +#ifndef OPENSSL_IS_BORINGSSL + +static ngx_int_t +ngx_quic_crypto_common(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, + ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log) +{ + int len, enc; + ngx_quic_crypto_ctx_t *ctx; - if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, nonce) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); + ctx = s->ctx; + enc = EVP_CIPHER_CTX_encrypting(ctx); + + if (EVP_CipherInit_ex(ctx, NULL, NULL, NULL, nonce, enc) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, 
"EVP_CipherInit_ex() failed"); return NGX_ERROR; } + if (enc == 0) { + in->len -= NGX_QUIC_TAG_LEN; + + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, NGX_QUIC_TAG_LEN, + in->data + in->len) + == 0) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, + "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed"); + return NGX_ERROR; + } + } + if (EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(ctx)) == EVP_CIPH_CCM_MODE - && EVP_EncryptUpdate(ctx, NULL, &len, NULL, in->len) != 1) + && EVP_CipherUpdate(ctx, NULL, &len, NULL, in->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherUpdate() failed"); return NGX_ERROR; } - if (EVP_EncryptUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); + if (EVP_CipherUpdate(ctx, NULL, &len, ad->data, ad->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherUpdate() failed"); return NGX_ERROR; } - if (EVP_EncryptUpdate(ctx, out->data, &len, in->data, in->len) != 1) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); + if (EVP_CipherUpdate(ctx, out->data, &len, in->data, in->len) != 1) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherUpdate() failed"); return NGX_ERROR; } out->len = len; - if (EVP_EncryptFinal_ex(ctx, out->data + out->len, &len) <= 0) { - ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptFinal_ex failed"); + if (EVP_CipherFinal_ex(ctx, out->data + out->len, &len) <= 0) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherFinal_ex failed"); return NGX_ERROR; } out->len += len; - if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, NGX_QUIC_TAG_LEN, - out->data + out->len) - == 0) - { - ngx_ssl_error(NGX_LOG_INFO, log, 0, - "EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed"); - return NGX_ERROR; + if (enc == 1) { + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, NGX_QUIC_TAG_LEN, + out->data + out->len) + == 0) + { + ngx_ssl_error(NGX_LOG_INFO, log, 0, + 
"EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed"); + return NGX_ERROR; + } + + out->len += NGX_QUIC_TAG_LEN; } - out->len += NGX_QUIC_TAG_LEN; -#endif - return NGX_OK; } +#endif + void ngx_quic_crypto_cleanup(ngx_quic_secret_t *s) From pluknet at nginx.com Fri Oct 20 14:42:31 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:31 +0000 Subject: [nginx] QUIC: reusing crypto contexts for header protection. Message-ID: details: https://hg.nginx.org/nginx/rev/31702c53d2db branches: changeset: 9174:31702c53d2db user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: reusing crypto contexts for header protection. diffstat: src/event/quic/ngx_event_quic_protection.c | 102 +++++++++++++++++++++------- src/event/quic/ngx_event_quic_protection.h | 1 + 2 files changed, 75 insertions(+), 28 deletions(-) diffs (204 lines): diff -r 904a54092d5b -r 31702c53d2db src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 @@ -32,8 +32,12 @@ static ngx_int_t ngx_quic_crypto_open(ng static ngx_int_t ngx_quic_crypto_common(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); #endif -static ngx_int_t ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, - ngx_quic_secret_t *s, u_char *out, u_char *in); + +static ngx_int_t ngx_quic_crypto_hp_init(const EVP_CIPHER *cipher, + ngx_quic_secret_t *s, ngx_log_t *log); +static ngx_int_t ngx_quic_crypto_hp(ngx_quic_secret_t *s, + u_char *out, u_char *in, ngx_log_t *log); +static void ngx_quic_crypto_hp_cleanup(ngx_quic_secret_t *s); static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, ngx_str_t *res); @@ -196,6 +200,14 @@ ngx_quic_keys_set_initial_secret(ngx_qui goto failed; } + if (ngx_quic_crypto_hp_init(ciphers.hp, client, log) == NGX_ERROR) { + goto failed; + } + + if 
(ngx_quic_crypto_hp_init(ciphers.hp, server, log) == NGX_ERROR) { + goto failed; + } + return NGX_OK; failed: @@ -556,53 +568,82 @@ ngx_quic_crypto_cleanup(ngx_quic_secret_ static ngx_int_t -ngx_quic_crypto_hp(ngx_log_t *log, const EVP_CIPHER *cipher, - ngx_quic_secret_t *s, u_char *out, u_char *in) +ngx_quic_crypto_hp_init(const EVP_CIPHER *cipher, ngx_quic_secret_t *s, + ngx_log_t *log) { - int outlen; EVP_CIPHER_CTX *ctx; - u_char zero[NGX_QUIC_HP_LEN] = {0}; #ifdef OPENSSL_IS_BORINGSSL - uint32_t cnt; - - ngx_memcpy(&cnt, in, sizeof(uint32_t)); - - if (cipher == (const EVP_CIPHER *) EVP_aead_chacha20_poly1305()) { - CRYPTO_chacha_20(out, zero, NGX_QUIC_HP_LEN, s->hp.data, &in[4], cnt); + if (cipher == (EVP_CIPHER *) EVP_aead_chacha20_poly1305()) { + /* no EVP interface */ + s->hp_ctx = NULL; return NGX_OK; } #endif ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) { + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CIPHER_CTX_new() failed"); + return NGX_ERROR; + } + + if (EVP_EncryptInit_ex(ctx, cipher, NULL, s->hp.data, NULL) != 1) { + EVP_CIPHER_CTX_free(ctx); + ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); return NGX_ERROR; } - if (EVP_EncryptInit_ex(ctx, cipher, NULL, s->hp.data, in) != 1) { + s->hp_ctx = ctx; + return NGX_OK; +} + + +static ngx_int_t +ngx_quic_crypto_hp(ngx_quic_secret_t *s, u_char *out, u_char *in, + ngx_log_t *log) +{ + int outlen; + EVP_CIPHER_CTX *ctx; + u_char zero[NGX_QUIC_HP_LEN] = {0}; + + ctx = s->hp_ctx; + +#ifdef OPENSSL_IS_BORINGSSL + uint32_t cnt; + + if (ctx == NULL) { + ngx_memcpy(&cnt, in, sizeof(uint32_t)); + CRYPTO_chacha_20(out, zero, NGX_QUIC_HP_LEN, s->hp.data, &in[4], cnt); + return NGX_OK; + } +#endif + + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, in) != 1) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptInit_ex() failed"); - goto failed; + return NGX_ERROR; } if (!EVP_EncryptUpdate(ctx, out, &outlen, zero, NGX_QUIC_HP_LEN)) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptUpdate() failed"); - goto 
failed; + return NGX_ERROR; } if (!EVP_EncryptFinal_ex(ctx, out + NGX_QUIC_HP_LEN, &outlen)) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_EncryptFinal_Ex() failed"); - goto failed; + return NGX_ERROR; } - EVP_CIPHER_CTX_free(ctx); - return NGX_OK; +} -failed: - EVP_CIPHER_CTX_free(ctx); - - return NGX_ERROR; +static void +ngx_quic_crypto_hp_cleanup(ngx_quic_secret_t *s) +{ + if (s->hp_ctx) { + EVP_CIPHER_CTX_free(s->hp_ctx); + s->hp_ctx = NULL; + } } @@ -663,6 +704,10 @@ ngx_quic_keys_set_encryption_secret(ngx_ return NGX_ERROR; } + if (ngx_quic_crypto_hp_init(ciphers.hp, peer_secret, log) == NGX_ERROR) { + return NGX_ERROR; + } + return NGX_OK; } @@ -690,6 +735,9 @@ ngx_quic_keys_discard(ngx_quic_keys_t *k ngx_quic_crypto_cleanup(client); ngx_quic_crypto_cleanup(server); + + ngx_quic_crypto_hp_cleanup(client); + ngx_quic_crypto_hp_cleanup(server); } @@ -742,11 +790,13 @@ ngx_quic_keys_update(ngx_event_t *ev) next->client.key.len = current->client.key.len; next->client.iv.len = NGX_QUIC_IV_LEN; next->client.hp = current->client.hp; + next->client.hp_ctx = current->client.hp_ctx; next->server.secret.len = current->server.secret.len; next->server.key.len = current->server.key.len; next->server.iv.len = NGX_QUIC_IV_LEN; next->server.hp = current->server.hp; + next->server.hp_ctx = current->server.hp_ctx; ngx_quic_hkdf_set(&seq[0], "tls13 quic ku", &next->client.secret, ¤t->client.secret); @@ -840,9 +890,7 @@ ngx_quic_create_packet(ngx_quic_header_t } sample = &out.data[4 - pkt->num_len]; - if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) - != NGX_OK) - { + if (ngx_quic_crypto_hp(secret, mask, sample, pkt->log) != NGX_OK) { return NGX_ERROR; } @@ -1070,9 +1118,7 @@ ngx_quic_decrypt(ngx_quic_header_t *pkt, /* header protection */ - if (ngx_quic_crypto_hp(pkt->log, ciphers.hp, secret, mask, sample) - != NGX_OK) - { + if (ngx_quic_crypto_hp(secret, mask, sample, pkt->log) != NGX_OK) { return NGX_DECLINED; } diff -r 904a54092d5b -r 31702c53d2db 
src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 @@ -51,6 +51,7 @@ typedef struct { ngx_quic_iv_t iv; ngx_quic_md_t hp; ngx_quic_crypto_ctx_t *ctx; + EVP_CIPHER_CTX *hp_ctx; } ngx_quic_secret_t; From pluknet at nginx.com Fri Oct 20 14:42:33 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:33 +0000 Subject: [nginx] QUIC: cleaned up now unused ngx_quic_ciphers() calls. Message-ID: details: https://hg.nginx.org/nginx/rev/f7c9cd726298 branches: changeset: 9175:f7c9cd726298 user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: cleaned up now unused ngx_quic_ciphers() calls. diffstat: src/event/quic/ngx_event_quic_openssl_compat.c | 12 ++------ src/event/quic/ngx_event_quic_protection.c | 38 ++++++++----------------- 2 files changed, 16 insertions(+), 34 deletions(-) diffs (91 lines): diff -r 31702c53d2db -r f7c9cd726298 src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 @@ -582,10 +582,9 @@ ngx_quic_compat_create_header(ngx_quic_c static ngx_int_t ngx_quic_compat_create_record(ngx_quic_compat_record_t *rec, ngx_str_t *res) { - ngx_str_t ad, out; - ngx_quic_secret_t *secret; - ngx_quic_ciphers_t ciphers; - u_char nonce[NGX_QUIC_IV_LEN]; + ngx_str_t ad, out; + ngx_quic_secret_t *secret; + u_char nonce[NGX_QUIC_IV_LEN]; ad.data = res->data; ad.len = ngx_quic_compat_create_header(rec, ad.data, 0); @@ -598,11 +597,6 @@ ngx_quic_compat_create_record(ngx_quic_c "quic compat ad len:%uz %xV", ad.len, &ad); #endif - if (ngx_quic_ciphers(rec->keys->cipher, &ciphers, rec->level) == NGX_ERROR) - { - return NGX_ERROR; - } - secret = &rec->keys->secret; ngx_memcpy(nonce, secret->iv.data, secret->iv.len); 
diff -r 31702c53d2db -r f7c9cd726298 src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 @@ -855,12 +855,11 @@ ngx_quic_keys_cleanup(ngx_quic_keys_t *k static ngx_int_t ngx_quic_create_packet(ngx_quic_header_t *pkt, ngx_str_t *res) { - u_char *pnp, *sample; - ngx_str_t ad, out; - ngx_uint_t i; - ngx_quic_secret_t *secret; - ngx_quic_ciphers_t ciphers; - u_char nonce[NGX_QUIC_IV_LEN], mask[NGX_QUIC_HP_LEN]; + u_char *pnp, *sample; + ngx_str_t ad, out; + ngx_uint_t i; + ngx_quic_secret_t *secret; + u_char nonce[NGX_QUIC_IV_LEN], mask[NGX_QUIC_HP_LEN]; ad.data = res->data; ad.len = ngx_quic_create_header(pkt, ad.data, &pnp); @@ -873,11 +872,6 @@ ngx_quic_create_packet(ngx_quic_header_t "quic ad len:%uz %xV", ad.len, &ad); #endif - if (ngx_quic_ciphers(pkt->keys->cipher, &ciphers, pkt->level) == NGX_ERROR) - { - return NGX_ERROR; - } - secret = &pkt->keys->secrets[pkt->level].server; ngx_memcpy(nonce, secret->iv.data, secret->iv.len); @@ -1081,20 +1075,14 @@ ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn) { - u_char *p, *sample; - size_t len; - uint64_t pn, lpn; - ngx_int_t pnl; - ngx_str_t in, ad; - ngx_uint_t key_phase; - ngx_quic_secret_t *secret; - ngx_quic_ciphers_t ciphers; - uint8_t nonce[NGX_QUIC_IV_LEN], mask[NGX_QUIC_HP_LEN]; - - if (ngx_quic_ciphers(pkt->keys->cipher, &ciphers, pkt->level) == NGX_ERROR) - { - return NGX_ERROR; - } + u_char *p, *sample; + size_t len; + uint64_t pn, lpn; + ngx_int_t pnl; + ngx_str_t in, ad; + ngx_uint_t key_phase; + ngx_quic_secret_t *secret; + uint8_t nonce[NGX_QUIC_IV_LEN], mask[NGX_QUIC_HP_LEN]; secret = &pkt->keys->secrets[pkt->level].client; From pluknet at nginx.com Fri Oct 20 14:42:37 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:37 +0000 Subject: [nginx] QUIC: 
simplified ngx_quic_ciphers() API. Message-ID: details: https://hg.nginx.org/nginx/rev/8dacf87e4007 branches: changeset: 9176:8dacf87e4007 user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: simplified ngx_quic_ciphers() API. After conversion to reusable crypto ctx, now there's enough caller context to remove the "level" argument from ngx_quic_ciphers(). diffstat: src/event/quic/ngx_event_quic_openssl_compat.c | 2 +- src/event/quic/ngx_event_quic_protection.c | 19 +++++++------------ src/event/quic/ngx_event_quic_protection.h | 3 +-- 3 files changed, 9 insertions(+), 15 deletions(-) diffs (92 lines): diff -r f7c9cd726298 -r 8dacf87e4007 src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 @@ -238,7 +238,7 @@ ngx_quic_compat_set_encryption_secret(ng keys->cipher = SSL_CIPHER_get_id(cipher); - key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); + key_len = ngx_quic_ciphers(keys->cipher, &ciphers); if (key_len == NGX_ERROR) { ngx_ssl_error(NGX_LOG_INFO, c->log, 0, "unexpected cipher"); diff -r f7c9cd726298 -r 8dacf87e4007 src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 @@ -15,6 +15,8 @@ #define NGX_QUIC_AES_128_KEY_LEN 16 +#define NGX_QUIC_INITIAL_CIPHER TLS1_3_CK_AES_128_GCM_SHA256 + static ngx_int_t ngx_hkdf_expand(u_char *out_key, size_t out_len, const EVP_MD *digest, const u_char *prk, size_t prk_len, @@ -46,15 +48,10 @@ static ngx_int_t ngx_quic_create_retry_p ngx_int_t -ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, - enum ssl_encryption_level_t level) +ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers) { ngx_int_t len; - if (level == ssl_encryption_initial) { - id = 
TLS1_3_CK_AES_128_GCM_SHA256; - } - switch (id) { case TLS1_3_CK_AES_128_GCM_SHA256: @@ -188,7 +185,7 @@ ngx_quic_keys_set_initial_secret(ngx_qui } } - if (ngx_quic_ciphers(0, &ciphers, ssl_encryption_initial) == NGX_ERROR) { + if (ngx_quic_ciphers(NGX_QUIC_INITIAL_CIPHER, &ciphers) == NGX_ERROR) { return NGX_ERROR; } @@ -664,7 +661,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ keys->cipher = SSL_CIPHER_get_id(cipher); - key_len = ngx_quic_ciphers(keys->cipher, &ciphers, level); + key_len = ngx_quic_ciphers(keys->cipher, &ciphers); if (key_len == NGX_ERROR) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "unexpected cipher"); @@ -780,9 +777,7 @@ ngx_quic_keys_update(ngx_event_t *ev) c->log->action = "updating keys"; - if (ngx_quic_ciphers(keys->cipher, &ciphers, ssl_encryption_application) - == NGX_ERROR) - { + if (ngx_quic_ciphers(keys->cipher, &ciphers) == NGX_ERROR) { goto failed; } @@ -927,7 +922,7 @@ ngx_quic_create_retry_packet(ngx_quic_he "quic retry itag len:%uz %xV", ad.len, &ad); #endif - if (ngx_quic_ciphers(0, &ciphers, pkt->level) == NGX_ERROR) { + if (ngx_quic_ciphers(NGX_QUIC_INITIAL_CIPHER, &ciphers) == NGX_ERROR) { return NGX_ERROR; } diff -r f7c9cd726298 -r 8dacf87e4007 src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 @@ -108,8 +108,7 @@ void ngx_quic_keys_cleanup(ngx_quic_keys ngx_int_t ngx_quic_encrypt(ngx_quic_header_t *pkt, ngx_str_t *res); ngx_int_t ngx_quic_decrypt(ngx_quic_header_t *pkt, uint64_t *largest_pn); void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); -ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers, - enum ssl_encryption_level_t level); +ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers); ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, ngx_int_t enc, ngx_log_t *log); ngx_int_t 
ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, From pluknet at nginx.com Fri Oct 20 14:42:40 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:40 +0000 Subject: [nginx] QUIC: removed key field from ngx_quic_secret_t. Message-ID: details: https://hg.nginx.org/nginx/rev/22d110af473c branches: changeset: 9177:22d110af473c user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: removed key field from ngx_quic_secret_t. It is made local as it is only needed now when creating crypto context. BoringSSL lacks EVP interface for ChaCha20, providing instead a function for one-shot encryption, thus hp is still preserved. Based on a patch by Roman Arutyunyan. diffstat: src/event/quic/ngx_event_quic_openssl_compat.c | 10 ++- src/event/quic/ngx_event_quic_protection.c | 63 ++++++++++++++++--------- src/event/quic/ngx_event_quic_protection.h | 3 +- 3 files changed, 47 insertions(+), 29 deletions(-) diffs (272 lines): diff -r 8dacf87e4007 -r 22d110af473c src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 @@ -229,6 +229,7 @@ ngx_quic_compat_set_encryption_secret(ng ngx_int_t key_len; ngx_str_t secret_str; ngx_uint_t i; + ngx_quic_md_t key; ngx_quic_hkdf_t seq[2]; ngx_quic_secret_t *peer_secret; ngx_quic_ciphers_t ciphers; @@ -254,13 +255,14 @@ ngx_quic_compat_set_encryption_secret(ng peer_secret->secret.len = secret_len; ngx_memcpy(peer_secret->secret.data, secret, secret_len); - peer_secret->key.len = key_len; + key.len = key_len; + peer_secret->iv.len = NGX_QUIC_IV_LEN; secret_str.len = secret_len; secret_str.data = (u_char *) secret; - ngx_quic_hkdf_set(&seq[0], "tls13 key", &peer_secret->key, &secret_str); + ngx_quic_hkdf_set(&seq[0], "tls13 key", &key, &secret_str); ngx_quic_hkdf_set(&seq[1], "tls13 iv", &peer_secret->iv, &secret_str); for 
(i = 0; i < (sizeof(seq) / sizeof(seq[0])); i++) { @@ -284,7 +286,9 @@ ngx_quic_compat_set_encryption_secret(ng cln->data = peer_secret; } - if (ngx_quic_crypto_init(ciphers.c, peer_secret, 1, c->log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, peer_secret, &key, 1, c->log) + == NGX_ERROR) + { return NGX_ERROR; } diff -r 8dacf87e4007 -r 22d110af473c src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 @@ -117,6 +117,7 @@ ngx_quic_keys_set_initial_secret(ngx_qui ngx_str_t iss; ngx_uint_t i; const EVP_MD *digest; + ngx_quic_md_t client_key, server_key; ngx_quic_hkdf_t seq[8]; ngx_quic_secret_t *client, *server; ngx_quic_ciphers_t ciphers; @@ -160,8 +161,8 @@ ngx_quic_keys_set_initial_secret(ngx_qui client->secret.len = SHA256_DIGEST_LENGTH; server->secret.len = SHA256_DIGEST_LENGTH; - client->key.len = NGX_QUIC_AES_128_KEY_LEN; - server->key.len = NGX_QUIC_AES_128_KEY_LEN; + client_key.len = NGX_QUIC_AES_128_KEY_LEN; + server_key.len = NGX_QUIC_AES_128_KEY_LEN; client->hp.len = NGX_QUIC_AES_128_KEY_LEN; server->hp.len = NGX_QUIC_AES_128_KEY_LEN; @@ -171,11 +172,11 @@ ngx_quic_keys_set_initial_secret(ngx_qui /* labels per RFC 9001, 5.1. 
Packet Protection Keys */ ngx_quic_hkdf_set(&seq[0], "tls13 client in", &client->secret, &iss); - ngx_quic_hkdf_set(&seq[1], "tls13 quic key", &client->key, &client->secret); + ngx_quic_hkdf_set(&seq[1], "tls13 quic key", &client_key, &client->secret); ngx_quic_hkdf_set(&seq[2], "tls13 quic iv", &client->iv, &client->secret); ngx_quic_hkdf_set(&seq[3], "tls13 quic hp", &client->hp, &client->secret); ngx_quic_hkdf_set(&seq[4], "tls13 server in", &server->secret, &iss); - ngx_quic_hkdf_set(&seq[5], "tls13 quic key", &server->key, &server->secret); + ngx_quic_hkdf_set(&seq[5], "tls13 quic key", &server_key, &server->secret); ngx_quic_hkdf_set(&seq[6], "tls13 quic iv", &server->iv, &server->secret); ngx_quic_hkdf_set(&seq[7], "tls13 quic hp", &server->hp, &server->secret); @@ -189,11 +190,15 @@ ngx_quic_keys_set_initial_secret(ngx_qui return NGX_ERROR; } - if (ngx_quic_crypto_init(ciphers.c, client, 0, log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, client, &client_key, 0, log) + == NGX_ERROR) + { return NGX_ERROR; } - if (ngx_quic_crypto_init(ciphers.c, server, 1, log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, server, &server_key, 1, log) + == NGX_ERROR) + { goto failed; } @@ -376,13 +381,13 @@ failed: ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, ngx_quic_secret_t *s, - ngx_int_t enc, ngx_log_t *log) + ngx_quic_md_t *key, ngx_int_t enc, ngx_log_t *log) { #ifdef OPENSSL_IS_BORINGSSL EVP_AEAD_CTX *ctx; - ctx = EVP_AEAD_CTX_new(cipher, s->key.data, s->key.len, + ctx = EVP_AEAD_CTX_new(cipher, key->data, key->len, EVP_AEAD_DEFAULT_TAG_LENGTH); if (ctx == NULL) { ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_AEAD_CTX_new() failed"); @@ -423,7 +428,7 @@ ngx_quic_crypto_init(const ngx_quic_ciph return NGX_ERROR; } - if (EVP_CipherInit_ex(ctx, NULL, NULL, s->key.data, NULL, enc) != 1) { + if (EVP_CipherInit_ex(ctx, NULL, NULL, key->data, NULL, enc) != 1) { EVP_CIPHER_CTX_free(ctx); ngx_ssl_error(NGX_LOG_INFO, log, 0, "EVP_CipherInit_ex() 
failed"); return NGX_ERROR; @@ -652,6 +657,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ ngx_int_t key_len; ngx_str_t secret_str; ngx_uint_t i; + ngx_quic_md_t key; ngx_quic_hkdf_t seq[3]; ngx_quic_secret_t *peer_secret; ngx_quic_ciphers_t ciphers; @@ -677,15 +683,14 @@ ngx_quic_keys_set_encryption_secret(ngx_ peer_secret->secret.len = secret_len; ngx_memcpy(peer_secret->secret.data, secret, secret_len); - peer_secret->key.len = key_len; + key.len = key_len; peer_secret->iv.len = NGX_QUIC_IV_LEN; peer_secret->hp.len = key_len; secret_str.len = secret_len; secret_str.data = (u_char *) secret; - ngx_quic_hkdf_set(&seq[0], "tls13 quic key", - &peer_secret->key, &secret_str); + ngx_quic_hkdf_set(&seq[0], "tls13 quic key", &key, &secret_str); ngx_quic_hkdf_set(&seq[1], "tls13 quic iv", &peer_secret->iv, &secret_str); ngx_quic_hkdf_set(&seq[2], "tls13 quic hp", &peer_secret->hp, &secret_str); @@ -695,7 +700,7 @@ ngx_quic_keys_set_encryption_secret(ngx_ } } - if (ngx_quic_crypto_init(ciphers.c, peer_secret, is_write, log) + if (ngx_quic_crypto_init(ciphers.c, peer_secret, &key, is_write, log) == NGX_ERROR) { return NGX_ERROR; @@ -758,7 +763,9 @@ ngx_quic_keys_switch(ngx_connection_t *c void ngx_quic_keys_update(ngx_event_t *ev) { + ngx_int_t key_len; ngx_uint_t i; + ngx_quic_md_t client_key, server_key; ngx_quic_hkdf_t seq[6]; ngx_quic_keys_t *keys; ngx_connection_t *c; @@ -777,18 +784,21 @@ ngx_quic_keys_update(ngx_event_t *ev) c->log->action = "updating keys"; - if (ngx_quic_ciphers(keys->cipher, &ciphers) == NGX_ERROR) { + key_len = ngx_quic_ciphers(keys->cipher, &ciphers); + + if (key_len == NGX_ERROR) { goto failed; } + client_key.len = key_len; + server_key.len = key_len; + next->client.secret.len = current->client.secret.len; - next->client.key.len = current->client.key.len; next->client.iv.len = NGX_QUIC_IV_LEN; next->client.hp = current->client.hp; next->client.hp_ctx = current->client.hp_ctx; next->server.secret.len = current->server.secret.len; - 
next->server.key.len = current->server.key.len; next->server.iv.len = NGX_QUIC_IV_LEN; next->server.hp = current->server.hp; next->server.hp_ctx = current->server.hp_ctx; @@ -796,13 +806,13 @@ ngx_quic_keys_update(ngx_event_t *ev) ngx_quic_hkdf_set(&seq[0], "tls13 quic ku", &next->client.secret, ¤t->client.secret); ngx_quic_hkdf_set(&seq[1], "tls13 quic key", - &next->client.key, &next->client.secret); + &client_key, &next->client.secret); ngx_quic_hkdf_set(&seq[2], "tls13 quic iv", &next->client.iv, &next->client.secret); ngx_quic_hkdf_set(&seq[3], "tls13 quic ku", &next->server.secret, ¤t->server.secret); ngx_quic_hkdf_set(&seq[4], "tls13 quic key", - &next->server.key, &next->server.secret); + &server_key, &next->server.secret); ngx_quic_hkdf_set(&seq[5], "tls13 quic iv", &next->server.iv, &next->server.secret); @@ -812,12 +822,14 @@ ngx_quic_keys_update(ngx_event_t *ev) } } - if (ngx_quic_crypto_init(ciphers.c, &next->client, 0, c->log) == NGX_ERROR) + if (ngx_quic_crypto_init(ciphers.c, &next->client, &client_key, 0, c->log) + == NGX_ERROR) { goto failed; } - if (ngx_quic_crypto_init(ciphers.c, &next->server, 1, c->log) == NGX_ERROR) + if (ngx_quic_crypto_init(ciphers.c, &next->server, &server_key, 1, c->log) + == NGX_ERROR) { goto failed; } @@ -901,11 +913,12 @@ ngx_quic_create_retry_packet(ngx_quic_he { u_char *start; ngx_str_t ad, itag; + ngx_quic_md_t key; ngx_quic_secret_t secret; ngx_quic_ciphers_t ciphers; /* 5.8. 
Retry Packet Integrity */ - static u_char key[16] = + static u_char key_data[16] = "\xbe\x0c\x69\x0b\x9f\x66\x57\x5a\x1d\x76\x6b\x54\xe3\x68\xc8\x4e"; static u_char nonce[NGX_QUIC_IV_LEN] = "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"; @@ -926,11 +939,13 @@ ngx_quic_create_retry_packet(ngx_quic_he return NGX_ERROR; } - secret.key.len = sizeof(key); - ngx_memcpy(secret.key.data, key, sizeof(key)); + key.len = sizeof(key_data); + ngx_memcpy(key.data, key_data, sizeof(key_data)); secret.iv.len = NGX_QUIC_IV_LEN; - if (ngx_quic_crypto_init(ciphers.c, &secret, 1, pkt->log) == NGX_ERROR) { + if (ngx_quic_crypto_init(ciphers.c, &secret, &key, 1, pkt->log) + == NGX_ERROR) + { return NGX_ERROR; } diff -r 8dacf87e4007 -r 22d110af473c src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.h Fri Oct 20 18:05:07 2023 +0400 @@ -47,7 +47,6 @@ typedef struct { typedef struct { ngx_quic_md_t secret; - ngx_quic_md_t key; ngx_quic_iv_t iv; ngx_quic_md_t hp; ngx_quic_crypto_ctx_t *ctx; @@ -110,7 +109,7 @@ ngx_int_t ngx_quic_decrypt(ngx_quic_head void ngx_quic_compute_nonce(u_char *nonce, size_t len, uint64_t pn); ngx_int_t ngx_quic_ciphers(ngx_uint_t id, ngx_quic_ciphers_t *ciphers); ngx_int_t ngx_quic_crypto_init(const ngx_quic_cipher_t *cipher, - ngx_quic_secret_t *s, ngx_int_t enc, ngx_log_t *log); + ngx_quic_secret_t *s, ngx_quic_md_t *key, ngx_int_t enc, ngx_log_t *log); ngx_int_t ngx_quic_crypto_seal(ngx_quic_secret_t *s, ngx_str_t *out, u_char *nonce, ngx_str_t *in, ngx_str_t *ad, ngx_log_t *log); void ngx_quic_crypto_cleanup(ngx_quic_secret_t *s); From pluknet at nginx.com Fri Oct 20 14:42:43 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 20 Oct 2023 14:42:43 +0000 Subject: [nginx] QUIC: explicitly zero out unused keying material. 
Message-ID: details: https://hg.nginx.org/nginx/rev/b74f891053c7 branches: changeset: 9178:b74f891053c7 user: Sergey Kandaurov date: Fri Oct 20 18:05:07 2023 +0400 description: QUIC: explicitly zero out unused keying material. diffstat: src/event/quic/ngx_event_quic_openssl_compat.c | 13 ++++--------- src/event/quic/ngx_event_quic_protection.c | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+), 9 deletions(-) diffs (86 lines): diff -r 22d110af473c -r b74f891053c7 src/event/quic/ngx_event_quic_openssl_compat.c --- a/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_openssl_compat.c Fri Oct 20 18:05:07 2023 +0400 @@ -218,6 +218,8 @@ ngx_quic_compat_keylog_callback(const SS (void) ngx_quic_compat_set_encryption_secret(c, &com->keys, level, cipher, secret, n); } + + ngx_explicit_memzero(secret, n); } @@ -246,15 +248,6 @@ ngx_quic_compat_set_encryption_secret(ng return NGX_ERROR; } - if (sizeof(peer_secret->secret.data) < secret_len) { - ngx_log_error(NGX_LOG_ALERT, c->log, 0, - "unexpected secret len: %uz", secret_len); - return NGX_ERROR; - } - - peer_secret->secret.len = secret_len; - ngx_memcpy(peer_secret->secret.data, secret, secret_len); - key.len = key_len; peer_secret->iv.len = NGX_QUIC_IV_LEN; @@ -292,6 +285,8 @@ ngx_quic_compat_set_encryption_secret(ng return NGX_ERROR; } + ngx_explicit_memzero(key.data, key.len); + return NGX_OK; } diff -r 22d110af473c -r b74f891053c7 src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/event/quic/ngx_event_quic_protection.c Fri Oct 20 18:05:07 2023 +0400 @@ -710,6 +710,8 @@ ngx_quic_keys_set_encryption_secret(ngx_ return NGX_ERROR; } + ngx_explicit_memzero(key.data, key.len); + return NGX_OK; } @@ -740,6 +742,9 @@ ngx_quic_keys_discard(ngx_quic_keys_t *k ngx_quic_crypto_hp_cleanup(client); ngx_quic_crypto_hp_cleanup(server); + + ngx_explicit_memzero(client->secret.data, 
client->secret.len); + ngx_explicit_memzero(server->secret.data, server->secret.len); } @@ -834,6 +839,14 @@ ngx_quic_keys_update(ngx_event_t *ev) goto failed; } + ngx_explicit_memzero(current->client.secret.data, + current->client.secret.len); + ngx_explicit_memzero(current->server.secret.data, + current->server.secret.len); + + ngx_explicit_memzero(client_key.data, client_key.len); + ngx_explicit_memzero(server_key.data, server_key.len); + return; failed: @@ -856,6 +869,11 @@ ngx_quic_keys_cleanup(ngx_quic_keys_t *k ngx_quic_crypto_cleanup(&next->client); ngx_quic_crypto_cleanup(&next->server); + + ngx_explicit_memzero(next->client.secret.data, + next->client.secret.len); + ngx_explicit_memzero(next->server.secret.data, + next->server.secret.len); } From xeioex at nginx.com Fri Oct 20 15:46:17 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Fri, 20 Oct 2023 15:46:17 +0000 Subject: [njs] Modules: fixed delete() method of a shared dictionary. Message-ID: details: https://hg.nginx.org/njs/rev/d83c6616f2b1 branches: changeset: 2223:d83c6616f2b1 user: Dmitry Volyntsev date: Fri Oct 20 08:44:52 2023 -0700 description: Modules: fixed delete() method of a shared dictionary. This fixes #679 issue on Github. 
diffstat: nginx/ngx_js_shared_dict.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 3a7526c8694c -r d83c6616f2b1 nginx/ngx_js_shared_dict.c --- a/nginx/ngx_js_shared_dict.c Wed Oct 18 18:36:00 2023 -0700 +++ b/nginx/ngx_js_shared_dict.c Fri Oct 20 08:44:52 2023 -0700 @@ -1240,7 +1240,7 @@ ngx_js_dict_delete(njs_vm_t *vm, ngx_js_ ngx_time_t *tp; ngx_js_dict_node_t *node; - ngx_rwlock_rlock(&dict->sh->rwlock); + ngx_rwlock_wlock(&dict->sh->rwlock); node = ngx_js_dict_lookup(dict, key); From mdounin at mdounin.ru Fri Oct 20 19:05:11 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 20 Oct 2023 22:05:11 +0300 Subject: [PATCH] HTTP/2: fixed buffer management with HTTP/2 auto-detection In-Reply-To: <71BC1B29-32B5-4A61-8D80-EDCE0495D34A@nginx.com> References: <318c8ace6aa24506004b.1697808225@enoparse.local> <71BC1B29-32B5-4A61-8D80-EDCE0495D34A@nginx.com> Message-ID: Hello! On Fri, Oct 20, 2023 at 06:04:32PM +0400, Sergey Kandaurov wrote: > > On 20 Oct 2023, at 17:23, Sergey Kandaurov wrote: > > > > # HG changeset patch > > # User Sergey Kandaurov > > # Date 1697808142 -14400 > > # Fri Oct 20 17:22:22 2023 +0400 > > # Node ID 318c8ace6aa24506004bfbb7d52674f61a3716a5 > > # Parent 3038bd4d78169a5e8a2624d79cf76f45f0805ddc > > HTTP/2: fixed buffer management with HTTP/2 auto-detection. > > > > As part of normal HTTP/2 processing, incomplete frames are saved in the > > control state using a fixed size memcpy of NGX_HTTP_V2_STATE_BUFFER_SIZE. > > For this matter, two state buffers are reserved in the HTTP/2 recv buffer. > > > > As part of HTTP/2 auto-detection on plain TCP connections, initial data > > is first read into a buffer specified by the client_header_buffer_size > > directive that doesn't have state reservation. Previously, this made it > > possible to over-read the buffer as part of saving the state. > > > > The fix is to read the available buffer size rather than a fixed size. 
> > Although memcpy of a fixed size can produce a better optimized code, > > From my limited testing, replacing a fixed size with an available size > degrades "-O" optimized memcpy from SSE instructions over XMM registers > to simple MOVs. I don't think it matters compared to other costs within the loop. > > handling of incomplete frames isn't a common execution path, so it was > > sacrificed for the sake of simplicity of the fix. > > Another approach is to displace initial data into the recv buffer > for subsequent processing, which would require additional handling > in ngx_http_v2_init(). After some pondering I declined it due to > added complexity without a good reason. > > > > > diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c > > --- a/src/http/v2/ngx_http_v2.c > > +++ b/src/http/v2/ngx_http_v2.c > > @@ -386,13 +386,11 @@ ngx_http_v2_read_handler(ngx_event_t *re > > h2mcf = ngx_http_get_module_main_conf(h2c->http_connection->conf_ctx, > > ngx_http_v2_module); > > > > - available = h2mcf->recv_buffer_size - 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE; > > + available = h2mcf->recv_buffer_size - NGX_HTTP_V2_STATE_BUFFER_SIZE; > > > > do { > > p = h2mcf->recv_buffer; > > - > > - ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE); > > - end = p + h2c->state.buffer_used; > > + end = ngx_cpymem(p, h2c->state.buffer, h2c->state.buffer_used); > > > > n = c->recv(c, end, available); > > > > @@ -2592,7 +2590,7 @@ ngx_http_v2_state_save(ngx_http_v2_conne > > return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); > > } > > > > - ngx_memcpy(h2c->state.buffer, pos, NGX_HTTP_V2_STATE_BUFFER_SIZE); > > + ngx_memcpy(h2c->state.buffer, pos, size); > > > > h2c->state.buffer_used = size; > > h2c->state.handler = handler; > > diff --git a/src/http/v2/ngx_http_v2_module.c b/src/http/v2/ngx_http_v2_module.c > > --- a/src/http/v2/ngx_http_v2_module.c > > +++ b/src/http/v2/ngx_http_v2_module.c > > @@ -388,7 +388,7 @@ 
ngx_http_v2_recv_buffer_size(ngx_conf_t > > { > > size_t *sp = data; > > > > - if (*sp <= 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE) { > > + if (*sp <= NGX_HTTP_V2_STATE_BUFFER_SIZE) { > > return "value is too small"; > > } > > Looks good. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Sat Oct 21 14:54:42 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Sat, 21 Oct 2023 14:54:42 +0000 Subject: [nginx] HTTP/2: fixed buffer management with HTTP/2 auto-detection. Message-ID: details: https://hg.nginx.org/nginx/rev/ea1f29c2010c branches: changeset: 9179:ea1f29c2010c user: Sergey Kandaurov date: Sat Oct 21 18:48:24 2023 +0400 description: HTTP/2: fixed buffer management with HTTP/2 auto-detection. As part of normal HTTP/2 processing, incomplete frames are saved in the control state using a fixed size memcpy of NGX_HTTP_V2_STATE_BUFFER_SIZE. For this matter, two state buffers are reserved in the HTTP/2 recv buffer. As part of HTTP/2 auto-detection on plain TCP connections, initial data is first read into a buffer specified by the client_header_buffer_size directive that doesn't have state reservation. Previously, this made it possible to over-read the buffer as part of saving the state. The fix is to read the available buffer size rather than a fixed size. Although memcpy of a fixed size can produce a better optimized code, handling of incomplete frames isn't a common execution path, so it was sacrificed for the sake of simplicity of the fix. 
diffstat: src/http/v2/ngx_http_v2.c | 8 +++----- src/http/v2/ngx_http_v2_module.c | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diffs (40 lines): diff -r b74f891053c7 -r ea1f29c2010c src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/http/v2/ngx_http_v2.c Sat Oct 21 18:48:24 2023 +0400 @@ -386,13 +386,11 @@ ngx_http_v2_read_handler(ngx_event_t *re h2mcf = ngx_http_get_module_main_conf(h2c->http_connection->conf_ctx, ngx_http_v2_module); - available = h2mcf->recv_buffer_size - 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE; + available = h2mcf->recv_buffer_size - NGX_HTTP_V2_STATE_BUFFER_SIZE; do { p = h2mcf->recv_buffer; - - ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE); - end = p + h2c->state.buffer_used; + end = ngx_cpymem(p, h2c->state.buffer, h2c->state.buffer_used); n = c->recv(c, end, available); @@ -2592,7 +2590,7 @@ ngx_http_v2_state_save(ngx_http_v2_conne return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR); } - ngx_memcpy(h2c->state.buffer, pos, NGX_HTTP_V2_STATE_BUFFER_SIZE); + ngx_memcpy(h2c->state.buffer, pos, size); h2c->state.buffer_used = size; h2c->state.handler = handler; diff -r b74f891053c7 -r ea1f29c2010c src/http/v2/ngx_http_v2_module.c --- a/src/http/v2/ngx_http_v2_module.c Fri Oct 20 18:05:07 2023 +0400 +++ b/src/http/v2/ngx_http_v2_module.c Sat Oct 21 18:48:24 2023 +0400 @@ -388,7 +388,7 @@ ngx_http_v2_recv_buffer_size(ngx_conf_t { size_t *sp = data; - if (*sp <= 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE) { + if (*sp <= NGX_HTTP_V2_STATE_BUFFER_SIZE) { return "value is too small"; } From pluknet at nginx.com Mon Oct 23 22:37:58 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 24 Oct 2023 02:37:58 +0400 Subject: [PATCH 5 of 8] QUIC: reusing crypto contexts for packet protection In-Reply-To: References: <28f7491bc79771f9cfa8.1694099637@enoparse.local> <20230919135343.gyryhjjh5igd6xzl@N00W24XTQX> Message-ID: <4B44A74D-974D-4BD0-B160-70BBEF15A5BF@nginx.com> > On 
13 Oct 2023, at 19:13, Sergey Kandaurov wrote: > > [..] > > I was pondering on reusing a static crypto context > to make generating Retry packets more lightweight. > Known fixed values for key and nonce make it possible to create > a single context and reuse it over all Retry packets. > > Note that the context memory is kept for reuse after the first > retry, it will be freed eventually on process exit, > the operating system will take care of it. > Not sure though this is a good solution. > > diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c > --- a/src/event/quic/ngx_event_quic_protection.c > +++ b/src/event/quic/ngx_event_quic_protection.c > @@ -872,7 +872,6 @@ ngx_quic_create_retry_packet(ngx_quic_he > { > u_char *start; > ngx_str_t ad, itag; > - ngx_quic_secret_t secret; > ngx_quic_ciphers_t ciphers; > > /* 5.8. Retry Packet Integrity */ > @@ -882,6 +881,8 @@ ngx_quic_create_retry_packet(ngx_quic_he > "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"; > static ngx_str_t in = ngx_string(""); > > + static ngx_quic_secret_t secret; > + > ad.data = res->data; > ad.len = ngx_quic_create_retry_itag(pkt, ad.data, &start); > > @@ -893,6 +894,10 @@ ngx_quic_create_retry_packet(ngx_quic_he > "quic retry itag len:%uz %xV", ad.len, &ad); > #endif > > + if (secret.ctx) { > + goto seal; > + } > + > if (ngx_quic_ciphers(0, &ciphers, pkt->level) == NGX_ERROR) { > return NGX_ERROR; > } > @@ -905,14 +910,14 @@ ngx_quic_create_retry_packet(ngx_quic_he > return NGX_ERROR; > } > > +seal: > + > if (ngx_quic_crypto_seal(&secret, &itag, nonce, &in, &ad, pkt->log) > != NGX_OK) > { > return NGX_ERROR; > } > > - ngx_quic_crypto_cleanup(&secret); > - > res->len = itag.data + itag.len - start; > res->data = start; > > Another approach is to create a single context in the master process, used to cleanup the context in explicit manner on process shutdown. Note that ngx_quic_conf_t is already taken, had to pick a new one. 
# HG changeset patch # User Sergey Kandaurov # Date 1698099112 -14400 # Tue Oct 24 02:11:52 2023 +0400 # Node ID acdb54a32cdebf5cd987cf343b5e836e12d50967 # Parent af5ab04c7037f33960efc595cd76b4f4a0bf4a86 QUIC: reusing crypto context for Retry packets. diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -30,10 +30,13 @@ static ngx_int_t ngx_quic_handle_frames( static void ngx_quic_push_handler(ngx_event_t *ev); +static void *ngx_quic_create_conf(ngx_cycle_t *cycle); +static void ngx_quic_cleanup(void *data); + static ngx_core_module_t ngx_quic_module_ctx = { ngx_string("quic"), - NULL, + ngx_quic_create_conf, NULL }; @@ -1454,3 +1457,52 @@ ngx_quic_shutdown_quic(ngx_connection_t ngx_quic_finalize_connection(c, qc->shutdown_code, qc->shutdown_reason); } } + + +static void * +ngx_quic_create_conf(ngx_cycle_t *cycle) +{ + ngx_quic_ciphers_t ciphers; + ngx_pool_cleanup_t *cln; + ngx_quic_module_conf_t *qcf; + + /* RFC 9001, 5.8. 
Retry Packet Integrity */ + static ngx_quic_md_t key = { + NGX_QUIC_AES_128_KEY_LEN, + "\xbe\x0c\x69\x0b\x9f\x66\x57\x5a\x1d\x76\x6b\x54\xe3\x68\xc8\x4e" + }; + + qcf = ngx_pcalloc(cycle->pool, sizeof(ngx_quic_module_conf_t)); + if (qcf == NULL) { + return NULL; + } + + if (ngx_quic_ciphers(NGX_QUIC_INITIAL_CIPHER, &ciphers) == NGX_ERROR) { + return NULL; + } + + cln = ngx_pool_cleanup_add(cycle->pool, 0); + if (cln == NULL) { + return NULL; + } + + cln->handler = ngx_quic_cleanup; + cln->data = qcf; + + if (ngx_quic_crypto_init(ciphers.c, &qcf->retry, &key, 1, cycle->log) + == NGX_ERROR) + { + return NULL; + } + + return qcf; +} + + +static void +ngx_quic_cleanup(void *data) +{ + ngx_quic_module_conf_t *qcf = data; + + ngx_quic_crypto_cleanup(&qcf->retry); +} diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h --- a/src/event/quic/ngx_event_quic_connection.h +++ b/src/event/quic/ngx_event_quic_connection.h @@ -41,6 +41,11 @@ typedef struct ngx_quic_keys_s ng #include +typedef struct { + ngx_quic_secret_t retry; +} ngx_quic_module_conf_t; + + /* RFC 9002, 6.2.2. Handshakes and New Paths: kInitialRtt */ #define NGX_QUIC_INITIAL_RTT 333 /* ms */ @@ -293,4 +298,8 @@ void ngx_quic_connstate_dbg(ngx_connecti #define ngx_quic_connstate_dbg(c) #endif + +extern ngx_module_t ngx_quic_module; + + #endif /* _NGX_EVENT_QUIC_CONNECTION_H_INCLUDED_ */ diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -13,10 +13,6 @@ /* RFC 9001, 5.4.1. 
Header Protection Application: 5-byte mask */ #define NGX_QUIC_HP_LEN 5 -#define NGX_QUIC_AES_128_KEY_LEN 16 - -#define NGX_QUIC_INITIAL_CIPHER TLS1_3_CK_AES_128_GCM_SHA256 - static ngx_int_t ngx_hkdf_expand(u_char *out_key, size_t out_len, const EVP_MD *digest, const u_char *prk, size_t prk_len, @@ -929,15 +925,11 @@ ngx_quic_create_packet(ngx_quic_header_t static ngx_int_t ngx_quic_create_retry_packet(ngx_quic_header_t *pkt, ngx_str_t *res) { - u_char *start; - ngx_str_t ad, itag; - ngx_quic_md_t key; - ngx_quic_secret_t secret; - ngx_quic_ciphers_t ciphers; + u_char *start; + ngx_str_t ad, itag; + ngx_quic_module_conf_t *qcf; - /* 5.8. Retry Packet Integrity */ - static u_char key_data[16] = - "\xbe\x0c\x69\x0b\x9f\x66\x57\x5a\x1d\x76\x6b\x54\xe3\x68\xc8\x4e"; + /* RFC 9001, 5.8. Retry Packet Integrity */ static u_char nonce[NGX_QUIC_IV_LEN] = "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"; static ngx_str_t in = ngx_string(""); @@ -953,28 +945,15 @@ ngx_quic_create_retry_packet(ngx_quic_he "quic retry itag len:%uz %xV", ad.len, &ad); #endif - if (ngx_quic_ciphers(NGX_QUIC_INITIAL_CIPHER, &ciphers) == NGX_ERROR) { - return NGX_ERROR; - } + qcf = (ngx_quic_module_conf_t *) ngx_get_conf(ngx_cycle->conf_ctx, + ngx_quic_module); - key.len = sizeof(key_data); - ngx_memcpy(key.data, key_data, sizeof(key_data)); - - if (ngx_quic_crypto_init(ciphers.c, &secret, &key, 1, pkt->log) - == NGX_ERROR) + if (ngx_quic_crypto_seal(&qcf->retry, &itag, nonce, &in, &ad, pkt->log) + != NGX_OK) { return NGX_ERROR; } - if (ngx_quic_crypto_seal(&secret, &itag, nonce, &in, &ad, pkt->log) - != NGX_OK) - { - ngx_quic_crypto_cleanup(&secret); - return NGX_ERROR; - } - - ngx_quic_crypto_cleanup(&secret); - res->len = itag.data + itag.len - start; res->data = start; diff --git a/src/event/quic/ngx_event_quic_protection.h b/src/event/quic/ngx_event_quic_protection.h --- a/src/event/quic/ngx_event_quic_protection.h +++ b/src/event/quic/ngx_event_quic_protection.h @@ -23,6 +23,10 @@ /* 
largest hash used in TLS is SHA-384 */ #define NGX_QUIC_MAX_MD_SIZE 48 +#define NGX_QUIC_AES_128_KEY_LEN 16 + +#define NGX_QUIC_INITIAL_CIPHER TLS1_3_CK_AES_128_GCM_SHA256 + #ifdef OPENSSL_IS_BORINGSSL #define ngx_quic_cipher_t EVP_AEAD -- Sergey Kandaurov From pl080516 at gmail.com Tue Oct 24 04:15:34 2023 From: pl080516 at gmail.com (Yu Zhu) Date: Tue, 24 Oct 2023 12:15:34 +0800 Subject: refactor ngx_quic_send_ctx_t? Message-ID: Hi, nginx quic splits 4-tuple into ngx_quic_socket_t for receiving packets and ngx_quic_path_t for sending packets. nginx also has struct ngx_quic_send_ctx_t for parsing received packets and assembling the packet to be sent. So, shall we split ngx_quic_send_ctx_t based on rx and tx? ngx_quic_send_ctx_t enum ssl_encryption_level_t level; ngx_quic_buffer_t crypto; uint64_t crypto_sent; uint64_t largest_ack; /* received from peer */ uint64_t pnum; /* to be sent */ ngx_queue_t frames; /* generated frames */ ngx_queue_t sending; /* frames assigned to pkt */ ngx_queue_t sent; /* frames waiting ACK */ ngx_quic_recv_ctx_t enum ssl_encryption_level_t level; uint64_t largest_pn; /* received from peer */ uint64_t pending_ack; /* non sent ack-eliciting */ uint64_t largest_range; uint64_t first_range; ngx_msec_t largest_received; ngx_msec_t ack_delay_start; ngx_uint_t nranges; ngx_quic_ack_range_t ranges[NGX_QUIC_MAX_RANGES]; ngx_uint_t send_ack; Best regards Yu Zhu -------------- next part -------------- An HTML attachment was scrubbed... URL: From xeioex at nginx.com Tue Oct 24 04:39:04 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 24 Oct 2023 04:39:04 +0000 Subject: [njs] Parser: fixed return statement parsing with invalid expression. Message-ID: details: https://hg.nginx.org/njs/rev/c081cc5377a8 branches: changeset: 2224:c081cc5377a8 user: Vadim Zhestikov date: Mon Oct 23 21:19:03 2023 -0700 description: Parser: fixed return statement parsing with invalid expression. 
diffstat: src/njs_parser.c | 15 +++++++++------ src/test/njs_unit_test.c | 9 +++++++++ 2 files changed, 18 insertions(+), 6 deletions(-) diffs (51 lines): diff -r d83c6616f2b1 -r c081cc5377a8 src/njs_parser.c --- a/src/njs_parser.c Fri Oct 20 08:44:52 2023 -0700 +++ b/src/njs_parser.c Mon Oct 23 21:19:03 2023 -0700 @@ -6347,10 +6347,12 @@ njs_parser_return_statement(njs_parser_t parser->node = NULL; - njs_parser_next(parser, njs_parser_expression); - - return njs_parser_after(parser, current, node, 0, - njs_parser_return_statement_after); + if (token->type != NJS_TOKEN_CLOSE_BRACE) { + njs_parser_next(parser, njs_parser_expression); + + return njs_parser_after(parser, current, node, 0, + njs_parser_return_statement_after); + } } parser->node = node; @@ -6364,8 +6366,9 @@ njs_parser_return_statement_after(njs_pa njs_lexer_token_t *token, njs_queue_link_t *current) { if (parser->ret != NJS_OK) { - parser->node = parser->target; - return njs_parser_stack_pop(parser); + njs_parser_syntax_error(parser, "Unexpected token \"%V\"", + &token->text); + return NJS_DONE; } if (njs_parser_expect_semicolon(parser, token) != NJS_OK) { diff -r d83c6616f2b1 -r c081cc5377a8 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Fri Oct 20 08:44:52 2023 -0700 +++ b/src/test/njs_unit_test.c Mon Oct 23 21:19:03 2023 -0700 @@ -10093,6 +10093,15 @@ static njs_unit_test_t njs_test[] = { njs_str("\n{\nreturn;\n}"), njs_str("SyntaxError: Illegal return statement in 3") }, + { njs_str("function f () {return a +}"), + njs_str("SyntaxError: Unexpected token \"}\" in 1") }, + + { njs_str("`${function(){return n=>}}`"), + njs_str("SyntaxError: Unexpected token \"}\" in 1") }, + + { njs_str("(function(){return a +})"), + njs_str("SyntaxError: Unexpected token \"}\" in 1") }, + { njs_str("if (1) function f(){}"), njs_str("SyntaxError: Functions can only be declared at top level or inside a block in 1") }, From xeioex at nginx.com Tue Oct 24 04:43:13 2023 From: xeioex at nginx.com 
(=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 24 Oct 2023 04:43:13 +0000 Subject: [njs] Version 0.8.2. Message-ID: details: https://hg.nginx.org/njs/rev/45f81882c780 branches: changeset: 2225:45f81882c780 user: Dmitry Volyntsev date: Mon Oct 23 21:42:29 2023 -0700 description: Version 0.8.2. diffstat: CHANGES | 27 +++++++++++++++++++++++++++ 1 files changed, 27 insertions(+), 0 deletions(-) diffs (34 lines): diff -r c081cc5377a8 -r 45f81882c780 CHANGES --- a/CHANGES Mon Oct 23 21:19:03 2023 -0700 +++ b/CHANGES Mon Oct 23 21:42:29 2023 -0700 @@ -1,3 +1,30 @@ +Changes with njs 0.8.2 24 Oct 2023 + + nginx modules: + + *) Feature: introduced console object. The following methods + were introduced: error(), info(), log(), time(), timeEnd(), + warn(). + + *) Bugfix: fixed HEAD response handling with large Content-Length + in fetch API. + + *) Bugfix: fixed items() method for a shared dictionary. + + *) Bugfix: fixed delete() method for a shared dictionary. + + Core: + + *) Feature: extended "fs" module. Added existsSync(). + + *) Bugfix: fixed "xml" module. Fixed broken XML exception handling + in parse() method. + + *) Bugfix: fixed RegExp.prototype.exec() with global regexp and + unicode input. + + *) Bugfix: fixed return statement parsing with invalid expression. 
+ Changes with njs 0.8.1 12 Sep 2023 nginx modules: From xeioex at nginx.com Tue Oct 24 04:43:15 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Tue, 24 Oct 2023 04:43:15 +0000 Subject: [njs] Added tag 0.8.2 for changeset 45f81882c780 Message-ID: details: https://hg.nginx.org/njs/rev/096576dd8cb0 branches: changeset: 2226:096576dd8cb0 user: Dmitry Volyntsev date: Mon Oct 23 21:42:43 2023 -0700 description: Added tag 0.8.2 for changeset 45f81882c780 diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r 45f81882c780 -r 096576dd8cb0 .hgtags --- a/.hgtags Mon Oct 23 21:42:29 2023 -0700 +++ b/.hgtags Mon Oct 23 21:42:43 2023 -0700 @@ -64,3 +64,4 @@ 26dd3824b9f343e2768609c1b673f788e3a5e154 a1faa64d4972020413fd168e2b542bcc150819c0 0.7.12 0ed1952588ab1e0e1c18425fe7923b2b76f38a65 0.8.0 a52b49f9afcf410597dc6657ad39ae3dbbfeec56 0.8.1 +45f81882c780a12e56be519cd3106c4fe5567a64 0.8.2 From mdounin at mdounin.ru Tue Oct 24 12:34:01 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Oct 2023 15:34:01 +0300 Subject: nginx 1.25.3 changes draft Message-ID: Hello! Changes with nginx 1.25.3 24 Oct 2023 *) Change: improved detection of misbehaving clients when using HTTP/2. *) Feature: startup speedup when using a large number of locations. Thanks to Yusuke Nojima. *) Bugfix: a segmentation fault might occur in a worker process when using HTTP/2 without SSL; the bug had appeared in 1.25.1. *) Bugfix: the "Status" backend response header line with an empty reason phrase was handled incorrectly. *) Bugfix: memory leak during reconfiguration when using the PCRE2 library. Thanks to ZhenZhong Wu. *) Bugfixes and improvements in HTTP/3. Изменения в nginx 1.25.3 24.10.2023 *) Изменение: улучшено детектирование некорректного поведения клиентов при использовании HTTP/2. *) Добавление: уменьшение времени запуска при использовании большого количества location'ов. Спасибо Yusuke Nojima. 
*) Исправление: при использовании HTTP/2 без SSL в рабочем процессе мог произойти segmentation fault; ошибка появилась в 1.25.1. *) Исправление: строка "Status" в заголовке ответа бэкенда с пустой поясняющей фразой обрабатывалась некорректно. *) Исправление: утечки памяти во время переконфигурации при использовании библиотеки PCRE2. Спасибо ZhenZhong Wu. *) Исправления и улучшения в HTTP/3. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Tue Oct 24 13:07:46 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 24 Oct 2023 17:07:46 +0400 Subject: nginx 1.25.3 changes draft In-Reply-To: References: Message-ID: > On 24 Oct 2023, at 16:34, Maxim Dounin wrote: > > Hello! > > > Changes with nginx 1.25.3 24 Oct 2023 > > *) Change: improved detection of misbehaving clients when using HTTP/2. > > *) Feature: startup speedup when using a large number of locations. > Thanks to Yusuke Nojima. > > *) Bugfix: a segmentation fault might occur in a worker process when > using HTTP/2 without SSL; the bug had appeared in 1.25.1. > > *) Bugfix: the "Status" backend response header line with an empty > reason phrase was handled incorrectly. > > *) Bugfix: memory leak during reconfiguration when using the PCRE2 > library. > Thanks to ZhenZhong Wu. > > *) Bugfixes and improvements in HTTP/3. > > > Изменения в nginx 1.25.3 24.10.2023 > > *) Изменение: улучшено детектирование некорректного поведения клиентов > при использовании HTTP/2. > > *) Добавление: уменьшение времени запуска при использовании большого > количества location'ов. > Спасибо Yusuke Nojima. > > *) Исправление: при использовании HTTP/2 без SSL в рабочем процессе мог > произойти segmentation fault; ошибка появилась в 1.25.1. > > *) Исправление: строка "Status" в заголовке ответа бэкенда с пустой > поясняющей фразой обрабатывалась некорректно. > > *) Исправление: утечки памяти во время переконфигурации при > использовании библиотеки PCRE2. > Спасибо ZhenZhong Wu. > > *) Исправления и улучшения в HTTP/3. 
> > Looks good. -- Sergey Kandaurov From mdounin at mdounin.ru Tue Oct 24 14:03:33 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Oct 2023 17:03:33 +0300 Subject: nginx 1.25.3 changes draft In-Reply-To: References: Message-ID: Hello! On Tue, Oct 24, 2023 at 05:07:46PM +0400, Sergey Kandaurov wrote: > > > On 24 Oct 2023, at 16:34, Maxim Dounin wrote: > > > > Hello! > > > > > > Changes with nginx 1.25.3 24 Oct 2023 > > > > *) Change: improved detection of misbehaving clients when using HTTP/2. > > > > *) Feature: startup speedup when using a large number of locations. > > Thanks to Yusuke Nojima. > > > > *) Bugfix: a segmentation fault might occur in a worker process when > > using HTTP/2 without SSL; the bug had appeared in 1.25.1. > > > > *) Bugfix: the "Status" backend response header line with an empty > > reason phrase was handled incorrectly. > > > > *) Bugfix: memory leak during reconfiguration when using the PCRE2 > > library. > > Thanks to ZhenZhong Wu. > > > > *) Bugfixes and improvements in HTTP/3. > > > > > > Изменения в nginx 1.25.3 24.10.2023 > > > > *) Изменение: улучшено детектирование некорректного поведения клиентов > > при использовании HTTP/2. > > > > *) Добавление: уменьшение времени запуска при использовании большого > > количества location'ов. > > Спасибо Yusuke Nojima. > > > > *) Исправление: при использовании HTTP/2 без SSL в рабочем процессе мог > > произойти segmentation fault; ошибка появилась в 1.25.1. > > > > *) Исправление: строка "Status" в заголовке ответа бэкенда с пустой > > поясняющей фразой обрабатывалась некорректно. > > > > *) Исправление: утечки памяти во время переконфигурации при > > использовании библиотеки PCRE2. > > Спасибо ZhenZhong Wu. > > > > *) Исправления и улучшения в HTTP/3. > > > > > > Looks good. 
Thanks, pushed: http://mdounin.ru/hg/nginx http://mdounin.ru/hg/nginx.org Release files: http://mdounin.ru/temp/nginx-1.25.3.tar.gz http://mdounin.ru/temp/nginx-1.25.3.tar.gz.asc http://mdounin.ru/temp/nginx-1.25.3.zip http://mdounin.ru/temp/nginx-1.25.3.zip.asc -- Maxim Dounin http://mdounin.ru/ From thresh at nginx.com Tue Oct 24 15:37:29 2023 From: thresh at nginx.com (=?utf-8?q?Konstantin_Pavlov?=) Date: Tue, 24 Oct 2023 15:37:29 +0000 Subject: [nginx] Updated OpenSSL and zlib used for win32 builds. Message-ID: details: https://hg.nginx.org/nginx/rev/782535848b3e branches: changeset: 9180:782535848b3e user: Maxim Dounin date: Mon Oct 23 21:50:26 2023 +0300 description: Updated OpenSSL and zlib used for win32 builds. diffstat: misc/GNUmakefile | 4 ++-- 1 файлов изменено, 2 вставок(+), 2 удалений(-) различия (14 строк): diff -r ea1f29c2010c -r 782535848b3e misc/GNUmakefile --- a/misc/GNUmakefile Sat Oct 21 18:48:24 2023 +0400 +++ b/misc/GNUmakefile Mon Oct 23 21:50:26 2023 +0300 @@ -6,8 +6,8 @@ TEMP = tmp CC = cl OBJS = objs.msvc8 -OPENSSL = openssl-3.0.10 -ZLIB = zlib-1.2.13 +OPENSSL = openssl-3.0.11 +ZLIB = zlib-1.3 PCRE = pcre2-10.39 From thresh at nginx.com Tue Oct 24 15:37:32 2023 From: thresh at nginx.com (=?utf-8?q?Konstantin_Pavlov?=) Date: Tue, 24 Oct 2023 15:37:32 +0000 Subject: [nginx] nginx-1.25.3-RELEASE Message-ID: details: https://hg.nginx.org/nginx/rev/294a3d07234f branches: changeset: 9181:294a3d07234f user: Maxim Dounin date: Tue Oct 24 16:46:46 2023 +0300 description: nginx-1.25.3-RELEASE diffstat: docs/xml/nginx/changes.xml | 75 ++++++++++++++++++++++++++++++++++++++++++++++ 1 файлов изменено, 75 вставок(+), 0 удалений(-) различия (85 строк): diff -r 782535848b3e -r 294a3d07234f docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml Mon Oct 23 21:50:26 2023 +0300 +++ b/docs/xml/nginx/changes.xml Tue Oct 24 16:46:46 2023 +0300 @@ -5,6 +5,81 @@ + + + + +улучшено детектирование некорректного поведения клиентов +при использовании HTTP/2. 
+ + +improved detection of misbehaving clients +when using HTTP/2. + + + + + +уменьшение времени запуска +при использовании большого количества location'ов.
+Спасибо Yusuke Nojima. +
+ +startup speedup +when using a large number of locations.
+Thanks to Yusuke Nojima. +
+
+ + + +при использовании HTTP/2 без SSL +в рабочем процессе мог произойти segmentation fault; +ошибка появилась в 1.25.1. + + +a segmentation fault might occur in a worker process +when using HTTP/2 without SSL; +the bug had appeared in 1.25.1. + + + + + +строка "Status" в заголовке ответа бэкенда с пустой поясняющей фразой +обрабатывалась некорректно. + + +the "Status" backend response header line with an empty reason phrase +was handled incorrectly. + + + + + +утечки памяти во время переконфигурации +при использовании библиотеки PCRE2.
+Спасибо ZhenZhong Wu. +
+ +memory leak during reconfiguration +when using the PCRE2 library.
+Thanks to ZhenZhong Wu. +
+
+ + + +Исправления и улучшения в HTTP/3. + + +Bugfixes and improvements in HTTP/3. + + + +
+ + From thresh at nginx.com Tue Oct 24 15:37:35 2023 From: thresh at nginx.com (=?utf-8?q?Konstantin_Pavlov?=) Date: Tue, 24 Oct 2023 15:37:35 +0000 Subject: [nginx] release-1.25.3 tag Message-ID: details: https://hg.nginx.org/nginx/rev/25a2efd97a3e branches: changeset: 9182:25a2efd97a3e user: Maxim Dounin date: Tue Oct 24 16:46:47 2023 +0300 description: release-1.25.3 tag diffstat: .hgtags | 1 + 1 файлов изменено, 1 вставок(+), 0 удалений(-) различия (8 строк): diff -r 294a3d07234f -r 25a2efd97a3e .hgtags --- a/.hgtags Tue Oct 24 16:46:46 2023 +0300 +++ b/.hgtags Tue Oct 24 16:46:47 2023 +0300 @@ -475,3 +475,4 @@ ac779115ed6ee4f3039e9aea414a54e560450ee2 12dcf92b0c2c68552398f19644ce3104459807d7 release-1.25.0 f8134640e8615448205785cf00b0bc810489b495 release-1.25.1 1d839f05409d1a50d0f15a2bf36547001f99ae40 release-1.25.2 +294a3d07234f8f65d7b0e0b0e2c5b05c12c5da0a release-1.25.3 From thresh at nginx.com Tue Oct 24 22:16:47 2023 From: thresh at nginx.com (=?iso-8859-1?q?Konstantin_Pavlov?=) Date: Tue, 24 Oct 2023 15:16:47 -0700 Subject: [PATCH] Linux packages: documented nginx-module-otel package Message-ID: # HG changeset patch # User Konstantin Pavlov # Date 1698185777 25200 # Tue Oct 24 15:16:17 2023 -0700 # Node ID aa09c0e4358bfbc98b051e536c25b74f5568f393 # Parent 00c220310f537af2654cd3a04780f36ef5518014 Linux packages: documented nginx-module-otel package. diff -r 00c220310f53 -r aa09c0e4358b xml/en/linux_packages.xml --- a/xml/en/linux_packages.xml Tue Oct 24 17:13:13 2023 +0100 +++ b/xml/en/linux_packages.xml Tue Oct 24 15:16:17 2023 -0700 @@ -7,7 +7,7 @@
+ rev="91">
@@ -654,6 +654,11 @@ nginx-module-njs nginx-module-perl nginx-module-xslt +Additionally, since version 1.25.3 the following module is shipped as a +separate package: + +nginx-module-otel +
diff -r 00c220310f53 -r aa09c0e4358b xml/ru/linux_packages.xml --- a/xml/ru/linux_packages.xml Tue Oct 24 17:13:13 2023 +0100 +++ b/xml/ru/linux_packages.xml Tue Oct 24 15:16:17 2023 -0700 @@ -7,7 +7,7 @@
+ rev="91">
@@ -651,6 +651,11 @@ nginx-module-njs nginx-module-perl nginx-module-xslt +В дополнение к этому, с версии 1.25.3 следующий модуль поставляется в виде +отдельного пакета: + +nginx-module-otel +
From yar at nginx.com Wed Oct 25 09:52:40 2023 From: yar at nginx.com (Yaroslav Zhuravlev) Date: Wed, 25 Oct 2023 10:52:40 +0100 Subject: [PATCH] Linux packages: documented nginx-module-otel package In-Reply-To: References: Message-ID: > On 24 Oct 2023, at 23:16, Konstantin Pavlov wrote: > > # HG changeset patch > # User Konstantin Pavlov > # Date 1698185777 25200 > # Tue Oct 24 15:16:17 2023 -0700 > # Node ID aa09c0e4358bfbc98b051e536c25b74f5568f393 > # Parent 00c220310f537af2654cd3a04780f36ef5518014 > Linux packages: documented nginx-module-otel package. > > diff -r 00c220310f53 -r aa09c0e4358b xml/en/linux_packages.xml > --- a/xml/en/linux_packages.xml Tue Oct 24 17:13:13 2023 +0100 > +++ b/xml/en/linux_packages.xml Tue Oct 24 15:16:17 2023 -0700 > @@ -7,7 +7,7 @@ >
link="/en/linux_packages.html" > lang="en" > - rev="90"> > + rev="91"> > >
> > @@ -654,6 +654,11 @@ nginx-module-njs > nginx-module-perl > nginx-module-xslt > > +Additionally, since version 1.25.3 comma needed after 1.25.3 (for consistency with similar places in the doc) > the following module is shipped as a > +separate package: > + > +nginx-module-otel > + > > >
> diff -r 00c220310f53 -r aa09c0e4358b xml/ru/linux_packages.xml > --- a/xml/ru/linux_packages.xml Tue Oct 24 17:13:13 2023 +0100 > +++ b/xml/ru/linux_packages.xml Tue Oct 24 15:16:17 2023 -0700 > @@ -7,7 +7,7 @@ >
link="/ru/linux_packages.html" > lang="ru" > - rev="90"> > + rev="91"> > >
> > @@ -651,6 +651,11 @@ nginx-module-njs > nginx-module-perl > nginx-module-xslt > > +В дополнение к этому, с версии 1.25.3 следующий модуль поставляется в виде начиная с > +отдельного пакета: > + > +nginx-module-otel > + > > >
As an optional variant to consider, perhaps it might be good to reflect that it's a third party module authored by nginx devs, e.g: diff --git a/xml/en/linux_packages.xml b/xml/en/linux_packages.xml --- a/xml/en/linux_packages.xml +++ b/xml/en/linux_packages.xml @@ -7,7 +7,7 @@
+ rev="91">
@@ -654,6 +654,14 @@ nginx-module-perl nginx-module-xslt + +Additionally, since version 1.25.3 +nginx-authored third-party module +nginx-otel +is built as dynamic and shipped as a separate package: + +nginx-module-otel +
diff --git a/xml/ru/linux_packages.xml b/xml/ru/linux_packages.xml --- a/xml/ru/linux_packages.xml +++ b/xml/ru/linux_packages.xml @@ -7,7 +7,7 @@
+ rev="91">
@@ -651,6 +651,15 @@ nginx-module-perl nginx-module-xslt + +Кроме того, начиная с версии 1.25.3 +сторонний модуль +nginx-otel, +созданный в nginx, +собирается как динамический и поставляется в виде отдельного пакета: + +nginx-module-otel +
> _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From thresh at nginx.com Wed Oct 25 18:35:51 2023 From: thresh at nginx.com (Konstantin Pavlov) Date: Wed, 25 Oct 2023 11:35:51 -0700 Subject: [PATCH] Linux packages: documented nginx-module-otel package In-Reply-To: References: Message-ID: Hi Yaroslav, On 25/10/2023 2:52 AM, Yaroslav Zhuravlev wrote: >> On 24 Oct 2023, at 23:16, Konstantin Pavlov wrote: >> >> # HG changeset patch >> # User Konstantin Pavlov >> # Date 1698185777 25200 >> # Tue Oct 24 15:16:17 2023 -0700 >> # Node ID aa09c0e4358bfbc98b051e536c25b74f5568f393 >> # Parent 00c220310f537af2654cd3a04780f36ef5518014 >> Linux packages: documented nginx-module-otel package. >> >> diff -r 00c220310f53 -r aa09c0e4358b xml/en/linux_packages.xml >> --- a/xml/en/linux_packages.xml Tue Oct 24 17:13:13 2023 +0100 >> +++ b/xml/en/linux_packages.xml Tue Oct 24 15:16:17 2023 -0700 >> @@ -7,7 +7,7 @@ >>
> link="/en/linux_packages.html" >> lang="en" >> - rev="90"> >> + rev="91"> >> >>
>> >> @@ -654,6 +654,11 @@ nginx-module-njs >> nginx-module-perl >> nginx-module-xslt >> >> +Additionally, since version 1.25.3 > comma needed after 1.25.3 (for consistency with similar places in the doc) > >> the following module is shipped as a >> +separate package: >> + >> +nginx-module-otel >> + >> >> >>
>> diff -r 00c220310f53 -r aa09c0e4358b xml/ru/linux_packages.xml >> --- a/xml/ru/linux_packages.xml Tue Oct 24 17:13:13 2023 +0100 >> +++ b/xml/ru/linux_packages.xml Tue Oct 24 15:16:17 2023 -0700 >> @@ -7,7 +7,7 @@ >>
> link="/ru/linux_packages.html" >> lang="ru" >> - rev="90"> >> + rev="91"> >> >>
>> >> @@ -651,6 +651,11 @@ nginx-module-njs >> nginx-module-perl >> nginx-module-xslt >> >> +В дополнение к этому, с версии 1.25.3 следующий модуль поставляется в виде > начиная с > >> +отдельного пакета: >> + >> +nginx-module-otel >> + >> >> >>
Thanks! > As an optional variant to consider, perhaps it might be good > to reflect that it's a third party module authored by nginx devs, e.g: The same applies to njs as well if we want to go this route. -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Wed Oct 25 23:08:55 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 26 Oct 2023 03:08:55 +0400 Subject: [patch] quic PTO counter fixes In-Reply-To: References: Message-ID: <20231025230855.gkob3yoigbmtazcl@Y9MQ9X2QVV> On Wed, Oct 11, 2023 at 04:58:47PM +0300, Vladimir Homutov via nginx-devel wrote: > Hello, > > a couple of patches in the quic code: > > first patch improves a bit debugging, and the second patch contains > fixes for PTO counter calculation - see commit log for details. > > This helps with some clients in interop handshakeloss/handshakecorruption > testcases > > > # HG changeset patch > # User Vladimir Khomutov > # Date 1697031939 -10800 > # Wed Oct 11 16:45:39 2023 +0300 > # Node ID 1f188102fbd944df797e8710f70cccee76164add > # Parent cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc > QUIC: improved packet and frames debug tracing. > > Currently, packets generated by ngx_quic_frame_sendto() and > ngx_quic_send_early_cc() are not logged, thus making it hard > to read logs due to gaps appearing in packet numbers sequence. > For such special packets, a frame type being sent is also output. > > At frames level, it is handy to see immediately packet number > in which they arrived or being sent. > > diff --git a/src/event/quic/ngx_event_quic_frames.c b/src/event/quic/ngx_event_quic_frames.c > --- a/src/event/quic/ngx_event_quic_frames.c > +++ b/src/event/quic/ngx_event_quic_frames.c > @@ -886,8 +886,8 @@ ngx_quic_log_frame(ngx_log_t *log, ngx_q > break; > } > > - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, log, 0, "quic frame %s %s %*s", > - tx ? 
"tx" : "rx", ngx_quic_level_name(f->level), > + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, log, 0, "quic frame %s %s:%ui %*s", > + tx ? "tx" : "rx", ngx_quic_level_name(f->level), f->pnum, > p - buf, buf); > } > > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > --- a/src/event/quic/ngx_event_quic_output.c > +++ b/src/event/quic/ngx_event_quic_output.c > @@ -563,8 +563,6 @@ ngx_quic_output_packet(ngx_connection_t > pkt.need_ack = 1; > } > > - ngx_quic_log_frame(c->log, f, 1); > - > flen = ngx_quic_create_frame(p, f); > if (flen == -1) { > return NGX_ERROR; > @@ -578,6 +576,8 @@ ngx_quic_output_packet(ngx_connection_t > f->last = now; > f->plen = 0; > > + ngx_quic_log_frame(c->log, f, 1); > + > nframes++; I'd rather move setting frame fields before calling ngx_quic_log_frame()/ngx_quic_create_frame() to preserve consistency with other places, i.e.: - set fields - log frame - create frame To look as follows: : f->pnum = ctx->pnum; : f->first = now; : f->last = now; : f->plen = 0; : : ngx_quic_log_frame(c->log, f, 1); : : flen = ngx_quic_create_frame(p, f); : > } > > @@ -925,6 +925,13 @@ ngx_quic_send_early_cc(ngx_connection_t > > res.data = dst; > > + ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0, > + "quic packet tx %s bytes:%ui need_ack:%d" > + " number:%L encoded nl:%d trunc:0x%xD frame:%ui]", typo: closing square bracket Not sure we need logging for a (particular) frame in packet logging, not to say that it looks like a layering violation. Anyway, it is logged nearby, for example: quic frame tx init:0 CONNECTION_CLOSE err:11 invalid address validation token ft:0 quic packet tx init bytes:36 need_ack:0 number:0 encoded nl:1 trunc:0x0 So I'd remove this part. 
> + ngx_quic_level_name(pkt.level), pkt.payload.len, > + pkt.need_ack, pkt.number, pkt.num_len, pkt.trunc, > + frame.type); > + > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > return NGX_ERROR; > } > @@ -1179,6 +1186,10 @@ ngx_quic_frame_sendto(ngx_connection_t * > pad = 4 - pkt.num_len; > min_payload = ngx_max(min_payload, pad); > > +#if (NGX_DEBUG) > + frame->pnum = pkt.number; > +#endif > + > len = ngx_quic_create_frame(NULL, frame); > if (len > NGX_QUIC_MAX_UDP_PAYLOAD_SIZE) { > return NGX_ERROR; > @@ -1201,6 +1212,13 @@ ngx_quic_frame_sendto(ngx_connection_t * > > res.data = dst; > > + ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0, > + "quic packet tx %s bytes:%ui need_ack:%d" > + " number:%L encoded nl:%d trunc:0x%xD frame:%ui", Same here. > + ngx_quic_level_name(pkt.level), pkt.payload.len, > + pkt.need_ack, pkt.number, pkt.num_len, pkt.trunc, > + frame->type); > + BTW, it would make sense to get a new macro / inline function for packet tx logging, similar to ngx_quic_log_frame(), since we will have three places with identical ngx_log_debug7(). > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > return NGX_ERROR; > } > diff --git a/src/event/quic/ngx_event_quic_transport.c b/src/event/quic/ngx_event_quic_transport.c > --- a/src/event/quic/ngx_event_quic_transport.c > +++ b/src/event/quic/ngx_event_quic_transport.c > @@ -1135,6 +1135,9 @@ ngx_quic_parse_frame(ngx_quic_header_t * > } > > f->level = pkt->level; > +#if (NGX_DEBUG) > + f->pnum = pkt->pn; > +#endif > > return p - start; > > # HG changeset patch > # User Vladimir Khomutov > # Date 1697031803 -10800 > # Wed Oct 11 16:43:23 2023 +0300 > # Node ID 9ba2840e88f62343b3bd794e43900781dab43686 > # Parent 1f188102fbd944df797e8710f70cccee76164add > QUIC: fixed handling of PTO counter. > > The RFC 9002 clearly says in "6.2. Probe Timeout": > ... > As with loss detection, the PTO is per packet number space. > That is, a PTO value is computed per packet number space. 
> > Despite that, current code is using per-connection PTO counter. > For example, this may lead to situation when packet loss at handshake > level will affect PTO calculation for initial packets, preventing > send of new probes. Although PTO value is per packet number space, PTO backoff is not, see "6.2.1 Computing PTO": : When ack-eliciting packets in multiple packet number spaces are in flight, the : timer MUST be set to the earlier value of the Initial and Handshake packet : number spaces. But: : When a PTO timer expires, the PTO backoff MUST be increased <..> : This exponential reduction in the sender's rate is important because consecutive : PTOs might be caused by loss of packets or acknowledgments due to severe : congestion. Even when there are ack-eliciting packets in flight in multiple : packet number spaces, the exponential increase in PTO occurs across all spaces : to prevent excess load on the network. For example, a timeout in the Initial : packet number space doubles the length of the timeout in the Handshake packet : number space. Even if that would be proven otherwise, I don't think the description provides detailed explanation. It describes a pretty specific use case, when both Initial and Handshake packet number spaces have in-flight packets with different PTO timeout (i.e. different f->last). Typically they are sent coalesced (e.g. CRYPTO frames for ServerHello and (at least) EncryptedExtensions TLS messages). In interop tests, though, it might be different: such packets may be sent separately, with Handshake packet thus having a later PTO timeout. 
If so, PTO timer will first fire for the Initial packet, then for Handshake, which will result in PTO backoff accumulated for each packet: t1: <- Initial (lost) t2: <- Handshake (lost) t1': pto(t1) timeout <- Initial (pto_count=1) t2': pto(t2) timeout <- Handshake (pto_count=2) t1'': pto(t1') timeout <- Initial (pto_count=3) So, I would supplement the description with the phrase that this is typically the case with uncoalesced packets seen in interop tests, and that the same is true vice versa, with packet loss at the initial packet number space affecting PTO backoff in the handshake packet number space. But see above about PTO backoff increase across all spaces. > > Additionally, one case of successful ACK receiving was missing: > PING frames are not stored in the ctx->sent queue, thus PTO was not > reset when corresponding packets were acknowledged. See below. > > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > --- a/src/event/quic/ngx_event_quic.c > +++ b/src/event/quic/ngx_event_quic.c > @@ -1088,8 +1088,6 @@ ngx_quic_discard_ctx(ngx_connection_t *c > > ngx_quic_keys_discard(qc->keys, level); > > - qc->pto_count = 0; > - > ctx = ngx_quic_get_send_ctx(qc, level); > > ngx_quic_free_buffer(c, &ctx->crypto); > @@ -1120,6 +1118,7 @@ ngx_quic_discard_ctx(ngx_connection_t *c > } > > ctx->send_ack = 0; > + ctx->pto_count = 0; > > ngx_quic_set_lost_timer(c); > } > diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c > --- a/src/event/quic/ngx_event_quic_ack.c > +++ b/src/event/quic/ngx_event_quic_ack.c > @@ -286,8 +286,12 @@ ngx_quic_handle_ack_frame_range(ngx_conn > if (!found) { > > if (max < ctx->pnum) { > - /* duplicate ACK or ACK for non-ack-eliciting frame */ > - return NGX_OK; > + /* > + * - ACK for frames not in sent queue (i.e. 
PING) > + * - duplicate ACK > + * - ACK for non-ack-eliciting frame > + */ > + goto done; > } > > ngx_log_error(NGX_LOG_INFO, c->log, 0, > @@ -300,11 +304,13 @@ ngx_quic_handle_ack_frame_range(ngx_conn > return NGX_ERROR; > } > > +done: > + > if (!qc->push.timer_set) { > ngx_post_event(&qc->push, &ngx_posted_events); > } > > - qc->pto_count = 0; > + ctx->pto_count = 0; This part of the change to reset pto_count for duplicate ACK or ACK for non-ack-eliciting frame contradicts the OnAckReceived example in RFC 9002, although I didn't find formal text in the RFC itself: OnAckReceived(ack, pn_space): ... // DetectAndRemoveAckedPackets finds packets that are newly // acknowledged and removes them from sent_packets. newly_acked_packets = DetectAndRemoveAckedPackets(ack, pn_space) // Nothing to do if there are no newly acked packets. if (newly_acked_packets.empty()): return // Update the RTT if the largest acknowledged is newly acked // and at least one ack-eliciting was newly acked. ... // Reset pto_count ... From which it follows that pto_count is reset (and RTT updated) for newly ack'ed packets only. I think the better fix would be to properly track in-flight PING frames. Moreover, the current behaviour of not tracking PING frames in ctx->sent prevents a properly calculated PTO timeout: each time it is calculated against the original packet (with increasingly receding time to the past) that triggered the first PTO timeout, which doesn't result in exponentially increased PTO period as expected, but rather some bogus value. 
> > return NGX_OK; > } > @@ -744,7 +750,7 @@ ngx_quic_set_lost_timer(ngx_connection_t > > q = ngx_queue_last(&ctx->sent); > f = ngx_queue_data(q, ngx_quic_frame_t, queue); > - w = (ngx_msec_int_t) (f->last + (ngx_quic_pto(c, ctx) << qc->pto_count) > + w = (ngx_msec_int_t) (f->last + (ngx_quic_pto(c, ctx) << ctx->pto_count) > - now); > > if (w < 0) { > @@ -855,7 +861,7 @@ ngx_quic_pto_handler(ngx_event_t *ev) > continue; > } > > - if ((ngx_msec_int_t) (f->last + (ngx_quic_pto(c, ctx) << qc->pto_count) > + if ((ngx_msec_int_t) (f->last + (ngx_quic_pto(c, ctx) << ctx->pto_count) > - now) > 0) > { > continue; > @@ -863,7 +869,7 @@ ngx_quic_pto_handler(ngx_event_t *ev) > > ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, > "quic pto %s pto_count:%ui", > - ngx_quic_level_name(ctx->level), qc->pto_count); > + ngx_quic_level_name(ctx->level), ctx->pto_count); > > ngx_memzero(&frame, sizeof(ngx_quic_frame_t)); > > @@ -876,10 +882,10 @@ ngx_quic_pto_handler(ngx_event_t *ev) > ngx_quic_close_connection(c, NGX_ERROR); > return; > } > + > + ctx->pto_count++; > } > > - qc->pto_count++; > - > ngx_quic_set_lost_timer(c); > > ngx_quic_connstate_dbg(c); > diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h > --- a/src/event/quic/ngx_event_quic_connection.h > +++ b/src/event/quic/ngx_event_quic_connection.h > @@ -195,6 +195,8 @@ struct ngx_quic_send_ctx_s { > ngx_uint_t nranges; > ngx_quic_ack_range_t ranges[NGX_QUIC_MAX_RANGES]; > ngx_uint_t send_ack; > + > + ngx_uint_t pto_count; > }; > > > @@ -240,8 +242,6 @@ struct ngx_quic_connection_s { > ngx_msec_t min_rtt; > ngx_msec_t rttvar; > > - ngx_uint_t pto_count; > - > ngx_queue_t free_frames; > ngx_buf_t *free_bufs; > ngx_buf_t *free_shadow_bufs; From vl at inspert.ru Thu Oct 26 14:20:39 2023 From: vl at inspert.ru (Vladimir Homutov) Date: Thu, 26 Oct 2023 17:20:39 +0300 Subject: [patch] quic PTO counter fixes In-Reply-To: <20231025230855.gkob3yoigbmtazcl@Y9MQ9X2QVV> References: 
<20231025230855.gkob3yoigbmtazcl@Y9MQ9X2QVV> Message-ID: On Thu, Oct 26, 2023 at 03:08:55AM +0400, Sergey Kandaurov wrote: > On Wed, Oct 11, 2023 at 04:58:47PM +0300, Vladimir Homutov via nginx-devel wrote: [..] > > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > > --- a/src/event/quic/ngx_event_quic_output.c > > +++ b/src/event/quic/ngx_event_quic_output.c > > @@ -563,8 +563,6 @@ ngx_quic_output_packet(ngx_connection_t > > pkt.need_ack = 1; > > } > > > > - ngx_quic_log_frame(c->log, f, 1); > > - > > flen = ngx_quic_create_frame(p, f); > > if (flen == -1) { > > return NGX_ERROR; > > @@ -578,6 +576,8 @@ ngx_quic_output_packet(ngx_connection_t > > f->last = now; > > f->plen = 0; > > > > + ngx_quic_log_frame(c->log, f, 1); > > + > > nframes++; > > I'd rather move setting frame fields before calling > ngx_quic_log_frame()/ngx_quic_create_frame() > to preserve consistency with other places, i.e.: > - set fields > - log frame > - create frame > > To look as follows: > > : f->pnum = ctx->pnum; > : f->first = now; > : f->last = now; > : f->plen = 0; > : > : ngx_quic_log_frame(c->log, f, 1); > : > : flen = ngx_quic_create_frame(p, f); > : agreed > > } > > > > @@ -925,6 +925,13 @@ ngx_quic_send_early_cc(ngx_connection_t > > > > res.data = dst; > > > > + ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0, > > + "quic packet tx %s bytes:%ui need_ack:%d" > > + " number:%L encoded nl:%d trunc:0x%xD frame:%ui]", > > typo: closing square bracket thanks, removed > Not sure we need logging for a (particular) frame in packet logging, > not to say that it looks like a layering violation. > Anyway, it is logged nearby, for example: > > quic frame tx init:0 CONNECTION_CLOSE err:11 invalid address validation token ft:0 > quic packet tx init bytes:36 need_ack:0 number:0 encoded nl:1 trunc:0x0 > > So I'd remove this part. 
agreed, frame logging removed > > + ngx_quic_level_name(pkt.level), pkt.payload.len, > > + pkt.need_ack, pkt.number, pkt.num_len, pkt.trunc, > > + frame->type); > > + > > BTW, it would make sense to get a new macro / inline function > for packet tx logging, similar to ngx_quic_log_frame(), > since we will have three places with identical ngx_log_debug7(). actually, four (we have also retry), so having a macro is a good idea updated patch attached -------------- next part -------------- A non-text attachment was scrubbed... Name: dbg_frames_2.diff Type: text/x-diff Size: 5317 bytes Desc: not available URL: From mdounin at mdounin.ru Thu Oct 26 17:26:19 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 26 Oct 2023 20:26:19 +0300 Subject: [PATCH] Linux packages: documented nginx-module-otel package In-Reply-To: References: Message-ID: Hello! On Wed, Oct 25, 2023 at 10:52:40AM +0100, Yaroslav Zhuravlev wrote: [...] > As an optional variant to consider, perhaps it might be good > to reflect that it's a third party module authored by nginx devs, e.g: [...] > +Additionally, since version 1.25.3 > +nginx-authored third-party module > +nginx-otel > +is built as dynamic and shipped as a separate package: > + > +nginx-module-otel > + Note that "nginx-authored" here looks misleading, as no nginx core developers work on this module. Overall, I do support the clear distinction between nginx's own modules and 3rd-party modules provided in the packages repository. (But, as correctly noted by Konstantin, this should include njs as well.) 
-- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Thu Oct 26 20:27:22 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 27 Oct 2023 00:27:22 +0400 Subject: [patch] quic PTO counter fixes In-Reply-To: References: <20231025230855.gkob3yoigbmtazcl@Y9MQ9X2QVV> Message-ID: <20231026202722.227y44nmtsf3zfpu@Y9MQ9X2QVV> On Thu, Oct 26, 2023 at 05:20:39PM +0300, Vladimir Homutov wrote: > On Thu, Oct 26, 2023 at 03:08:55AM +0400, Sergey Kandaurov wrote: > > On Wed, Oct 11, 2023 at 04:58:47PM +0300, Vladimir Homutov via nginx-devel wrote: > [..] > > > > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > > > --- a/src/event/quic/ngx_event_quic_output.c > > > +++ b/src/event/quic/ngx_event_quic_output.c > > > @@ -563,8 +563,6 @@ ngx_quic_output_packet(ngx_connection_t > > > pkt.need_ack = 1; > > > } > > > > > > - ngx_quic_log_frame(c->log, f, 1); > > > - > > > flen = ngx_quic_create_frame(p, f); > > > if (flen == -1) { > > > return NGX_ERROR; > > > @@ -578,6 +576,8 @@ ngx_quic_output_packet(ngx_connection_t > > > f->last = now; > > > f->plen = 0; > > > > > > + ngx_quic_log_frame(c->log, f, 1); > > > + > > > nframes++; > > > > I'd rather move setting frame fields before calling > > ngx_quic_log_frame()/ngx_quic_create_frame() > > to preserve consistency with other places, i.e.: > > - set fields > > - log frame > > - create frame > > > > To look as follows: > > > > : f->pnum = ctx->pnum; > > : f->first = now; > > : f->last = now; > > : f->plen = 0; > > : > > : ngx_quic_log_frame(c->log, f, 1); > > : > > : flen = ngx_quic_create_frame(p, f); > > : > > agreed > > > > } > > > > > > @@ -925,6 +925,13 @@ ngx_quic_send_early_cc(ngx_connection_t > > > > > > res.data = dst; > > > > > > + ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0, > > > + "quic packet tx %s bytes:%ui need_ack:%d" > > > + " number:%L encoded nl:%d trunc:0x%xD frame:%ui]", > > > > typo: closing square bracket > > thanks, removed > > > Not sure we need 
logging for a (particular) frame in packet logging, > > not to say that it looks like a layering violation. > > Anyway, it is logged nearby, for example: > > > > quic frame tx init:0 CONNECTION_CLOSE err:11 invalid address validation token ft:0 > > quic packet tx init bytes:36 need_ack:0 number:0 encoded nl:1 trunc:0x0 > > > > So I'd remove this part. > > agreed, frame logging removed > > > > + ngx_quic_level_name(pkt.level), pkt.payload.len, > > > + pkt.need_ack, pkt.number, pkt.num_len, pkt.trunc, > > > + frame->type); > > > + > > > > BTW, it would make sense to get a new macro / inline function > > for packet tx logging, similar to ngx_quic_log_frame(), > > since we will have three places with identical ngx_log_debug7(). > > actually, four (we have also retry), so having a macro is a good idea > > updated patch attached Well, I don't think retry needs logging, because this is not a real packet, it carries a token and is used to construct a Retry packet (which is also a special packet) later in ngx_quic_encrypt(). Logging such a construct is bogus, because nearly all fields aren't initialized to sensible values, personally I've got the following: quic packet tx init bytes:0 need_ack:0 number:0 encoded nl:0 trunc:0x0 Otherwise, it looks good. > # HG changeset patch > # User Vladimir Khomutov > # Date 1698329226 -10800 > # Thu Oct 26 17:07:06 2023 +0300 > # Node ID b8cdb9518f877fb3ed6386731df1e263eeae8e7c > # Parent 25a2efd97a3e21d106ce4547a763b77eb9c732ad > QUIC: improved packet and frames debug tracing. > > Currently, packets generated by ngx_quic_frame_sendto() and > ngx_quic_send_early_cc() are not logged, thus making it hard > to read logs due to gaps appearing in packet numbers sequence. > > At frames level, it is handy to see immediately packet number > in which they arrived or being sent. 
> > diff --git a/src/event/quic/ngx_event_quic_frames.c b/src/event/quic/ngx_event_quic_frames.c > --- a/src/event/quic/ngx_event_quic_frames.c > +++ b/src/event/quic/ngx_event_quic_frames.c > @@ -886,8 +886,8 @@ ngx_quic_log_frame(ngx_log_t *log, ngx_q > break; > } > > - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, log, 0, "quic frame %s %s %*s", > - tx ? "tx" : "rx", ngx_quic_level_name(f->level), > + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, log, 0, "quic frame %s %s:%ui %*s", > + tx ? "tx" : "rx", ngx_quic_level_name(f->level), f->pnum, > p - buf, buf); > } > > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > --- a/src/event/quic/ngx_event_quic_output.c > +++ b/src/event/quic/ngx_event_quic_output.c > @@ -35,6 +35,15 @@ > #define NGX_QUIC_SOCKET_RETRY_DELAY 10 /* ms, for NGX_AGAIN on write */ > > > +#define ngx_quic_log_packet(log, pkt) \ > + ngx_log_debug6(NGX_LOG_DEBUG_EVENT, log, 0, \ > + "quic packet tx %s bytes:%ui need_ack:%d" \ > + " number:%L encoded nl:%d trunc:0x%xD", \ > + ngx_quic_level_name((pkt)->level), (pkt)->payload.len, \ > + (pkt)->need_ack, (pkt)->number, (pkt)->num_len, \ > + (pkt)->trunc); > + > + > static ngx_int_t ngx_quic_create_datagrams(ngx_connection_t *c); > static void ngx_quic_commit_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx); > static void ngx_quic_revert_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, > @@ -578,6 +587,11 @@ ngx_quic_output_packet(ngx_connection_t > pkt.need_ack = 1; > } > > + f->pnum = ctx->pnum; > + f->first = now; > + f->last = now; > + f->plen = 0; > + > ngx_quic_log_frame(c->log, f, 1); > > flen = ngx_quic_create_frame(p, f); > @@ -588,11 +602,6 @@ ngx_quic_output_packet(ngx_connection_t > len += flen; > p += flen; > > - f->pnum = ctx->pnum; > - f->first = now; > - f->last = now; > - f->plen = 0; > - > nframes++; > } > > @@ -610,11 +619,7 @@ ngx_quic_output_packet(ngx_connection_t > > res.data = data; > > - ngx_log_debug6(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic 
packet tx %s bytes:%ui" > - " need_ack:%d number:%L encoded nl:%d trunc:0x%xD", > - ngx_quic_level_name(ctx->level), pkt.payload.len, > - pkt.need_ack, pkt.number, pkt.num_len, pkt.trunc); > + ngx_quic_log_packet(c->log, &pkt); > > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > return NGX_ERROR; > @@ -899,13 +904,13 @@ ngx_quic_send_early_cc(ngx_connection_t > frame.u.close.reason.data = (u_char *) reason; > frame.u.close.reason.len = ngx_strlen(reason); > > + ngx_quic_log_frame(c->log, &frame, 1); > + > len = ngx_quic_create_frame(NULL, &frame); > if (len > NGX_QUIC_MAX_UDP_PAYLOAD_SIZE) { > return NGX_ERROR; > } > > - ngx_quic_log_frame(c->log, &frame, 1); > - > len = ngx_quic_create_frame(src, &frame); > if (len == -1) { > return NGX_ERROR; > @@ -940,6 +945,8 @@ ngx_quic_send_early_cc(ngx_connection_t > > res.data = dst; > > + ngx_quic_log_packet(c->log, &pkt); > + > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > ngx_quic_keys_cleanup(pkt.keys); > return NGX_ERROR; > @@ -1001,6 +1008,8 @@ ngx_quic_send_retry(ngx_connection_t *c, > > res.data = buf; > > + ngx_quic_log_packet(c->log, &pkt); > + > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > return NGX_ERROR; > } > @@ -1198,13 +1207,17 @@ ngx_quic_frame_sendto(ngx_connection_t * > pad = 4 - pkt.num_len; > min_payload = ngx_max(min_payload, pad); > > +#if (NGX_DEBUG) > + frame->pnum = pkt.number; > +#endif > + > + ngx_quic_log_frame(c->log, frame, 1); > + > len = ngx_quic_create_frame(NULL, frame); > if (len > NGX_QUIC_MAX_UDP_PAYLOAD_SIZE) { > return NGX_ERROR; > } > > - ngx_quic_log_frame(c->log, frame, 1); > - > len = ngx_quic_create_frame(src, frame); > if (len == -1) { > return NGX_ERROR; > @@ -1220,6 +1233,8 @@ ngx_quic_frame_sendto(ngx_connection_t * > > res.data = dst; > > + ngx_quic_log_packet(c->log, &pkt); > + > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > return NGX_ERROR; > } > diff --git a/src/event/quic/ngx_event_quic_transport.c b/src/event/quic/ngx_event_quic_transport.c > --- 
a/src/event/quic/ngx_event_quic_transport.c > +++ b/src/event/quic/ngx_event_quic_transport.c > @@ -1135,6 +1135,9 @@ ngx_quic_parse_frame(ngx_quic_header_t * > } > > f->level = pkt->level; > +#if (NGX_DEBUG) > + f->pnum = pkt->pn; > +#endif > > return p - start; > From vl at inspert.ru Thu Oct 26 20:37:27 2023 From: vl at inspert.ru (Vladimir Homutov) Date: Thu, 26 Oct 2023 23:37:27 +0300 Subject: [patch] quic PTO counter fixes In-Reply-To: <20231026202722.227y44nmtsf3zfpu@Y9MQ9X2QVV> References: <20231025230855.gkob3yoigbmtazcl@Y9MQ9X2QVV> <20231026202722.227y44nmtsf3zfpu@Y9MQ9X2QVV> Message-ID: On Fri, Oct 27, 2023 at 12:27:22AM +0400, Sergey Kandaurov wrote: > On Thu, Oct 26, 2023 at 05:20:39PM +0300, Vladimir Homutov wrote: > > On Thu, Oct 26, 2023 at 03:08:55AM +0400, Sergey Kandaurov wrote: > > > On Wed, Oct 11, 2023 at 04:58:47PM +0300, Vladimir Homutov via nginx-devel wrote: > > [..] > > > > > > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > > > > --- a/src/event/quic/ngx_event_quic_output.c > > > > +++ b/src/event/quic/ngx_event_quic_output.c > > > > @@ -563,8 +563,6 @@ ngx_quic_output_packet(ngx_connection_t > > > > pkt.need_ack = 1; > > > > } > > > > > > > > - ngx_quic_log_frame(c->log, f, 1); > > > > - > > > > flen = ngx_quic_create_frame(p, f); > > > > if (flen == -1) { > > > > return NGX_ERROR; > > > > @@ -578,6 +576,8 @@ ngx_quic_output_packet(ngx_connection_t > > > > f->last = now; > > > > f->plen = 0; > > > > > > > > + ngx_quic_log_frame(c->log, f, 1); > > > > + > > > > nframes++; > > > > > > I'd rather move setting frame fields before calling > > > ngx_quic_log_frame()/ngx_quic_create_frame() > > > to preserve consistency with other places, i.e.: > > > - set fields > > > - log frame > > > - create frame > > > > > > To look as follows: > > > > > > : f->pnum = ctx->pnum; > > > : f->first = now; > > > : f->last = now; > > > : f->plen = 0; > > > : > > > : ngx_quic_log_frame(c->log, f, 1); > > > : > > > 
: flen = ngx_quic_create_frame(p, f); > > > : > > > > agreed > > > > > > } > > > > > > > > @@ -925,6 +925,13 @@ ngx_quic_send_early_cc(ngx_connection_t > > > > > > > > res.data = dst; > > > > > > > > + ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0, > > > > + "quic packet tx %s bytes:%ui need_ack:%d" > > > > + " number:%L encoded nl:%d trunc:0x%xD frame:%ui]", > > > > > > typo: closing square bracket > > > > thanks, removed > > > > > Not sure we need logging for a (particular) frame in packet logging, > > > not to say that it looks like a layering violation. > > > Anyway, it is logged nearby, for example: > > > > > > quic frame tx init:0 CONNECTION_CLOSE err:11 invalid address validation token ft:0 > > > quic packet tx init bytes:36 need_ack:0 number:0 encoded nl:1 trunc:0x0 > > > > > > So I'd remove this part. > > > > agreed, frame logging removed > > > > > > + ngx_quic_level_name(pkt.level), pkt.payload.len, > > > > + pkt.need_ack, pkt.number, pkt.num_len, pkt.trunc, > > > > + frame->type); > > > > + > > > > > > BTW, it would make sense to get a new macro / inline function > > > for packet tx logging, similar to ngx_quic_log_frame(), > > > since we will have three places with identical ngx_log_debug7(). > > > > actually, four (we have also retry), so having a macro is a good idea > > > > updated patch attached > > Well, I don't think retry needs logging, because this is not a real > packet, it carries a token and is used to construct a Retry packet > (which is also a special packet) later in ngx_quic_encrypt(). > Logging such a construct is bogus, because nearly all fields aren't > initialized to sensible values, personally I've got the following: > > quic packet tx init bytes:0 need_ack:0 number:0 encoded nl:0 trunc:0x0 yes, this makes sense, removed. -------------- next part -------------- A non-text attachment was scrubbed... 
Name: dbg_frames_3.diff Type: text/x-diff Size: 5102 bytes Desc: not available URL: From pluknet at nginx.com Thu Oct 26 21:33:06 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Thu, 26 Oct 2023 21:33:06 +0000 Subject: [nginx] Version bump. Message-ID: details: https://hg.nginx.org/nginx/rev/8b1526a7e383 branches: changeset: 9183:8b1526a7e383 user: Sergey Kandaurov date: Fri Oct 27 01:29:28 2023 +0400 description: Version bump. diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r 25a2efd97a3e -r 8b1526a7e383 src/core/nginx.h --- a/src/core/nginx.h Tue Oct 24 16:46:47 2023 +0300 +++ b/src/core/nginx.h Fri Oct 27 01:29:28 2023 +0400 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1025003 -#define NGINX_VERSION "1.25.3" +#define nginx_version 1025004 +#define NGINX_VERSION "1.25.4" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From pluknet at nginx.com Thu Oct 26 21:33:09 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Thu, 26 Oct 2023 21:33:09 +0000 Subject: [nginx] QUIC: improved packet and frames debug tracing. Message-ID: details: https://hg.nginx.org/nginx/rev/7ec761f0365f branches: changeset: 9184:7ec761f0365f user: Vladimir Khomutov date: Thu Oct 26 23:35:09 2023 +0300 description: QUIC: improved packet and frames debug tracing. Currently, packets generated by ngx_quic_frame_sendto() and ngx_quic_send_early_cc() are not logged, thus making it hard to read logs due to gaps appearing in packet numbers sequence. At frames level, it is handy to see immediately packet number in which they arrived or being sent. 
diffstat: src/event/quic/ngx_event_quic_frames.c | 4 +- src/event/quic/ngx_event_quic_output.c | 41 ++++++++++++++++++++---------- src/event/quic/ngx_event_quic_transport.c | 3 ++ 3 files changed, 32 insertions(+), 16 deletions(-) diffs (137 lines): diff -r 8b1526a7e383 -r 7ec761f0365f src/event/quic/ngx_event_quic_frames.c --- a/src/event/quic/ngx_event_quic_frames.c Fri Oct 27 01:29:28 2023 +0400 +++ b/src/event/quic/ngx_event_quic_frames.c Thu Oct 26 23:35:09 2023 +0300 @@ -886,8 +886,8 @@ ngx_quic_log_frame(ngx_log_t *log, ngx_q break; } - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, log, 0, "quic frame %s %s %*s", - tx ? "tx" : "rx", ngx_quic_level_name(f->level), + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, log, 0, "quic frame %s %s:%uL %*s", + tx ? "tx" : "rx", ngx_quic_level_name(f->level), f->pnum, p - buf, buf); } diff -r 8b1526a7e383 -r 7ec761f0365f src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c Fri Oct 27 01:29:28 2023 +0400 +++ b/src/event/quic/ngx_event_quic_output.c Thu Oct 26 23:35:09 2023 +0300 @@ -35,6 +35,15 @@ #define NGX_QUIC_SOCKET_RETRY_DELAY 10 /* ms, for NGX_AGAIN on write */ +#define ngx_quic_log_packet(log, pkt) \ + ngx_log_debug6(NGX_LOG_DEBUG_EVENT, log, 0, \ + "quic packet tx %s bytes:%ui need_ack:%d" \ + " number:%L encoded nl:%d trunc:0x%xD", \ + ngx_quic_level_name((pkt)->level), (pkt)->payload.len, \ + (pkt)->need_ack, (pkt)->number, (pkt)->num_len, \ + (pkt)->trunc); + + static ngx_int_t ngx_quic_create_datagrams(ngx_connection_t *c); static void ngx_quic_commit_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx); static void ngx_quic_revert_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, @@ -578,6 +587,11 @@ ngx_quic_output_packet(ngx_connection_t pkt.need_ack = 1; } + f->pnum = ctx->pnum; + f->first = now; + f->last = now; + f->plen = 0; + ngx_quic_log_frame(c->log, f, 1); flen = ngx_quic_create_frame(p, f); @@ -588,11 +602,6 @@ ngx_quic_output_packet(ngx_connection_t len += flen; p += flen; - f->pnum = 
ctx->pnum; - f->first = now; - f->last = now; - f->plen = 0; - nframes++; } @@ -610,11 +619,7 @@ ngx_quic_output_packet(ngx_connection_t res.data = data; - ngx_log_debug6(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic packet tx %s bytes:%ui" - " need_ack:%d number:%L encoded nl:%d trunc:0x%xD", - ngx_quic_level_name(ctx->level), pkt.payload.len, - pkt.need_ack, pkt.number, pkt.num_len, pkt.trunc); + ngx_quic_log_packet(c->log, &pkt); if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { return NGX_ERROR; @@ -899,13 +904,13 @@ ngx_quic_send_early_cc(ngx_connection_t frame.u.close.reason.data = (u_char *) reason; frame.u.close.reason.len = ngx_strlen(reason); + ngx_quic_log_frame(c->log, &frame, 1); + len = ngx_quic_create_frame(NULL, &frame); if (len > NGX_QUIC_MAX_UDP_PAYLOAD_SIZE) { return NGX_ERROR; } - ngx_quic_log_frame(c->log, &frame, 1); - len = ngx_quic_create_frame(src, &frame); if (len == -1) { return NGX_ERROR; @@ -940,6 +945,8 @@ ngx_quic_send_early_cc(ngx_connection_t res.data = dst; + ngx_quic_log_packet(c->log, &pkt); + if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { ngx_quic_keys_cleanup(pkt.keys); return NGX_ERROR; @@ -1198,13 +1205,17 @@ ngx_quic_frame_sendto(ngx_connection_t * pad = 4 - pkt.num_len; min_payload = ngx_max(min_payload, pad); +#if (NGX_DEBUG) + frame->pnum = pkt.number; +#endif + + ngx_quic_log_frame(c->log, frame, 1); + len = ngx_quic_create_frame(NULL, frame); if (len > NGX_QUIC_MAX_UDP_PAYLOAD_SIZE) { return NGX_ERROR; } - ngx_quic_log_frame(c->log, frame, 1); - len = ngx_quic_create_frame(src, frame); if (len == -1) { return NGX_ERROR; @@ -1220,6 +1231,8 @@ ngx_quic_frame_sendto(ngx_connection_t * res.data = dst; + ngx_quic_log_packet(c->log, &pkt); + if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { return NGX_ERROR; } diff -r 8b1526a7e383 -r 7ec761f0365f src/event/quic/ngx_event_quic_transport.c --- a/src/event/quic/ngx_event_quic_transport.c Fri Oct 27 01:29:28 2023 +0400 +++ b/src/event/quic/ngx_event_quic_transport.c Thu Oct 26 23:35:09 2023 
+0300 @@ -1135,6 +1135,9 @@ ngx_quic_parse_frame(ngx_quic_header_t * } f->level = pkt->level; +#if (NGX_DEBUG) + f->pnum = pkt->pn; +#endif return p - start; From pluknet at nginx.com Thu Oct 26 21:33:24 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 27 Oct 2023 01:33:24 +0400 Subject: [patch] quic PTO counter fixes In-Reply-To: References: <20231025230855.gkob3yoigbmtazcl@Y9MQ9X2QVV> <20231026202722.227y44nmtsf3zfpu@Y9MQ9X2QVV> Message-ID: <20231026213324.ybh5ahr2aszqnsgx@Y9MQ9X2QVV> On Thu, Oct 26, 2023 at 11:37:27PM +0300, Vladimir Homutov wrote: > # HG changeset patch > # User Vladimir Khomutov > # Date 1698352509 -10800 > # Thu Oct 26 23:35:09 2023 +0300 > # Node ID d62960a9e75f07a1d260cf7aaad965f56a9520c2 > # Parent 25a2efd97a3e21d106ce4547a763b77eb9c732ad > QUIC: improved packet and frames debug tracing. > > Currently, packets generated by ngx_quic_frame_sendto() and > ngx_quic_send_early_cc() are not logged, thus making it hard > to read logs due to gaps appearing in packet numbers sequence. > > At frames level, it is handy to see immediately packet number > in which they arrived or being sent. > > diff --git a/src/event/quic/ngx_event_quic_frames.c b/src/event/quic/ngx_event_quic_frames.c > --- a/src/event/quic/ngx_event_quic_frames.c > +++ b/src/event/quic/ngx_event_quic_frames.c > @@ -886,8 +886,8 @@ ngx_quic_log_frame(ngx_log_t *log, ngx_q > break; > } > > - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, log, 0, "quic frame %s %s %*s", > - tx ? "tx" : "rx", ngx_quic_level_name(f->level), > + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, log, 0, "quic frame %s %s:%ui %*s", > + tx ? 
"tx" : "rx", ngx_quic_level_name(f->level), f->pnum, > p - buf, buf); > } > > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > --- a/src/event/quic/ngx_event_quic_output.c > +++ b/src/event/quic/ngx_event_quic_output.c > @@ -35,6 +35,15 @@ > #define NGX_QUIC_SOCKET_RETRY_DELAY 10 /* ms, for NGX_AGAIN on write */ > > > +#define ngx_quic_log_packet(log, pkt) \ > + ngx_log_debug6(NGX_LOG_DEBUG_EVENT, log, 0, \ > + "quic packet tx %s bytes:%ui need_ack:%d" \ > + " number:%L encoded nl:%d trunc:0x%xD", \ > + ngx_quic_level_name((pkt)->level), (pkt)->payload.len, \ > + (pkt)->need_ack, (pkt)->number, (pkt)->num_len, \ > + (pkt)->trunc); > + > + > static ngx_int_t ngx_quic_create_datagrams(ngx_connection_t *c); > static void ngx_quic_commit_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx); > static void ngx_quic_revert_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, > @@ -578,6 +587,11 @@ ngx_quic_output_packet(ngx_connection_t > pkt.need_ack = 1; > } > > + f->pnum = ctx->pnum; > + f->first = now; > + f->last = now; > + f->plen = 0; > + > ngx_quic_log_frame(c->log, f, 1); > > flen = ngx_quic_create_frame(p, f); > @@ -588,11 +602,6 @@ ngx_quic_output_packet(ngx_connection_t > len += flen; > p += flen; > > - f->pnum = ctx->pnum; > - f->first = now; > - f->last = now; > - f->plen = 0; > - > nframes++; > } > > @@ -610,11 +619,7 @@ ngx_quic_output_packet(ngx_connection_t > > res.data = data; > > - ngx_log_debug6(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic packet tx %s bytes:%ui" > - " need_ack:%d number:%L encoded nl:%d trunc:0x%xD", > - ngx_quic_level_name(ctx->level), pkt.payload.len, > - pkt.need_ack, pkt.number, pkt.num_len, pkt.trunc); > + ngx_quic_log_packet(c->log, &pkt); > > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > return NGX_ERROR; > @@ -899,13 +904,13 @@ ngx_quic_send_early_cc(ngx_connection_t > frame.u.close.reason.data = (u_char *) reason; > frame.u.close.reason.len = ngx_strlen(reason); > > + 
ngx_quic_log_frame(c->log, &frame, 1); > + > len = ngx_quic_create_frame(NULL, &frame); > if (len > NGX_QUIC_MAX_UDP_PAYLOAD_SIZE) { > return NGX_ERROR; > } > > - ngx_quic_log_frame(c->log, &frame, 1); > - > len = ngx_quic_create_frame(src, &frame); > if (len == -1) { > return NGX_ERROR; > @@ -940,6 +945,8 @@ ngx_quic_send_early_cc(ngx_connection_t > > res.data = dst; > > + ngx_quic_log_packet(c->log, &pkt); > + > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > ngx_quic_keys_cleanup(pkt.keys); > return NGX_ERROR; > @@ -1198,13 +1205,17 @@ ngx_quic_frame_sendto(ngx_connection_t * > pad = 4 - pkt.num_len; > min_payload = ngx_max(min_payload, pad); > > +#if (NGX_DEBUG) > + frame->pnum = pkt.number; > +#endif > + > + ngx_quic_log_frame(c->log, frame, 1); > + > len = ngx_quic_create_frame(NULL, frame); > if (len > NGX_QUIC_MAX_UDP_PAYLOAD_SIZE) { > return NGX_ERROR; > } > > - ngx_quic_log_frame(c->log, frame, 1); > - > len = ngx_quic_create_frame(src, frame); > if (len == -1) { > return NGX_ERROR; > @@ -1220,6 +1231,8 @@ ngx_quic_frame_sendto(ngx_connection_t * > > res.data = dst; > > + ngx_quic_log_packet(c->log, &pkt); > + > if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { > return NGX_ERROR; > } > diff --git a/src/event/quic/ngx_event_quic_transport.c b/src/event/quic/ngx_event_quic_transport.c > --- a/src/event/quic/ngx_event_quic_transport.c > +++ b/src/event/quic/ngx_event_quic_transport.c > @@ -1135,6 +1135,9 @@ ngx_quic_parse_frame(ngx_quic_header_t * > } > > f->level = pkt->level; > +#if (NGX_DEBUG) > + f->pnum = pkt->pn; > +#endif > > return p - start; > Pushed, thanks (with an obvious fix for f->pnum log format specifier, hope that's ok). 
From vl at inspert.ru Fri Oct 27 11:58:43 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Fri, 27 Oct 2023 14:58:43 +0300 Subject: [PATCH 0 of 2] [patch] some issues found by gcc undef sanitizer Message-ID: Hello, Below are two patches, created by results of running nginx-tests with GCC undefined behaviour sanitizer enabled. The first one is about memcpy() with NULL second argument calls, which are considere undefined behaviour by sanitizer. While the actual harm is arguable, having such calls is not a good practice. Most of them are results of passing empty ngx_str_t, either for logging or in some other cases. I've decided to test arguments in ngx_resolver_dup() as it seems that adding checks to the calling code will introduce to much changes. YMMV. In ngx_http_variables_request_body() all buffers are copied to output, which may include special. Probably the check must be ngx_buf_special() ? Other cases are obvious checks that allow to skip copy if there is nothing to do actually. From vl at inspert.ru Fri Oct 27 11:58:45 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Fri, 27 Oct 2023 14:58:45 +0300 Subject: [PATCH 2 of 2] HTTP: suppressed possible overflow in interim r->uri_end calculation In-Reply-To: References: Message-ID: <1b28902de1c648fc2586.1698407925@vlws> If URI is not fully parsed yet, the r->uri_end pointer is NULL. As a result, calculation of "new + (r->uri_end - old)" expression may overflow. In such case, just avoid calculating it, as r->uri_end will be set correctly later by the parser in any case. The issue was found by GCC undefined behaviour sanitizer. src/http/ngx_http_request.c | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... 
Name: nginx-2.patch Type: text/x-patch Size: 1155 bytes Desc: not available URL: From vl at inspert.ru Fri Oct 27 11:58:44 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Fri, 27 Oct 2023 14:58:44 +0300 Subject: [PATCH 1 of 2] Core: avoid calling memcpy() in edge cases In-Reply-To: References: Message-ID: Patch subject is complete summary. src/core/ngx_cycle.c | 10 ++++++---- src/core/ngx_resolver.c | 2 +- src/core/ngx_string.c | 15 +++++++++++++++ src/http/modules/ngx_http_proxy_module.c | 4 ++-- src/http/ngx_http_file_cache.c | 4 +++- src/http/ngx_http_variables.c | 3 +++ src/mail/ngx_mail_auth_http_module.c | 12 +++++++++--- src/stream/ngx_stream_script.c | 4 +++- 8 files changed, 42 insertions(+), 12 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-1.patch Type: text/x-patch Size: 6153 bytes Desc: not available URL: From mdounin at mdounin.ru Fri Oct 27 18:50:57 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 27 Oct 2023 21:50:57 +0300 Subject: [PATCH 2 of 2] HTTP: suppressed possible overflow in interim r->uri_end calculation In-Reply-To: <1b28902de1c648fc2586.1698407925@vlws> References: <1b28902de1c648fc2586.1698407925@vlws> Message-ID: Hello! On Fri, Oct 27, 2023 at 02:58:45PM +0300, Vladimir Homutov via nginx-devel wrote: > If URI is not fully parsed yet, the r->uri_end pointer is NULL. > As a result, calculation of "new + (r->uri_end - old)" expression > may overflow. In such case, just avoid calculating it, as r->uri_end > will be set correctly later by the parser in any case. > > The issue was found by GCC undefined behaviour sanitizer. 
> > > src/http/ngx_http_request.c | 4 +++- > 1 files changed, 3 insertions(+), 1 deletions(-) > > > # HG changeset patch > # User Vladimir Khomutov > # Date 1698407686 -10800 > # Fri Oct 27 14:54:46 2023 +0300 > # Node ID 1b28902de1c648fc2586bba8e05c2ff63e0e33cb > # Parent ef9f124b156aff0e9f66057e438af835bd7a60d2 > HTTP: suppressed possible overflow in interim r->uri_end calculation. > > If URI is not fully parsed yet, the r->uri_end pointer is NULL. > As a result, calculation of "new + (r->uri_end - old)" expression > may overflow. In such case, just avoid calculating it, as r->uri_end > will be set correctly later by the parser in any case. > > The issue was found by GCC undefined behaviour sanitizer. > > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c > --- a/src/http/ngx_http_request.c > +++ b/src/http/ngx_http_request.c > @@ -1721,7 +1721,9 @@ ngx_http_alloc_large_header_buffer(ngx_h > r->method_end = new + (r->method_end - old); > > r->uri_start = new + (r->uri_start - old); > - r->uri_end = new + (r->uri_end - old); > + if (r->uri_end) { > + r->uri_end = new + (r->uri_end - old); > + } > > if (r->schema_start) { > r->schema_start = new + (r->schema_start - old); As already noted off-list, this is certainly not the only field which might be not yet set when ngx_http_alloc_large_header_buffer() is called. From the patch context as shown, at least r->method_end and r->uri_start might not be set as well, leading to similar overflows. And certainly there are other fields as well. While I have no objections to fixing such overflows, which formally might be seen as undefined behaviour (though safe in practice, since calculated values are never used), I very much object to fixing just the particular items which were reported by a particular sanitizer in particular test runs. Rather, sanitizer results should be used to identify patterns we want to fix (if at all), and then all such patterns should be fixed (or not). 
-- Maxim Dounin http://mdounin.ru/ From junseong.kim at dzsi.com Tue Oct 31 06:20:43 2023 From: junseong.kim at dzsi.com (=?utf-8?B?THVjYXMgSnVuU2VvbmcgS2ltIOq5gOykgOyEsQ==?=) Date: Tue, 31 Oct 2023 06:20:43 +0000 Subject: RESTCONF Server-Sent-Event session control in nginx Message-ID: Hi, I have a question. We have the system which the following structure is used: Nginx <----> CGI interpreter <----> yumapro Netconf server Multiple CGI interpreters are created and used through the settings below. spawn-fcgi -s /var/run/fcgiwrap.socket -F 5 -- /var/www/yang-api/restconf Nginx <----> CGI interpreter <----> yumapro Netconf server CGI interpreter CGI interpreter CGI interpreter CGI interpreter A typical restconf set/get operation is used while appropriately load sharing through these five CGI interpreters, and when the set/get operation is completed, we can see that all used sessions are closed. However, once a server-sent-event session is created, one CGI interpreter continues to wait in a blocking state for an event to occur from yumapro, so one CGI interpreter cannot perform other set/get operations. In the above case, once 5 server-sent-event sessions are created, other set/get operations cannot be performed. I wonder if there is a way to avoid this situation (for example, the request to create the 5th server-sent-event session is not allowed by nginx..) Please let me know if you have any idea. Thanks. B.R Bstar Disclaimer The information contained in this communication from the sender is confidential. It is intended solely for use by the recipient and others authorized to receive it. If you are not the recipient, you are hereby notified that any disclosure, copying, distribution or taking action in relation of the contents of this information is strictly prohibited and may be unlawful. This email has been scanned for viruses and malware, and may have been automatically archived by Mimecast, a leader in email security and cyber resilience. 
Mimecast integrates email defenses with brand protection, security awareness training, web security, compliance and other essential capabilities. Mimecast helps protect large and small organizations from malicious activity, human error and technology failure; and to lead the movement toward building a more resilient world. To find out more, visit our website. -------------- next part -------------- An HTML attachment was scrubbed... URL: From winshining at 163.com Tue Oct 31 13:06:03 2023 From: winshining at 163.com (winshining) Date: Tue, 31 Oct 2023 21:06:03 +0800 (CST) Subject: QUIC: improved huffman decode debug tracing. Message-ID: <4d14bfec.6a40.18b85d7527d.Coremail.winshining@163.com> Previously, only HTTP2 used huffman encoding (gRPC is util now HTTP2 based), as HTTP3 becomes available, both of them uses huffman encoding. But existed debug log in huffman decode function is hard coded using "http2" prefixes, if a client transports an incorrect huffman encoded field value in an HTTP3 request, it will give an erroneous log. With the patch, it will properly log a bad field value. Alternatively, removing "http2" prefixes only is ok, but it can not differentiate whether it is caused by an HTTP2 or an HTTP3 request. -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: huff_decode_debug.diff Type: application/octet-stream Size: 5767 bytes Desc: not available URL: From mdounin at mdounin.ru Tue Oct 31 20:40:47 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 31 Oct 2023 23:40:47 +0300 Subject: RESTCONF Server-Sent-Event session control in nginx In-Reply-To: References: Message-ID: Hello! On Tue, Oct 31, 2023 at 06:20:43AM +0000, Lucas JunSeong Kim 김준성 wrote: > Hi, I have a question. 
> > We have the system which the following structure is used: > > Nginx <----> CGI interpreter <----> yumapro Netconf server > > Multiple CGI interpreters are created and used through the settings below. > > spawn-fcgi -s /var/run/fcgiwrap.socket -F 5 -- /var/www/yang-api/restconf > > Nginx <----> CGI interpreter <----> yumapro Netconf server > CGI interpreter > CGI interpreter > CGI interpreter > CGI interpreter [...] This mailing list is dedicated to nginx development. For questions on how to configure nginx, please use the nginx@ mailing list instead, see http://nginx.org/en/support.html for details. Thank you. -- Maxim Dounin http://mdounin.ru/ From junseong.kim at dzsi.com Tue Oct 31 23:55:20 2023 From: junseong.kim at dzsi.com (=?utf-8?B?THVjYXMgSnVuU2VvbmcgS2ltIOq5gOykgOyEsQ==?=) Date: Tue, 31 Oct 2023 23:55:20 +0000 Subject: RESTCONF Server-Sent-Event session control in nginx In-Reply-To: References: Message-ID: Thank you for your info. I re-sent it to nginx@ B.R Bstar From: nginx-devel On Behalf Of Maxim Dounin Sent: Wednesday, November 1, 2023 5:41 AM To: nginx-devel at nginx.org Subject: Re: RESTCONF Server-Sent-Event session control in nginx CAUTION: This email originated from outside of the organization. Do not click links or open attachments unless you recognize the sender and know the content is safe. Hello! On Tue, Oct 31, 2023 at 06:20:43AM +0000, Lucas JunSeong Kim 김준성 wrote: > Hi, I have a question. > > We have the system which the following structure is used: > > Nginx <----> CGI interpreter <----> yumapro Netconf server > > Multiple CGI interpreters are created and used through the settings below. > > spawn-fcgi -s /var/run/fcgiwrap.socket -F 5 -- /var/www/yang-api/restconf > > Nginx <----> CGI interpreter <----> yumapro Netconf server > CGI interpreter > CGI interpreter > CGI interpreter > CGI interpreter [...] This mailing list is dedicated to nginx development. 
For questions on how to configure nginx, please use the nginx@ mailing list instead, see http://nginx.org/en/support.html for details. Thank you. -- Maxim Dounin http://mdounin.ru/ _______________________________________________ nginx-devel mailing list nginx-devel at nginx.org https://mailman.nginx.org/mailman/listinfo/nginx-devel Disclaimer The information contained in this communication from the sender is confidential. It is intended solely for use by the recipient and others authorized to receive it. If you are not the recipient, you are hereby notified that any disclosure, copying, distribution or taking action in relation of the contents of this information is strictly prohibited and may be unlawful. This email has been scanned for viruses and malware, and may have been automatically archived by Mimecast, a leader in email security and cyber resilience. Mimecast integrates email defenses with brand protection, security awareness training, web security, compliance and other essential capabilities. Mimecast helps protect large and small organizations from malicious activity, human error and technology failure; and to lead the movement toward building a more resilient world. To find out more, visit our website. -------------- next part -------------- An HTML attachment was scrubbed... URL: