From noreply at nginx.com Thu Apr 3 04:11:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Thu, 3 Apr 2025 04:11:02 +0000 (UTC) Subject: [njs] QuickJS: fix exception handling during configuration phase. Message-ID: <20250403041102.3695B4839F@pubserv1.nginx> details: https://github.com/nginx/njs/commit/a4d0f9478dc0fb5c0cd5e9003a208ca3fb8ecb15 branches: master commit: a4d0f9478dc0fb5c0cd5e9003a208ca3fb8ecb15 user: Zhidao HONG date: Thu, 3 Apr 2025 11:44:57 +0800 description: QuickJS: fix exception handling during configuration phase. The issue was introduced in 9010aee (not released yet). --- nginx/ngx_js.c | 50 +++++++++++++++++++++----------------------- nginx/ngx_js.h | 2 +- nginx/ngx_stream_js_module.c | 2 +- 3 files changed, 26 insertions(+), 28 deletions(-) diff --git a/nginx/ngx_js.c b/nginx/ngx_js.c index c91a5530..ba0fee92 100644 --- a/nginx/ngx_js.c +++ b/nginx/ngx_js.c @@ -101,7 +101,7 @@ static void ngx_qjs_rejection_tracker(JSContext *ctx, JSValueConst promise, JSValueConst reason, JS_BOOL is_handled, void *opaque); static JSValue ngx_qjs_value(JSContext *cx, const ngx_str_t *path); -static ngx_int_t ngx_qjs_dump_obj(JSContext *cx, JSValueConst val, +static ngx_int_t ngx_qjs_dump_obj(ngx_engine_t *e, JSValueConst val, ngx_str_t *dst); static JSModuleDef *ngx_qjs_core_init(JSContext *cx, const char *name); @@ -849,7 +849,7 @@ ngx_engine_qjs_compile(ngx_js_loc_conf_t *conf, ngx_log_t *log, u_char *start, JS_EVAL_TYPE_MODULE | JS_EVAL_FLAG_COMPILE_ONLY); if (JS_IsException(code)) { - ngx_qjs_exception(cx, &text); + ngx_qjs_exception(engine, &text); ngx_log_error(NGX_LOG_EMERG, log, 0, "js compile %V", &text); return NGX_ERROR; } @@ -972,7 +972,7 @@ ngx_qjs_clone(ngx_js_ctx_t *ctx, ngx_js_loc_conf_t *cf, void *external) rv = JS_ReadObject(cx, pc[i].code, pc[i].code_size, JS_READ_OBJ_BYTECODE); if (JS_IsException(rv)) { - ngx_qjs_exception(cx, &exception); + ngx_qjs_exception(engine, &exception); ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "js load module exception: %V", &exception); @@ -988,7 +988,7 @@ ngx_qjs_clone(ngx_js_ctx_t *ctx, ngx_js_loc_conf_t *cf, void *external) rv = JS_EvalFunction(cx, rv); if (JS_IsException(rv)) { - ngx_qjs_exception(cx, &exception); + ngx_qjs_exception(engine, &exception); ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "js eval exception: %V", &exception); @@ -997,7 +997,7 @@ ngx_qjs_clone(ngx_js_ctx_t *ctx, ngx_js_loc_conf_t *cf, void *external) rv = js_std_await(cx, rv); if (JS_IsException(rv)) { - ngx_qjs_exception(cx, &exception); + ngx_qjs_exception(engine, &exception); ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "js eval exception: %V", &exception); @@ -1042,7 +1042,7 @@ ngx_engine_qjs_call(ngx_js_ctx_t *ctx, ngx_str_t *fname, val = JS_Call(cx, fn, JS_UNDEFINED, nargs, &ngx_qjs_arg(args[0])); JS_FreeValue(cx, fn); if (JS_IsException(val)) { - ngx_qjs_exception(cx, &exception); + ngx_qjs_exception(ctx->engine, &exception); ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "js call exception: %V", &exception); @@ -1059,7 +1059,7 @@ ngx_engine_qjs_call(ngx_js_ctx_t *ctx, ngx_str_t *fname, rc = JS_ExecutePendingJob(rt, &cx1); if (rc <= 0) { if (rc == -1) { - ngx_qjs_exception(cx, &exception); + ngx_qjs_exception(ctx->engine, &exception); ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "js job exception: %V", &exception); @@ -1093,7 +1093,7 @@ static ngx_int_t ngx_engine_qjs_string(ngx_engine_t *e, njs_opaque_value_t *value, ngx_str_t *str) { - return ngx_qjs_dump_obj(e->u.qjs.ctx, ngx_qjs_arg(*value), str); + return ngx_qjs_dump_obj(e, ngx_qjs_arg(*value), str); } @@ 
-1150,7 +1150,7 @@ ngx_engine_qjs_destroy(ngx_engine_t *e, ngx_js_ctx_t *ctx, } if (ngx_qjs_unhandled_rejection(ctx)) { - ngx_qjs_exception(cx, &exception); + ngx_qjs_exception(e, &exception); ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "js unhandled rejection: %V", &exception); } @@ -1255,17 +1255,13 @@ ngx_qjs_value(JSContext *cx, const ngx_str_t *path) static ngx_int_t -ngx_qjs_dump_obj(JSContext *cx, JSValueConst val, ngx_str_t *dst) +ngx_qjs_dump_obj(ngx_engine_t *e, JSValueConst val, ngx_str_t *dst) { - size_t len, byte_offset, byte_length; - u_char *start, *p; - JSValue buffer, stack; - ngx_str_t str, stack_str; - ngx_js_ctx_t *ctx; - ngx_engine_t *e; - - ctx = ngx_qjs_external_ctx(cx, JS_GetContextOpaque(cx)); - e = ctx->engine; + size_t len, byte_offset, byte_length; + u_char *start, *p; + JSValue buffer, stack; + ngx_str_t str, stack_str; + JSContext *cx; if (JS_IsNullOrUndefined(val)) { dst->data = NULL; @@ -1367,7 +1363,7 @@ ngx_qjs_call(JSContext *cx, JSValue fn, JSValue *argv, int argc) ret = JS_Call(cx, fn, JS_UNDEFINED, argc, argv); if (JS_IsException(ret)) { - ngx_qjs_exception(cx, &exception); + ngx_qjs_exception(ctx->engine, &exception); ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "js call exception: %V", &exception); @@ -1383,7 +1379,7 @@ ngx_qjs_call(JSContext *cx, JSValue fn, JSValue *argv, int argc) rc = JS_ExecutePendingJob(rt, &cx1); if (rc <= 0) { if (rc == -1) { - ngx_qjs_exception(cx, &exception); + ngx_qjs_exception(ctx->engine, &exception); ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "js job exception: %V", &exception); @@ -1400,16 +1396,16 @@ ngx_qjs_call(JSContext *cx, JSValue fn, JSValue *argv, int argc) ngx_int_t -ngx_qjs_exception(JSContext *cx, ngx_str_t *s) +ngx_qjs_exception(ngx_engine_t *e, ngx_str_t *s) { JSValue exception; - exception = JS_GetException(cx); - if (ngx_qjs_dump_obj(cx, exception, s) != NGX_OK) { + exception = JS_GetException(e->u.qjs.ctx); + if (ngx_qjs_dump_obj(e, exception, s) != NGX_OK) { return NGX_ERROR; } - JS_FreeValue(cx, exception); + JS_FreeValue(e->u.qjs.ctx, exception); return NGX_OK; } @@ -1746,6 +1742,7 @@ ngx_qjs_ext_log(JSContext *cx, JSValueConst this_val, int argc, char *p; uint32_t level; ngx_str_t msg; + ngx_js_ctx_t *ctx; ngx_connection_t *c; p = JS_GetContextOpaque(cx); @@ -1764,10 +1761,11 @@ ngx_qjs_ext_log(JSContext *cx, JSValueConst this_val, int argc, argv++; } + ctx = ngx_qjs_external_ctx(cx, p); c = ngx_qjs_external_connection(cx, p); for ( ; argc > 0; argc--, argv++) { - if (ngx_qjs_dump_obj(cx, argv[0], &msg) != NGX_OK) { + if (ngx_qjs_dump_obj(ctx->engine, argv[0], &msg) != NGX_OK) { return JS_EXCEPTION; } diff --git a/nginx/ngx_js.h b/nginx/ngx_js.h index ba4cbdf9..51b8916c 100644 --- a/nginx/ngx_js.h +++ b/nginx/ngx_js.h @@ -342,7 +342,7 @@ void ngx_engine_qjs_destroy(ngx_engine_t *e, ngx_js_ctx_t *ctx, ngx_js_loc_conf_t *conf); ngx_int_t ngx_qjs_call(JSContext *cx, JSValue function, JSValue *argv, int argc); -ngx_int_t ngx_qjs_exception(JSContext *cx, ngx_str_t *s); +ngx_int_t ngx_qjs_exception(ngx_engine_t *e, ngx_str_t *s); ngx_int_t ngx_qjs_integer(JSContext *cx, JSValueConst val, ngx_int_t *n); ngx_int_t ngx_qjs_string(JSContext *cx, JSValueConst val, ngx_str_t *str); diff --git a/nginx/ngx_stream_js_module.c b/nginx/ngx_stream_js_module.c index a7dddd04..b1e175d2 100644 --- a/nginx/ngx_stream_js_module.c +++ b/nginx/ngx_stream_js_module.c @@ -2672,7 +2672,7 @@ ngx_stream_qjs_run_event(ngx_stream_session_t *s, ngx_stream_js_ctx_t *ctx, if (rc == NGX_ERROR) { error: - ngx_qjs_exception(cx, &exception); + 
ngx_qjs_exception(ctx->engine, &exception); ngx_log_error(NGX_LOG_ERR, c->log, 0, "js exception: %V", &exception); From noreply at nginx.com Fri Apr 4 05:21:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 4 Apr 2025 05:21:02 +0000 (UTC) Subject: [njs] QuickJS: added xml to nginx modules. Message-ID: <20250404052102.60C6E4788F@pubserv1.nginx> details: https://github.com/nginx/njs/commit/f678c9048b734a6cfff36e6de9ecfe11bed266b4 branches: master commit: f678c9048b734a6cfff36e6de9ecfe11bed266b4 user: Dmitry Volyntsev date: Wed, 2 Apr 2025 17:47:10 -0700 description: QuickJS: added xml to nginx modules. Missed in cec9a1650. --- nginx/config | 4 ++++ nginx/ngx_http_js_module.c | 3 +++ nginx/ngx_js.h | 1 + nginx/ngx_stream_js_module.c | 3 +++ 4 files changed, 11 insertions(+) diff --git a/nginx/config b/nginx/config index 2edf7a3d..03ec03d0 100644 --- a/nginx/config +++ b/nginx/config @@ -129,6 +129,10 @@ if [ $NJS_LIBXSLT != NO ]; then have=NJS_HAVE_XML . auto/have NJS_SRCS="$NJS_SRCS $ngx_addon_dir/../external/njs_xml_module.c" + if [ "$NJS_HAVE_QUICKJS" = "YES" ]; then + NJS_SRCS="$NJS_SRCS $ngx_addon_dir/../external/qjs_xml_module.c" + fi + echo " enabled xml module" fi diff --git a/nginx/ngx_http_js_module.c b/nginx/ngx_http_js_module.c index 0c8215c5..5a131bc9 100644 --- a/nginx/ngx_http_js_module.c +++ b/nginx/ngx_http_js_module.c @@ -1140,6 +1140,9 @@ qjs_module_t *njs_http_qjs_addon_modules[] = { #ifdef NJS_HAVE_OPENSSL &qjs_webcrypto_module, #endif +#ifdef NJS_HAVE_XML + &qjs_xml_module, +#endif #ifdef NJS_HAVE_ZLIB &qjs_zlib_module, #endif diff --git a/nginx/ngx_js.h b/nginx/ngx_js.h index 51b8916c..0a99a696 100644 --- a/nginx/ngx_js.h +++ b/nginx/ngx_js.h @@ -378,6 +378,7 @@ ngx_int_t ngx_qjs_string(JSContext *cx, JSValueConst val, ngx_str_t *str); ((ngx_js_external_ctx_pt) ngx_qjs_meta(cx, 11))(e) extern qjs_module_t qjs_webcrypto_module; +extern qjs_module_t qjs_xml_module; extern qjs_module_t qjs_zlib_module; extern qjs_module_t ngx_qjs_ngx_module; extern qjs_module_t ngx_qjs_ngx_shared_dict_module; diff --git a/nginx/ngx_stream_js_module.c b/nginx/ngx_stream_js_module.c index b1e175d2..44c7af17 100644 --- a/nginx/ngx_stream_js_module.c +++ b/nginx/ngx_stream_js_module.c @@ -841,6 +841,9 @@ qjs_module_t *njs_stream_qjs_addon_modules[] = { #ifdef NJS_HAVE_OPENSSL &qjs_webcrypto_module, #endif +#ifdef NJS_HAVE_XML + &qjs_xml_module, +#endif #ifdef NJS_HAVE_ZLIB &qjs_zlib_module, #endif From noreply at nginx.com Tue Apr 8 05:15:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 8 Apr 2025 05:15:02 +0000 (UTC) Subject: [njs] QuickJS: ignoring rejected promises while destroying context. Message-ID: <20250408051502.5227A478F2@pubserv1.nginx> details: https://github.com/nginx/njs/commit/113fac87c8a123d0627e82cfe3771334baa11edd branches: master commit: 113fac87c8a123d0627e82cfe3771334baa11edd user: Dmitry Volyntsev date: Mon, 7 Apr 2025 18:28:50 -0700 description: QuickJS: ignoring rejected promises while destroying context. 
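For illustration, a minimal standalone sketch (not nginx code, and not part of the patch below) of the QuickJS pattern this change relies on: a host promise rejection tracker is installed for normal operation and detached with NULL right before the context is destroyed, so rejections raised during teardown are ignored. It assumes only the stock quickjs.h API on the include path.

    #include <stdio.h>
    #include <string.h>
    #include <quickjs.h>

    static void
    rejection_tracker(JSContext *ctx, JSValueConst promise, JSValueConst reason,
        JS_BOOL is_handled, void *opaque)
    {
        if (!is_handled) {
            fprintf(stderr, "unhandled promise rejection\n");
        }
    }

    int
    main(void)
    {
        JSValue      v;
        JSRuntime   *rt = JS_NewRuntime();
        JSContext   *cx = JS_NewContext(rt);
        JSContext   *cx1;
        const char  *src = "Promise.reject(new Error('boom'))";

        /* normal operation: report unhandled rejections */
        JS_SetHostPromiseRejectionTracker(rt, rejection_tracker, NULL);

        v = JS_Eval(cx, src, strlen(src), "<input>", JS_EVAL_TYPE_GLOBAL);
        JS_FreeValue(cx, v);

        /* drain pending microtasks */
        while (JS_ExecutePendingJob(rt, &cx1) > 0) { /* void */ }

        /* teardown: detach the tracker so rejections triggered while
           freeing the context are silently ignored */
        JS_SetHostPromiseRejectionTracker(rt, NULL, NULL);

        JS_FreeContext(cx);
        JS_FreeRuntime(rt);

        return 0;
    }
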
--- nginx/ngx_js.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nginx/ngx_js.c b/nginx/ngx_js.c index ba0fee92..316a2076 100644 --- a/nginx/ngx_js.c +++ b/nginx/ngx_js.c @@ -1155,6 +1155,8 @@ ngx_engine_qjs_destroy(ngx_engine_t *e, ngx_js_ctx_t *ctx, "js unhandled rejection: %V", &exception); } + JS_SetHostPromiseRejectionTracker(JS_GetRuntime(cx), NULL, NULL); + class_id = JS_GetClassID(ngx_qjs_arg(ctx->args[0])); opaque = JS_GetOpaque(ngx_qjs_arg(ctx->args[0]), class_id); opaque->external = NULL; From noreply at nginx.com Tue Apr 8 05:15:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 8 Apr 2025 05:15:02 +0000 (UTC) Subject: [njs] Version 0.8.10. Message-ID: <20250408051502.55E1148380@pubserv1.nginx> details: https://github.com/nginx/njs/commit/9d3e71ca656b920e3e63b0e647aca8e91669d29a branches: master commit: 9d3e71ca656b920e3e63b0e647aca8e91669d29a user: Dmitry Volyntsev date: Mon, 7 Apr 2025 18:53:52 -0700 description: Version 0.8.10. --- CHANGES | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/CHANGES b/CHANGES index c67eaf8b..87392f4c 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,52 @@ +Changes with njs 0.8.10 08 Apr 2025 + + nginx modules: + + *) Feature: reading r.requestText or r.requestBuffer from + a temp file. + Previously, an exception was thrown when accessing r.requestText + or r.requestBuffer if a client request body size exceeded + client_body_buffer_size. + + *) Improvement: improved reporting of unhandled promise rejections. + + *) Bugfix: fixed name corruption in variable and header processing. + + *) Bugfix: fixed SharedDict.incr() with empty init argument + for QuickJS engine. + + *) Bugfix: accepting response headers with underscore characters + in Fetch API. + + Core: + + *) Change: fixed serializeToString(). + Previously, serializeToString() was exclusiveC14n() which returned + string instead of Buffer. According to the published documentation it + should be c14n(). + + *) Feature: added WebCrypto API for QuickJS engine. + + *) Feature: added TextEncoder/TextDecoder for QuickJS engine. + + *) Feature: added querystring module for QuickJS engine. + + *) Feature: added crypto module for QuickJS engine. + + *) Feature: added xml module for QuickJS engine. + + *) Feature: added support for QuickJS-NG library. + + *) Bugfix: fixed buffer.concat() with a single argument in quickjs. + + *) Bugfix: added missed syntax error for await in template literal. + + *) Bugfix: fixed non-NULL terminated strings formatting in + exceptions for QuickJS engine. + + *) Bugfix: fixed compatibility with recent change in QuickJS + and QuickJS-NG. + Changes with njs 0.8.9 14 Jan 2025 nginx modules: From noreply at nginx.com Tue Apr 8 05:16:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 8 Apr 2025 05:16:02 +0000 (UTC) Subject: [njs] Lightweight tag created: 0.8.10 Message-ID: <20250408051602.196A148380@pubserv1.nginx> details: https://github.com/nginx/njs/releases/tag/0.8.10 branches: commit: 9d3e71ca656b920e3e63b0e647aca8e91669d29a user: Dmitry Volyntsev date: Mon Apr 7 18:53:52 2025 -0700 description: Version 0.8.10. From noreply at nginx.com Wed Apr 9 15:38:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 9 Apr 2025 15:38:02 +0000 (UTC) Subject: [nginx] Charset filter: improved validation of charset_map with utf-8. 
Message-ID: <20250409153802.A8DD1477D2@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/a813c639211728a1441945dee149b44a0935f48b branches: master commit: a813c639211728a1441945dee149b44a0935f48b user: Sergey Kandaurov date: Thu, 27 Feb 2025 18:42:06 +0400 description: Charset filter: improved validation of charset_map with utf-8. It was possible to write outside of the buffer used to keep UTF-8 decoded values when parsing conversion table configuration. Since this happened before UTF-8 decoding, the fix is to check in advance if character codes are of more than 3-byte sequence. Note that this is already enforced by a later check for ngx_utf8_decode() decoded values for 0xffff, which corresponds to the maximum value encoded as a valid 3-byte sequence, so the fix does not affect the valid values. Found with AddressSanitizer. Fixes GitHub issue #529. --- src/http/modules/ngx_http_charset_filter_module.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/http/modules/ngx_http_charset_filter_module.c b/src/http/modules/ngx_http_charset_filter_module.c index e52b96e9b..d44da6233 100644 --- a/src/http/modules/ngx_http_charset_filter_module.c +++ b/src/http/modules/ngx_http_charset_filter_module.c @@ -1332,6 +1332,12 @@ ngx_http_charset_map(ngx_conf_t *cf, ngx_command_t *dummy, void *conf) table = ctx->table; if (ctx->charset->utf8) { + if (value[1].len / 2 > NGX_UTF_LEN - 1) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid value \"%V\"", &value[1]); + return NGX_CONF_ERROR; + } + p = &table->src2dst[src * NGX_UTF_LEN]; *p++ = (u_char) (value[1].len / 2); From noreply at nginx.com Thu Apr 10 01:55:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Thu, 10 Apr 2025 01:55:02 +0000 (UTC) Subject: [njs] Version bump. Message-ID: <20250410015502.CC9D9489DB@pubserv1.nginx> details: https://github.com/nginx/njs/commit/b5d64ed6eeb0323917f83459fae80c59965a8725 branches: master commit: b5d64ed6eeb0323917f83459fae80c59965a8725 user: Dmitry Volyntsev date: Wed, 9 Apr 2025 17:18:53 -0700 description: Version bump. --- src/njs.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/njs.h b/src/njs.h index c7e67aca..0e2855d1 100644 --- a/src/njs.h +++ b/src/njs.h @@ -11,8 +11,8 @@ #include -#define NJS_VERSION "0.8.10" -#define NJS_VERSION_NUMBER 0x00080a +#define NJS_VERSION "0.9.0" +#define NJS_VERSION_NUMBER 0x000900 #include From noreply at nginx.com Thu Apr 10 01:55:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Thu, 10 Apr 2025 01:55:02 +0000 (UTC) Subject: [njs] Fixed typo in stream event handler debug message. Message-ID: <20250410015502.D10A1489E2@pubserv1.nginx> details: https://github.com/nginx/njs/commit/0c9c847d3350c5cefbdc8bbc28918768271e4a7d branches: master commit: 0c9c847d3350c5cefbdc8bbc28918768271e4a7d user: Dmitry Volyntsev date: Wed, 9 Apr 2025 17:19:37 -0700 description: Fixed typo in stream event handler debug message. 
--- nginx/ngx_stream_js_module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nginx/ngx_stream_js_module.c b/nginx/ngx_stream_js_module.c index 44c7af17..7620f2e0 100644 --- a/nginx/ngx_stream_js_module.c +++ b/nginx/ngx_stream_js_module.c @@ -1914,7 +1914,7 @@ static void ngx_stream_js_event_finalize(ngx_stream_session_t *s, ngx_int_t rc) { ngx_log_debug1(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, - "http js event finalize rc: %i", rc); + "stream js event finalize rc: %i", rc); if (rc == NGX_ERROR) { if (s->health_check) { From noreply at nginx.com Thu Apr 10 02:16:01 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Thu, 10 Apr 2025 02:16:01 +0000 (UTC) Subject: [njs] Fetch: remove unused parameter in ngx_js_http_error(). Message-ID: <20250410021601.E330E489E2@pubserv1.nginx> details: https://github.com/nginx/njs/commit/4f96013f83ec3e84314d2dfcf2f81d47897fa6e4 branches: master commit: 4f96013f83ec3e84314d2dfcf2f81d47897fa6e4 user: Zhidao HONG date: Tue, 8 Apr 2025 23:00:42 +0800 description: Fetch: remove unused parameter in ngx_js_http_error(). No functional changes. --- nginx/ngx_js_fetch.c | 58 ++++++++++++++++++++++++++-------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/nginx/ngx_js_fetch.c b/nginx/ngx_js_fetch.c index cd6e54f6..a803ab4c 100644 --- a/nginx/ngx_js_fetch.c +++ b/nginx/ngx_js_fetch.c @@ -158,7 +158,7 @@ struct ngx_js_http_s { -#define ngx_js_http_error(http, err, fmt, ...) \ +#define ngx_js_http_error(http, fmt, ...) \ do { \ njs_vm_error((http)->vm, fmt, ##__VA_ARGS__); \ njs_vm_exception_get((http)->vm, \ @@ -1343,7 +1343,7 @@ ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx) http = ctx->data; if (ctx->state) { - ngx_js_http_error(http, 0, "\"%V\" could not be resolved (%i: %s)", + ngx_js_http_error(http, "\"%V\" could not be resolved (%i: %s)", &ctx->name, ctx->state, ngx_resolver_strerror(ctx->state)); return; @@ -1410,7 +1410,7 @@ ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx) failed: - ngx_js_http_error(http, 0, "memory error"); + ngx_js_http_error(http, "memory error"); } @@ -1577,7 +1577,7 @@ ngx_js_http_connect(ngx_js_http_t *http) rc = ngx_event_connect_peer(&http->peer); if (rc == NGX_ERROR) { - ngx_js_http_error(http, 0, "connect failed"); + ngx_js_http_error(http, "connect failed"); return; } @@ -1627,14 +1627,14 @@ ngx_js_http_ssl_init_connection(ngx_js_http_t *http) if (ngx_ssl_create_connection(http->ssl, c, NGX_SSL_BUFFER|NGX_SSL_CLIENT) != NGX_OK) { - ngx_js_http_error(http, 0, "failed to create ssl connection"); + ngx_js_http_error(http, "failed to create ssl connection"); return; } c->sendfile = 0; if (ngx_js_http_ssl_name(http) != NGX_OK) { - ngx_js_http_error(http, 0, "failed to create ssl connection"); + ngx_js_http_error(http, "failed to create ssl connection"); return; } @@ -1770,7 +1770,7 @@ ngx_js_http_next(ngx_js_http_t *http) ngx_log_debug0(NGX_LOG_DEBUG_EVENT, http->log, 0, "js fetch next addr"); if (++http->naddr >= http->naddrs) { - ngx_js_http_error(http, 0, "connect failed"); + ngx_js_http_error(http, "connect failed"); return; } @@ -1799,7 +1799,7 @@ ngx_js_http_write_handler(ngx_event_t *wev) ngx_log_debug0(NGX_LOG_DEBUG_EVENT, wev->log, 0, "js fetch write handler"); if (wev->timedout) { - ngx_js_http_error(http, NGX_ETIMEDOUT, "write timed out"); + ngx_js_http_error(http, "write timed out"); return; } @@ -1815,13 +1815,13 @@ ngx_js_http_write_handler(ngx_event_t *wev) if (b == NULL) { size = njs_chb_size(&http->chain); if (size < 0) { - ngx_js_http_error(http, 0, 
"memory error"); + ngx_js_http_error(http, "memory error"); return; } b = ngx_create_temp_buf(http->pool, size); if (b == NULL) { - ngx_js_http_error(http, 0, "memory error"); + ngx_js_http_error(http, "memory error"); return; } @@ -1853,7 +1853,7 @@ ngx_js_http_write_handler(ngx_event_t *wev) } if (ngx_handle_write_event(wev, 0) != NGX_OK) { - ngx_js_http_error(http, 0, "write failed"); + ngx_js_http_error(http, "write failed"); } return; @@ -1881,14 +1881,14 @@ ngx_js_http_read_handler(ngx_event_t *rev) ngx_log_debug0(NGX_LOG_DEBUG_EVENT, rev->log, 0, "js fetch read handler"); if (rev->timedout) { - ngx_js_http_error(http, NGX_ETIMEDOUT, "read timed out"); + ngx_js_http_error(http, "read timed out"); return; } if (http->buffer == NULL) { b = ngx_create_temp_buf(http->pool, http->buffer_size); if (b == NULL) { - ngx_js_http_error(http, 0, "memory error"); + ngx_js_http_error(http, "memory error"); return; } @@ -1915,7 +1915,7 @@ ngx_js_http_read_handler(ngx_event_t *rev) if (n == NGX_AGAIN) { if (ngx_handle_read_event(rev, 0) != NGX_OK) { - ngx_js_http_error(http, 0, "read failed"); + ngx_js_http_error(http, "read failed"); } return; @@ -1939,7 +1939,7 @@ ngx_js_http_read_handler(ngx_event_t *rev) } if (rc == NGX_AGAIN) { - ngx_js_http_error(http, 0, "prematurely closed connection"); + ngx_js_http_error(http, "prematurely closed connection"); } } @@ -2374,7 +2374,7 @@ ngx_js_http_process_status_line(ngx_js_http_t *http) /* rc == NGX_ERROR */ - ngx_js_http_error(http, 0, "invalid fetch status line"); + ngx_js_http_error(http, "invalid fetch status line"); return NGX_ERROR; } @@ -2397,7 +2397,7 @@ ngx_js_http_process_headers(ngx_js_http_t *http) rc = ngx_list_init(&http->response.headers.header_list, http->pool, 4, sizeof(ngx_js_tb_elt_t)); if (rc != NGX_OK) { - ngx_js_http_error(http, 0, "alloc failed"); + ngx_js_http_error(http, "alloc failed"); return NGX_ERROR; } } @@ -2414,7 +2414,7 @@ ngx_js_http_process_headers(ngx_js_http_t *http) hp->header_start, vlen); if (ret == NJS_ERROR) { - ngx_js_http_error(http, 0, "cannot add respose header"); + ngx_js_http_error(http, "cannot add respose header"); return NGX_ERROR; } @@ -2438,7 +2438,7 @@ ngx_js_http_process_headers(ngx_js_http_t *http) { hp->content_length_n = ngx_atoof(hp->header_start, vlen); if (hp->content_length_n == NGX_ERROR) { - ngx_js_http_error(http, 0, "invalid fetch content length"); + ngx_js_http_error(http, "invalid fetch content length"); return NGX_ERROR; } @@ -2446,7 +2446,7 @@ ngx_js_http_process_headers(ngx_js_http_t *http) && hp->content_length_n > (off_t) http->max_response_body_size) { - ngx_js_http_error(http, 0, + ngx_js_http_error(http, "fetch content length is too large"); return NGX_ERROR; } @@ -2466,7 +2466,7 @@ ngx_js_http_process_headers(ngx_js_http_t *http) /* rc == NGX_ERROR */ - ngx_js_http_error(http, 0, "invalid fetch header"); + ngx_js_http_error(http, "invalid fetch header"); return NGX_ERROR; } @@ -2495,7 +2495,7 @@ ngx_js_http_process_body(ngx_js_http_t *http) if (http->done) { size = njs_chb_size(&http->response.chain); if (size < 0) { - ngx_js_http_error(http, 0, "memory error"); + ngx_js_http_error(http, "memory error"); return NGX_ERROR; } @@ -2503,7 +2503,7 @@ ngx_js_http_process_body(ngx_js_http_t *http) && http->http_parse.chunked && http->http_parse.content_length_n == -1) { - ngx_js_http_error(http, 0, "invalid fetch chunked response"); + ngx_js_http_error(http, "invalid fetch chunked response"); return NGX_ERROR; } @@ -2516,7 +2516,7 @@ ngx_js_http_process_body(ngx_js_http_t *http) 
ngx_http_js_fetch_response_proto_id, &http->response, 0); if (ret != NJS_OK) { - ngx_js_http_error(http, 0, "fetch response creation failed"); + ngx_js_http_error(http, "fetch response creation failed"); return NGX_ERROR; } @@ -2528,7 +2528,7 @@ ngx_js_http_process_body(ngx_js_http_t *http) return NGX_AGAIN; } - ngx_js_http_error(http, 0, "fetch trailing data"); + ngx_js_http_error(http, "fetch trailing data"); return NGX_ERROR; } @@ -2538,7 +2538,7 @@ ngx_js_http_process_body(ngx_js_http_t *http) rc = ngx_js_http_parse_chunked(&http->http_chunk_parse, b, &http->response.chain); if (rc == NGX_ERROR) { - ngx_js_http_error(http, 0, "invalid fetch chunked response"); + ngx_js_http_error(http, "invalid fetch chunked response"); return NGX_ERROR; } @@ -2549,7 +2549,7 @@ ngx_js_http_process_body(ngx_js_http_t *http) } if (size > http->max_response_body_size * 10) { - ngx_js_http_error(http, 0, "very large fetch chunked response"); + ngx_js_http_error(http, "very large fetch chunked response"); return NGX_ERROR; } @@ -2571,7 +2571,7 @@ ngx_js_http_process_body(ngx_js_http_t *http) chsize = ngx_min(need, b->last - b->pos); if (size + chsize > http->max_response_body_size) { - ngx_js_http_error(http, 0, "fetch response body is too large"); + ngx_js_http_error(http, "fetch response body is too large"); return NGX_ERROR; } @@ -2587,7 +2587,7 @@ ngx_js_http_process_body(ngx_js_http_t *http) if (http->chunk == NULL) { b = ngx_create_temp_buf(http->pool, http->buffer_size); if (b == NULL) { - ngx_js_http_error(http, 0, "memory error"); + ngx_js_http_error(http, "memory error"); return NGX_ERROR; } From noreply at nginx.com Thu Apr 10 13:28:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Thu, 10 Apr 2025 13:28:02 +0000 (UTC) Subject: [nginx] Upstream: fixed passwords support for dynamic certificates. Message-ID: <20250410132802.926D8489C7@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/6c3a9d561271ec451f479a84fbe54c81a63dad2e branches: master commit: 6c3a9d561271ec451f479a84fbe54c81a63dad2e user: Sergey Kandaurov date: Wed, 5 Feb 2025 19:16:05 +0400 description: Upstream: fixed passwords support for dynamic certificates. Passwords were not preserved in optimized SSL contexts, the bug had appeared in d791b4aab (1.23.1), as in the following configuration: server { proxy_ssl_password_file password; proxy_ssl_certificate $ssl_server_name.crt; proxy_ssl_certificate_key $ssl_server_name.key; location /original/ { proxy_pass https://u1/; } location /optimized/ { proxy_pass https://u2/; } } The fix is to always preserve passwords, by copying to the configuration pool, if dynamic certificates are used. This is done as part of merging "ssl_passwords" configuration. To minimize the number of copies, a preserved version is then used for inheritance. A notable exception is inheritance of preserved empty passwords to the context with statically configured certificates: server { proxy_ssl_certificate $ssl_server_name.crt; proxy_ssl_certificate_key $ssl_server_name.key; location / { proxy_pass ...; proxy_ssl_certificate example.com.crt; proxy_ssl_certificate_key example.com.key; } } In this case, an unmodified version (NULL) of empty passwords is set, to allow reading them from the password prompt on nginx startup. 
As an additional optimization, a preserved instance of inherited configured passwords is set to the previous level, to inherit it to other contexts: server { proxy_ssl_password_file password; location /1/ { proxy_pass https://u1/; proxy_ssl_certificate $ssl_server_name.crt; proxy_ssl_certificate_key $ssl_server_name.key; } location /2/ { proxy_pass https://u2/; proxy_ssl_certificate $ssl_server_name.crt; proxy_ssl_certificate_key $ssl_server_name.key; } } --- src/http/modules/ngx_http_grpc_module.c | 20 +++++----- src/http/modules/ngx_http_proxy_module.c | 20 +++++----- src/http/modules/ngx_http_uwsgi_module.c | 20 +++++----- src/http/ngx_http_upstream.c | 55 ++++++++++++++++++++++++++ src/http/ngx_http_upstream.h | 4 ++ src/stream/ngx_stream_proxy_module.c | 68 +++++++++++++++++++++++++++----- 6 files changed, 144 insertions(+), 43 deletions(-) diff --git a/src/http/modules/ngx_http_grpc_module.c b/src/http/modules/ngx_http_grpc_module.c index 8e246c3cf..80046d6a4 100644 --- a/src/http/modules/ngx_http_grpc_module.c +++ b/src/http/modules/ngx_http_grpc_module.c @@ -4509,8 +4509,13 @@ ngx_http_grpc_merge_loc_conf(ngx_conf_t *cf, void *parent, void *child) prev->upstream.ssl_certificate_key, NULL); ngx_conf_merge_ptr_value(conf->upstream.ssl_certificate_cache, prev->upstream.ssl_certificate_cache, NULL); - ngx_conf_merge_ptr_value(conf->upstream.ssl_passwords, - prev->upstream.ssl_passwords, NULL); + + if (ngx_http_upstream_merge_ssl_passwords(cf, &conf->upstream, + &prev->upstream) + != NGX_OK) + { + return NGX_CONF_ERROR; + } ngx_conf_merge_ptr_value(conf->ssl_conf_commands, prev->ssl_conf_commands, NULL); @@ -5077,16 +5082,9 @@ ngx_http_grpc_set_ssl(ngx_conf_t *cf, ngx_http_grpc_loc_conf_t *glcf) return NGX_ERROR; } - if (glcf->upstream.ssl_certificate->lengths - || glcf->upstream.ssl_certificate_key->lengths) + if (glcf->upstream.ssl_certificate->lengths == NULL + && glcf->upstream.ssl_certificate_key->lengths == NULL) { - glcf->upstream.ssl_passwords = - ngx_ssl_preserve_passwords(cf, glcf->upstream.ssl_passwords); - if (glcf->upstream.ssl_passwords == NULL) { - return NGX_ERROR; - } - - } else { if (ngx_ssl_certificate(cf, glcf->upstream.ssl, &glcf->upstream.ssl_certificate->value, &glcf->upstream.ssl_certificate_key->value, diff --git a/src/http/modules/ngx_http_proxy_module.c b/src/http/modules/ngx_http_proxy_module.c index 27c34fef2..d4c5abf62 100644 --- a/src/http/modules/ngx_http_proxy_module.c +++ b/src/http/modules/ngx_http_proxy_module.c @@ -3976,8 +3976,13 @@ ngx_http_proxy_merge_loc_conf(ngx_conf_t *cf, void *parent, void *child) prev->upstream.ssl_certificate_key, NULL); ngx_conf_merge_ptr_value(conf->upstream.ssl_certificate_cache, prev->upstream.ssl_certificate_cache, NULL); - ngx_conf_merge_ptr_value(conf->upstream.ssl_passwords, - prev->upstream.ssl_passwords, NULL); + + if (ngx_http_upstream_merge_ssl_passwords(cf, &conf->upstream, + &prev->upstream) + != NGX_OK) + { + return NGX_CONF_ERROR; + } ngx_conf_merge_ptr_value(conf->ssl_conf_commands, prev->ssl_conf_commands, NULL); @@ -5337,16 +5342,9 @@ ngx_http_proxy_set_ssl(ngx_conf_t *cf, ngx_http_proxy_loc_conf_t *plcf) return NGX_ERROR; } - if (plcf->upstream.ssl_certificate->lengths - || plcf->upstream.ssl_certificate_key->lengths) + if (plcf->upstream.ssl_certificate->lengths == NULL + && plcf->upstream.ssl_certificate_key->lengths == NULL) { - plcf->upstream.ssl_passwords = - ngx_ssl_preserve_passwords(cf, plcf->upstream.ssl_passwords); - if (plcf->upstream.ssl_passwords == NULL) { - return NGX_ERROR; - } - - } else 
{ if (ngx_ssl_certificate(cf, plcf->upstream.ssl, &plcf->upstream.ssl_certificate->value, &plcf->upstream.ssl_certificate_key->value, diff --git a/src/http/modules/ngx_http_uwsgi_module.c b/src/http/modules/ngx_http_uwsgi_module.c index 14aae5bf1..51a861d9a 100644 --- a/src/http/modules/ngx_http_uwsgi_module.c +++ b/src/http/modules/ngx_http_uwsgi_module.c @@ -1933,8 +1933,13 @@ ngx_http_uwsgi_merge_loc_conf(ngx_conf_t *cf, void *parent, void *child) prev->upstream.ssl_certificate_key, NULL); ngx_conf_merge_ptr_value(conf->upstream.ssl_certificate_cache, prev->upstream.ssl_certificate_cache, NULL); - ngx_conf_merge_ptr_value(conf->upstream.ssl_passwords, - prev->upstream.ssl_passwords, NULL); + + if (ngx_http_upstream_merge_ssl_passwords(cf, &conf->upstream, + &prev->upstream) + != NGX_OK) + { + return NGX_CONF_ERROR; + } ngx_conf_merge_ptr_value(conf->ssl_conf_commands, prev->ssl_conf_commands, NULL); @@ -2685,16 +2690,9 @@ ngx_http_uwsgi_set_ssl(ngx_conf_t *cf, ngx_http_uwsgi_loc_conf_t *uwcf) return NGX_ERROR; } - if (uwcf->upstream.ssl_certificate->lengths - || uwcf->upstream.ssl_certificate_key->lengths) + if (uwcf->upstream.ssl_certificate->lengths == NULL + && uwcf->upstream.ssl_certificate_key->lengths == NULL) { - uwcf->upstream.ssl_passwords = - ngx_ssl_preserve_passwords(cf, uwcf->upstream.ssl_passwords); - if (uwcf->upstream.ssl_passwords == NULL) { - return NGX_ERROR; - } - - } else { if (ngx_ssl_certificate(cf, uwcf->upstream.ssl, &uwcf->upstream.ssl_certificate->value, &uwcf->upstream.ssl_certificate_key->value, diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c index 77dc032f2..d4cf1b7fe 100644 --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -6921,6 +6921,61 @@ ngx_http_upstream_hide_headers_hash(ngx_conf_t *cf, } +#if (NGX_HTTP_SSL) + +ngx_int_t +ngx_http_upstream_merge_ssl_passwords(ngx_conf_t *cf, + ngx_http_upstream_conf_t *conf, ngx_http_upstream_conf_t *prev) +{ + ngx_uint_t preserve; + + ngx_conf_merge_ptr_value(conf->ssl_passwords, prev->ssl_passwords, NULL); + + if (conf->ssl_certificate == NULL + || conf->ssl_certificate->value.len == 0 + || conf->ssl_certificate_key == NULL) + { + return NGX_OK; + } + + if (conf->ssl_certificate->lengths == NULL + && conf->ssl_certificate_key->lengths == NULL) + { + if (conf->ssl_passwords && conf->ssl_passwords->pool == NULL) { + /* un-preserve empty password list */ + conf->ssl_passwords = NULL; + } + + return NGX_OK; + } + + if (conf->ssl_passwords && conf->ssl_passwords->pool != cf->temp_pool) { + /* already preserved */ + return NGX_OK; + } + + preserve = (conf->ssl_passwords == prev->ssl_passwords) ? 
1 : 0; + + conf->ssl_passwords = ngx_ssl_preserve_passwords(cf, conf->ssl_passwords); + if (conf->ssl_passwords == NULL) { + return NGX_ERROR; + } + + /* + * special handling to keep a preserved ssl_passwords copy + * in the previous configuration to inherit it to all children + */ + + if (preserve) { + prev->ssl_passwords = conf->ssl_passwords; + } + + return NGX_OK; +} + +#endif + + static void * ngx_http_upstream_create_main_conf(ngx_conf_t *cf) { diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h index 069c0f7a4..e0a903669 100644 --- a/src/http/ngx_http_upstream.h +++ b/src/http/ngx_http_upstream.h @@ -437,6 +437,10 @@ char *ngx_http_upstream_param_set_slot(ngx_conf_t *cf, ngx_command_t *cmd, ngx_int_t ngx_http_upstream_hide_headers_hash(ngx_conf_t *cf, ngx_http_upstream_conf_t *conf, ngx_http_upstream_conf_t *prev, ngx_str_t *default_hide_headers, ngx_hash_init_t *hash); +#if (NGX_HTTP_SSL) +ngx_int_t ngx_http_upstream_merge_ssl_passwords(ngx_conf_t *cf, + ngx_http_upstream_conf_t *conf, ngx_http_upstream_conf_t *prev); +#endif #define ngx_http_conf_upstream_srv_conf(uscf, module) \ diff --git a/src/stream/ngx_stream_proxy_module.c b/src/stream/ngx_stream_proxy_module.c index 7f8bfc4e0..6e51585f6 100644 --- a/src/stream/ngx_stream_proxy_module.c +++ b/src/stream/ngx_stream_proxy_module.c @@ -108,6 +108,8 @@ static ngx_int_t ngx_stream_proxy_ssl_name(ngx_stream_session_t *s); static ngx_int_t ngx_stream_proxy_ssl_certificate(ngx_stream_session_t *s); static ngx_int_t ngx_stream_proxy_merge_ssl(ngx_conf_t *cf, ngx_stream_proxy_srv_conf_t *conf, ngx_stream_proxy_srv_conf_t *prev); +static ngx_int_t ngx_stream_proxy_merge_ssl_passwords(ngx_conf_t *cf, + ngx_stream_proxy_srv_conf_t *conf, ngx_stream_proxy_srv_conf_t *prev); static ngx_int_t ngx_stream_proxy_set_ssl(ngx_conf_t *cf, ngx_stream_proxy_srv_conf_t *pscf); @@ -2315,7 +2317,9 @@ ngx_stream_proxy_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) ngx_conf_merge_ptr_value(conf->ssl_certificate_cache, prev->ssl_certificate_cache, NULL); - ngx_conf_merge_ptr_value(conf->ssl_passwords, prev->ssl_passwords, NULL); + if (ngx_stream_proxy_merge_ssl_passwords(cf, conf, prev) != NGX_OK) { + return NGX_CONF_ERROR; + } ngx_conf_merge_ptr_value(conf->ssl_conf_commands, prev->ssl_conf_commands, NULL); @@ -2381,6 +2385,57 @@ ngx_stream_proxy_merge_ssl(ngx_conf_t *cf, ngx_stream_proxy_srv_conf_t *conf, } +static ngx_int_t +ngx_stream_proxy_merge_ssl_passwords(ngx_conf_t *cf, + ngx_stream_proxy_srv_conf_t *conf, ngx_stream_proxy_srv_conf_t *prev) +{ + ngx_uint_t preserve; + + ngx_conf_merge_ptr_value(conf->ssl_passwords, prev->ssl_passwords, NULL); + + if (conf->ssl_certificate == NULL + || conf->ssl_certificate->value.len == 0 + || conf->ssl_certificate_key == NULL) + { + return NGX_OK; + } + + if (conf->ssl_certificate->lengths == NULL + && conf->ssl_certificate_key->lengths == NULL) + { + if (conf->ssl_passwords && conf->ssl_passwords->pool == NULL) { + /* un-preserve empty password list */ + conf->ssl_passwords = NULL; + } + + return NGX_OK; + } + + if (conf->ssl_passwords && conf->ssl_passwords->pool != cf->temp_pool) { + /* already preserved */ + return NGX_OK; + } + + preserve = (conf->ssl_passwords == prev->ssl_passwords) ? 
1 : 0; + + conf->ssl_passwords = ngx_ssl_preserve_passwords(cf, conf->ssl_passwords); + if (conf->ssl_passwords == NULL) { + return NGX_ERROR; + } + + /* + * special handling to keep a preserved ssl_passwords copy + * in the previous configuration to inherit it to all children + */ + + if (preserve) { + prev->ssl_passwords = conf->ssl_passwords; + } + + return NGX_OK; +} + + static ngx_int_t ngx_stream_proxy_set_ssl(ngx_conf_t *cf, ngx_stream_proxy_srv_conf_t *pscf) { @@ -2418,16 +2473,9 @@ ngx_stream_proxy_set_ssl(ngx_conf_t *cf, ngx_stream_proxy_srv_conf_t *pscf) return NGX_ERROR; } - if (pscf->ssl_certificate->lengths - || pscf->ssl_certificate_key->lengths) + if (pscf->ssl_certificate->lengths == NULL + && pscf->ssl_certificate_key->lengths == NULL) { - pscf->ssl_passwords = - ngx_ssl_preserve_passwords(cf, pscf->ssl_passwords); - if (pscf->ssl_passwords == NULL) { - return NGX_ERROR; - } - - } else { if (ngx_ssl_certificate(cf, pscf->ssl, &pscf->ssl_certificate->value, &pscf->ssl_certificate_key->value, From noreply at nginx.com Thu Apr 10 14:52:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Thu, 10 Apr 2025 14:52:02 +0000 (UTC) Subject: [nginx] SSL: external groups support in $ssl_curve and $ssl_curves. Message-ID: <20250410145202.3FDE7489E2@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/b6e7eb0f5792d7a52d2675ee3906e502d63c48e3 branches: master commit: b6e7eb0f5792d7a52d2675ee3906e502d63c48e3 user: Sergey Kandaurov date: Thu, 3 Apr 2025 15:29:31 +0400 description: SSL: external groups support in $ssl_curve and $ssl_curves. Starting with OpenSSL 3.0, groups may be added externally with pluggable KEM providers. Using SSL_get_negotiated_group(), which makes lookup in a static table with known groups, doesn't allow to list such groups by names leaving them in hex. Adding X25519MLKEM768 to the default group list in OpenSSL 3.5 made this problem more visible. SSL_get0_group_name() and, apparently, SSL_group_to_name() allow to resolve such provider-implemented groups, which is also "generally preferred" over SSL_get_negotiated_group() as documented in OpenSSL git commit 93d4f6133f. This change makes external groups listing by name using SSL_group_to_name() available since OpenSSL 3.0. To preserve "prime256v1" naming for the group 0x0017, and to avoid breaking BoringSSL and older OpenSSL versions support, it is used supplementary for a group that appears to be unknown. See https://github.com/openssl/openssl/issues/27137 for related discussion. --- src/event/ngx_event_openssl.c | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c index 865c78540..6992cc4a4 100644 --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -5040,7 +5040,8 @@ ngx_ssl_get_curve(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s) { #ifdef SSL_get_negotiated_group - int nid; + int nid; + const char *name; nid = SSL_get_negotiated_group(c->ssl->connection); @@ -5052,14 +5053,24 @@ ngx_ssl_get_curve(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s) return NGX_OK; } - s->len = sizeof("0x0000") - 1; +#if (OPENSSL_VERSION_NUMBER >= 0x3000000fL) + name = SSL_group_to_name(c->ssl->connection, nid); +#else + name = NULL; +#endif + s->len = name ? 
ngx_strlen(name) : sizeof("0x0000") - 1; s->data = ngx_pnalloc(pool, s->len); if (s->data == NULL) { return NGX_ERROR; } - ngx_sprintf(s->data, "0x%04xd", nid & 0xffff); + if (name) { + ngx_memcpy(s->data, name, s->len); + + } else { + ngx_sprintf(s->data, "0x%04xd", nid & 0xffff); + } return NGX_OK; } @@ -5079,6 +5090,7 @@ ngx_ssl_get_curves(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s) int *curves, n, i, nid; u_char *p; size_t len; + const char *name; n = SSL_get1_curves(c->ssl->connection, NULL); @@ -5099,7 +5111,13 @@ ngx_ssl_get_curves(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s) nid = curves[i]; if (nid & TLSEXT_nid_unknown) { - len += sizeof("0x0000") - 1; +#if (OPENSSL_VERSION_NUMBER >= 0x3000000fL) + name = SSL_group_to_name(c->ssl->connection, nid); +#else + name = NULL; +#endif + + len += name ? ngx_strlen(name) : sizeof("0x0000") - 1; } else { len += ngx_strlen(OBJ_nid2sn(nid)); @@ -5119,7 +5137,14 @@ ngx_ssl_get_curves(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s) nid = curves[i]; if (nid & TLSEXT_nid_unknown) { - p = ngx_sprintf(p, "0x%04xd", nid & 0xffff); +#if (OPENSSL_VERSION_NUMBER >= 0x3000000fL) + name = SSL_group_to_name(c->ssl->connection, nid); +#else + name = NULL; +#endif + + p = name ? ngx_cpymem(p, name, ngx_strlen(name)) + : ngx_sprintf(p, "0x%04xd", nid & 0xffff); } else { p = ngx_sprintf(p, "%s", OBJ_nid2sn(nid)); From noreply at nginx.com Fri Apr 11 15:29:01 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 11 Apr 2025 15:29:01 +0000 (UTC) Subject: [njs] Fetch: generalize ngx_js_http_error(). Message-ID: <20250411152901.F3BA948F29@pubserv1.nginx> details: https://github.com/nginx/njs/commit/48070f44bd9d44dfd92a8f7873c38bf252d53b45 branches: master commit: 48070f44bd9d44dfd92a8f7873c38bf252d53b45 user: Zhidao HONG date: Fri, 11 Apr 2025 21:47:52 +0800 description: Fetch: generalize ngx_js_http_error(). Refactored to support both njs and QuickJS, which have different error formats when throwing exceptions. --- nginx/ngx_js_fetch.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/nginx/ngx_js_fetch.c b/nginx/ngx_js_fetch.c index a803ab4c..2db73904 100644 --- a/nginx/ngx_js_fetch.c +++ b/nginx/ngx_js_fetch.c @@ -156,17 +156,6 @@ struct ngx_js_http_s { }; - - -#define ngx_js_http_error(http, fmt, ...) \ - do { \ - njs_vm_error((http)->vm, fmt, ##__VA_ARGS__); \ - njs_vm_exception_get((http)->vm, \ - njs_value_arg(&(http)->response_value)); \ - ngx_js_http_fetch_done(http, &(http)->response_value, NJS_ERROR); \ - } while (0) - - static njs_int_t ngx_js_method_process(njs_vm_t *vm, ngx_js_request_t *r); static njs_int_t ngx_js_headers_inherit(njs_vm_t *vm, ngx_js_headers_t *headers, ngx_js_headers_t *orig); @@ -1330,6 +1319,26 @@ failed: } +static void +ngx_js_http_error(ngx_js_http_t *http, const char *fmt, ...) 
+{ + u_char *p, *end; + va_list args; + u_char err[NGX_MAX_ERROR_STR]; + + end = err + NGX_MAX_ERROR_STR - 1; + + va_start(args, fmt); + p = njs_vsprintf(err, end, fmt, args); + *p = '\0'; + va_end(args); + + njs_vm_error(http->vm, (const char *) err); + njs_vm_exception_get(http->vm, njs_value_arg(&http->response_value)); + ngx_js_http_fetch_done(http, &http->response_value, NJS_ERROR); +} + + static void ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx) { From noreply at nginx.com Fri Apr 11 15:29:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 11 Apr 2025 15:29:02 +0000 (UTC) Subject: [njs] Fetch: renamed njs_js_http_destructor() as ngx_js_http_destructor(). Message-ID: <20250411152902.040A248F2A@pubserv1.nginx> details: https://github.com/nginx/njs/commit/3aa2ed4b4dad5867920e2b8a6da28840c44e67eb branches: master commit: 3aa2ed4b4dad5867920e2b8a6da28840c44e67eb user: Zhidao HONG date: Fri, 11 Apr 2025 21:57:02 +0800 description: Fetch: renamed njs_js_http_destructor() as ngx_js_http_destructor(). --- nginx/ngx_js_fetch.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nginx/ngx_js_fetch.c b/nginx/ngx_js_fetch.c index 2db73904..5abd43bd 100644 --- a/nginx/ngx_js_fetch.c +++ b/nginx/ngx_js_fetch.c @@ -163,7 +163,7 @@ static njs_int_t ngx_js_headers_fill(njs_vm_t *vm, ngx_js_headers_t *headers, njs_value_t *init); static ngx_js_http_t *ngx_js_http_alloc(njs_vm_t *vm, ngx_pool_t *pool, ngx_log_t *log); -static void njs_js_http_destructor(ngx_js_event_t *event); +static void ngx_js_http_destructor(ngx_js_event_t *event); static void ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx); static njs_int_t ngx_js_fetch_promissified_result(njs_vm_t *vm, njs_value_t *result, njs_int_t rc, njs_value_t *retval); @@ -1299,7 +1299,7 @@ ngx_js_http_alloc(njs_vm_t *vm, ngx_pool_t *pool, ngx_log_t *log) event->ctx = vm; njs_value_function_set(njs_value_arg(&event->function), callback); - event->destructor = njs_js_http_destructor; + event->destructor = ngx_js_http_destructor; event->fd = ctx->event_id++; event->data = http; @@ -1447,7 +1447,7 @@ ngx_js_http_close_connection(ngx_connection_t *c) static void -njs_js_http_destructor(ngx_js_event_t *event) +ngx_js_http_destructor(ngx_js_event_t *event) { ngx_js_http_t *http; From noreply at nginx.com Fri Apr 11 15:29:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 11 Apr 2025 15:29:02 +0000 (UTC) Subject: [njs] Fetch: refactored out ngx_js_http_close_peer(). Message-ID: <20250411152902.0F62C48F36@pubserv1.nginx> details: https://github.com/nginx/njs/commit/78b55af1e689eac4c87961a65592a094845ed482 branches: master commit: 78b55af1e689eac4c87961a65592a094845ed482 user: Zhidao HONG date: Fri, 11 Apr 2025 22:02:16 +0800 description: Fetch: refactored out ngx_js_http_close_peer(). 
--- nginx/ngx_js_fetch.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/nginx/ngx_js_fetch.c b/nginx/ngx_js_fetch.c index d3f8b356..944ed28e 100644 --- a/nginx/ngx_js_fetch.c +++ b/nginx/ngx_js_fetch.c @@ -164,6 +164,7 @@ static njs_int_t ngx_js_headers_fill(njs_vm_t *vm, ngx_js_headers_t *headers, static ngx_js_http_t *ngx_js_http_alloc(njs_vm_t *vm, ngx_pool_t *pool, ngx_log_t *log); static void ngx_js_http_resolve_done(ngx_js_http_t *http); +static void ngx_js_http_close_peer(ngx_js_http_t *http); static void ngx_js_http_destructor(ngx_js_event_t *event); static void ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx); static njs_int_t ngx_js_fetch_promissified_result(njs_vm_t *vm, @@ -1456,6 +1457,16 @@ ngx_js_http_resolve_done(ngx_js_http_t *http) } +static void +ngx_js_http_close_peer(ngx_js_http_t *http) +{ + if (http->peer.connection != NULL) { + ngx_js_http_close_connection(http->peer.connection); + http->peer.connection = NULL; + } +} + + static void ngx_js_http_destructor(ngx_js_event_t *event) { @@ -1467,11 +1478,7 @@ ngx_js_http_destructor(ngx_js_event_t *event) http); ngx_js_http_resolve_done(http); - - if (http->peer.connection != NULL) { - ngx_js_http_close_connection(http->peer.connection); - http->peer.connection = NULL; - } + ngx_js_http_close_peer(http); } @@ -1532,10 +1539,7 @@ ngx_js_http_fetch_done(ngx_js_http_t *http, njs_opaque_value_t *retval, ngx_log_debug2(NGX_LOG_DEBUG_EVENT, http->log, 0, "js fetch done http:%p rc:%i", http, (ngx_int_t) rc); - if (http->peer.connection != NULL) { - ngx_js_http_close_connection(http->peer.connection); - http->peer.connection = NULL; - } + ngx_js_http_close_peer(http); if (http->event != NULL) { action = &http->promise_callbacks[(rc != NJS_OK)]; From noreply at nginx.com Fri Apr 11 15:29:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 11 Apr 2025 15:29:02 +0000 (UTC) Subject: [njs] Fetch: refactored out ngx_js_http_resolve_done(). Message-ID: <20250411152902.09EDE48F35@pubserv1.nginx> details: https://github.com/nginx/njs/commit/befd1d603a9c898657702439644fd8f609610804 branches: master commit: befd1d603a9c898657702439644fd8f609610804 user: Zhidao HONG date: Fri, 11 Apr 2025 21:59:16 +0800 description: Fetch: refactored out ngx_js_http_resolve_done(). 
--- nginx/ngx_js_fetch.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/nginx/ngx_js_fetch.c b/nginx/ngx_js_fetch.c index 5abd43bd..d3f8b356 100644 --- a/nginx/ngx_js_fetch.c +++ b/nginx/ngx_js_fetch.c @@ -163,6 +163,7 @@ static njs_int_t ngx_js_headers_fill(njs_vm_t *vm, ngx_js_headers_t *headers, njs_value_t *init); static ngx_js_http_t *ngx_js_http_alloc(njs_vm_t *vm, ngx_pool_t *pool, ngx_log_t *log); +static void ngx_js_http_resolve_done(ngx_js_http_t *http); static void ngx_js_http_destructor(ngx_js_event_t *event); static void ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx); static njs_int_t ngx_js_fetch_promissified_result(njs_vm_t *vm, @@ -1410,8 +1411,7 @@ ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx) http->addrs[i].name.data = p; } - ngx_resolve_name_done(ctx); - http->ctx = NULL; + ngx_js_http_resolve_done(http); ngx_js_http_connect(http); @@ -1446,6 +1446,16 @@ ngx_js_http_close_connection(ngx_connection_t *c) } +static void +ngx_js_http_resolve_done(ngx_js_http_t *http) +{ + if (http->ctx != NULL) { + ngx_resolve_name_done(http->ctx); + http->ctx = NULL; + } +} + + static void ngx_js_http_destructor(ngx_js_event_t *event) { @@ -1456,10 +1466,7 @@ ngx_js_http_destructor(ngx_js_event_t *event) ngx_log_debug1(NGX_LOG_DEBUG_EVENT, http->log, 0, "js fetch destructor:%p", http); - if (http->ctx != NULL) { - ngx_resolve_name_done(http->ctx); - http->ctx = NULL; - } + ngx_js_http_resolve_done(http); if (http->peer.connection != NULL) { ngx_js_http_close_connection(http->peer.connection); From noreply at nginx.com Fri Apr 11 15:29:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 11 Apr 2025 15:29:02 +0000 (UTC) Subject: [njs] Fetch: refactored out ngx_js_http_resolve(). Message-ID: <20250411152902.14FDB48F38@pubserv1.nginx> details: https://github.com/nginx/njs/commit/dbf556f235af1f42a7d9e3b90682de090170cd0f branches: master commit: dbf556f235af1f42a7d9e3b90682de090170cd0f user: Zhidao HONG date: Fri, 11 Apr 2025 22:43:31 +0800 description: Fetch: refactored out ngx_js_http_resolve(). 
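To make the new helper's contract explicit, a hedged call-site sketch mirroring the caller change in the patch below; the variable names and error labels here are placeholders, not real nginx symbols.

    ctx = ngx_js_http_resolve(http, resolver, &host, port, timeout);

    if (ctx == NULL) {
        /* allocation failure or ngx_resolve_name() error */
        goto memory_error;
    }

    if (ctx == NGX_NO_RESOLVER) {
        /* no resolver configured in this context */
        goto fail;
    }

    /* otherwise the lookup proceeds asynchronously and completes
       in ngx_js_http_resolve_handler() */
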
--- nginx/ngx_js_fetch.c | 59 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/nginx/ngx_js_fetch.c b/nginx/ngx_js_fetch.c index 944ed28e..9539a687 100644 --- a/nginx/ngx_js_fetch.c +++ b/nginx/ngx_js_fetch.c @@ -166,7 +166,9 @@ static ngx_js_http_t *ngx_js_http_alloc(njs_vm_t *vm, ngx_pool_t *pool, static void ngx_js_http_resolve_done(ngx_js_http_t *http); static void ngx_js_http_close_peer(ngx_js_http_t *http); static void ngx_js_http_destructor(ngx_js_event_t *event); -static void ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx); +static ngx_resolver_ctx_t *ngx_js_http_resolve(ngx_js_http_t *http, + ngx_resolver_t *r, ngx_str_t *host, in_port_t port, ngx_msec_t timeout); +static void ngx_js_http_resolve_handler(ngx_resolver_ctx_t *ctx); static njs_int_t ngx_js_fetch_promissified_result(njs_vm_t *vm, njs_value_t *result, njs_int_t rc, njs_value_t *retval); static void ngx_js_http_fetch_done(ngx_js_http_t *http, @@ -832,7 +834,9 @@ ngx_js_ext_fetch(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, } if (u.addrs == NULL) { - ctx = ngx_resolve_start(ngx_external_resolver(vm, external), NULL); + ctx = ngx_js_http_resolve(http, ngx_external_resolver(vm, external), + &u.host, u.port, + ngx_external_resolver_timeout(vm, external)); if (ctx == NULL) { njs_vm_memory_error(vm); return NJS_ERROR; @@ -843,21 +847,6 @@ ngx_js_ext_fetch(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, goto fail; } - http->ctx = ctx; - http->port = u.port; - - ctx->name = u.host; - ctx->handler = ngx_js_resolve_handler; - ctx->data = http; - ctx->timeout = ngx_external_resolver_timeout(vm, external); - - ret = ngx_resolve_name(http->ctx); - if (ret != NGX_OK) { - http->ctx = NULL; - njs_vm_memory_error(vm); - return NJS_ERROR; - } - njs_value_assign(retval, njs_value_arg(&http->promise)); return NJS_OK; @@ -1341,8 +1330,42 @@ ngx_js_http_error(ngx_js_http_t *http, const char *fmt, ...) } +static ngx_resolver_ctx_t * +ngx_js_http_resolve(ngx_js_http_t *http, ngx_resolver_t *r, ngx_str_t *host, + in_port_t port, ngx_msec_t timeout) +{ + ngx_int_t ret; + ngx_resolver_ctx_t *ctx; + + ctx = ngx_resolve_start(r, NULL); + if (ctx == NULL) { + return NULL; + } + + if (ctx == NGX_NO_RESOLVER) { + return ctx; + } + + http->ctx = ctx; + http->port = port; + + ctx->name = *host; + ctx->handler = ngx_js_http_resolve_handler; + ctx->data = http; + ctx->timeout = timeout; + + ret = ngx_resolve_name(ctx); + if (ret != NGX_OK) { + http->ctx = NULL; + return NULL; + } + + return ctx; +} + + static void -ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx) +ngx_js_http_resolve_handler(ngx_resolver_ctx_t *ctx) { u_char *p; size_t len; From noreply at nginx.com Tue Apr 15 15:02:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:02 +0000 (UTC) Subject: [nginx] QUIC: prevent spurious congestion control recovery mode. Message-ID: <20250415150202.DCD5048F63@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/38236bf74f3e5728eeea488bef381c61842ac1d2 branches: master commit: 38236bf74f3e5728eeea488bef381c61842ac1d2 user: Roman Arutyunyan date: Fri, 3 Jan 2025 13:01:06 +0400 description: QUIC: prevent spurious congestion control recovery mode. Since recovery_start field was initialized with ngx_current_msec, all congestion events that happened within the same millisecond or cycle iteration, were treated as in recovery mode. 
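The in-recovery test itself is a wrap-safe signed comparison; as a hedged illustration (the typedefs are included only to keep the fragment self-contained and mirror nginx's ngx_msec_t/ngx_msec_int_t):

    #include <stdint.h>

    typedef uintptr_t  ngx_msec_t;      /* unsigned, may wrap */
    typedef intptr_t   ngx_msec_int_t;  /* signed counterpart */

    /* a packet counts as sent during recovery when its send time is not
       later than recovery_start; starting recovery_start at
       ngx_current_msec makes packets sent within the same millisecond
       match, while ngx_current_msec - 1 does not */
    static int
    sent_during_recovery(ngx_msec_t send_time, ngx_msec_t recovery_start)
    {
        return (ngx_msec_int_t) (send_time - recovery_start) <= 0;
    }
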
Also, when handling persistent congestion, initializing recovery_start with ngx_current_msec resulted in treating all sent packets as in recovery mode, which violates RFC 9002, see example in Appendix B.8. While here, also fixed recovery_start wrap protection. Previously it used 2 * max_idle_timeout time frame for all sent frames, which is not a reliable protection since max_idle_timeout is unrelated to congestion control. Now recovery_start <= now condition is enforced. Note that recovery_start wrap is highly unlikely and can only occur on a 32-bit system if there are no congestion events for 24 days. --- src/event/quic/ngx_event_quic.c | 2 +- src/event/quic/ngx_event_quic_ack.c | 54 ++++++++++++++++++++++++------- src/event/quic/ngx_event_quic_migration.c | 2 +- 3 files changed, 44 insertions(+), 14 deletions(-) diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c index 70d9748bd..11497a6d7 100644 --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -312,7 +312,7 @@ ngx_quic_new_connection(ngx_connection_t *c, ngx_quic_conf_t *conf, ngx_max(2 * NGX_QUIC_MIN_INITIAL_SIZE, 14720)); qc->congestion.ssthresh = (size_t) -1; - qc->congestion.recovery_start = ngx_current_msec; + qc->congestion.recovery_start = ngx_current_msec - 1; if (pkt->validated && pkt->retried) { qc->tp.retry_scid.len = pkt->dcid.len; diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c index 4616e7053..29c5bfed1 100644 --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -41,6 +41,7 @@ static ngx_int_t ngx_quic_detect_lost(ngx_connection_t *c, ngx_quic_ack_stat_t *st); static ngx_msec_t ngx_quic_pcg_duration(ngx_connection_t *c); static void ngx_quic_persistent_congestion(ngx_connection_t *c); +static ngx_msec_t ngx_quic_oldest_sent_packet(ngx_connection_t *c); static void ngx_quic_congestion_lost(ngx_connection_t *c, ngx_quic_frame_t *frame); static void ngx_quic_lost_handler(ngx_event_t *ev); @@ -335,6 +336,14 @@ ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) cg->in_flight -= f->plen; + /* prevent recovery_start from wrapping */ + + timer = now - cg->recovery_start; + + if ((ngx_msec_int_t) timer < 0) { + cg->recovery_start = ngx_quic_oldest_sent_packet(c) - 1; + } + timer = f->send_time - cg->recovery_start; if ((ngx_msec_int_t) timer <= 0) { @@ -360,14 +369,6 @@ ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) now, cg->window, cg->in_flight); } - /* prevent recovery_start from wrapping */ - - timer = cg->recovery_start - now + qc->tp.max_idle_timeout * 2; - - if ((ngx_msec_int_t) timer < 0) { - cg->recovery_start = now - qc->tp.max_idle_timeout * 2; - } - done: if (blocked && cg->in_flight < cg->window) { @@ -543,19 +544,48 @@ ngx_quic_pcg_duration(ngx_connection_t *c) static void ngx_quic_persistent_congestion(ngx_connection_t *c) { - ngx_msec_t now; ngx_quic_congestion_t *cg; ngx_quic_connection_t *qc; qc = ngx_quic_get_connection(c); cg = &qc->congestion; - now = ngx_current_msec; - cg->recovery_start = now; + cg->recovery_start = ngx_quic_oldest_sent_packet(c) - 1; cg->window = qc->path->mtu * 2; ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic congestion persistent t:%M win:%uz", now, cg->window); + "quic congestion persistent t:%M win:%uz", + ngx_current_msec, cg->window); +} + + +static ngx_msec_t +ngx_quic_oldest_sent_packet(ngx_connection_t *c) +{ + ngx_msec_t oldest; + ngx_uint_t i; + ngx_queue_t *q; + ngx_quic_frame_t *start; + ngx_quic_send_ctx_t 
*ctx; + ngx_quic_connection_t *qc; + + qc = ngx_quic_get_connection(c); + oldest = ngx_current_msec; + + for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { + ctx = &qc->send_ctx[i]; + + if (!ngx_queue_empty(&ctx->sent)) { + q = ngx_queue_head(&ctx->sent); + start = ngx_queue_data(q, ngx_quic_frame_t, queue); + + if ((ngx_msec_int_t) (start->send_time - oldest) < 0) { + oldest = start->send_time; + } + } + } + + return oldest; } diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c index ac22b1327..3caae88e5 100644 --- a/src/event/quic/ngx_event_quic_migration.c +++ b/src/event/quic/ngx_event_quic_migration.c @@ -186,7 +186,7 @@ valid: ngx_max(2 * NGX_QUIC_MIN_INITIAL_SIZE, 14720)); qc->congestion.ssthresh = (size_t) -1; - qc->congestion.recovery_start = ngx_current_msec; + qc->congestion.recovery_start = ngx_current_msec - 1; ngx_quic_init_rtt(qc); } From noreply at nginx.com Tue Apr 15 15:02:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:02 +0000 (UTC) Subject: [nginx] QUIC: all-levels commit and revert functions. Message-ID: <20250415150202.E658A48F76@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/04c65ccd9a094c00f33bac3a7e0d43cc692409c8 branches: master commit: 04c65ccd9a094c00f33bac3a7e0d43cc692409c8 user: Roman Arutyunyan date: Sun, 9 Mar 2025 16:09:28 +0400 description: QUIC: all-levels commit and revert functions. Previously, these functions operated on a per-level basis. This however resulted in excessive logging of in_flight and will also led to extra work detecting underutilized congestion window in the followup patches. --- src/event/quic/ngx_event_quic_output.c | 96 +++++++++++++++++++--------------- 1 file changed, 53 insertions(+), 43 deletions(-) diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c index f087e2bfa..9aa7f37ba 100644 --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -45,9 +45,9 @@ static ngx_int_t ngx_quic_create_datagrams(ngx_connection_t *c); -static void ngx_quic_commit_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx); -static void ngx_quic_revert_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, - uint64_t pnum); +static void ngx_quic_commit_send(ngx_connection_t *c); +static void ngx_quic_revert_send(ngx_connection_t *c, + uint64_t preserved_pnum[NGX_QUIC_SEND_CTX_LAST]); #if ((NGX_HAVE_UDP_SEGMENT) && (NGX_HAVE_MSGHDR_MSG_CONTROL)) static ngx_uint_t ngx_quic_allow_segmentation(ngx_connection_t *c); static ngx_int_t ngx_quic_create_segments(ngx_connection_t *c); @@ -127,6 +127,10 @@ ngx_quic_create_datagrams(ngx_connection_t *c) cg = &qc->congestion; path = qc->path; +#if (NGX_SUPPRESS_WARN) + ngx_memzero(preserved_pnum, sizeof(preserved_pnum)); +#endif + while (cg->in_flight < cg->window) { p = dst; @@ -150,12 +154,7 @@ ngx_quic_create_datagrams(ngx_connection_t *c) if (min > len) { /* padding can't be applied - avoid sending the packet */ - - while (i-- > 0) { - ctx = &qc->send_ctx[i]; - ngx_quic_revert_send(c, ctx, preserved_pnum[i]); - } - + ngx_quic_revert_send(c, preserved_pnum); return NGX_OK; } @@ -180,17 +179,12 @@ ngx_quic_create_datagrams(ngx_connection_t *c) } if (n == NGX_AGAIN) { - for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { - ngx_quic_revert_send(c, &qc->send_ctx[i], preserved_pnum[i]); - } - + ngx_quic_revert_send(c, preserved_pnum); ngx_add_timer(&qc->push, NGX_QUIC_SOCKET_RETRY_DELAY); break; } - for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { - 
ngx_quic_commit_send(c, &qc->send_ctx[i]); - } + ngx_quic_commit_send(c); path->sent += len; } @@ -200,31 +194,36 @@ ngx_quic_create_datagrams(ngx_connection_t *c) static void -ngx_quic_commit_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx) +ngx_quic_commit_send(ngx_connection_t *c) { + ngx_uint_t i; ngx_queue_t *q; ngx_quic_frame_t *f; + ngx_quic_send_ctx_t *ctx; ngx_quic_congestion_t *cg; ngx_quic_connection_t *qc; qc = ngx_quic_get_connection(c); - cg = &qc->congestion; - while (!ngx_queue_empty(&ctx->sending)) { + for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { + ctx = &qc->send_ctx[i]; - q = ngx_queue_head(&ctx->sending); - f = ngx_queue_data(q, ngx_quic_frame_t, queue); + while (!ngx_queue_empty(&ctx->sending)) { - ngx_queue_remove(q); + q = ngx_queue_head(&ctx->sending); + f = ngx_queue_data(q, ngx_quic_frame_t, queue); - if (f->pkt_need_ack && !qc->closing) { - ngx_queue_insert_tail(&ctx->sent, q); + ngx_queue_remove(q); - cg->in_flight += f->plen; + if (f->pkt_need_ack && !qc->closing) { + ngx_queue_insert_tail(&ctx->sent, q); - } else { - ngx_quic_free_frame(c, f); + cg->in_flight += f->plen; + + } else { + ngx_quic_free_frame(c, f); + } } } @@ -234,19 +233,30 @@ ngx_quic_commit_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx) static void -ngx_quic_revert_send(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, - uint64_t pnum) +ngx_quic_revert_send(ngx_connection_t *c, uint64_t pnum[NGX_QUIC_SEND_CTX_LAST]) { - ngx_queue_t *q; + ngx_uint_t i; + ngx_queue_t *q; + ngx_quic_send_ctx_t *ctx; + ngx_quic_connection_t *qc; + + qc = ngx_quic_get_connection(c); - while (!ngx_queue_empty(&ctx->sending)) { + for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { + ctx = &qc->send_ctx[i]; - q = ngx_queue_last(&ctx->sending); - ngx_queue_remove(q); - ngx_queue_insert_head(&ctx->frames, q); - } + if (ngx_queue_empty(&ctx->sending)) { + continue; + } - ctx->pnum = pnum; + do { + q = ngx_queue_last(&ctx->sending); + ngx_queue_remove(q); + ngx_queue_insert_head(&ctx->frames, q); + } while (!ngx_queue_empty(&ctx->sending)); + + ctx->pnum = pnum[i]; + } } @@ -311,13 +321,13 @@ ngx_quic_create_segments(ngx_connection_t *c) size_t len, segsize; ssize_t n; u_char *p, *end; - uint64_t preserved_pnum; - ngx_uint_t nseg; + ngx_uint_t nseg, level; ngx_quic_path_t *path; ngx_quic_send_ctx_t *ctx; ngx_quic_congestion_t *cg; ngx_quic_connection_t *qc; static u_char dst[NGX_QUIC_MAX_UDP_SEGMENT_BUF]; + static uint64_t preserved_pnum[NGX_QUIC_SEND_CTX_LAST]; qc = ngx_quic_get_connection(c); cg = &qc->congestion; @@ -335,7 +345,8 @@ ngx_quic_create_segments(ngx_connection_t *c) nseg = 0; - preserved_pnum = ctx->pnum; + level = ctx - qc->send_ctx; + preserved_pnum[level] = ctx->pnum; for ( ;; ) { @@ -369,19 +380,18 @@ ngx_quic_create_segments(ngx_connection_t *c) } if (n == NGX_AGAIN) { - ngx_quic_revert_send(c, ctx, preserved_pnum); - + ngx_quic_revert_send(c, preserved_pnum); ngx_add_timer(&qc->push, NGX_QUIC_SOCKET_RETRY_DELAY); break; } - ngx_quic_commit_send(c, ctx); + ngx_quic_commit_send(c); path->sent += n; p = dst; nseg = 0; - preserved_pnum = ctx->pnum; + preserved_pnum[level] = ctx->pnum; } } From noreply at nginx.com Tue Apr 15 15:02:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:02 +0000 (UTC) Subject: [nginx] QUIC: do not increase underutilized congestion window. 
Message-ID: <20250415150202.EBADF48F77@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/cd5e4fa1446dff86fafc3b6ffcc11afd527a024f branches: master commit: cd5e4fa1446dff86fafc3b6ffcc11afd527a024f user: Roman Arutyunyan date: Sat, 4 Jan 2025 18:03:46 +0400 description: QUIC: do not increase underutilized congestion window. As per RFC 9002, Section 7.8, congestion window should not be increased when it's underutilized. --- src/event/quic/ngx_event_quic_ack.c | 24 ++++++++++++++++++++++++ src/event/quic/ngx_event_quic_ack.h | 1 + src/event/quic/ngx_event_quic_connection.h | 1 + src/event/quic/ngx_event_quic_output.c | 12 +++++++++++- 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c index a6f34348b..bc99947bd 100644 --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -354,6 +354,14 @@ ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) goto done; } + if (cg->idle) { + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic congestion ack idle t:%M win:%uz if:%uz", + now, cg->window, cg->in_flight); + + goto done; + } + if (cg->window < cg->ssthresh) { cg->window += f->plen; @@ -377,6 +385,22 @@ done: } +void +ngx_quic_congestion_idle(ngx_connection_t *c, ngx_uint_t idle) +{ + ngx_quic_congestion_t *cg; + ngx_quic_connection_t *qc; + + qc = ngx_quic_get_connection(c); + cg = &qc->congestion; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic congestion idle:%ui", idle); + + cg->idle = idle; +} + + static void ngx_quic_drop_ack_ranges(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, uint64_t pn) diff --git a/src/event/quic/ngx_event_quic_ack.h b/src/event/quic/ngx_event_quic_ack.h index 56920c2a5..4ad59660f 100644 --- a/src/event/quic/ngx_event_quic_ack.h +++ b/src/event/quic/ngx_event_quic_ack.h @@ -17,6 +17,7 @@ ngx_int_t ngx_quic_handle_ack_frame(ngx_connection_t *c, void ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *frame); +void ngx_quic_congestion_idle(ngx_connection_t *c, ngx_uint_t idle); void ngx_quic_resend_frames(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx); void ngx_quic_set_lost_timer(ngx_connection_t *c); void ngx_quic_pto_handler(ngx_event_t *ev); diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h index 824c92b57..acc09c142 100644 --- a/src/event/quic/ngx_event_quic_connection.h +++ b/src/event/quic/ngx_event_quic_connection.h @@ -169,6 +169,7 @@ typedef struct { size_t window; size_t ssthresh; ngx_msec_t recovery_start; + ngx_uint_t idle; /* unsigned idle:1; */ } ngx_quic_congestion_t; diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c index 9aa7f37ba..a92a539f3 100644 --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -196,7 +196,7 @@ ngx_quic_create_datagrams(ngx_connection_t *c) static void ngx_quic_commit_send(ngx_connection_t *c) { - ngx_uint_t i; + ngx_uint_t i, idle; ngx_queue_t *q; ngx_quic_frame_t *f; ngx_quic_send_ctx_t *ctx; @@ -206,9 +206,15 @@ ngx_quic_commit_send(ngx_connection_t *c) qc = ngx_quic_get_connection(c); cg = &qc->congestion; + idle = 1; + for (i = 0; i < NGX_QUIC_SEND_CTX_LAST; i++) { ctx = &qc->send_ctx[i]; + if (!ngx_queue_empty(&ctx->frames)) { + idle = 0; + } + while (!ngx_queue_empty(&ctx->sending)) { q = ngx_queue_head(&ctx->sending); @@ -229,6 +235,8 @@ ngx_quic_commit_send(ngx_connection_t *c) ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, 
"quic congestion send if:%uz", cg->in_flight); + + ngx_quic_congestion_idle(c, idle); } @@ -257,6 +265,8 @@ ngx_quic_revert_send(ngx_connection_t *c, uint64_t pnum[NGX_QUIC_SEND_CTX_LAST]) ctx->pnum = pnum[i]; } + + ngx_quic_congestion_idle(c, 1); } From noreply at nginx.com Tue Apr 15 15:02:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:02 +0000 (UTC) Subject: [nginx] QUIC: do not shrink congestion window after losing an MTU probe. Message-ID: <20250415150202.F0C7D48F78@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/6bf13e9d57bbc664ac055cdb58c738b09a0f0189 branches: master commit: 6bf13e9d57bbc664ac055cdb58c738b09a0f0189 user: Roman Arutyunyan date: Mon, 6 Jan 2025 16:27:03 +0400 description: QUIC: do not shrink congestion window after losing an MTU probe. As per RFC 9000, Section 14.4: Loss of a QUIC packet that is carried in a PMTU probe is therefore not a reliable indication of congestion and SHOULD NOT trigger a congestion control reaction. --- src/event/quic/ngx_event_quic_ack.c | 8 ++++++++ src/event/quic/ngx_event_quic_migration.c | 1 + src/event/quic/ngx_event_quic_transport.h | 1 + 3 files changed, 10 insertions(+) diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c index bc99947bd..d16545a1d 100644 --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -755,6 +755,14 @@ ngx_quic_congestion_lost(ngx_connection_t *c, ngx_quic_frame_t *f) goto done; } + if (f->ignore_loss) { + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic congestion lost ignore t:%M win:%uz if:%uz", + now, cg->window, cg->in_flight); + + goto done; + } + cg->recovery_start = now; cg->window /= 2; diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c index 3caae88e5..463eeb503 100644 --- a/src/event/quic/ngx_event_quic_migration.c +++ b/src/event/quic/ngx_event_quic_migration.c @@ -923,6 +923,7 @@ ngx_quic_send_path_mtu_probe(ngx_connection_t *c, ngx_quic_path_t *path) frame->level = ssl_encryption_application; frame->type = NGX_QUIC_FT_PING; + frame->ignore_loss = 1; qc = ngx_quic_get_connection(c); ctx = ngx_quic_get_send_ctx(qc, ssl_encryption_application); diff --git a/src/event/quic/ngx_event_quic_transport.h b/src/event/quic/ngx_event_quic_transport.h index 3e320391a..dcd763df1 100644 --- a/src/event/quic/ngx_event_quic_transport.h +++ b/src/event/quic/ngx_event_quic_transport.h @@ -271,6 +271,7 @@ struct ngx_quic_frame_s { unsigned need_ack:1; unsigned pkt_need_ack:1; unsigned ignore_congestion:1; + unsigned ignore_loss:1; ngx_chain_t *data; union { From noreply at nginx.com Tue Apr 15 15:02:03 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:03 +0000 (UTC) Subject: [nginx] QUIC: CUBIC congestion control. Message-ID: <20250415150203.06C6A48F92@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/f9a7e7cc11e71b2c62d4c5b9ac4feb7e92913c64 branches: master commit: f9a7e7cc11e71b2c62d4c5b9ac4feb7e92913c64 user: Roman Arutyunyan date: Thu, 7 Nov 2024 17:25:45 +0400 description: QUIC: CUBIC congestion control. 
--- src/event/quic/ngx_event_quic.c | 1 + src/event/quic/ngx_event_quic_ack.c | 189 +++++++++++++++++++++++++++-- src/event/quic/ngx_event_quic_connection.h | 6 + src/event/quic/ngx_event_quic_migration.c | 1 + 4 files changed, 185 insertions(+), 12 deletions(-) diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c index 11497a6d7..49d30e82a 100644 --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -312,6 +312,7 @@ ngx_quic_new_connection(ngx_connection_t *c, ngx_quic_conf_t *conf, ngx_max(2 * NGX_QUIC_MIN_INITIAL_SIZE, 14720)); qc->congestion.ssthresh = (size_t) -1; + qc->congestion.mtu = NGX_QUIC_MIN_INITIAL_SIZE; qc->congestion.recovery_start = ngx_current_msec - 1; if (pkt->validated && pkt->retried) { diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c index d16545a1d..6b0eef35e 100644 --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -20,6 +20,10 @@ /* RFC 9002, 7.6.1. Duration: kPersistentCongestionThreshold */ #define NGX_QUIC_PERSISTENT_CONGESTION_THR 3 +/* CUBIC parameters x10 */ +#define NGX_QUIC_CUBIC_BETA 7 +#define MGX_QUIC_CUBIC_C 4 + /* send time of ACK'ed packets */ typedef struct { @@ -35,10 +39,12 @@ static void ngx_quic_rtt_sample(ngx_connection_t *c, ngx_quic_ack_frame_t *ack, static ngx_int_t ngx_quic_handle_ack_frame_range(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, uint64_t min, uint64_t max, ngx_quic_ack_stat_t *st); +static size_t ngx_quic_congestion_cubic(ngx_connection_t *c); static void ngx_quic_drop_ack_ranges(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, uint64_t pn); static ngx_int_t ngx_quic_detect_lost(ngx_connection_t *c, ngx_quic_ack_stat_t *st); +static ngx_msec_t ngx_quic_congestion_cubic_time(ngx_connection_t *c); static ngx_msec_t ngx_quic_pcg_duration(ngx_connection_t *c); static void ngx_quic_persistent_congestion(ngx_connection_t *c); static ngx_msec_t ngx_quic_oldest_sent_packet(ngx_connection_t *c); @@ -314,6 +320,7 @@ ngx_quic_handle_ack_frame_range(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, void ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) { + size_t w_cubic; ngx_uint_t blocked; ngx_msec_t now, timer; ngx_quic_congestion_t *cg; @@ -370,11 +377,46 @@ ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) now, cg->window, cg->ssthresh, cg->in_flight); } else { - cg->window += (uint64_t) qc->path->mtu * f->plen / cg->window; - ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic congestion ack reno t:%M win:%uz if:%uz", - now, cg->window, cg->in_flight); + /* RFC 9438, 4.2. 
Window Increase Function */ + + w_cubic = ngx_quic_congestion_cubic(c); + + if (cg->window < cg->w_prior) { + cg->w_est += (uint64_t) cg->mtu * f->plen + * 3 * (10 - NGX_QUIC_CUBIC_BETA) + / (10 + NGX_QUIC_CUBIC_BETA) / cg->window; + + } else { + cg->w_est += (uint64_t) cg->mtu * f->plen / cg->window; + } + + if (w_cubic < cg->w_est) { + cg->window = cg->w_est; + + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic congestion ack reno t:%M win:%uz c:%uz if:%uz", + now, cg->window, w_cubic, cg->in_flight); + + } else if (w_cubic > cg->window) { + + if (w_cubic >= cg->window * 3 / 2) { + cg->window += cg->mtu / 2; + + } else { + cg->window += (uint64_t) cg->mtu * (w_cubic - cg->window) + / cg->window; + } + + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic congestion ack cubic t:%M win:%uz c:%uz if:%uz", + now, cg->window, w_cubic, cg->in_flight); + + } else { + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic congestion ack skip t:%M win:%uz c:%uz if:%uz", + now, cg->window, w_cubic, cg->in_flight); + } } done: @@ -385,9 +427,62 @@ done: } +static size_t +ngx_quic_congestion_cubic(ngx_connection_t *c) +{ + int64_t w, t, cc; + ngx_msec_t now; + ngx_quic_congestion_t *cg; + ngx_quic_connection_t *qc; + + qc = ngx_quic_get_connection(c); + cg = &qc->congestion; + + ngx_quic_congestion_idle(c, cg->idle); + + now = ngx_current_msec; + t = (ngx_msec_int_t) (now - cg->k); + + if (t > 1000000) { + w = NGX_MAX_SIZE_T_VALUE; + goto done; + } + + if (t < -1000000) { + w = 0; + goto done; + } + + /* + * RFC 9438, Figure 1 + * + * w_cubic = C * (t_msec / 1000) ^ 3 * mtu + w_max + */ + + cc = 10000000000ll / (int64_t) cg->mtu / MGX_QUIC_CUBIC_C; + w = t * t * t / cc + (int64_t) cg->w_max; + + if (w > NGX_MAX_SIZE_T_VALUE) { + w = NGX_MAX_SIZE_T_VALUE; + } + + if (w < 0) { + w = 0; + } + +done: + + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic cubic t:%L w:%L wm:%uz", t, w, cg->w_max); + + return w; +} + + void ngx_quic_congestion_idle(ngx_connection_t *c, ngx_uint_t idle) { + ngx_msec_t now; ngx_quic_congestion_t *cg; ngx_quic_connection_t *qc; @@ -397,6 +492,18 @@ ngx_quic_congestion_idle(ngx_connection_t *c, ngx_uint_t idle) ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic congestion idle:%ui", idle); + if (cg->window >= cg->ssthresh) { + /* RFC 9438, 5.8. Behavior for Application-Limited Flows */ + + now = ngx_current_msec; + + if (cg->idle) { + cg->k += now - cg->idle_start; + } + + cg->idle_start = now; + } + cg->idle = idle; } @@ -580,8 +687,9 @@ ngx_quic_persistent_congestion(ngx_connection_t *c) qc = ngx_quic_get_connection(c); cg = &qc->congestion; + cg->mtu = qc->path->mtu; cg->recovery_start = ngx_quic_oldest_sent_packet(c) - 1; - cg->window = qc->path->mtu * 2; + cg->window = cg->mtu * 2; ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic congestion persistent t:%M win:%uz", @@ -763,14 +871,19 @@ ngx_quic_congestion_lost(ngx_connection_t *c, ngx_quic_frame_t *f) goto done; } - cg->recovery_start = now; - cg->window /= 2; - - if (cg->window < qc->path->mtu * 2) { - cg->window = qc->path->mtu * 2; - } + /* RFC 9438, 4.6. Multiplicative Decrease */ - cg->ssthresh = cg->window; + cg->mtu = qc->path->mtu; + cg->recovery_start = now; + cg->w_prior = cg->window; + /* RFC 9438, 4.7. Fast Convergence */ + cg->w_max = (cg->window < cg->w_max) + ? 
cg->window * (10 + NGX_QUIC_CUBIC_BETA) / 20 : cg->window; + cg->ssthresh = cg->in_flight * NGX_QUIC_CUBIC_BETA / 10; + cg->window = ngx_max(cg->ssthresh, cg->mtu * 2); + cg->w_est = cg->window; + cg->k = now + ngx_quic_congestion_cubic_time(c); + cg->idle_start = now; ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic congestion lost t:%M win:%uz if:%uz", @@ -784,6 +897,58 @@ done: } +static ngx_msec_t +ngx_quic_congestion_cubic_time(ngx_connection_t *c) +{ + int64_t v, x, d, cc; + ngx_uint_t n; + ngx_quic_congestion_t *cg; + ngx_quic_connection_t *qc; + + qc = ngx_quic_get_connection(c); + cg = &qc->congestion; + + /* + * RFC 9438, Figure 2 + * + * k_msec = ((w_max - cwnd_epoch) / C / mtu) ^ 1/3 * 1000 + */ + + if (cg->w_max <= cg->window) { + return 0; + } + + cc = 10000000000ll / (int64_t) cg->mtu / MGX_QUIC_CUBIC_C; + v = (int64_t) (cg->w_max - cg->window) * cc; + + /* + * Newton-Raphson method for x ^ 3 = v: + * + * x_next = (2 * x_prev + v / x_prev ^ 2) / 3 + */ + + x = 5000; + + for (n = 1; n <= 10; n++) { + d = (v / x / x - x) / 3; + x += d; + + if (ngx_abs(d) <= 100) { + break; + } + } + + if (x > NGX_MAX_SIZE_T_VALUE) { + return NGX_MAX_SIZE_T_VALUE; + } + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic cubic time:%L n:%ui", x, n); + + return x; +} + + void ngx_quic_set_lost_timer(ngx_connection_t *c) { diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h index acc09c142..716d62308 100644 --- a/src/event/quic/ngx_event_quic_connection.h +++ b/src/event/quic/ngx_event_quic_connection.h @@ -168,7 +168,13 @@ typedef struct { size_t in_flight; size_t window; size_t ssthresh; + size_t w_max; + size_t w_est; + size_t w_prior; + size_t mtu; ngx_msec_t recovery_start; + ngx_msec_t idle_start; + ngx_msec_t k; ngx_uint_t idle; /* unsigned idle:1; */ } ngx_quic_congestion_t; diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c index 1d914ffd8..6befc3427 100644 --- a/src/event/quic/ngx_event_quic_migration.c +++ b/src/event/quic/ngx_event_quic_migration.c @@ -186,6 +186,7 @@ valid: ngx_max(2 * NGX_QUIC_MIN_INITIAL_SIZE, 14720)); qc->congestion.ssthresh = (size_t) -1; + qc->congestion.mtu = NGX_QUIC_MIN_INITIAL_SIZE; qc->congestion.recovery_start = ngx_current_msec - 1; ngx_quic_init_rtt(qc); From noreply at nginx.com Tue Apr 15 15:02:03 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:03 +0000 (UTC) Subject: [nginx] QUIC: ignore congestion control when sending MTU probes. Message-ID: <20250415150203.00FB448F79@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/a40cc700238796d6668a461e121f6ffee5066394 branches: master commit: a40cc700238796d6668a461e121f6ffee5066394 user: Roman Arutyunyan date: Mon, 6 Jan 2025 10:19:56 +0400 description: QUIC: ignore congestion control when sending MTU probes. If connection is network-limited, MTU probes have little chance of being sent since congestion window is almost always full. As a result, PMTUD may not be able to reach the real MTU and the connection may operate with a reduced MTU. The solution is to ignore the congestion window. This may lead to a temporary increase in in-flight count beyond congestion window. 
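The patch below only sets the flag on the probe frame; the congestion-window check that honours ignore_congestion lives elsewhere in the output path and is not part of this diff. A hypothetical sketch of such a check, with an illustrative function name:

    /*
     * Illustrative only: exempt frames marked ignore_congestion from
     * the usual in_flight >= window test so that PMTU probes remain
     * eligible to be sent even when the window is full.
     */
    static ngx_uint_t
    ngx_quic_cwnd_blocks(ngx_quic_congestion_t *cg, ngx_quic_frame_t *f)
    {
        if (f->ignore_congestion) {
            return 0;
        }

        return (cg->in_flight >= cg->window) ? 1 : 0;
    }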
--- src/event/quic/ngx_event_quic_migration.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c index 463eeb503..1d914ffd8 100644 --- a/src/event/quic/ngx_event_quic_migration.c +++ b/src/event/quic/ngx_event_quic_migration.c @@ -924,6 +924,7 @@ ngx_quic_send_path_mtu_probe(ngx_connection_t *c, ngx_quic_path_t *path) frame->level = ssl_encryption_application; frame->type = NGX_QUIC_FT_PING; frame->ignore_loss = 1; + frame->ignore_congestion = 1; qc = ngx_quic_get_connection(c); ctx = ngx_quic_get_send_ctx(qc, ssl_encryption_application); From noreply at nginx.com Tue Apr 15 15:02:03 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:03 +0000 (UTC) Subject: [nginx] QUIC: optimized connection frame threshold. Message-ID: <20250415150203.0C3E348F9D@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/2fb32ff24d431211b673ff9c854352ca0c74e27c branches: master commit: 2fb32ff24d431211b673ff9c854352ca0c74e27c user: Roman Arutyunyan date: Fri, 4 Apr 2025 17:39:05 +0400 description: QUIC: optimized connection frame threshold. Previously the threshold was hardcoded at 10000. This value is too low for high BDP networks. For example, if all frames are STREAM frames, and MTU is 1500, the upper limit for congestion window would be roughly 15M (10000 * 1500). With 100ms RTT it's just a 1.2Gbps network (15M * 10 * 8). In reality, the limit is even lower because of other frame types. Also, the number of frames that could be used simultaneously depends on the total amount of data buffered in all server streams, and client flow control. The change sets frame threshold based on max concurrent streams and stream buffer size, the product of which is the maximum number of in-flight stream data in all server streams at any moment. The value is divided by 2000 to account for a typical MTU 1500 and the fact that not all frames are STREAM frames.
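To put the new formula in perspective with made-up (non-default) numbers: allowing 256 concurrent streams in total with a 1m stream buffer gives

    max_frames = 256 * 1048576 / 2000 = 134217

which at a 1500-byte MTU corresponds to roughly 200m of potentially in-flight data, against the ~15m ceiling implied by the old hardcoded 10000.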
--- src/event/quic/ngx_event_quic.c | 4 ++++ src/event/quic/ngx_event_quic_connection.h | 1 + src/event/quic/ngx_event_quic_frames.c | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c index 49d30e82a..4682ecad9 100644 --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -315,6 +315,10 @@ ngx_quic_new_connection(ngx_connection_t *c, ngx_quic_conf_t *conf, qc->congestion.mtu = NGX_QUIC_MIN_INITIAL_SIZE; qc->congestion.recovery_start = ngx_current_msec - 1; + qc->max_frames = (conf->max_concurrent_streams_uni + + conf->max_concurrent_streams_bidi) + * conf->stream_buffer_size / 2000; + if (pkt->validated && pkt->retried) { qc->tp.retry_scid.len = pkt->dcid.len; qc->tp.retry_scid.data = ngx_pstrdup(c->pool, &pkt->dcid); diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h index 716d62308..04cda859e 100644 --- a/src/event/quic/ngx_event_quic_connection.h +++ b/src/event/quic/ngx_event_quic_connection.h @@ -261,6 +261,7 @@ struct ngx_quic_connection_s { ngx_buf_t *free_shadow_bufs; ngx_uint_t nframes; + ngx_uint_t max_frames; #ifdef NGX_QUIC_DEBUG_ALLOC ngx_uint_t nbufs; ngx_uint_t nshadowbufs; diff --git a/src/event/quic/ngx_event_quic_frames.c b/src/event/quic/ngx_event_quic_frames.c index 6ea908cc1..888e8bda2 100644 --- a/src/event/quic/ngx_event_quic_frames.c +++ b/src/event/quic/ngx_event_quic_frames.c @@ -214,7 +214,7 @@ ngx_quic_alloc_frame(ngx_connection_t *c) "quic reuse frame n:%ui", qc->nframes); #endif - } else if (qc->nframes < 10000) { + } else if (qc->nframes < qc->max_frames) { frame = ngx_palloc(c->pool, sizeof(ngx_quic_frame_t)); if (frame == NULL) { return NULL; From noreply at nginx.com Tue Apr 15 15:02:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:02 +0000 (UTC) Subject: [nginx] HTTP/3: graceful shutdown on keepalive timeout expiration. Message-ID: <20250415150202.D1FC148F60@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/3a97111adfb6e538ddef1828bbf04a35a8915c1f branches: master commit: 3a97111adfb6e538ddef1828bbf04a35a8915c1f user: Roman Arutyunyan date: Tue, 7 Jan 2025 21:14:58 +0400 description: HTTP/3: graceful shutdown on keepalive timeout expiration. Previously, the expiration caused QUIC connection finalization even if there are application-terminated streams finishing sending data. Such finalization terminated these streams. An easy way to trigger this is to request a large file from HTTP/3 over a small MTU. In this case keepalive timeout expiration may abruptly terminate the request stream. --- src/http/v3/ngx_http_v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c index 8db229b29..d8597ec5c 100644 --- a/src/http/v3/ngx_http_v3.c +++ b/src/http/v3/ngx_http_v3.c @@ -70,7 +70,7 @@ ngx_http_v3_keepalive_handler(ngx_event_t *ev) ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 keepalive handler"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, + ngx_http_v3_shutdown_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, "keepalive timeout"); } From noreply at nginx.com Tue Apr 15 15:02:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:02 +0000 (UTC) Subject: [nginx] QUIC: use path MTU in congestion window computations. 
Message-ID: <20250415150202.D761E48F62@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/53e7e9eb542fb1d3d885bbca03ed1d704aa08f31 branches: master commit: 53e7e9eb542fb1d3d885bbca03ed1d704aa08f31 user: Roman Arutyunyan date: Fri, 3 Jan 2025 11:17:07 +0400 description: QUIC: use path MTU in congestion window computations. As per RFC 9002, Section B.2, max_datagram_size used in congestion window computations should be based on path MTU. --- src/event/quic/ngx_event_quic.c | 4 ++-- src/event/quic/ngx_event_quic_ack.c | 8 ++++---- src/event/quic/ngx_event_quic_migration.c | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c index 308597e27..70d9748bd 100644 --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -308,8 +308,8 @@ ngx_quic_new_connection(ngx_connection_t *c, ngx_quic_conf_t *conf, qc->streams.client_max_streams_uni = qc->tp.initial_max_streams_uni; qc->streams.client_max_streams_bidi = qc->tp.initial_max_streams_bidi; - qc->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size, - ngx_max(2 * qc->tp.max_udp_payload_size, + qc->congestion.window = ngx_min(10 * NGX_QUIC_MIN_INITIAL_SIZE, + ngx_max(2 * NGX_QUIC_MIN_INITIAL_SIZE, 14720)); qc->congestion.ssthresh = (size_t) -1; qc->congestion.recovery_start = ngx_current_msec; diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c index 2487ea60d..4616e7053 100644 --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -353,7 +353,7 @@ ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) now, cg->window, cg->ssthresh, cg->in_flight); } else { - cg->window += qc->tp.max_udp_payload_size * f->plen / cg->window; + cg->window += (uint64_t) qc->path->mtu * f->plen / cg->window; ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic congestion ack reno t:%M win:%uz if:%uz", @@ -552,7 +552,7 @@ ngx_quic_persistent_congestion(ngx_connection_t *c) now = ngx_current_msec; cg->recovery_start = now; - cg->window = qc->tp.max_udp_payload_size * 2; + cg->window = qc->path->mtu * 2; ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic congestion persistent t:%M win:%uz", now, cg->window); @@ -698,8 +698,8 @@ ngx_quic_congestion_lost(ngx_connection_t *c, ngx_quic_frame_t *f) cg->recovery_start = now; cg->window /= 2; - if (cg->window < qc->tp.max_udp_payload_size * 2) { - cg->window = qc->tp.max_udp_payload_size * 2; + if (cg->window < qc->path->mtu * 2) { + cg->window = qc->path->mtu * 2; } cg->ssthresh = cg->window; diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c index 2d1467e14..ac22b1327 100644 --- a/src/event/quic/ngx_event_quic_migration.c +++ b/src/event/quic/ngx_event_quic_migration.c @@ -182,8 +182,8 @@ valid: ngx_memzero(&qc->congestion, sizeof(ngx_quic_congestion_t)); - qc->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size, - ngx_max(2 * qc->tp.max_udp_payload_size, + qc->congestion.window = ngx_min(10 * NGX_QUIC_MIN_INITIAL_SIZE, + ngx_max(2 * NGX_QUIC_MIN_INITIAL_SIZE, 14720)); qc->congestion.ssthresh = (size_t) -1; qc->congestion.recovery_start = ngx_current_msec; From noreply at nginx.com Tue Apr 15 15:02:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:02 +0000 (UTC) Subject: [nginx] QUIC: graph-friendly congestion control logging. 
Message-ID: <20250415150202.CEB0F48F5F@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/2b8b70068a7f7b800ec23390cd2da01b5b91b25f branches: master commit: 2b8b70068a7f7b800ec23390cd2da01b5b91b25f user: Roman Arutyunyan date: Fri, 3 Jan 2025 14:49:47 +0400 description: QUIC: graph-friendly congestion control logging. Improved logging for simpler data extraction for plotting congestion window graphs. In particular, added current milliseconds number from ngx_current_msec. While here, simplified logging text and removed irrelevant data. --- src/event/quic/ngx_event_quic_ack.c | 44 +++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c index c953b8042..2487ea60d 100644 --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -314,7 +314,7 @@ void ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) { ngx_uint_t blocked; - ngx_msec_t timer; + ngx_msec_t now, timer; ngx_quic_congestion_t *cg; ngx_quic_connection_t *qc; @@ -329,6 +329,8 @@ ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) return; } + now = ngx_current_msec; + blocked = (cg->in_flight >= cg->window) ? 1 : 0; cg->in_flight -= f->plen; @@ -337,8 +339,8 @@ ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) if ((ngx_msec_int_t) timer <= 0) { ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic congestion ack recovery win:%uz ss:%z if:%uz", - cg->window, cg->ssthresh, cg->in_flight); + "quic congestion ack rec t:%M win:%uz if:%uz", + now, cg->window, cg->in_flight); goto done; } @@ -346,24 +348,24 @@ ngx_quic_congestion_ack(ngx_connection_t *c, ngx_quic_frame_t *f) if (cg->window < cg->ssthresh) { cg->window += f->plen; - ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic congestion slow start win:%uz ss:%z if:%uz", - cg->window, cg->ssthresh, cg->in_flight); + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic congestion ack ss t:%M win:%uz ss:%z if:%uz", + now, cg->window, cg->ssthresh, cg->in_flight); } else { cg->window += qc->tp.max_udp_payload_size * f->plen / cg->window; ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic congestion avoidance win:%uz ss:%z if:%uz", - cg->window, cg->ssthresh, cg->in_flight); + "quic congestion ack reno t:%M win:%uz if:%uz", + now, cg->window, cg->in_flight); } /* prevent recovery_start from wrapping */ - timer = cg->recovery_start - ngx_current_msec + qc->tp.max_idle_timeout * 2; + timer = cg->recovery_start - now + qc->tp.max_idle_timeout * 2; if ((ngx_msec_int_t) timer < 0) { - cg->recovery_start = ngx_current_msec - qc->tp.max_idle_timeout * 2; + cg->recovery_start = now - qc->tp.max_idle_timeout * 2; } done: @@ -541,17 +543,19 @@ ngx_quic_pcg_duration(ngx_connection_t *c) static void ngx_quic_persistent_congestion(ngx_connection_t *c) { + ngx_msec_t now; ngx_quic_congestion_t *cg; ngx_quic_connection_t *qc; qc = ngx_quic_get_connection(c); cg = &qc->congestion; + now = ngx_current_msec; - cg->recovery_start = ngx_current_msec; + cg->recovery_start = now; cg->window = qc->tp.max_udp_payload_size * 2; - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic persistent congestion win:%uz", cg->window); + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic congestion persistent t:%M win:%uz", now, cg->window); } @@ -659,7 +663,7 @@ static void ngx_quic_congestion_lost(ngx_connection_t *c, ngx_quic_frame_t *f) { ngx_uint_t blocked; - ngx_msec_t timer; + ngx_msec_t now, timer; 
ngx_quic_congestion_t *cg; ngx_quic_connection_t *qc; @@ -681,15 +685,17 @@ ngx_quic_congestion_lost(ngx_connection_t *c, ngx_quic_frame_t *f) timer = f->send_time - cg->recovery_start; + now = ngx_current_msec; + if ((ngx_msec_int_t) timer <= 0) { ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic congestion lost recovery win:%uz ss:%z if:%uz", - cg->window, cg->ssthresh, cg->in_flight); + "quic congestion lost rec t:%M win:%uz if:%uz", + now, cg->window, cg->in_flight); goto done; } - cg->recovery_start = ngx_current_msec; + cg->recovery_start = now; cg->window /= 2; if (cg->window < qc->tp.max_udp_payload_size * 2) { @@ -699,8 +705,8 @@ ngx_quic_congestion_lost(ngx_connection_t *c, ngx_quic_frame_t *f) cg->ssthresh = cg->window; ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic congestion lost win:%uz ss:%z if:%uz", - cg->window, cg->ssthresh, cg->in_flight); + "quic congestion lost t:%M win:%uz if:%uz", + now, cg->window, cg->in_flight); done: From noreply at nginx.com Tue Apr 15 15:02:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:02 +0000 (UTC) Subject: [nginx] QUIC: ngx_msec_t overflow protection. Message-ID: <20250415150202.E178D48F64@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/1e883a40db98b70e422ff8e4d1d0e87e3f8ccaa5 branches: master commit: 1e883a40db98b70e422ff8e4d1d0e87e3f8ccaa5 user: Roman Arutyunyan date: Mon, 10 Mar 2025 12:19:25 +0400 description: QUIC: ngx_msec_t overflow protection. On some systems the value of ngx_current_msec is derived from monotonic clock, for which the following is defined by POSIX: For this clock, the value returned by clock_gettime() represents the amount of time (in seconds and nanoseconds) since an unspecified point in the past. As as result, overflow protection is needed when comparing two ngx_msec_t. The change adds such protection to the ngx_quic_detect_lost() function. --- src/event/quic/ngx_event_quic_ack.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c index 29c5bfed1..a6f34348b 100644 --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -449,9 +449,10 @@ ngx_quic_detect_lost(ngx_connection_t *c, ngx_quic_ack_stat_t *st) now = ngx_current_msec; thr = ngx_quic_lost_threshold(qc); - /* send time of lost packets across all send contexts */ - oldest = NGX_TIMER_INFINITE; - newest = NGX_TIMER_INFINITE; +#if (NGX_SUPPRESS_WARN) + oldest = now; + newest = now; +#endif nlost = 0; @@ -484,13 +485,17 @@ ngx_quic_detect_lost(ngx_connection_t *c, ngx_quic_ack_stat_t *st) break; } - if (start->send_time > qc->first_rtt) { + if ((ngx_msec_int_t) (start->send_time - qc->first_rtt) > 0) { - if (oldest == NGX_TIMER_INFINITE || start->send_time < oldest) { + if (nlost == 0 + || (ngx_msec_int_t) (start->send_time - oldest) < 0) + { oldest = start->send_time; } - if (newest == NGX_TIMER_INFINITE || start->send_time > newest) { + if (nlost == 0 + || (ngx_msec_int_t) (start->send_time - newest) > 0) + { newest = start->send_time; } @@ -511,8 +516,9 @@ ngx_quic_detect_lost(ngx_connection_t *c, ngx_quic_ack_stat_t *st) * latest ACK frame. 
*/ - if (st && nlost >= 2 && (st->newest < oldest || st->oldest > newest)) { - + if (st && nlost >= 2 && ((ngx_msec_int_t) (st->newest - oldest) < 0 + || (ngx_msec_int_t) (st->oldest - newest) > 0)) + { if (newest - oldest > ngx_quic_pcg_duration(c)) { ngx_quic_persistent_congestion(c); } From noreply at nginx.com Tue Apr 15 15:02:03 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 15 Apr 2025 15:02:03 +0000 (UTC) Subject: [nginx] QUIC: dynamic packet threshold. Message-ID: <20250415150203.1129348FA4@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/aa49a416b8ea762558211de25d6ee70ca73bb373 branches: master commit: aa49a416b8ea762558211de25d6ee70ca73bb373 user: Roman Arutyunyan date: Mon, 14 Apr 2025 17:16:47 +0400 description: QUIC: dynamic packet threshold. RFC 9002, Section 6.1.1 defines packet reordering threshold as 3. Testing shows that such low value leads to spurious packet losses followed by congestion window collapse. The change implements dynamic packet threshold detection based on in-flight packet range. Packet threshold is defined as half the number of in-flight packets, with mininum value of 3. Also, renamed ngx_quic_lost_threshold() to ngx_quic_time_threshold() for better compliance with RFC 9002 terms. --- src/event/quic/ngx_event_quic_ack.c | 48 ++++++++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c index 6b0eef35e..b8b72e943 100644 --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -33,7 +33,8 @@ typedef struct { } ngx_quic_ack_stat_t; -static ngx_inline ngx_msec_t ngx_quic_lost_threshold(ngx_quic_connection_t *qc); +static ngx_inline ngx_msec_t ngx_quic_time_threshold(ngx_quic_connection_t *qc); +static uint64_t ngx_quic_packet_threshold(ngx_quic_send_ctx_t *ctx); static void ngx_quic_rtt_sample(ngx_connection_t *c, ngx_quic_ack_frame_t *ack, enum ssl_encryption_level_t level, ngx_msec_t send_time); static ngx_int_t ngx_quic_handle_ack_frame_range(ngx_connection_t *c, @@ -55,7 +56,7 @@ static void ngx_quic_lost_handler(ngx_event_t *ev); /* RFC 9002, 6.1.2. 
Time Threshold: kTimeThreshold, kGranularity */ static ngx_inline ngx_msec_t -ngx_quic_lost_threshold(ngx_quic_connection_t *qc) +ngx_quic_time_threshold(ngx_quic_connection_t *qc) { ngx_msec_t thr; @@ -66,6 +67,29 @@ ngx_quic_lost_threshold(ngx_quic_connection_t *qc) } +static uint64_t +ngx_quic_packet_threshold(ngx_quic_send_ctx_t *ctx) +{ + uint64_t pkt_thr; + ngx_queue_t *q; + ngx_quic_frame_t *f; + + if (ngx_queue_empty(&ctx->sent)) { + return NGX_QUIC_PKT_THR; + } + + q = ngx_queue_head(&ctx->sent); + f = ngx_queue_data(q, ngx_quic_frame_t, queue); + pkt_thr = (ctx->pnum - f->pnum) / 2; + + if (pkt_thr <= NGX_QUIC_PKT_THR) { + return NGX_QUIC_PKT_THR; + } + + return pkt_thr; +} + + ngx_int_t ngx_quic_handle_ack_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_frame_t *f) @@ -569,6 +593,7 @@ ngx_quic_drop_ack_ranges(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, static ngx_int_t ngx_quic_detect_lost(ngx_connection_t *c, ngx_quic_ack_stat_t *st) { + uint64_t pkt_thr; ngx_uint_t i, nlost; ngx_msec_t now, wait, thr, oldest, newest; ngx_queue_t *q; @@ -578,7 +603,7 @@ ngx_quic_detect_lost(ngx_connection_t *c, ngx_quic_ack_stat_t *st) qc = ngx_quic_get_connection(c); now = ngx_current_msec; - thr = ngx_quic_lost_threshold(qc); + thr = ngx_quic_time_threshold(qc); #if (NGX_SUPPRESS_WARN) oldest = now; @@ -595,6 +620,8 @@ ngx_quic_detect_lost(ngx_connection_t *c, ngx_quic_ack_stat_t *st) continue; } + pkt_thr = ngx_quic_packet_threshold(ctx); + while (!ngx_queue_empty(&ctx->sent)) { q = ngx_queue_head(&ctx->sent); @@ -606,12 +633,12 @@ ngx_quic_detect_lost(ngx_connection_t *c, ngx_quic_ack_stat_t *st) wait = start->send_time + thr - now; - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic detect_lost pnum:%uL thr:%M wait:%i level:%d", - start->pnum, thr, (ngx_int_t) wait, start->level); + ngx_log_debug5(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic detect_lost pnum:%uL thr:%M pthr:%uL wait:%i level:%d", + start->pnum, thr, pkt_thr, (ngx_int_t) wait, start->level); if ((ngx_msec_int_t) wait > 0 - && ctx->largest_ack - start->pnum < NGX_QUIC_PKT_THR) + && ctx->largest_ack - start->pnum < pkt_thr) { break; } @@ -952,6 +979,7 @@ ngx_quic_congestion_cubic_time(ngx_connection_t *c) void ngx_quic_set_lost_timer(ngx_connection_t *c) { + uint64_t pkt_thr; ngx_uint_t i; ngx_msec_t now; ngx_queue_t *q; @@ -977,10 +1005,12 @@ ngx_quic_set_lost_timer(ngx_connection_t *c) q = ngx_queue_head(&ctx->sent); f = ngx_queue_data(q, ngx_quic_frame_t, queue); w = (ngx_msec_int_t) - (f->send_time + ngx_quic_lost_threshold(qc) - now); + (f->send_time + ngx_quic_time_threshold(qc) - now); if (f->pnum <= ctx->largest_ack) { - if (w < 0 || ctx->largest_ack - f->pnum >= NGX_QUIC_PKT_THR) { + pkt_thr = ngx_quic_packet_threshold(ctx); + + if (w < 0 || ctx->largest_ack - f->pnum >= pkt_thr) { w = 0; } From noreply at nginx.com Wed Apr 16 12:02:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 16 Apr 2025 12:02:02 +0000 (UTC) Subject: [nginx] nginx-1.27.5-RELEASE Message-ID: <20250416120202.A425247801@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/6ac8b69f06bc10d5503f636da888fa70095b151c branches: master commit: 6ac8b69f06bc10d5503f636da888fa70095b151c user: Sergey Kandaurov date: Mon, 14 Apr 2025 22:35:27 +0400 description: nginx-1.27.5-RELEASE --- docs/xml/nginx/changes.xml | 71 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/docs/xml/nginx/changes.xml b/docs/xml/nginx/changes.xml index b55177d80..ecb063d57 100644 --- 
a/docs/xml/nginx/changes.xml +++ b/docs/xml/nginx/changes.xml @@ -5,6 +5,77 @@ + + + + +контроль перегрузки CUBIC в соединениях QUIC. + + +CUBIC congestion control in QUIC connections. + + + + + +ограничение на максимальный размер кешируемых в разделяемой памяти +SSL-сессий поднято до 8192. + + +the maximum size limit for SSL sessions cached in shared memory +has been raised to 8192. + + + + + +в директивах grpc_ssl_password_file, proxy_ssl_password_file и +uwsgi_ssl_password_file +при загрузке SSL-сертификатов и зашифрованных ключей из переменных; +ошибка появилась в 1.23.1. + + +in the "grpc_ssl_password_file", "proxy_ssl_password_file", and +"uwsgi_ssl_password_file" directives +when loading SSL certificates and encrypted keys from variables; +the bug had appeared in 1.23.1. + + + + + +в переменных $ssl_curve и $ssl_curves +при использовании подключаемых кривых в OpenSSL. + + +in the $ssl_curve and $ssl_curves variables +when using pluggable curves in OpenSSL. + + + + + +nginx не собирался с musl libc.
+Спасибо Piotr Sikora. +
+ +nginx could not be built with musl libc.
+Thanks to Piotr Sikora. +
+
+ + + +Улучшения производительности и исправления в HTTP/3. + + +Performance improvements and bugfixes in HTTP/3. + + + +
+ + From noreply at nginx.com Wed Apr 16 12:05:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 16 Apr 2025 12:05:02 +0000 (UTC) Subject: [nginx] Annotated tag created: release-1.27.5 Message-ID: <20250416120502.A9FB34784E@pubserv1.nginx> details: https://github.com/nginx/nginx/releases/tag/release-1.27.5 branches: commit: 6ac8b69f06bc10d5503f636da888fa70095b151c user: Sergey Kandaurov date: Wed Apr 16 16:03:18 2025 +0400 description: release-1.27.5 tag From noreply at nginx.com Wed Apr 16 14:56:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 16 Apr 2025 14:56:02 +0000 (UTC) Subject: [nginx] Version bump. Message-ID: <20250416145602.723FF489E9@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/0626e60a754895b0d6e8f6f7d3043c465da7b346 branches: master commit: 0626e60a754895b0d6e8f6f7d3043c465da7b346 user: Roman Arutyunyan date: Wed, 16 Apr 2025 18:48:50 +0400 description: Version bump. --- src/core/nginx.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/nginx.h b/src/core/nginx.h index 090777853..72664a531 100644 --- a/src/core/nginx.h +++ b/src/core/nginx.h @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1027005 -#define NGINX_VERSION "1.27.5" +#define nginx_version 1029000 +#define NGINX_VERSION "1.29.0" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From noreply at nginx.com Thu Apr 17 08:52:01 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Thu, 17 Apr 2025 08:52:01 +0000 (UTC) Subject: [nginx] QUIC: lowered log level for unsupported transport parameters. Message-ID: <20250417085201.F38E948EF7@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/04813dac865d538d160de1d01c7248a22503700b branches: master commit: 04813dac865d538d160de1d01c7248a22503700b user: Roman Arutyunyan date: Tue, 8 Apr 2025 16:54:28 +0400 description: QUIC: lowered log level for unsupported transport parameters. --- src/event/quic/ngx_event_quic_transport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/event/quic/ngx_event_quic_transport.c b/src/event/quic/ngx_event_quic_transport.c index bb13447b5..0b3ef4b2e 100644 --- a/src/event/quic/ngx_event_quic_transport.c +++ b/src/event/quic/ngx_event_quic_transport.c @@ -1773,7 +1773,7 @@ ngx_quic_parse_transport_params(u_char *p, u_char *end, ngx_quic_tp_t *tp, } if (rc == NGX_DECLINED) { - ngx_log_error(NGX_LOG_INFO, log, 0, + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, log, 0, "quic %s transport param id:0x%xL, skipped", (id % 31 == 27) ? "reserved" : "unknown", id); } From noreply at nginx.com Thu Apr 17 15:13:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Thu, 17 Apr 2025 15:13:02 +0000 (UTC) Subject: [nginx] Fixed -Wunterminated-string-initialization with gcc15. Message-ID: <20250417151302.C008748EF7@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/444954abacef1d77f3dc6e9b1878684c7e6fe5b3 branches: master commit: 444954abacef1d77f3dc6e9b1878684c7e6fe5b3 user: Roman Arutyunyan date: Wed, 16 Apr 2025 16:56:44 +0400 description: Fixed -Wunterminated-string-initialization with gcc15. 
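For context, -Wunterminated-string-initialization is a warning introduced in GCC 15: it fires when a string literal exactly fills a char array, so the implicit terminating '\0' is silently dropped. The two declarations below show the pattern being replaced and its initializer-list equivalent; only one of them would appear in the source at a time, and u_char is nginx's typedef for unsigned char:

    /* warns under GCC 15: the literal's trailing '\0' does not fit */
    static const u_char nginx[5] = "\x84\xaa\x63\x55\xe7";

    /* no warning: an explicit byte list never implies a terminator */
    static const u_char nginx[5] = { 0x84, 0xaa, 0x63, 0x55, 0xe7 };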
--- src/event/quic/ngx_event_quic_protection.c | 12 +++++++----- src/http/v2/ngx_http_v2_filter_module.c | 7 ++++--- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c index 3f249b36a..e5c0df7b4 100644 --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -125,9 +125,10 @@ ngx_quic_keys_set_initial_secret(ngx_quic_keys_t *keys, ngx_str_t *secret, ngx_quic_secret_t *client, *server; ngx_quic_ciphers_t ciphers; - static const uint8_t salt[20] = - "\x38\x76\x2c\xf7\xf5\x59\x34\xb3\x4d\x17" - "\x9a\xe6\xa4\xc8\x0c\xad\xcc\xbb\x7f\x0a"; + static const uint8_t salt[20] = { + 0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, + 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a + }; client = &keys->secrets[ssl_encryption_initial].client; server = &keys->secrets[ssl_encryption_initial].server; @@ -958,8 +959,9 @@ ngx_quic_create_retry_packet(ngx_quic_header_t *pkt, ngx_str_t *res) /* 5.8. Retry Packet Integrity */ static ngx_quic_md_t key = ngx_quic_md( "\xbe\x0c\x69\x0b\x9f\x66\x57\x5a\x1d\x76\x6b\x54\xe3\x68\xc8\x4e"); - static const u_char nonce[NGX_QUIC_IV_LEN] = - "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"; + static const u_char nonce[NGX_QUIC_IV_LEN] = { + 0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb + }; static ngx_str_t in = ngx_string(""); ad.data = res->data; diff --git a/src/http/v2/ngx_http_v2_filter_module.c b/src/http/v2/ngx_http_v2_filter_module.c index 1e2cafaf1..b63e343a0 100644 --- a/src/http/v2/ngx_http_v2_filter_module.c +++ b/src/http/v2/ngx_http_v2_filter_module.c @@ -115,10 +115,11 @@ ngx_http_v2_header_filter(ngx_http_request_t *r) ngx_http_core_srv_conf_t *cscf; u_char addr[NGX_SOCKADDR_STRLEN]; - static const u_char nginx[5] = "\x84\xaa\x63\x55\xe7"; + static const u_char nginx[5] = { 0x84, 0xaa, 0x63, 0x55, 0xe7 }; #if (NGX_HTTP_GZIP) - static const u_char accept_encoding[12] = - "\x8b\x84\x84\x2d\x69\x5b\x05\x44\x3c\x86\xaa\x6f"; + static const u_char accept_encoding[12] = { + 0x8b, 0x84, 0x84, 0x2d, 0x69, 0x5b, 0x05, 0x44, 0x3c, 0x86, 0xaa, 0x6f + }; #endif static size_t nginx_ver_len = ngx_http_v2_literal_size(NGINX_VER); From noreply at nginx.com Fri Apr 18 11:29:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 18 Apr 2025 11:29:02 +0000 (UTC) Subject: [nginx] HTTP/3: fixed NGX_HTTP_V3_VARLEN_INT_LEN value. Message-ID: <20250418112902.EA70948F3C@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/0f9f43b79eed64ab1a876be76ff0f49d499784fc branches: master commit: 0f9f43b79eed64ab1a876be76ff0f49d499784fc user: Roman Arutyunyan date: Fri, 18 Apr 2025 11:16:57 +0400 description: HTTP/3: fixed NGX_HTTP_V3_VARLEN_INT_LEN value. After fixing ngx_http_v3_encode_varlen_int() in 400eb1b628, NGX_HTTP_V3_VARLEN_INT_LEN retained the old value of 4, which is insufficient for the values over 1073741823 (1G - 1). The NGX_HTTP_V3_VARLEN_INT_LEN macro is used in ngx_http_v3_uni.c to format stream and frame types. Old buffer size is enough for formatting this data. Also, the macro is used in ngx_http_v3_filter_module.c to format output chunks and trailers. Considering output_buffers and proxy_buffer_size are below 1G in all realistic scenarios, the old buffer size is enough here as well. 
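For reference, QUIC variable-length integers (RFC 9000, Section 16) use the two high-order bits of the first byte to select a 1-, 2-, 4- or 8-byte encoding, which is why 1073741823 is the largest value that still fits in 4 bytes. A standalone sketch of the length selection; the helper name is illustrative, not an nginx function:

    #include <stddef.h>
    #include <stdint.h>

    static size_t
    quic_varint_len(uint64_t value)
    {
        /* values of 2^62 and above are not encodable in QUIC */

        if (value < (1u << 6)) {
            return 1;                       /* up to 63 */
        }

        if (value < (1u << 14)) {
            return 2;                       /* up to 16383 */
        }

        if (value < (1u << 30)) {
            return 4;                       /* up to 1073741823 */
        }

        return 8;                           /* up to 2^62 - 1 */
    }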
--- src/http/v3/ngx_http_v3.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h index 9dcb5e6a7..8fd212c1f 100644 --- a/src/http/v3/ngx_http_v3.h +++ b/src/http/v3/ngx_http_v3.h @@ -23,7 +23,7 @@ #define NGX_HTTP_V3_HQ_ALPN_PROTO "\x0Ahq-interop" #define NGX_HTTP_V3_HQ_PROTO "hq-interop" -#define NGX_HTTP_V3_VARLEN_INT_LEN 4 +#define NGX_HTTP_V3_VARLEN_INT_LEN 8 #define NGX_HTTP_V3_PREFIX_INT_LEN 11 #define NGX_HTTP_V3_STREAM_CONTROL 0x00 From noreply at nginx.com Fri Apr 18 19:58:03 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 18 Apr 2025 19:58:03 +0000 (UTC) Subject: [nginx] Core: improved NGX_ALIGNMENT detection on some x86_64 platforms. Message-ID: <20250418195803.378D448F62@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/b9d0ba6677ff7761c85f5556776d6a6c2a7a7051 branches: master commit: b9d0ba6677ff7761c85f5556776d6a6c2a7a7051 user: Aleksei Bavshin date: Tue, 14 Jan 2025 10:32:24 -0800 description: Core: improved NGX_ALIGNMENT detection on some x86_64 platforms. Previously, the default pool alignment used sizeof(unsigned long), with the expectation that this would match to a platform word size. Certain 64-bit platforms prove this assumption wrong by keeping the 32-bit long type, which is fully compliant with the C standard. This introduces a possibility of suboptimal misaligned access to the data allocated with ngx_palloc() on the affected platforms, which is addressed here by changing the default NGX_ALIGNMENT to a pointer size. As we override the detection in auto/os/conf for all the machine types except x86, and Unix-like 64-bit systems prefer the 64-bit long, the impact of the change should be limited to Win64 x64. --- src/core/ngx_config.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/ngx_config.h b/src/core/ngx_config.h index 1861be601..707ab216b 100644 --- a/src/core/ngx_config.h +++ b/src/core/ngx_config.h @@ -94,7 +94,7 @@ typedef intptr_t ngx_flag_t; #ifndef NGX_ALIGNMENT -#define NGX_ALIGNMENT sizeof(unsigned long) /* platform word */ +#define NGX_ALIGNMENT sizeof(uintptr_t) /* platform word */ #endif #define ngx_align(d, a) (((d) + (a - 1)) & ~(a - 1)) From noreply at nginx.com Fri Apr 18 19:58:03 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 18 Apr 2025 19:58:03 +0000 (UTC) Subject: [nginx] Win32: added detection of ARM64 target. Message-ID: <20250418195803.3A8C448F78@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/020b1db7eb187d4a9a5f1d6154c664a463473b36 branches: master commit: 020b1db7eb187d4a9a5f1d6154c664a463473b36 user: Aleksei Bavshin date: Tue, 14 Jan 2025 11:11:28 -0800 description: Win32: added detection of ARM64 target. This extends the target selection implemented in dad6ec3aa63f to support Windows ARM64 platforms. OpenSSL support for VC-WIN64-ARM target first appeared in 1.1.1 and is present in all currently supported (3.x) branches. As a side effect, ARM64 Windows builds will get 16-byte alignment along with the rest of non-x86 platforms. This is safe, as malloc on 64-bit Windows guarantees the fundamental alignment of allocations, 16 bytes. 
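A short illustration of the LLP64 point behind the NGX_ALIGNMENT change above: on 64-bit Windows the long type stays 32-bit while pointers are 64-bit, so sizeof(unsigned long) no longer matches the platform word, whereas sizeof(uintptr_t) does. The program below is only a demonstration, not nginx code:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* expected output on Win64 (x64 or ARM64): 4 8 */
        printf("%zu %zu\n", sizeof(unsigned long), sizeof(uintptr_t));
        return 0;
    }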
--- auto/cc/msvc | 4 ++++ auto/lib/openssl/make | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/auto/cc/msvc b/auto/cc/msvc index 567bac7bc..fe7c34e49 100644 --- a/auto/cc/msvc +++ b/auto/cc/msvc @@ -26,6 +26,10 @@ ngx_msvc_ver=`echo $NGX_MSVC_VER | sed -e 's/^\([0-9]*\).*/\1/'` case "$NGX_MSVC_VER" in + *ARM64) + NGX_MACHINE=arm64 + ;; + *x64) NGX_MACHINE=amd64 ;; diff --git a/auto/lib/openssl/make b/auto/lib/openssl/make index a7e9369e7..f8480146d 100644 --- a/auto/lib/openssl/make +++ b/auto/lib/openssl/make @@ -13,6 +13,10 @@ case "$CC" in OPENSSL_TARGET=VC-WIN64A ;; + arm64) + OPENSSL_TARGET=VC-WIN64-ARM + ;; + *) OPENSSL_TARGET=VC-WIN32 ;; From noreply at nginx.com Mon Apr 21 16:48:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Mon, 21 Apr 2025 16:48:02 +0000 (UTC) Subject: [njs] Replaced vm with mp in NJS_CHB_MP_INIT(). Message-ID: <20250421164802.BD90C48FA4@pubserv1.nginx> details: https://github.com/nginx/njs/commit/264b1ced4259db93f928f57dfbddcc5e3f93d68b branches: master commit: 264b1ced4259db93f928f57dfbddcc5e3f93d68b user: Zhidao HONG date: Mon, 21 Apr 2025 23:37:49 +0800 description: Replaced vm with mp in NJS_CHB_MP_INIT(). --- external/njs_query_string_module.c | 6 +++--- external/njs_xml_module.c | 2 +- external/njs_zlib_module.c | 4 ++-- nginx/ngx_http_js_module.c | 4 ++-- nginx/ngx_js_fetch.c | 8 ++++---- src/njs_array.c | 2 +- src/njs_chb.h | 4 ++-- src/njs_error.c | 2 +- src/njs_function.c | 2 +- src/njs_json.c | 4 ++-- src/njs_regexp.c | 2 +- src/njs_string.c | 8 ++++---- src/njs_typed_array.c | 2 +- src/njs_vm.c | 2 +- src/test/njs_benchmark.c | 2 +- src/test/njs_externals_test.c | 2 +- src/test/njs_unit_test.c | 2 +- 17 files changed, 29 insertions(+), 29 deletions(-) diff --git a/external/njs_query_string_module.c b/external/njs_query_string_module.c index f9b7c552..e2f849d2 100644 --- a/external/njs_query_string_module.c +++ b/external/njs_query_string_module.c @@ -152,7 +152,7 @@ njs_query_string_decode(njs_vm_t *vm, njs_value_t *value, const u_char *start, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, }; - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); njs_utf8_decode_init(&ctx); cp = 0; @@ -749,7 +749,7 @@ njs_query_string_stringify(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, encode = njs_value_function(val); } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); keys = njs_vm_object_keys(vm, object, njs_value_arg(&value)); if (njs_slow_path(keys == NULL)) { @@ -841,7 +841,7 @@ njs_query_string_escape(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_value_string_get(string, &str); - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); ret = njs_query_string_encode(&chain, &str); if (njs_slow_path(ret != NJS_OK)) { diff --git a/external/njs_xml_module.c b/external/njs_xml_module.c index 6bdbb639..e524ba5c 100644 --- a/external/njs_xml_module.c +++ b/external/njs_xml_module.c @@ -1785,7 +1785,7 @@ njs_xml_ext_canonicalization(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, } } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); buf = xmlOutputBufferCreateIO(njs_xml_buf_write_cb, NULL, &chain, NULL); if (njs_slow_path(buf == NULL)) { diff --git a/external/njs_zlib_module.c b/external/njs_zlib_module.c index 7945e646..d3a667d8 100644 --- a/external/njs_zlib_module.c +++ b/external/njs_zlib_module.c @@ -320,7 +320,7 @@ njs_zlib_ext_deflate(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, } 
} - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); do { stream.next_out = njs_chb_reserve(&chain, chunk_size); @@ -461,7 +461,7 @@ njs_zlib_ext_inflate(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, } } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); while (rc != Z_STREAM_END) { stream.next_out = njs_chb_reserve(&chain, chunk_size); diff --git a/nginx/ngx_http_js_module.c b/nginx/ngx_http_js_module.c index 5a131bc9..ce4ce365 100644 --- a/nginx/ngx_http_js_module.c +++ b/nginx/ngx_http_js_module.c @@ -3121,7 +3121,7 @@ ngx_http_js_header_in_array(njs_vm_t *vm, ngx_http_request_t *r, (*hh)->value.len); } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); for (i = 0; i < n; i++) { njs_chb_append(&chain, hh[i]->value.data, hh[i]->value.len); @@ -4166,7 +4166,7 @@ ngx_http_js_header_generic(njs_vm_t *vm, ngx_http_request_t *r, (*ph)->value.len); } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); sep = flags & NJS_HEADER_SEMICOLON ? ';' : ','; diff --git a/nginx/ngx_js_fetch.c b/nginx/ngx_js_fetch.c index 9539a687..63e7a298 100644 --- a/nginx/ngx_js_fetch.c +++ b/nginx/ngx_js_fetch.c @@ -732,7 +732,7 @@ ngx_js_ext_fetch(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, http->header_only = njs_strstr_eq(&request.method, &njs_str_value("HEAD")); - NJS_CHB_MP_INIT(&http->chain, vm); + NJS_CHB_MP_INIT(&http->chain, njs_vm_memory_pool(vm)); njs_chb_append(&http->chain, request.method.start, request.method.length); njs_chb_append_literal(&http->chain, " "); @@ -1035,7 +1035,7 @@ ngx_js_ext_response_constructor(njs_vm_t *vm, njs_value_t *args, } } - NJS_CHB_MP_INIT(&response->chain, vm); + NJS_CHB_MP_INIT(&response->chain, njs_vm_memory_pool(vm)); body = njs_arg(args, nargs, 1); @@ -2516,7 +2516,7 @@ ngx_js_http_process_headers(ngx_js_http_t *http) njs_chb_destroy(&http->chain); - NJS_CHB_MP_INIT(&http->response.chain, http->vm); + NJS_CHB_MP_INIT(&http->response.chain, njs_vm_memory_pool(http->vm)); http->process = ngx_js_http_process_body; @@ -3296,7 +3296,7 @@ ngx_headers_js_get(njs_vm_t *vm, njs_value_t *value, njs_str_t *name, return NJS_DECLINED; } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); h = ph; diff --git a/src/njs_array.c b/src/njs_array.c index 15a6b6d3..913bc8cf 100644 --- a/src/njs_array.c +++ b/src/njs_array.c @@ -1686,7 +1686,7 @@ njs_array_prototype_join(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, value = &entry; - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); for (i = 0; i < len; i++) { ret = njs_value_property_i64(vm, this, i, value); diff --git a/src/njs_chb.h b/src/njs_chb.h index 3dbe5ac9..4bd1cb3f 100644 --- a/src/njs_chb.h +++ b/src/njs_chb.h @@ -34,8 +34,8 @@ typedef struct { void njs_chb_init(njs_chb_t *chain, void *pool, njs_chb_alloc_t alloc, njs_chb_free_t free); -#define NJS_CHB_MP_INIT(chain, vm) \ - njs_chb_init(chain, njs_vm_memory_pool(vm), (njs_chb_alloc_t) njs_mp_alloc,\ +#define NJS_CHB_MP_INIT(chain, mp) \ + njs_chb_init(chain, mp, (njs_chb_alloc_t) njs_mp_alloc, \ (njs_chb_free_t) njs_mp_free) #define NJS_CHB_CTX_INIT(chain, ctx) \ njs_chb_init(chain, ctx, (njs_chb_alloc_t) js_malloc, \ diff --git a/src/njs_error.c b/src/njs_error.c index 6a14d767..b9da6597 100644 --- a/src/njs_error.c +++ b/src/njs_error.c @@ -1149,7 +1149,7 @@ njs_backtrace_to_string(njs_vm_t *vm, njs_arr_t *backtrace, njs_str_t *dst) return NJS_OK; } - NJS_CHB_MP_INIT(&chain, vm); + 
NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); njs_chb_append_str(&chain, dst); njs_chb_append(&chain, "\n", 1); diff --git a/src/njs_function.c b/src/njs_function.c index c677be57..326549f5 100644 --- a/src/njs_function.c +++ b/src/njs_function.c @@ -1032,7 +1032,7 @@ njs_function_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, goto fail; } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); if (async) { njs_chb_append_literal(&chain, "(async function("); diff --git a/src/njs_json.c b/src/njs_json.c index 85c5d0e8..8ba13314 100644 --- a/src/njs_json.c +++ b/src/njs_json.c @@ -1093,7 +1093,7 @@ njs_json_stringify_iterator(njs_json_stringify_t *stringify, goto memory_error; } - NJS_CHB_MP_INIT(&chain, stringify->vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(stringify->vm)); for ( ;; ) { if (state->index == 0) { @@ -1977,7 +1977,7 @@ njs_vm_value_dump(njs_vm_t *vm, njs_str_t *retval, njs_value_t *value, value = &exception; } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); if (!njs_dump_is_recursive(value)) { ret = njs_dump_terminal(stringify, &chain, value, console); diff --git a/src/njs_regexp.c b/src/njs_regexp.c index 1c7cfe87..479f0b75 100644 --- a/src/njs_regexp.c +++ b/src/njs_regexp.c @@ -1373,7 +1373,7 @@ njs_regexp_prototype_symbol_replace(njs_vm_t *vm, njs_value_t *args, } } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); results.separate = 0; results.pointer = 0; diff --git a/src/njs_string.c b/src/njs_string.c index 9405a8db..6a91cb4c 100644 --- a/src/njs_string.c +++ b/src/njs_string.c @@ -2955,7 +2955,7 @@ njs_string_get_substitution(njs_vm_t *vm, njs_value_t *matched, p = rep.start; end = rep.start + rep.length; - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); while (p < end) { r = njs_strlchr(p, end, '$'); @@ -3244,7 +3244,7 @@ njs_string_prototype_replace(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, return NJS_OK; } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); start = string.start; @@ -3899,7 +3899,7 @@ njs_string_decode_uri(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, src = string.start; end = string.start + string.size; - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); njs_utf8_decode_init(&ctx); @@ -4173,7 +4173,7 @@ njs_string_atob(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, len = njs_base64_decoded_length(len, pad); - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); dst = njs_chb_reserve(&chain, len * 2); if (njs_slow_path(dst == NULL)) { diff --git a/src/njs_typed_array.c b/src/njs_typed_array.c index 2a485a19..5930dcb0 100644 --- a/src/njs_typed_array.c +++ b/src/njs_typed_array.c @@ -2131,7 +2131,7 @@ njs_typed_array_prototype_join(njs_vm_t *vm, njs_value_t *args, return NJS_ERROR; } - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); njs_typed_array_to_chain(vm, &chain, array, separator); diff --git a/src/njs_vm.c b/src/njs_vm.c index dbeffa51..43b2ccc2 100644 --- a/src/njs_vm.c +++ b/src/njs_vm.c @@ -233,7 +233,7 @@ njs_vm_compile(njs_vm_t *vm, u_char **start, u_char *end) } if (njs_slow_path(vm->options.ast)) { - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); ret = njs_parser_serialize_ast(parser.node, &chain); if (njs_slow_path(ret == NJS_ERROR)) { return ret; diff --git a/src/test/njs_benchmark.c b/src/test/njs_benchmark.c index 
98c618c8..66c4e84b 100644 --- a/src/test/njs_benchmark.c +++ b/src/test/njs_benchmark.c @@ -745,7 +745,7 @@ njs_benchmark_string(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, } else if (memcmp(mode.start, "chb", 3) == 0) { - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); njs_chb_append_literal(&chain, "abc"); njs_chb_append(&chain, s.start, s.length); diff --git a/src/test/njs_externals_test.c b/src/test/njs_externals_test.c index 7d2e82af..33f0020a 100644 --- a/src/test/njs_externals_test.c +++ b/src/test/njs_externals_test.c @@ -323,7 +323,7 @@ njs_unit_test_r_header(njs_vm_t *vm, njs_object_prop_t *prop, ret = njs_vm_prop_name(vm, prop, &h); if (ret == NJS_OK) { - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); njs_chb_append(&chain, h.start, h.length); njs_chb_append(&chain, (u_char *) "|АБВ", njs_length("|АБВ")); diff --git a/src/test/njs_unit_test.c b/src/test/njs_unit_test.c index 980fd7fa..9c935e5b 100644 --- a/src/test/njs_unit_test.c +++ b/src/test/njs_unit_test.c @@ -22399,7 +22399,7 @@ njs_chb_test(njs_vm_t *vm, njs_opts_t *opts, njs_stat_t *stat) static const njs_str_t expected = njs_str("arg: \"XYZ\" -5"); - NJS_CHB_MP_INIT(&chain, vm); + NJS_CHB_MP_INIT(&chain, njs_vm_memory_pool(vm)); p = njs_chb_reserve(&chain, 513); if (p == NULL) { From noreply at nginx.com Mon Apr 21 21:47:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Mon, 21 Apr 2025 21:47:02 +0000 (UTC) Subject: [njs] Test262: fixed mktemp format on OpenBSD. Message-ID: <20250421214702.B148248FAC@pubserv1.nginx> details: https://github.com/nginx/njs/commit/6a0826429142d3ad509c7dede5ad301d23dffce4 branches: master commit: 6a0826429142d3ad509c7dede5ad301d23dffce4 user: Dmitry Volyntsev date: Mon, 21 Apr 2025 09:41:46 -0700 description: Test262: fixed mktemp format on OpenBSD. mktemp: insufficient number of Xs in template `/tmp/njs_test.XXX' --- test/options | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/options b/test/options index a7b6bd3e..58000703 100644 --- a/test/options +++ b/test/options @@ -3,7 +3,7 @@ # Copyright (C) Dmitry Volyntsev # Copyright (C) NGINX, Inc. -NJS_TEST_DIR=`mktemp -d /tmp/njs_test.XXX` +NJS_TEST_DIR=`mktemp -d /tmp/njs_test.XXXXXX` NJS_TEST_LOG_DEFAULT="$NJS_TEST_DIR/log.log" NJS_TEST_VERBOSE=${NJS_TEST_VERBOSE:-} From noreply at nginx.com Tue Apr 22 16:05:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 22 Apr 2025 16:05:02 +0000 (UTC) Subject: [nginx] Branch created: stable-1.28 Message-ID: <20250422160502.7F93648F92@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/6ac8b69f06bc10d5503f636da888fa70095b151c branches: stable-1.28 commit: 6ac8b69f06bc10d5503f636da888fa70095b151c user: Sergey Kandaurov date: Mon, 14 Apr 2025 22:35:27 +0400 description: nginx-1.27.5-RELEASE --- docs/xml/nginx/changes.xml | 71 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/docs/xml/nginx/changes.xml b/docs/xml/nginx/changes.xml index b55177d80..ecb063d57 100644 --- a/docs/xml/nginx/changes.xml +++ b/docs/xml/nginx/changes.xml @@ -5,6 +5,77 @@ + + + + +контроль перегрузки CUBIC в соединениях QUIC. + + +CUBIC congestion control in QUIC connections. + + + + + +ограничение на максимальный размер кешируемых в разделяемой памяти +SSL-сессий поднято до 8192. + + +the maximum size limit for SSL sessions cached in shared memory +has been raised to 8192. 
+ + + + + +в директивах grpc_ssl_password_file, proxy_ssl_password_file и +uwsgi_ssl_password_file +при загрузке SSL-сертификатов и зашифрованных ключей из переменных; +ошибка появилась в 1.23.1. + + +in the "grpc_ssl_password_file", "proxy_ssl_password_file", and +"uwsgi_ssl_password_file" directives +when loading SSL certificates and encrypted keys from variables; +the bug had appeared in 1.23.1. + + + + + +в переменных $ssl_curve и $ssl_curves +при использовании подключаемых кривых в OpenSSL. + + +in the $ssl_curve and $ssl_curves variables +when using pluggable curves in OpenSSL. + + + + + +nginx не собирался с musl libc.
+Спасибо Piotr Sikora. +
+ +nginx could not be built with musl libc.
+Thanks to Piotr Sikora. +
+
+ + + +Улучшения производительности и исправления в HTTP/3. + + +Performance improvements and bugfixes in HTTP/3. + + + +
+ + From noreply at nginx.com Wed Apr 23 07:54:03 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 23 Apr 2025 07:54:03 +0000 (UTC) Subject: [nginx] QUIC: fixed a typo. Message-ID: <20250423075403.1DA5A4783D@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/9785db9bd504ff25c1d84857505e6546fc04ae68 branches: master commit: 9785db9bd504ff25c1d84857505e6546fc04ae68 user: nandsky date: Fri, 18 Apr 2025 11:45:12 +0800 description: QUIC: fixed a typo. --- src/event/quic/ngx_event_quic_ack.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c index b8b72e943..c7fd96c2c 100644 --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -22,7 +22,7 @@ /* CUBIC parameters x10 */ #define NGX_QUIC_CUBIC_BETA 7 -#define MGX_QUIC_CUBIC_C 4 +#define NGX_QUIC_CUBIC_C 4 /* send time of ACK'ed packets */ @@ -483,7 +483,7 @@ ngx_quic_congestion_cubic(ngx_connection_t *c) * w_cubic = C * (t_msec / 1000) ^ 3 * mtu + w_max */ - cc = 10000000000ll / (int64_t) cg->mtu / MGX_QUIC_CUBIC_C; + cc = 10000000000ll / (int64_t) cg->mtu / NGX_QUIC_CUBIC_C; w = t * t * t / cc + (int64_t) cg->w_max; if (w > NGX_MAX_SIZE_T_VALUE) { @@ -945,7 +945,7 @@ ngx_quic_congestion_cubic_time(ngx_connection_t *c) return 0; } - cc = 10000000000ll / (int64_t) cg->mtu / MGX_QUIC_CUBIC_C; + cc = 10000000000ll / (int64_t) cg->mtu / NGX_QUIC_CUBIC_C; v = (int64_t) (cg->w_max - cg->window) * cc; /* From noreply at nginx.com Wed Apr 23 11:49:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 23 Apr 2025 11:49:02 +0000 (UTC) Subject: [nginx] Stable branch. Message-ID: <20250423114902.90D1F4783E@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/f2aa4339a5877bfe8a346a090b1bdef00b20d521 branches: stable-1.28 commit: f2aa4339a5877bfe8a346a090b1bdef00b20d521 user: Sergey Kandaurov date: Tue, 22 Apr 2025 17:29:23 +0400 description: Stable branch. --- src/core/nginx.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/nginx.h b/src/core/nginx.h index 090777853..4229012ba 100644 --- a/src/core/nginx.h +++ b/src/core/nginx.h @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1027005 -#define NGINX_VERSION "1.27.5" +#define nginx_version 1028000 +#define NGINX_VERSION "1.28.0" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From noreply at nginx.com Wed Apr 23 11:49:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 23 Apr 2025 11:49:02 +0000 (UTC) Subject: [nginx] nginx-1.28.0-RELEASE Message-ID: <20250423114902.A73F747842@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/481d28cb4e04c8096b9b6134856891dc52ecc68f branches: stable-1.28 commit: 481d28cb4e04c8096b9b6134856891dc52ecc68f user: Sergey Kandaurov date: Tue, 22 Apr 2025 18:09:32 +0400 description: nginx-1.28.0-RELEASE --- docs/xml/nginx/changes.xml | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/docs/xml/nginx/changes.xml b/docs/xml/nginx/changes.xml index ecb063d57..2af30d6d6 100644 --- a/docs/xml/nginx/changes.xml +++ b/docs/xml/nginx/changes.xml @@ -5,6 +5,42 @@ + + + + +Стабильная ветка 1.28.x. + + +1.28.x stable branch. + + + + + +nginx не собирался gcc 15, +если использовались модули ngx_http_v2_module и ngx_http_v3_module. + + +nginx could not be built by gcc 15 +if ngx_http_v2_module or ngx_http_v3_module modules were used. 
+ + + + + +nginx мог не собираться gcc 14 и новее с оптимизацией -O3 -flto, +если использовался модуль ngx_http_v3_module. + + +nginx might not be built by gcc 14 or newer with -O3 -flto optimization +if ngx_http_v3_module was used. + + + + + + From noreply at nginx.com Wed Apr 23 11:49:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 23 Apr 2025 11:49:02 +0000 (UTC) Subject: [nginx] HTTP/3: fixed NGX_HTTP_V3_VARLEN_INT_LEN value. Message-ID: <20250423114902.996F747841@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/55be5536a8fa4dba3ef687db2532ac96bd879b2b branches: stable-1.28 commit: 55be5536a8fa4dba3ef687db2532ac96bd879b2b user: Roman Arutyunyan date: Fri, 18 Apr 2025 11:16:57 +0400 description: HTTP/3: fixed NGX_HTTP_V3_VARLEN_INT_LEN value. After fixing ngx_http_v3_encode_varlen_int() in 400eb1b628, NGX_HTTP_V3_VARLEN_INT_LEN retained the old value of 4, which is insufficient for the values over 1073741823 (1G - 1). The NGX_HTTP_V3_VARLEN_INT_LEN macro is used in ngx_http_v3_uni.c to format stream and frame types. Old buffer size is enough for formatting this data. Also, the macro is used in ngx_http_v3_filter_module.c to format output chunks and trailers. Considering output_buffers and proxy_buffer_size are below 1G in all realistic scenarios, the old buffer size is enough here as well. --- src/http/v3/ngx_http_v3.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h index 9dcb5e6a7..8fd212c1f 100644 --- a/src/http/v3/ngx_http_v3.h +++ b/src/http/v3/ngx_http_v3.h @@ -23,7 +23,7 @@ #define NGX_HTTP_V3_HQ_ALPN_PROTO "\x0Ahq-interop" #define NGX_HTTP_V3_HQ_PROTO "hq-interop" -#define NGX_HTTP_V3_VARLEN_INT_LEN 4 +#define NGX_HTTP_V3_VARLEN_INT_LEN 8 #define NGX_HTTP_V3_PREFIX_INT_LEN 11 #define NGX_HTTP_V3_STREAM_CONTROL 0x00 From noreply at nginx.com Wed Apr 23 11:49:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 23 Apr 2025 11:49:02 +0000 (UTC) Subject: [nginx] Fixed -Wunterminated-string-initialization with gcc15. Message-ID: <20250423114902.952724783F@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/3a97c9616cfd7c4dd3a177cb2cb583301e80404c branches: stable-1.28 commit: 3a97c9616cfd7c4dd3a177cb2cb583301e80404c user: Roman Arutyunyan date: Wed, 16 Apr 2025 16:56:44 +0400 description: Fixed -Wunterminated-string-initialization with gcc15. --- src/event/quic/ngx_event_quic_protection.c | 12 +++++++----- src/http/v2/ngx_http_v2_filter_module.c | 7 ++++--- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/event/quic/ngx_event_quic_protection.c b/src/event/quic/ngx_event_quic_protection.c index 3f249b36a..e5c0df7b4 100644 --- a/src/event/quic/ngx_event_quic_protection.c +++ b/src/event/quic/ngx_event_quic_protection.c @@ -125,9 +125,10 @@ ngx_quic_keys_set_initial_secret(ngx_quic_keys_t *keys, ngx_str_t *secret, ngx_quic_secret_t *client, *server; ngx_quic_ciphers_t ciphers; - static const uint8_t salt[20] = - "\x38\x76\x2c\xf7\xf5\x59\x34\xb3\x4d\x17" - "\x9a\xe6\xa4\xc8\x0c\xad\xcc\xbb\x7f\x0a"; + static const uint8_t salt[20] = { + 0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, + 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a + }; client = &keys->secrets[ssl_encryption_initial].client; server = &keys->secrets[ssl_encryption_initial].server; @@ -958,8 +959,9 @@ ngx_quic_create_retry_packet(ngx_quic_header_t *pkt, ngx_str_t *res) /* 5.8. 
Retry Packet Integrity */ static ngx_quic_md_t key = ngx_quic_md( "\xbe\x0c\x69\x0b\x9f\x66\x57\x5a\x1d\x76\x6b\x54\xe3\x68\xc8\x4e"); - static const u_char nonce[NGX_QUIC_IV_LEN] = - "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"; + static const u_char nonce[NGX_QUIC_IV_LEN] = { + 0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb + }; static ngx_str_t in = ngx_string(""); ad.data = res->data; diff --git a/src/http/v2/ngx_http_v2_filter_module.c b/src/http/v2/ngx_http_v2_filter_module.c index 1e2cafaf1..b63e343a0 100644 --- a/src/http/v2/ngx_http_v2_filter_module.c +++ b/src/http/v2/ngx_http_v2_filter_module.c @@ -115,10 +115,11 @@ ngx_http_v2_header_filter(ngx_http_request_t *r) ngx_http_core_srv_conf_t *cscf; u_char addr[NGX_SOCKADDR_STRLEN]; - static const u_char nginx[5] = "\x84\xaa\x63\x55\xe7"; + static const u_char nginx[5] = { 0x84, 0xaa, 0x63, 0x55, 0xe7 }; #if (NGX_HTTP_GZIP) - static const u_char accept_encoding[12] = - "\x8b\x84\x84\x2d\x69\x5b\x05\x44\x3c\x86\xaa\x6f"; + static const u_char accept_encoding[12] = { + 0x8b, 0x84, 0x84, 0x2d, 0x69, 0x5b, 0x05, 0x44, 0x3c, 0x86, 0xaa, 0x6f + }; #endif static size_t nginx_ver_len = ngx_http_v2_literal_size(NGINX_VER); From noreply at nginx.com Wed Apr 23 11:54:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 23 Apr 2025 11:54:02 +0000 (UTC) Subject: [nginx] Annotated tag created: release-1.28.0 Message-ID: <20250423115402.B3EF54783F@pubserv1.nginx> details: https://github.com/nginx/nginx/releases/tag/release-1.28.0 branches: commit: 481d28cb4e04c8096b9b6134856891dc52ecc68f user: Sergey Kandaurov date: Wed Apr 23 15:51:46 2025 +0400 description: release-1.28.0 tag From vl at inspert.ru Fri Apr 25 10:51:18 2025 From: vl at inspert.ru (Vladimir Homutov) Date: Fri, 25 Apr 2025 13:51:18 +0300 Subject: [nginx] QUIC: do not increase underutilized congestion window. In-Reply-To: <20250415150202.EBADF48F77@pubserv1.nginx> References: <20250415150202.EBADF48F77@pubserv1.nginx> Message-ID: On Tue, Apr 15, 2025 at 03:02:02PM +0000, noreply at nginx.com wrote: > details: https://github.com/nginx/nginx/commit/cd5e4fa1446dff86fafc3b6ffcc11afd527a024f > branches: master > commit: cd5e4fa1446dff86fafc3b6ffcc11afd527a024f > user: Roman Arutyunyan > date: Sat, 4 Jan 2025 18:03:46 +0400 > description: > QUIC: do not increase underutilized congestion window. > > As per RFC 9002, Section 7.8, congestion window should not be increased > when it's underutilized. > Hello, it looks like this patch triggers some issues with ack processing (possible previously, but now more probable): see details in suggested patch. I'm also attaching bad.log and good.log. The first one demonstrates that nginx is not sending any ACKs since it is switched into idle mode and the window is not increasing. Since the client code is symmetric, deadlock occurs. From vl at inspert.ru Fri Apr 25 10:55:11 2025 From: vl at inspert.ru (Vladimir Homutov) Date: Fri, 25 Apr 2025 13:55:11 +0300 Subject: [nginx] QUIC: do not increase underutilized congestion window. In-Reply-To: References: <20250415150202.EBADF48F77@pubserv1.nginx> Message-ID: missing attachments -------------- next part -------------- commit 0c7c9d6732a5fe3a3208286c8904db1851ac2cba Author: Vladimir Homutov Date: Fri Apr 25 13:35:00 2025 +0300 QUIC: fixed possible deadlock with ACKs not sent due to congestion. 
The commit cd5e4fa1446dff86fafc3b6ffcc11afd527a024f (QUIC: do not increase underutilized congestion window) led to an increased possibility of deadlock, caused by the fact that QUIC does not output any packets while congested. If both client and server follow the same logic, it is possible that both ends are in the same state: waiting for ACKs to increase the window, while being unable to send even an ACK to unblock the other side. Since packets that contain only ACK frames are not congestion controlled, we are definitely allowed to send them, and we also need to respect the max_ack_delay. Currently, the max_ack_delay timer may fire and the push handler is called, but the output does not send anything because the window is closed. The patch allows sending ACK-only packets when the window is closed. Probably, this needs to be attached to the actual trigger of the max_ack_delay timer, but the suggested change looks good enough for practical reasons. Also, RFC 9000 13.2.1 says: Since packets containing only ACK frames are not congestion controlled, an endpoint MUST NOT send more than one such packet in response to receiving an ack-eliciting packet. Probably, this also needs to be accounted for. diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c index a92a539f3..4e51518c0 100644 --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -55,7 +55,8 @@ static ssize_t ngx_quic_send_segments(ngx_connection_t *c, u_char *buf, size_t len, struct sockaddr *sockaddr, socklen_t socklen, size_t segment); #endif static ssize_t ngx_quic_output_packet(ngx_connection_t *c, - ngx_quic_send_ctx_t *ctx, u_char *data, size_t max, size_t min); + ngx_quic_send_ctx_t *ctx, u_char *data, size_t max, size_t min, + ngx_uint_t ack_only); static void ngx_quic_init_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, ngx_quic_header_t *pkt, ngx_quic_path_t *path); static ngx_uint_t ngx_quic_get_padding_level(ngx_connection_t *c); @@ -116,7 +117,7 @@ ngx_quic_create_datagrams(ngx_connection_t *c) ssize_t n; u_char *p; uint64_t preserved_pnum[NGX_QUIC_SEND_CTX_LAST]; - ngx_uint_t i, pad; + ngx_uint_t i, pad, ack_only; ngx_quic_path_t *path; ngx_quic_send_ctx_t *ctx; ngx_quic_congestion_t *cg; @@ -131,7 +132,9 @@ ngx_quic_create_datagrams(ngx_connection_t *c) ngx_memzero(preserved_pnum, sizeof(preserved_pnum)); #endif - while (cg->in_flight < cg->window) { + ack_only = (cg->in_flight >= cg->window); + + while ((cg->in_flight < cg->window) || ack_only) { p = dst; @@ -158,7 +161,7 @@ ngx_quic_create_datagrams(ngx_connection_t *c) return NGX_OK; } - n = ngx_quic_output_packet(c, ctx, p, len, min); + n = ngx_quic_output_packet(c, ctx, p, len, min, ack_only); if (n == NGX_ERROR) { return NGX_ERROR; } @@ -186,6 +189,9 @@ ngx_quic_create_datagrams(ngx_connection_t *c) ngx_quic_commit_send(c); + /* send pure acks just once */ + ack_only = 0; + path->sent += len; } @@ -331,7 +337,7 @@ ngx_quic_create_segments(ngx_connection_t *c) size_t len, segsize; ssize_t n; u_char *p, *end; - ngx_uint_t nseg, level; + ngx_uint_t nseg, level, ack_only; ngx_quic_path_t *path; ngx_quic_send_ctx_t *ctx; ngx_quic_congestion_t *cg; @@ -358,13 +364,15 @@ ngx_quic_create_segments(ngx_connection_t *c) level = ctx - qc->send_ctx; preserved_pnum[level] = ctx->pnum; + ack_only = (cg->in_flight >= cg->window); + for ( ;; ) { len = ngx_min(segsize, (size_t) (end - p)); - if (len && cg->in_flight + (p - dst) < cg->window) { + if (len && ((cg->in_flight + (p - dst) <
cg->window) || ack_only)) { - n = ngx_quic_output_packet(c, ctx, p, len, len); + n = ngx_quic_output_packet(c, ctx, p, len, len, ack_only); if (n == NGX_ERROR) { return NGX_ERROR; } @@ -397,6 +405,9 @@ ngx_quic_create_segments(ngx_connection_t *c) ngx_quic_commit_send(c); + /* send pure acks just once */ + ack_only = 0; + path->sent += n; p = dst; @@ -521,7 +532,7 @@ ngx_quic_get_padding_level(ngx_connection_t *c) static ssize_t ngx_quic_output_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, - u_char *data, size_t max, size_t min) + u_char *data, size_t max, size_t min, ngx_uint_t ack_only) { size_t len, pad, min_payload, max_payload; u_char *p; @@ -589,6 +600,12 @@ ngx_quic_output_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, break; } + if (ack_only + && (f->type != NGX_QUIC_FT_ACK && f->type != NGX_QUIC_FT_ACK_ECN)) + { + continue; + } + if (len + f->len > max_payload) { rc = ngx_quic_split_frame(c, f, max_payload - len); -------------- next part -------------- A non-text attachment was scrubbed... Name: logs_info.tar.gz Type: application/gzip Size: 24054 bytes Desc: not available URL: From noreply at nginx.com Fri Apr 25 10:57:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 25 Apr 2025 10:57:02 +0000 (UTC) Subject: [nginx] SSL: fixed build with OPENSSL_NO_DEPRECATED. Message-ID: <20250425105702.690DD478D5@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/4f8bc0b282b976eb7d044c81cad06e7e6d64e5ff branches: master commit: 4f8bc0b282b976eb7d044c81cad06e7e6d64e5ff user: Sergey Kandaurov date: Wed, 16 Apr 2025 20:50:29 +0400 description: SSL: fixed build with OPENSSL_NO_DEPRECATED. --- src/event/ngx_event_openssl.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h index b7aaaca75..9ba21a810 100644 --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -83,6 +83,17 @@ #endif +#ifdef OPENSSL_NO_DEPRECATED_3_4 +#define SSL_SESSION_get_time(s) SSL_SESSION_get_time_ex(s) +#define SSL_SESSION_set_time(s, t) SSL_SESSION_set_time_ex(s, t) +#endif + + +#ifdef OPENSSL_NO_DEPRECATED_3_0 +#define EVP_CIPHER_CTX_cipher(c) EVP_CIPHER_CTX_get0_cipher(c) +#endif + + typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t; From noreply at nginx.com Fri Apr 25 10:57:02 2025 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 25 Apr 2025 10:57:02 +0000 (UTC) Subject: [nginx] SSL: fixed build with OPENSSL_NO_DH. Message-ID: <20250425105702.6E2AF478E9@pubserv1.nginx> details: https://github.com/nginx/nginx/commit/adda7041582d8565ee1e5e7dfe740db85398e1ce branches: master commit: adda7041582d8565ee1e5e7dfe740db85398e1ce user: Sergey Kandaurov date: Wed, 16 Apr 2025 20:58:57 +0400 description: SSL: fixed build with OPENSSL_NO_DH. 
--- src/event/ngx_event_openssl.c | 4 ++++ src/event/ngx_event_openssl.h | 2 ++ 2 files changed, 6 insertions(+) diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c index 6992cc4a4..7eb05209d 100644 --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -1315,6 +1315,8 @@ ngx_ssl_passwords_cleanup(void *data) ngx_int_t ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file) { +#ifndef OPENSSL_NO_DH + BIO *bio; if (file->len == 0) { @@ -1385,6 +1387,8 @@ ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file) BIO_free(bio); +#endif + return NGX_OK; } diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h index 9ba21a810..d4a62b82a 100644 --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -19,7 +19,9 @@ #include #include #include +#ifndef OPENSSL_NO_DH #include +#endif #ifndef OPENSSL_NO_ENGINE #include #endif From arut at nginx.com Fri Apr 25 11:29:13 2025 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 25 Apr 2025 15:29:13 +0400 Subject: [nginx] QUIC: do not increase underutilized congestion window. In-Reply-To: References: <20250415150202.EBADF48F77@pubserv1.nginx> Message-ID: <20250425112913.nxpwbtr6mn3olaxg@N00W24XTQX> Hi Vladimir, Thanks for your patch, I will look into this. The issue indeed seems possible and needs to be addressed. The mentioned change cd5e4fa14 is correct though since without it CWND used to skyrocket while being underutilized. And this certainly concealed other issues. Do you have a reliable way to trigger this? On Fri, Apr 25, 2025 at 01:55:11PM +0300, Vladimir Homutov via nginx-devel wrote: > missing attachments > > commit 0c7c9d6732a5fe3a3208286c8904db1851ac2cba > Author: Vladimir Homutov > Date: Fri Apr 25 13:35:00 2025 +0300 > > QUIC: fixed possible deadlock with ACKs not sent due to congestion. > > The commit cd5e4fa1446dff86fafc3b6ffcc11afd527a024f (QUIC: do not increase > underutilized congestion window) lead to increased possibility of deadlock, > caused by the fact that quic does not output any packets in congestion. > > If both client and server follow the same logic, it is possible that both > ends are in the same state: waiting for ACKs to increase window, while > being unable to send even ACK to unblock the other side. > > Since packets that contain ACK frames only are not congestion controlled, > we are definitely allowed to send them, since we also need to respect > the max_ack_delay. Currently, the max_ack_delay may be triggered and > push handler is called, but the output does not send anything, because > window is closed, thus nothing is sent. > > The patch allows to send ACK-only packets in case when the window is closed. > Probably, this needs to be attached to the actual trigger of max_ack_delay > timer, but it looks like suggested changes is goed enough for practical reasons. > > Also, RFC 9000 13.2.1 says: > Since packets containing only ACK frames are not congestion controlled, > an endpoint MUST NOT send more than one such packet in response to > receiving an ack-eliciting packet. > > Probably, this also needs to be accounted.
> > diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c > index a92a539f3..4e51518c0 100644 > --- a/src/event/quic/ngx_event_quic_output.c > +++ b/src/event/quic/ngx_event_quic_output.c > @@ -55,7 +55,8 @@ static ssize_t ngx_quic_send_segments(ngx_connection_t *c, u_char *buf, > size_t len, struct sockaddr *sockaddr, socklen_t socklen, size_t segment); > #endif > static ssize_t ngx_quic_output_packet(ngx_connection_t *c, > - ngx_quic_send_ctx_t *ctx, u_char *data, size_t max, size_t min); > + ngx_quic_send_ctx_t *ctx, u_char *data, size_t max, size_t min, > + ngx_uint_t ack_only); > static void ngx_quic_init_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, > ngx_quic_header_t *pkt, ngx_quic_path_t *path); > static ngx_uint_t ngx_quic_get_padding_level(ngx_connection_t *c); > @@ -116,7 +117,7 @@ ngx_quic_create_datagrams(ngx_connection_t *c) > ssize_t n; > u_char *p; > uint64_t preserved_pnum[NGX_QUIC_SEND_CTX_LAST]; > - ngx_uint_t i, pad; > + ngx_uint_t i, pad, ack_only; > ngx_quic_path_t *path; > ngx_quic_send_ctx_t *ctx; > ngx_quic_congestion_t *cg; > @@ -131,7 +132,9 @@ ngx_quic_create_datagrams(ngx_connection_t *c) > ngx_memzero(preserved_pnum, sizeof(preserved_pnum)); > #endif > > - while (cg->in_flight < cg->window) { > + ack_only = (cg->in_flight >= cg->window); > + > + while ((cg->in_flight < cg->window) || ack_only) { > > p = dst; > > @@ -158,7 +161,7 @@ ngx_quic_create_datagrams(ngx_connection_t *c) > return NGX_OK; > } > > - n = ngx_quic_output_packet(c, ctx, p, len, min); > + n = ngx_quic_output_packet(c, ctx, p, len, min, ack_only); > if (n == NGX_ERROR) { > return NGX_ERROR; > } > @@ -186,6 +189,9 @@ ngx_quic_create_datagrams(ngx_connection_t *c) > > ngx_quic_commit_send(c); > > + /* send pure acks just once */ > + ack_only = 0; > + > path->sent += len; > } > > @@ -331,7 +337,7 @@ ngx_quic_create_segments(ngx_connection_t *c) > size_t len, segsize; > ssize_t n; > u_char *p, *end; > - ngx_uint_t nseg, level; > + ngx_uint_t nseg, level, ack_only; > ngx_quic_path_t *path; > ngx_quic_send_ctx_t *ctx; > ngx_quic_congestion_t *cg; > @@ -358,13 +364,15 @@ ngx_quic_create_segments(ngx_connection_t *c) > level = ctx - qc->send_ctx; > preserved_pnum[level] = ctx->pnum; > > + ack_only = (cg->in_flight >= cg->window); > + > for ( ;; ) { > > len = ngx_min(segsize, (size_t) (end - p)); > > - if (len && cg->in_flight + (p - dst) < cg->window) { > + if (len && ((cg->in_flight + (p - dst) < cg->window) || ack_only)) { > > - n = ngx_quic_output_packet(c, ctx, p, len, len); > + n = ngx_quic_output_packet(c, ctx, p, len, len, ack_only); > if (n == NGX_ERROR) { > return NGX_ERROR; > } > @@ -397,6 +405,9 @@ ngx_quic_create_segments(ngx_connection_t *c) > > ngx_quic_commit_send(c); > > + /* send pure acks just once */ > + ack_only = 0; > + > path->sent += n; > > p = dst; > @@ -521,7 +532,7 @@ ngx_quic_get_padding_level(ngx_connection_t *c) > > static ssize_t > ngx_quic_output_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, > - u_char *data, size_t max, size_t min) > + u_char *data, size_t max, size_t min, ngx_uint_t ack_only) > { > size_t len, pad, min_payload, max_payload; > u_char *p; > @@ -589,6 +600,12 @@ ngx_quic_output_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, > break; > } > > + if (ack_only > + && (f->type != NGX_QUIC_FT_ACK && f->type != NGX_QUIC_FT_ACK_ECN)) > + { > + continue; > + } > + > if (len + f->len > max_payload) { > rc = ngx_quic_split_frame(c, f, max_payload - len); > > _______________________________________________ 
> nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman From vl at inspert.ru Fri Apr 25 12:19:23 2025 From: vl at inspert.ru (Vladimir Homutov) Date: Fri, 25 Apr 2025 15:19:23 +0300 Subject: [nginx] QUIC: do not increase underutilized congestion window. In-Reply-To: <20250425112913.nxpwbtr6mn3olaxg@N00W24XTQX> References: <20250415150202.EBADF48F77@pubserv1.nginx> <20250425112913.nxpwbtr6mn3olaxg@N00W24XTQX> Message-ID: <4fea1847-ab27-4751-9fbd-3b356f638031@inspert.ru> On 4/25/25 2:29 PM, Roman Arutyunyan wrote: > Hi Vladimir, > > Thanks for your patch, I will look into this. The issue indeed seems possible > and needs to be addressed. The mentiobned change cd5e4fa14 is correct though > since without it CWND used to skyrocket while being underutilized. And this > certainly consealed other issues. > > Do you have a reliable way to trigger this? Yes, I'm able to trigger this reliably with angie as a client (with cd5e4fa14 merged both on client and server). > > On Fri, Apr 25, 2025 at 01:55:11PM +0300, Vladimir Homutov via nginx-devel wrote: >> missing attachments >> >> commit 0c7c9d6732a5fe3a3208286c8904db1851ac2cba >> Author: Vladimir Homutov >> Date: Fri Apr 25 13:35:00 2025 +0300 >> >> QUIC: fixed possible deadlock with ACKs not sent due to congestion. >> >> The commit cd5e4fa1446dff86fafc3b6ffcc11afd527a024f (QUIC: do not increase >> underutilized congestion window) lead to increased possibility of deadlock, >> caused by the fact that quic does not output any packets in congestion. >> >> If both client and server follow the same logic, it is possible that both >> ends are in the same state: waiting for ACKs to increase window, while >> being unable to send even ACK to unblock the other side. >> >> Since packets that contain ACK frames only are not congestion controlled, >> we are definitely allowed to send them, since we also need to respect >> the max_ack_delay. Currently, the max_ack_delay may be triggered and >> push handler is called, but the output does not send anything, because >> window is closed, thus nothing is sent. >> >> The patch allows to send ACK-only packets in case when the window is closed. >> Probably, this needs to be attached to the actual trigger of max_ack_delay >> timer, but it looks like suggested changes is goed enough for practical reasons. >> >> Also, RFC 9000 13.2.1 says: >> Since packets containing only ACK frames are not congestion controlled, >> an endpoint MUST NOT send more than one such packet in response to >> receiving an ack-eliciting packet. >> >> Probably, this also needs to be accounted. 
>> >> diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c >> index a92a539f3..4e51518c0 100644 >> --- a/src/event/quic/ngx_event_quic_output.c >> +++ b/src/event/quic/ngx_event_quic_output.c >> @@ -55,7 +55,8 @@ static ssize_t ngx_quic_send_segments(ngx_connection_t *c, u_char *buf, >> size_t len, struct sockaddr *sockaddr, socklen_t socklen, size_t segment); >> #endif >> static ssize_t ngx_quic_output_packet(ngx_connection_t *c, >> - ngx_quic_send_ctx_t *ctx, u_char *data, size_t max, size_t min); >> + ngx_quic_send_ctx_t *ctx, u_char *data, size_t max, size_t min, >> + ngx_uint_t ack_only); >> static void ngx_quic_init_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, >> ngx_quic_header_t *pkt, ngx_quic_path_t *path); >> static ngx_uint_t ngx_quic_get_padding_level(ngx_connection_t *c); >> @@ -116,7 +117,7 @@ ngx_quic_create_datagrams(ngx_connection_t *c) >> ssize_t n; >> u_char *p; >> uint64_t preserved_pnum[NGX_QUIC_SEND_CTX_LAST]; >> - ngx_uint_t i, pad; >> + ngx_uint_t i, pad, ack_only; >> ngx_quic_path_t *path; >> ngx_quic_send_ctx_t *ctx; >> ngx_quic_congestion_t *cg; >> @@ -131,7 +132,9 @@ ngx_quic_create_datagrams(ngx_connection_t *c) >> ngx_memzero(preserved_pnum, sizeof(preserved_pnum)); >> #endif >> >> - while (cg->in_flight < cg->window) { >> + ack_only = (cg->in_flight >= cg->window); >> + >> + while ((cg->in_flight < cg->window) || ack_only) { >> >> p = dst; >> >> @@ -158,7 +161,7 @@ ngx_quic_create_datagrams(ngx_connection_t *c) >> return NGX_OK; >> } >> >> - n = ngx_quic_output_packet(c, ctx, p, len, min); >> + n = ngx_quic_output_packet(c, ctx, p, len, min, ack_only); >> if (n == NGX_ERROR) { >> return NGX_ERROR; >> } >> @@ -186,6 +189,9 @@ ngx_quic_create_datagrams(ngx_connection_t *c) >> >> ngx_quic_commit_send(c); >> >> + /* send pure acks just once */ >> + ack_only = 0; >> + >> path->sent += len; >> } >> >> @@ -331,7 +337,7 @@ ngx_quic_create_segments(ngx_connection_t *c) >> size_t len, segsize; >> ssize_t n; >> u_char *p, *end; >> - ngx_uint_t nseg, level; >> + ngx_uint_t nseg, level, ack_only; >> ngx_quic_path_t *path; >> ngx_quic_send_ctx_t *ctx; >> ngx_quic_congestion_t *cg; >> @@ -358,13 +364,15 @@ ngx_quic_create_segments(ngx_connection_t *c) >> level = ctx - qc->send_ctx; >> preserved_pnum[level] = ctx->pnum; >> >> + ack_only = (cg->in_flight >= cg->window); >> + >> for ( ;; ) { >> >> len = ngx_min(segsize, (size_t) (end - p)); >> >> - if (len && cg->in_flight + (p - dst) < cg->window) { >> + if (len && ((cg->in_flight + (p - dst) < cg->window) || ack_only)) { >> >> - n = ngx_quic_output_packet(c, ctx, p, len, len); >> + n = ngx_quic_output_packet(c, ctx, p, len, len, ack_only); >> if (n == NGX_ERROR) { >> return NGX_ERROR; >> } >> @@ -397,6 +405,9 @@ ngx_quic_create_segments(ngx_connection_t *c) >> >> ngx_quic_commit_send(c); >> >> + /* send pure acks just once */ >> + ack_only = 0; >> + >> path->sent += n; >> >> p = dst; >> @@ -521,7 +532,7 @@ ngx_quic_get_padding_level(ngx_connection_t *c) >> >> static ssize_t >> ngx_quic_output_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, >> - u_char *data, size_t max, size_t min) >> + u_char *data, size_t max, size_t min, ngx_uint_t ack_only) >> { >> size_t len, pad, min_payload, max_payload; >> u_char *p; >> @@ -589,6 +600,12 @@ ngx_quic_output_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, >> break; >> } >> >> + if (ack_only >> + && (f->type != NGX_QUIC_FT_ACK && f->type != NGX_QUIC_FT_ACK_ECN)) >> + { >> + continue; >> + } >> + >> if (len + f->len > 
max_payload) { >> rc = ngx_quic_split_frame(c, f, max_payload - len); >> > >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> https://mailman.nginx.org/mailman/listinfo/nginx-devel > > -- > Roman -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Fri Apr 25 13:42:59 2025 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 25 Apr 2025 17:42:59 +0400 Subject: [nginx] QUIC: do not increase underutilized congestion window. In-Reply-To: References: <20250415150202.EBADF48F77@pubserv1.nginx> Message-ID: <52BAA7AB-B53F-47BA-831F-7227BBFA8E51@nginx.com> > On 25 Apr 2025, at 14:51, Vladimir Homutov via nginx-devel wrote: > > On Tue, Apr 15, 2025 at 03:02:02PM +0000, noreply at nginx.com wrote: >> details: https://github.com/nginx/nginx/commit/cd5e4fa1446dff86fafc3b6ffcc11afd527a024f >> branches: master >> commit: cd5e4fa1446dff86fafc3b6ffcc11afd527a024f >> user: Roman Arutyunyan >> date: Sat, 4 Jan 2025 18:03:46 +0400 >> description: >> QUIC: do not increase underutilized congestion window. >> >> As per RFC 9002, Section 7.8, congestion window should not be increased >> when it's underutilized. >> > > Hello, > > it looks like this patch triggers some issues with ack processing > (possible previously, but now more probable): see details in suggested > patch. > > I'm also attaching bad.log and good.log. The first one demonstrates that > nginx is not sending any ACKs since it is switched into idle mode and > the window is not increasing. Since the client code is symmetric, deadlock > occurs. cd5e4fa14 is not the only one to blame uncovering this apparently old bug. Few more things to consider: - 53e7e9eb5 initial congestion window reduction made this easier to trigger - a40cc7002 to ignore congestion in MTU probes made this easier to trigger The fix is to teach ngx_quic_output() to send ACKs on congested links. This meets RFC 9002, 7 and B.2 : An endpoint MUST NOT send a packet if it would cause bytes_in_flight : (see Appendix B.2) to be larger than the congestion window : bytes_in_flight: : Packets only containing ACK frames do not count toward : bytes_in_flight to ensure congestion control does not impede : congestion feedback. A simple reproducer attached. -------------- next part -------------- A non-text attachment was scrubbed... Name: h3_congestion_ack.t Type: application/octet-stream Size: 2056 bytes Desc: not available URL: -------------- next part -------------- -- Sergey Kandaurov From pluknet at nginx.com Fri Apr 25 13:50:15 2025 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 25 Apr 2025 17:50:15 +0400 Subject: [nginx] QUIC: do not increase underutilized congestion window. In-Reply-To: <52BAA7AB-B53F-47BA-831F-7227BBFA8E51@nginx.com> References: <20250415150202.EBADF48F77@pubserv1.nginx> <52BAA7AB-B53F-47BA-831F-7227BBFA8E51@nginx.com> Message-ID: On Fri, Apr 25, 2025 at 05:42:59PM +0400, Sergey Kandaurov wrote: > > [...] > > A simple reproducer attached. Forgotten lib changes. diff --git a/lib/Test/Nginx/HTTP3.pm b/lib/Test/Nginx/HTTP3.pm index 5cc8903..de7134f 100644 --- a/lib/Test/Nginx/HTTP3.pm +++ b/lib/Test/Nginx/HTTP3.pm @@ -43,6 +43,7 @@ sub new { $self->{token} = $extra{token} || ''; $self->{psk_list} = $extra{psk_list} || []; $self->{early_data} = $extra{early_data}; + $self->{send_ack} = 1; $self->{sni} = exists $extra{sni} ? 
$extra{sni} : 'localhost'; $self->{cipher} = 0x1301; @@ -1570,7 +1571,8 @@ sub handle_frames { } } - $self->{socket}->syswrite($self->encrypt_aead(build_ack($ack), $level)); + $self->{socket}->syswrite($self->encrypt_aead(build_ack($ack), $level)) + if $self->{send_ack}; for my $pn (keys %$ack) { $ack->{$pn} = $self->{pn}[0][$level] if $ack->{$pn} == -1; From pluknet at nginx.com Fri Apr 25 18:51:12 2025 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 25 Apr 2025 22:51:12 +0400 Subject: [nginx] QUIC: do not increase underutilized congestion window. In-Reply-To: References: <20250415150202.EBADF48F77@pubserv1.nginx> Message-ID: On Fri, Apr 25, 2025 at 01:55:11PM +0300, Vladimir Homutov via nginx-devel wrote: > missing attachments > Thanks for the provided debug, it is really helpful. Looking into debug, it becomes clear that once we sent something (be it a large response or MTU probe) that temporarily exceeds the congestion window (which is permitted for MTU probes), we can no longer send acknowledgments. This is especially visible if we don't receive ACKs for some reason, such as in your case, to decrease our inflight counter, so the connection becomes stalled. Below is a fix I have made without looking into your version (no offense, it is purely for aesthetic reasons). Although it appears to be quite similar to yours, it's somewhat less intrusive. I tried not to break sending segments for exceeded congestion, as well as to send correct frames in the ack_only mode. diff --git a/src/event/quic/ngx_event_quic_output.c b/src/event/quic/ngx_event_quic_output.c index a92a539f3..3fc2091e0 100644 --- a/src/event/quic/ngx_event_quic_output.c +++ b/src/event/quic/ngx_event_quic_output.c @@ -55,7 +55,8 @@ static ssize_t ngx_quic_send_segments(ngx_connection_t *c, u_char *buf, size_t len, struct sockaddr *sockaddr, socklen_t socklen, size_t segment); #endif static ssize_t ngx_quic_output_packet(ngx_connection_t *c, - ngx_quic_send_ctx_t *ctx, u_char *data, size_t max, size_t min); + ngx_quic_send_ctx_t *ctx, u_char *data, size_t max, size_t min, + ngx_int_t ack_only); static void ngx_quic_init_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, ngx_quic_header_t *pkt, ngx_quic_path_t *path); static ngx_uint_t ngx_quic_get_padding_level(ngx_connection_t *c); @@ -131,8 +132,7 @@ ngx_quic_create_datagrams(ngx_connection_t *c) ngx_memzero(preserved_pnum, sizeof(preserved_pnum)); #endif - while (cg->in_flight < cg->window) { - + do { p = dst; len = ngx_quic_path_limit(c, path, path->mtu); @@ -158,7 +158,8 @@ ngx_quic_create_datagrams(ngx_connection_t *c) return NGX_OK; } - n = ngx_quic_output_packet(c, ctx, p, len, min); + n = ngx_quic_output_packet(c, ctx, p, len, min, + cg->in_flight >= cg->window); if (n == NGX_ERROR) { return NGX_ERROR; } @@ -187,7 +188,8 @@ ngx_quic_create_datagrams(ngx_connection_t *c) ngx_quic_commit_send(c); path->sent += len; - } + + } while (cg->in_flight < cg->window); return NGX_OK; } @@ -315,6 +317,10 @@ ngx_quic_allow_segmentation(ngx_connection_t *c) bytes += f->len; + if (qc->congestion->in_flight + bytes >= qc->congestion->window)) { + return 0; + } + if (bytes > len * 3) { /* require at least ~3 full packets to batch */ return 1; @@ -364,7 +370,7 @@ ngx_quic_create_segments(ngx_connection_t *c) if (len && cg->in_flight + (p - dst) < cg->window) { - n = ngx_quic_output_packet(c, ctx, p, len, len); + n = ngx_quic_output_packet(c, ctx, p, len, len, 0); if (n == NGX_ERROR) { return NGX_ERROR; } @@ -521,7 +527,7 @@ ngx_quic_get_padding_level(ngx_connection_t *c) 
static ssize_t ngx_quic_output_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, - u_char *data, size_t max, size_t min) + u_char *data, size_t max, size_t min, ngx_int_t ack_only) { size_t len, pad, min_payload, max_payload; u_char *p; @@ -585,6 +591,10 @@ ngx_quic_output_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, { f = ngx_queue_data(q, ngx_quic_frame_t, queue); + if (ack_only && f->type != NGX_QUIC_FT_ACK) { + continue; + } + if (len >= max_payload) { break; } @@ -651,14 +661,20 @@ ngx_quic_output_packet(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx, f->plen = res.len; } - while (nframes--) { + while (nframes) { q = ngx_queue_head(&ctx->frames); f = ngx_queue_data(q, ngx_quic_frame_t, queue); + if (ack_only && f->type != NGX_QUIC_FT_ACK) { + continue; + } + f->pkt_need_ack = pkt.need_ack; ngx_queue_remove(q); ngx_queue_insert_tail(&ctx->sending, q); + + nframes--; } return res.len; From pluknet at nginx.com Sat Apr 26 08:13:31 2025 From: pluknet at nginx.com (Sergey Kandaurov) Date: Sat, 26 Apr 2025 12:13:31 +0400 Subject: [nginx] QUIC: do not increase underutilized congestion window. In-Reply-To: References: <20250415150202.EBADF48F77@pubserv1.nginx> Message-ID: <8D0A5E8C-A438-4033-BF29-A992CF439316@nginx.com> > On 25 Apr 2025, at 22:51, Sergey Kandaurov wrote: > > On Fri, Apr 25, 2025 at 01:55:11PM +0300, Vladimir Homutov via nginx-devel wrote: >> missing attachments >> > [..] A compile tested version is uploaded at https://github.com/nginx/nginx/pull/655 (attached for convenience). Besides, it has fixed generalization to obey the abstraction layer. -------------- next part -------------- A non-text attachment was scrubbed... Name: 0001-QUIC-fixed-sending-acknowledgments-with-limited-cong.patch Type: application/octet-stream Size: 5159 bytes Desc: not available URL: -------------- next part -------------- -- Sergey Kandaurov
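A note on the idea shared by the patches in this thread: per RFC 9002, packets that carry only ACK frames do not count toward bytes_in_flight, so they may still be sent once the congestion window is exhausted; both posted patches gate on cg->in_flight >= cg->window and then restrict the packet contents to ACK frames. Below is a minimal self-contained sketch of that send decision only; the types and names (frame_t, may_send_frame) are simplified stand-ins invented for illustration and are not nginx's ngx_quic_* structures or the code of either attached patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for illustration only; these are not nginx types. */
typedef enum { FRAME_ACK, FRAME_STREAM, FRAME_PING } frame_type_t;

typedef struct {
    frame_type_t  type;
    size_t        len;
} frame_t;

/*
 * Decide whether a queued frame may go into the next packet.  While the
 * congestion window is open, anything may be sent.  Once in_flight reaches
 * the window, only ACK frames are allowed, because ACK-only packets are
 * not congestion controlled (RFC 9002, Appendix B.2).
 */
static bool
may_send_frame(const frame_t *f, size_t in_flight, size_t window)
{
    if (in_flight < window) {
        return true;
    }

    return f->type == FRAME_ACK;
}

int
main(void)
{
    size_t   i, in_flight, window;
    frame_t  queue[3] = {
        { FRAME_ACK, 25 }, { FRAME_STREAM, 1200 }, { FRAME_PING, 1 }
    };

    in_flight = 14720;          /* congestion window fully used up */
    window = 14720;

    for (i = 0; i < 3; i++) {
        printf("frame %u: %s\n", (unsigned) i,
               may_send_frame(&queue[i], in_flight, window)
               ? "send" : "hold until window opens");
    }

    return 0;
}

The remaining differences between the two posted patches are mostly bookkeeping around this check: how often the ACK-only pass is allowed to run and how skipped frames are kept queued for the next opportunity to send.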