From mdounin at mdounin.ru Mon Mar 1 16:56:47 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 01 Mar 2021 16:56:47 +0000 Subject: [nginx] HTTP/2: client_header_timeout before first request (ticket #2142). Message-ID: details: https://hg.nginx.org/nginx/rev/171682010da4 branches: changeset: 7783:171682010da4 user: Maxim Dounin date: Mon Mar 01 17:31:28 2021 +0300 description: HTTP/2: client_header_timeout before first request (ticket #2142). With this change, behaviour of HTTP/2 becomes even closer to HTTP/1.x, and client_header_timeout instead of keepalive_timeout is used before the first request is received. This fixes HTTP/2 connections being closed even before the first request if "keepalive_timeout 0;" was used in the configuration; the problem appeared in f790816a0e87 (1.19.7). diffstat: src/http/v2/ngx_http_v2.c | 7 +++++-- 1 files changed, 5 insertions(+), 2 deletions(-) diffs (24 lines): diff -r dea93b6dce94 -r 171682010da4 src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Thu Feb 25 23:42:25 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Mon Mar 01 17:31:28 2021 +0300 @@ -238,6 +238,7 @@ ngx_http_v2_init(ngx_event_t *rev) ngx_http_v2_srv_conf_t *h2scf; ngx_http_v2_main_conf_t *h2mcf; ngx_http_v2_connection_t *h2c; + ngx_http_core_srv_conf_t *cscf; c = rev->data; hc = c->data; @@ -325,8 +326,10 @@ ngx_http_v2_init(ngx_event_t *rev) rev->handler = ngx_http_v2_read_handler; c->write->handler = ngx_http_v2_write_handler; - if (c->read->timer_set) { - ngx_del_timer(c->read); + if (!rev->timer_set) { + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, + ngx_http_core_module); + ngx_add_timer(rev, cscf->client_header_timeout); } c->idle = 1; From xeioex at nginx.com Mon Mar 1 17:16:54 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 01 Mar 2021 17:16:54 +0000 Subject: [njs] Stream: simplified session cleanup. 
Message-ID: details: https://hg.nginx.org/njs/rev/a83de0fce29f branches: changeset: 1614:a83de0fce29f user: Dmitry Volyntsev date: Mon Mar 01 17:15:44 2021 +0000 description: Stream: simplified session cleanup. diffstat: nginx/ngx_stream_js_module.c | 20 ++++++++++---------- 1 files changed, 10 insertions(+), 10 deletions(-) diffs (61 lines): diff -r 67b45ebbb6e4 -r a83de0fce29f nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Thu Feb 25 10:06:18 2021 +0300 +++ b/nginx/ngx_stream_js_module.c Mon Mar 01 17:15:44 2021 +0000 @@ -45,13 +45,11 @@ typedef struct { typedef struct { njs_vm_t *vm; - ngx_log_t *log; njs_opaque_value_t args[3]; ngx_buf_t *buf; ngx_chain_t **last_out; ngx_chain_t *free; ngx_chain_t *busy; - ngx_stream_session_t *session; ngx_int_t status; #define NGX_JS_EVENT_UPLOAD 0 #define NGX_JS_EVENT_DOWNLOAD 1 @@ -81,7 +79,7 @@ static ngx_int_t ngx_stream_js_variable( ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_js_init_vm(ngx_stream_session_t *s); static void ngx_stream_js_drop_events(ngx_stream_js_ctx_t *ctx); -static void ngx_stream_js_cleanup_ctx(void *data); +static void ngx_stream_js_cleanup(void *data); static void ngx_stream_js_cleanup_vm(void *data); static njs_int_t ngx_stream_js_run_event(ngx_stream_session_t *s, ngx_stream_js_ctx_t *ctx, ngx_stream_js_ev_t *event); @@ -698,10 +696,8 @@ ngx_stream_js_init_vm(ngx_stream_session return NGX_ERROR; } - ctx->log = s->connection->log; - - cln->handler = ngx_stream_js_cleanup_ctx; - cln->data = ctx; + cln->handler = ngx_stream_js_cleanup; + cln->data = s; if (njs_vm_start(ctx->vm) == NJS_ERROR) { njs_vm_retval_string(ctx->vm, &exception); @@ -737,14 +733,18 @@ ngx_stream_js_drop_events(ngx_stream_js_ static void -ngx_stream_js_cleanup_ctx(void *data) +ngx_stream_js_cleanup(void *data) { - ngx_stream_js_ctx_t *ctx = data; + ngx_stream_js_ctx_t *ctx; + + ngx_stream_session_t *s = data; + + ctx = ngx_stream_get_module_ctx(s, ngx_stream_js_module); 
ngx_stream_js_drop_events(ctx); if (njs_vm_pending(ctx->vm)) { - ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "pending events"); + ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, "pending events"); } njs_vm_destroy(ctx->vm); From xeioex at nginx.com Mon Mar 1 17:16:56 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 01 Mar 2021 17:16:56 +0000 Subject: [njs] Stream: introduced "stream" property. Message-ID: details: https://hg.nginx.org/njs/rev/da49b292dcef branches: changeset: 1615:da49b292dcef user: Dmitry Volyntsev date: Fri Feb 26 17:45:56 2021 +0000 description: Stream: introduced "stream" property. Is an alias to $status variable. diffstat: nginx/ngx_js.c | 21 +++++++++++++++++++++ nginx/ngx_js.h | 2 ++ nginx/ngx_stream_js_module.c | 10 ++++++++++ 3 files changed, 33 insertions(+), 0 deletions(-) diffs (63 lines): diff -r a83de0fce29f -r da49b292dcef nginx/ngx_js.c --- a/nginx/ngx_js.c Mon Mar 01 17:15:44 2021 +0000 +++ b/nginx/ngx_js.c Fri Feb 26 17:45:56 2021 +0000 @@ -186,6 +186,27 @@ ngx_js_ext_string(njs_vm_t *vm, njs_obje njs_int_t +ngx_js_ext_uint(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, + njs_value_t *setval, njs_value_t *retval) +{ + char *p; + ngx_uint_t field; + + p = njs_vm_external(vm, value); + if (p == NULL) { + njs_value_undefined_set(retval); + return NJS_DECLINED; + } + + field = *(ngx_uint_t *) (p + njs_vm_prop_magic32(prop)); + + njs_value_number_set(retval, field); + + return NJS_OK; +} + + +njs_int_t ngx_js_ext_constant(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval) { diff -r a83de0fce29f -r da49b292dcef nginx/ngx_js.h --- a/nginx/ngx_js.h Mon Mar 01 17:15:44 2021 +0000 +++ b/nginx/ngx_js.h Fri Feb 26 17:45:56 2021 +0000 @@ -57,6 +57,8 @@ njs_int_t ngx_js_ext_log(njs_vm_t *vm, n njs_int_t ngx_js_ext_string(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval); +njs_int_t ngx_js_ext_uint(njs_vm_t *vm, 
njs_object_prop_t *prop, + njs_value_t *value, njs_value_t *setval, njs_value_t *retval); njs_int_t ngx_js_ext_constant(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval); njs_int_t ngx_js_ext_boolean(njs_vm_t *vm, njs_object_prop_t *prop, diff -r a83de0fce29f -r da49b292dcef nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Mon Mar 01 17:15:44 2021 +0000 +++ b/nginx/ngx_stream_js_module.c Fri Feb 26 17:45:56 2021 +0000 @@ -226,6 +226,16 @@ static njs_external_t ngx_stream_js_ext { .flags = NJS_EXTERN_PROPERTY, + .name.string = njs_str("status"), + .enumerable = 1, + .u.property = { + .handler = ngx_js_ext_uint, + .magic32 = offsetof(ngx_stream_session_t, status), + } + }, + + { + .flags = NJS_EXTERN_PROPERTY, .name.string = njs_str("remoteAddress"), .enumerable = 1, .u.property = { From mdounin at mdounin.ru Mon Mar 1 18:43:12 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 01 Mar 2021 18:43:12 +0000 Subject: [nginx] Improved maximum errno detection. Message-ID: details: https://hg.nginx.org/nginx/rev/8cc5b0365ee5 branches: changeset: 7784:8cc5b0365ee5 user: Maxim Dounin date: Mon Mar 01 20:00:43 2021 +0300 description: Improved maximum errno detection. Previously, systems without sys_nerr (or _sys_nerr) were handled with an assumption that errors start at 0 and continuous. This is, however, not something POSIX requires, and not true on some platforms. Notably, on Linux, where sys_nerr is no longer available for newly linked binaries starting with glibc 2.32, there are gaps in error list, which used to stop us from properly detecting maximum errno. Further, on GNU/Hurd errors start at 0x40000001. With this change, maximum errno detection is moved to the runtime code, now able to ignore gaps, and also detects the first error if needed. This fixes observed "Unknown error" messages as seen on Linux with glibc 2.32 and on GNU/Hurd. 
diffstat: auto/unix | 28 --------------- src/os/unix/ngx_errno.c | 91 +++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 85 insertions(+), 34 deletions(-) diffs (172 lines): diff -r 171682010da4 -r 8cc5b0365ee5 auto/unix --- a/auto/unix Mon Mar 01 17:31:28 2021 +0300 +++ b/auto/unix Mon Mar 01 20:00:43 2021 +0300 @@ -753,34 +753,6 @@ if [ $ngx_found = no ]; then fi -if [ $ngx_found = no ]; then - - # Solaris has no sys_nerr - ngx_feature='maximum errno' - ngx_feature_name=NGX_SYS_NERR - ngx_feature_run=value - ngx_feature_incs='#include - #include - #include ' - ngx_feature_path= - ngx_feature_libs= - ngx_feature_test='int n; - char *p; - for (n = 1; n < 1000; n++) { - errno = 0; - p = strerror(n); - if (errno == EINVAL - || p == NULL - || strncmp(p, "Unknown error", 13) == 0) - { - break; - } - } - printf("%d", n);' - . auto/feature -fi - - ngx_feature="localtime_r()" ngx_feature_name="NGX_HAVE_LOCALTIME_R" ngx_feature_run=no diff -r 171682010da4 -r 8cc5b0365ee5 src/os/unix/ngx_errno.c --- a/src/os/unix/ngx_errno.c Mon Mar 01 17:31:28 2021 +0300 +++ b/src/os/unix/ngx_errno.c Mon Mar 01 20:00:43 2021 +0300 @@ -27,6 +27,8 @@ static ngx_str_t *ngx_sys_errlist; static ngx_str_t ngx_unknown_error = ngx_string("Unknown error"); +static ngx_err_t ngx_first_error; +static ngx_err_t ngx_last_error; u_char * @@ -34,8 +36,13 @@ ngx_strerror(ngx_err_t err, u_char *errs { ngx_str_t *msg; - msg = ((ngx_uint_t) err < NGX_SYS_NERR) ? 
&ngx_sys_errlist[err]: - &ngx_unknown_error; + if (err >= ngx_first_error && err < ngx_last_error) { + msg = &ngx_sys_errlist[err - ngx_first_error]; + + } else { + msg = &ngx_unknown_error; + } + size = ngx_min(size, msg->len); return ngx_cpymem(errstr, msg->data, size); @@ -50,20 +57,92 @@ ngx_strerror_init(void) size_t len; ngx_err_t err; +#if (NGX_SYS_NERR) + ngx_first_error = 0; + ngx_last_error = NGX_SYS_NERR; + +#elif (EPERM > 1000 && EPERM < 0x7fffffff - 1000) + + /* + * If number of errors is not known, and EPERM error code has large + * but reasonable value, guess possible error codes based on the error + * messages returned by strerror(), starting from EPERM. Notably, + * this covers GNU/Hurd, where errors start at 0x40000001. + */ + + for (err = EPERM; err > EPERM - 1000; err--) { + ngx_set_errno(0); + msg = strerror(err); + + if (errno == EINVAL + || msg == NULL + || strncmp(msg, "Unknown error", 13) == 0) + { + continue; + } + + ngx_first_error = err; + } + + for (err = EPERM; err < EPERM + 1000; err++) { + ngx_set_errno(0); + msg = strerror(err); + + if (errno == EINVAL + || msg == NULL + || strncmp(msg, "Unknown error", 13) == 0) + { + continue; + } + + ngx_last_error = err + 1; + } + +#else + + /* + * If number of errors is not known, guess it based on the error + * messages returned by strerror(). + */ + + ngx_first_error = 0; + + for (err = 0; err < 1000; err++) { + ngx_set_errno(0); + msg = strerror(err); + + if (errno == EINVAL + || msg == NULL + || strncmp(msg, "Unknown error", 13) == 0) + { + continue; + } + + ngx_last_error = err + 1; + } + +#endif + /* * ngx_strerror() is not ready to work at this stage, therefore, * malloc() is used and possible errors are logged using strerror(). 
*/ - len = NGX_SYS_NERR * sizeof(ngx_str_t); + len = (ngx_last_error - ngx_first_error) * sizeof(ngx_str_t); ngx_sys_errlist = malloc(len); if (ngx_sys_errlist == NULL) { goto failed; } - for (err = 0; err < NGX_SYS_NERR; err++) { + for (err = ngx_first_error; err < ngx_last_error; err++) { msg = strerror(err); + + if (msg == NULL) { + ngx_sys_errlist[err - ngx_first_error] = ngx_unknown_error; + continue; + } + len = ngx_strlen(msg); p = malloc(len); @@ -72,8 +151,8 @@ ngx_strerror_init(void) } ngx_memcpy(p, msg, len); - ngx_sys_errlist[err].len = len; - ngx_sys_errlist[err].data = p; + ngx_sys_errlist[err - ngx_first_error].len = len; + ngx_sys_errlist[err - ngx_first_error].data = p; } return NGX_OK; From mdounin at mdounin.ru Mon Mar 1 18:43:15 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 01 Mar 2021 18:43:15 +0000 Subject: [nginx] Introduced strerrordesc_np() support. Message-ID: details: https://hg.nginx.org/nginx/rev/c43a2e8fdf7e branches: changeset: 7785:c43a2e8fdf7e user: Maxim Dounin date: Mon Mar 01 20:00:45 2021 +0300 description: Introduced strerrordesc_np() support. The strerrordesc_np() function, introduced in glibc 2.32, provides an async-signal-safe way to obtain error messages. This makes it possible to avoid copying error messages. diffstat: auto/unix | 28 ++++++++++++++++++++++------ src/os/unix/ngx_errno.c | 46 +++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 7 deletions(-) diffs (112 lines): diff -r 8cc5b0365ee5 -r c43a2e8fdf7e auto/unix --- a/auto/unix Mon Mar 01 20:00:43 2021 +0300 +++ b/auto/unix Mon Mar 01 20:00:45 2021 +0300 @@ -727,19 +727,35 @@ ngx_feature_test="char buf[1]; struct io . 
auto/feature -ngx_feature="sys_nerr" -ngx_feature_name="NGX_SYS_NERR" -ngx_feature_run=value -ngx_feature_incs='#include - #include ' +# strerrordesc_np(), introduced in glibc 2.32 + +ngx_feature="strerrordesc_np()" +ngx_feature_name="NGX_HAVE_STRERRORDESC_NP" +ngx_feature_run=no +ngx_feature_incs='#include ' ngx_feature_path= ngx_feature_libs= -ngx_feature_test='printf("%d", sys_nerr);' +ngx_feature_test="char *p; p = strerrordesc_np(0); + if (p == NULL) return 1" . auto/feature if [ $ngx_found = no ]; then + ngx_feature="sys_nerr" + ngx_feature_name="NGX_SYS_NERR" + ngx_feature_run=value + ngx_feature_incs='#include + #include ' + ngx_feature_path= + ngx_feature_libs= + ngx_feature_test='printf("%d", sys_nerr);' + . auto/feature +fi + + +if [ $ngx_found = no ]; then + # Cygiwn defines _sys_nerr ngx_feature="_sys_nerr" ngx_feature_name="NGX_SYS_NERR" diff -r 8cc5b0365ee5 -r c43a2e8fdf7e src/os/unix/ngx_errno.c --- a/src/os/unix/ngx_errno.c Mon Mar 01 20:00:43 2021 +0300 +++ b/src/os/unix/ngx_errno.c Mon Mar 01 20:00:45 2021 +0300 @@ -9,6 +9,49 @@ #include +static ngx_str_t ngx_unknown_error = ngx_string("Unknown error"); + + +#if (NGX_HAVE_STRERRORDESC_NP) + +/* + * The strerrordesc_np() function, introduced in glibc 2.32, is + * async-signal-safe. This makes it possible to use it directly, + * without copying error messages. 
+ */ + + +u_char * +ngx_strerror(ngx_err_t err, u_char *errstr, size_t size) +{ + size_t len; + const char *msg; + + msg = strerrordesc_np(err); + + if (msg == NULL) { + msg = (char *) ngx_unknown_error.data; + len = ngx_unknown_error.len; + + } else { + len = ngx_strlen(msg); + } + + size = ngx_min(size, len); + + return ngx_cpymem(errstr, msg, size); +} + + +ngx_int_t +ngx_strerror_init(void) +{ + return NGX_OK; +} + + +#else + /* * The strerror() messages are copied because: * @@ -26,7 +69,6 @@ static ngx_str_t *ngx_sys_errlist; -static ngx_str_t ngx_unknown_error = ngx_string("Unknown error"); static ngx_err_t ngx_first_error; static ngx_err_t ngx_last_error; @@ -164,3 +206,5 @@ failed: return NGX_ERROR; } + +#endif From ru at nginx.com Mon Mar 1 21:58:53 2021 From: ru at nginx.com (Ruslan Ermilov) Date: Mon, 01 Mar 2021 21:58:53 +0000 Subject: [nginx] Proxy: variables support in "proxy_cookie_flags" flags. Message-ID: details: https://hg.nginx.org/nginx/rev/529b73f75d19 branches: changeset: 7786:529b73f75d19 user: Ruslan Ermilov date: Tue Mar 02 00:58:24 2021 +0300 description: Proxy: variables support in "proxy_cookie_flags" flags. 
diffstat: src/http/modules/ngx_http_proxy_module.c | 93 ++++++++++++++++++++++--------- 1 files changed, 65 insertions(+), 28 deletions(-) diffs (139 lines): diff -r c43a2e8fdf7e -r 529b73f75d19 src/http/modules/ngx_http_proxy_module.c --- a/src/http/modules/ngx_http_proxy_module.c Mon Mar 01 20:00:45 2021 +0300 +++ b/src/http/modules/ngx_http_proxy_module.c Tue Mar 02 00:58:24 2021 +0300 @@ -56,7 +56,7 @@ typedef struct { #endif } cookie; - ngx_uint_t flags; + ngx_array_t flags_values; ngx_uint_t regex; } ngx_http_proxy_cookie_flags_t; @@ -2916,12 +2916,14 @@ static ngx_int_t ngx_http_proxy_rewrite_cookie_flags(ngx_http_request_t *r, ngx_array_t *attrs, ngx_array_t *flags) { - ngx_str_t pattern; + ngx_str_t pattern, value; #if (NGX_PCRE) ngx_int_t rc; #endif - ngx_uint_t i; + ngx_uint_t i, m, f, nelts; ngx_keyval_t *attr; + ngx_conf_bitmask_t *mask; + ngx_http_complex_value_t *flags_values; ngx_http_proxy_cookie_flags_t *pcf; attr = attrs->elts; @@ -2965,7 +2967,47 @@ ngx_http_proxy_rewrite_cookie_flags(ngx_ return NGX_DECLINED; } - return ngx_http_proxy_edit_cookie_flags(r, attrs, pcf[i].flags); + nelts = pcf[i].flags_values.nelts; + flags_values = pcf[i].flags_values.elts; + + mask = ngx_http_proxy_cookie_flags_masks; + f = 0; + + for (i = 0; i < nelts; i++) { + + if (ngx_http_complex_value(r, &flags_values[i], &value) != NGX_OK) { + return NGX_ERROR; + } + + if (value.len == 0) { + continue; + } + + for (m = 0; mask[m].name.len != 0; m++) { + + if (mask[m].name.len != value.len + || ngx_strncasecmp(mask[m].name.data, value.data, value.len) + != 0) + { + continue; + } + + f |= mask[m].mask; + + break; + } + + if (mask[m].name.len == 0) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "invalid proxy_cookie_flags flag \"%V\"", &value); + } + } + + if (f == 0) { + return NGX_DECLINED; + } + + return ngx_http_proxy_edit_cookie_flags(r, attrs, f); } @@ -4514,8 +4556,8 @@ ngx_http_proxy_cookie_flags(ngx_conf_t * ngx_http_proxy_loc_conf_t *plcf = conf; 
ngx_str_t *value; - ngx_uint_t i, m; - ngx_conf_bitmask_t *mask; + ngx_uint_t i; + ngx_http_complex_value_t *cv; ngx_http_proxy_cookie_flags_t *pcf; ngx_http_compile_complex_value_t ccv; #if (NGX_PCRE) @@ -4599,32 +4641,27 @@ ngx_http_proxy_cookie_flags(ngx_conf_t * } } - mask = ngx_http_proxy_cookie_flags_masks; - pcf->flags = 0; + if (ngx_array_init(&pcf->flags_values, cf->pool, cf->args->nelts - 2, + sizeof(ngx_http_complex_value_t)) + != NGX_OK) + { + return NGX_CONF_ERROR; + } for (i = 2; i < cf->args->nelts; i++) { - for (m = 0; mask[m].name.len != 0; m++) { - - if (mask[m].name.len != value[i].len - || ngx_strcasecmp(mask[m].name.data, value[i].data) != 0) - { - continue; - } - - if (pcf->flags & mask[m].mask) { - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "duplicate parameter \"%V\"", &value[i]); - return NGX_CONF_ERROR; - } - - pcf->flags |= mask[m].mask; - - break; + + cv = ngx_array_push(&pcf->flags_values); + if (cv == NULL) { + return NGX_CONF_ERROR; } - if (mask[m].name.len == 0) { - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "invalid parameter \"%V\"", &value[i]); + ngx_memzero(&ccv, sizeof(ngx_http_compile_complex_value_t)); + + ccv.cf = cf; + ccv.value = &value[i]; + ccv.complex_value = cv; + + if (ngx_http_compile_complex_value(&ccv) != NGX_OK) { return NGX_CONF_ERROR; } } From pankaj.rohtas at gmail.com Tue Mar 2 05:41:27 2021 From: pankaj.rohtas at gmail.com (Pankaj Sigriwal) Date: Tue, 2 Mar 2021 11:11:27 +0530 Subject: How to hook a function before and after a handler in nginx? Message-ID: Hi List, I am looking for a way to hook a callback before and after a handler in a phase. Is it possible to do so ? What is the best way to achieve this ? Looking forward to hearing from you. Thanks & Regards, Pankaj -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From gaoyan09 at baidu.com Wed Mar 3 11:53:57 2021 From: gaoyan09 at baidu.com (Gao,Yan(ACG VCP)) Date: Wed, 3 Mar 2021 11:53:57 +0000 Subject: QUIC: quic retry log error when ngx_exiting Message-ID: <8D9BF182-2E8E-45A1-A261-28AC7697E7CD@baidu.com> 2021/03/03 19:34:49 [debug] 176572#0: *19974 recvmsg: 111.206.214.32:59242 fd:12 n:1350 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic run 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic packet rx long flags:c8 version:ff00001d 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic packet rx init len:1332 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic packet rx dcid len:8 92545cbefeb0567c 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic packet rx scid len:0 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic address validation token len:0 2021/03/03 19:34:49 [debug] 176572#0: *19974 sendmsg: 107 of 107 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic retry packet sent to 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic packet init done decr:0 pn:0 perr:0 rc:-4 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic ngx_quic_close_connection rc:-4 2021/03/03 19:34:49 [debug] 176572#0: *19974 reusable connection: 0 2021/03/03 19:34:49 [debug] 176572#0: *19974 free: 000056414CFB9160 2021/03/03 19:34:49 [debug] 176572#0: *19974 free: 000056414CFC9580, unused: 74 Code: ngx_log_debug(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic retry packet sent to %xV", &pkt.dcid); Bug produce step: 1, copy https://quic.nginx.org/quic.html to localhost 2, visit https://localhost/quic.html by chrome --enable-quic --quic-version=h3-29 3, run test script, and reload nginx multi times meanwhile 4, chrome stuck for a while, and some request fails, some change to http2 5, check nginx error log, quic retry log no dcid Info: sbin/nginx -V nginx version: nginx/1.19.7 built by gcc 8.4.0 (Ubuntu 8.4.0-3ubuntu2) built with OpenSSL 1.1.1 (compatible; BoringSSL) (running with BoringSSL) TLS SNI support enabled configure arguments: --with-debug 
--with-http_v3_module --with-http_quic_module --with-cc-opt=-I../boringssl/include --with-ld-opt='-L../boringssl/build/ssl -L../boringssl/build/crypto' Gao,Yan(ACG VCP) -------------- next part -------------- An HTML attachment was scrubbed... URL: From vl at nginx.com Wed Mar 3 12:27:56 2021 From: vl at nginx.com (Vladimir Homutov) Date: Wed, 3 Mar 2021 15:27:56 +0300 Subject: QUIC: quic retry log error when ngx_exiting In-Reply-To: <8D9BF182-2E8E-45A1-A261-28AC7697E7CD@baidu.com> References: <8D9BF182-2E8E-45A1-A261-28AC7697E7CD@baidu.com> Message-ID: Hi Gao Yan, > On Wed, Mar 03, 2021 at 11:53:57AM +0000, Gao,Yan(ACG VCP) wrote: > 2021/03/03 19:34:49 [debug] 176572#0: *19974 recvmsg: 111.206.214.32:59242 fd:12 n:1350 > 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic run > 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic packet rx long flags:c8 version:ff00001d > 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic packet rx init len:1332 > 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic packet rx dcid len:8 92545cbefeb0567c > 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic packet rx scid len:0 ^^ What we see here is the quic packet with zero length source id arrived (note this is possible in quic protocol) > 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic address validation token len:0 > 2021/03/03 19:34:49 [debug] 176572#0: *19974 sendmsg: 107 of 107 > 2021/03/03 19:34:49 [debug] 176572#0: *19974 quic retry packet sent to Here we send the retry packet in reply; since source id is zero length, nothing is printed here in debug for destination From xeioex at nginx.com Thu Mar 4 13:53:29 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 04 Mar 2021 13:53:29 +0000 Subject: [njs] Added missing njs_vm_destroy() calls in CLI. Message-ID: details: https://hg.nginx.org/njs/rev/52ddc3a050be branches: changeset: 1616:52ddc3a050be user: Dmitry Volyntsev date: Wed Mar 03 18:28:00 2021 +0000 description: Added missing njs_vm_destroy() calls in CLI. 
diffstat: src/njs_shell.c | 9 +++++++++ 1 files changed, 9 insertions(+), 0 deletions(-) diffs (40 lines): diff -r da49b292dcef -r 52ddc3a050be src/njs_shell.c --- a/src/njs_shell.c Fri Feb 26 17:45:56 2021 +0000 +++ b/src/njs_shell.c Wed Mar 03 18:28:00 2021 +0000 @@ -282,6 +282,7 @@ main(int argc, char **argv) command.start = (u_char *) opts.command; command.length = njs_strlen(opts.command); ret = njs_process_script(&opts, vm_options.external, &command); + njs_vm_destroy(vm); } } else { @@ -498,6 +499,8 @@ njs_process_file(njs_opts_t *opts, njs_v size = sb.st_size; } + vm = NULL; + source.length = 0; source.start = realloc(NULL, size); if (source.start == NULL) { @@ -577,6 +580,10 @@ njs_process_file(njs_opts_t *opts, njs_v done: + if (vm != NULL) { + njs_vm_destroy(vm); + } + if (source.start != NULL) { free(source.start); } @@ -938,6 +945,8 @@ njs_interactive_shell(njs_opts_t *opts, free(line.start); } + njs_vm_destroy(vm); + return NJS_OK; } From mdounin at mdounin.ru Fri Mar 5 15:31:33 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:31:33 +0000 Subject: [nginx] SSL: fixed build by Sun C with old OpenSSL versions. Message-ID: details: https://hg.nginx.org/nginx/rev/7ce28b4cc57e branches: changeset: 7787:7ce28b4cc57e user: Maxim Dounin date: Fri Mar 05 17:16:13 2021 +0300 description: SSL: fixed build by Sun C with old OpenSSL versions. Sun C complains about "statement not reached" if a "return" is followed by additional statements. 
diffstat: src/http/modules/ngx_http_grpc_module.c | 4 ++-- src/http/modules/ngx_http_proxy_module.c | 4 ++-- src/http/modules/ngx_http_ssl_module.c | 4 ++-- src/http/modules/ngx_http_uwsgi_module.c | 4 ++-- src/mail/ngx_mail_ssl_module.c | 4 ++-- src/stream/ngx_stream_proxy_module.c | 4 ++-- src/stream/ngx_stream_ssl_module.c | 4 ++-- 7 files changed, 14 insertions(+), 14 deletions(-) diffs (103 lines): diff -r 529b73f75d19 -r 7ce28b4cc57e src/http/modules/ngx_http_grpc_module.c --- a/src/http/modules/ngx_http_grpc_module.c Tue Mar 02 00:58:24 2021 +0300 +++ b/src/http/modules/ngx_http_grpc_module.c Fri Mar 05 17:16:13 2021 +0300 @@ -4841,9 +4841,9 @@ ngx_http_grpc_ssl_conf_command_check(ngx { #ifndef SSL_CONF_FLAG_FILE return "is not supported on this platform"; +#else + return NGX_CONF_OK; #endif - - return NGX_CONF_OK; } diff -r 529b73f75d19 -r 7ce28b4cc57e src/http/modules/ngx_http_proxy_module.c --- a/src/http/modules/ngx_http_proxy_module.c Tue Mar 02 00:58:24 2021 +0300 +++ b/src/http/modules/ngx_http_proxy_module.c Fri Mar 05 17:16:13 2021 +0300 @@ -4913,9 +4913,9 @@ ngx_http_proxy_ssl_conf_command_check(ng { #ifndef SSL_CONF_FLAG_FILE return "is not supported on this platform"; +#else + return NGX_CONF_OK; #endif - - return NGX_CONF_OK; } diff -r 529b73f75d19 -r 7ce28b4cc57e src/http/modules/ngx_http_ssl_module.c --- a/src/http/modules/ngx_http_ssl_module.c Tue Mar 02 00:58:24 2021 +0300 +++ b/src/http/modules/ngx_http_ssl_module.c Fri Mar 05 17:16:13 2021 +0300 @@ -1274,9 +1274,9 @@ ngx_http_ssl_conf_command_check(ngx_conf { #ifndef SSL_CONF_FLAG_FILE return "is not supported on this platform"; +#else + return NGX_CONF_OK; #endif - - return NGX_CONF_OK; } diff -r 529b73f75d19 -r 7ce28b4cc57e src/http/modules/ngx_http_uwsgi_module.c --- a/src/http/modules/ngx_http_uwsgi_module.c Tue Mar 02 00:58:24 2021 +0300 +++ b/src/http/modules/ngx_http_uwsgi_module.c Fri Mar 05 17:16:13 2021 +0300 @@ -2398,9 +2398,9 @@ ngx_http_uwsgi_ssl_conf_command_check(ng { 
#ifndef SSL_CONF_FLAG_FILE return "is not supported on this platform"; +#else + return NGX_CONF_OK; #endif - - return NGX_CONF_OK; } diff -r 529b73f75d19 -r 7ce28b4cc57e src/mail/ngx_mail_ssl_module.c --- a/src/mail/ngx_mail_ssl_module.c Tue Mar 02 00:58:24 2021 +0300 +++ b/src/mail/ngx_mail_ssl_module.c Fri Mar 05 17:16:13 2021 +0300 @@ -682,7 +682,7 @@ ngx_mail_ssl_conf_command_check(ngx_conf { #ifndef SSL_CONF_FLAG_FILE return "is not supported on this platform"; +#else + return NGX_CONF_OK; #endif - - return NGX_CONF_OK; } diff -r 529b73f75d19 -r 7ce28b4cc57e src/stream/ngx_stream_proxy_module.c --- a/src/stream/ngx_stream_proxy_module.c Tue Mar 02 00:58:24 2021 +0300 +++ b/src/stream/ngx_stream_proxy_module.c Fri Mar 05 17:16:13 2021 +0300 @@ -1026,9 +1026,9 @@ ngx_stream_proxy_ssl_conf_command_check( { #ifndef SSL_CONF_FLAG_FILE return "is not supported on this platform"; +#else + return NGX_CONF_OK; #endif - - return NGX_CONF_OK; } diff -r 529b73f75d19 -r 7ce28b4cc57e src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c Tue Mar 02 00:58:24 2021 +0300 +++ b/src/stream/ngx_stream_ssl_module.c Fri Mar 05 17:16:13 2021 +0300 @@ -1061,9 +1061,9 @@ ngx_stream_ssl_conf_command_check(ngx_co { #ifndef SSL_CONF_FLAG_FILE return "is not supported on this platform"; +#else + return NGX_CONF_OK; #endif - - return NGX_CONF_OK; } From mdounin at mdounin.ru Fri Mar 5 15:31:36 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:31:36 +0000 Subject: [nginx] Events: fixed eventport handling in ngx_handle_read_event(). Message-ID: details: https://hg.nginx.org/nginx/rev/9ca8fb98ef1c branches: changeset: 7788:9ca8fb98ef1c user: Maxim Dounin date: Fri Mar 05 17:16:15 2021 +0300 description: Events: fixed eventport handling in ngx_handle_read_event(). The "!rev->ready" test seems to be a typo, introduced in the original commit (719:f30b1a75fd3b). The ngx_handle_write_event() code properly tests for "rev->ready" instead. 
Due to this typo, read events might be unexpectedly removed during proxying after an event on the other part of the proxied connection. Catched by mail proxying tests. diffstat: src/event/ngx_event.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 7ce28b4cc57e -r 9ca8fb98ef1c src/event/ngx_event.c --- a/src/event/ngx_event.c Fri Mar 05 17:16:13 2021 +0300 +++ b/src/event/ngx_event.c Fri Mar 05 17:16:15 2021 +0300 @@ -318,7 +318,7 @@ ngx_handle_read_event(ngx_event_t *rev, return NGX_OK; } - if (rev->oneshot && !rev->ready) { + if (rev->oneshot && rev->ready) { if (ngx_del_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) { return NGX_ERROR; } From mdounin at mdounin.ru Fri Mar 5 15:31:39 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:31:39 +0000 Subject: [nginx] Mail: added missing event handling after blocking events. Message-ID: details: https://hg.nginx.org/nginx/rev/ab6257dac2a8 branches: changeset: 7789:ab6257dac2a8 user: Maxim Dounin date: Fri Mar 05 17:16:16 2021 +0300 description: Mail: added missing event handling after blocking events. As long as a read event is blocked (ignored), ngx_handle_read_event() needs to be called to make sure no further notifications will be triggered when using level-triggered event methods, such as select() or poll(). 
diffstat: src/mail/ngx_mail_imap_handler.c | 6 ++++++ src/mail/ngx_mail_pop3_handler.c | 6 ++++++ src/mail/ngx_mail_smtp_handler.c | 6 ++++++ 3 files changed, 18 insertions(+), 0 deletions(-) diffs (48 lines): diff -r 9ca8fb98ef1c -r ab6257dac2a8 src/mail/ngx_mail_imap_handler.c --- a/src/mail/ngx_mail_imap_handler.c Fri Mar 05 17:16:15 2021 +0300 +++ b/src/mail/ngx_mail_imap_handler.c Fri Mar 05 17:16:16 2021 +0300 @@ -123,6 +123,12 @@ ngx_mail_imap_auth_state(ngx_event_t *re if (s->out.len) { ngx_log_debug0(NGX_LOG_DEBUG_MAIL, c->log, 0, "imap send handler busy"); s->blocked = 1; + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_close_connection(c); + return; + } + return; } diff -r 9ca8fb98ef1c -r ab6257dac2a8 src/mail/ngx_mail_pop3_handler.c --- a/src/mail/ngx_mail_pop3_handler.c Fri Mar 05 17:16:15 2021 +0300 +++ b/src/mail/ngx_mail_pop3_handler.c Fri Mar 05 17:16:16 2021 +0300 @@ -138,6 +138,12 @@ ngx_mail_pop3_auth_state(ngx_event_t *re if (s->out.len) { ngx_log_debug0(NGX_LOG_DEBUG_MAIL, c->log, 0, "pop3 send handler busy"); s->blocked = 1; + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_close_connection(c); + return; + } + return; } diff -r 9ca8fb98ef1c -r ab6257dac2a8 src/mail/ngx_mail_smtp_handler.c --- a/src/mail/ngx_mail_smtp_handler.c Fri Mar 05 17:16:15 2021 +0300 +++ b/src/mail/ngx_mail_smtp_handler.c Fri Mar 05 17:16:16 2021 +0300 @@ -449,6 +449,12 @@ ngx_mail_smtp_auth_state(ngx_event_t *re if (s->out.len) { ngx_log_debug0(NGX_LOG_DEBUG_MAIL, c->log, 0, "smtp send handler busy"); s->blocked = 1; + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_close_connection(c); + return; + } + return; } From mdounin at mdounin.ru Fri Mar 5 15:31:41 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:31:41 +0000 Subject: [nginx] Mail: added missing event handling after reading data. 
Message-ID: details: https://hg.nginx.org/nginx/rev/da0a85e91587 branches: changeset: 7790:da0a85e91587 user: Maxim Dounin date: Fri Mar 05 17:16:17 2021 +0300 description: Mail: added missing event handling after reading data. If we need to be notified about further events, ngx_handle_read_event() needs to be called after a read event is processed. Without this, an event can be removed from the kernel and won't be reported again, notably when using oneshot event methods, such as eventport on Solaris. For consistency, existing ngx_handle_read_event() call removed from ngx_mail_read_command(), as this call only covers one of the code paths where ngx_mail_read_command() returns NGX_AGAIN. Instead, appropriate processing added to the callers, covering all code paths where NGX_AGAIN is returned. diffstat: src/mail/ngx_mail_handler.c | 5 ----- src/mail/ngx_mail_imap_handler.c | 16 +++++++++++++++- src/mail/ngx_mail_pop3_handler.c | 16 +++++++++++++++- src/mail/ngx_mail_proxy_module.c | 30 ++++++++++++++++++++++++++++++ src/mail/ngx_mail_smtp_handler.c | 16 +++++++++++++++- 5 files changed, 75 insertions(+), 8 deletions(-) diffs (189 lines): diff -r ab6257dac2a8 -r da0a85e91587 src/mail/ngx_mail_handler.c --- a/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:16 2021 +0300 +++ b/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:17 2021 +0300 @@ -722,11 +722,6 @@ ngx_mail_read_command(ngx_mail_session_t } if (n == NGX_AGAIN) { - if (ngx_handle_read_event(c->read, 0) != NGX_OK) { - ngx_mail_session_internal_server_error(s); - return NGX_ERROR; - } - if (s->buffer->pos == s->buffer->last) { return NGX_AGAIN; } diff -r ab6257dac2a8 -r da0a85e91587 src/mail/ngx_mail_imap_handler.c --- a/src/mail/ngx_mail_imap_handler.c Fri Mar 05 17:16:16 2021 +0300 +++ b/src/mail/ngx_mail_imap_handler.c Fri Mar 05 17:16:17 2021 +0300 @@ -136,7 +136,16 @@ ngx_mail_imap_auth_state(ngx_event_t *re rc = ngx_mail_read_command(s, c); - if (rc == NGX_AGAIN || rc == NGX_ERROR) { + if (rc == NGX_AGAIN) { + if 
(ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_session_internal_server_error(s); + return; + } + + return; + } + + if (rc == NGX_ERROR) { return; } @@ -299,6 +308,11 @@ ngx_mail_imap_auth_state(ngx_event_t *re } } + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_session_internal_server_error(s); + return; + } + ngx_mail_send(c->write); } diff -r ab6257dac2a8 -r da0a85e91587 src/mail/ngx_mail_pop3_handler.c --- a/src/mail/ngx_mail_pop3_handler.c Fri Mar 05 17:16:16 2021 +0300 +++ b/src/mail/ngx_mail_pop3_handler.c Fri Mar 05 17:16:17 2021 +0300 @@ -151,7 +151,16 @@ ngx_mail_pop3_auth_state(ngx_event_t *re rc = ngx_mail_read_command(s, c); - if (rc == NGX_AGAIN || rc == NGX_ERROR) { + if (rc == NGX_AGAIN) { + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_session_internal_server_error(s); + return; + } + + return; + } + + if (rc == NGX_ERROR) { return; } @@ -281,6 +290,11 @@ ngx_mail_pop3_auth_state(ngx_event_t *re s->arg_start = s->buffer->start; } + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_session_internal_server_error(s); + return; + } + ngx_mail_send(c->write); } } diff -r ab6257dac2a8 -r da0a85e91587 src/mail/ngx_mail_proxy_module.c --- a/src/mail/ngx_mail_proxy_module.c Fri Mar 05 17:16:16 2021 +0300 +++ b/src/mail/ngx_mail_proxy_module.c Fri Mar 05 17:16:17 2021 +0300 @@ -233,6 +233,11 @@ ngx_mail_proxy_pop3_handler(ngx_event_t rc = ngx_mail_proxy_read_response(s, 0); if (rc == NGX_AGAIN) { + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return; + } + return; } @@ -314,6 +319,11 @@ ngx_mail_proxy_pop3_handler(ngx_event_t return; } + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return; + } + s->proxy->buffer->pos = s->proxy->buffer->start; s->proxy->buffer->last = s->proxy->buffer->start; } @@ -346,6 +356,11 @@ ngx_mail_proxy_imap_handler(ngx_event_t rc = ngx_mail_proxy_read_response(s, 
s->mail_state); if (rc == NGX_AGAIN) { + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return; + } + return; } @@ -448,6 +463,11 @@ ngx_mail_proxy_imap_handler(ngx_event_t return; } + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return; + } + s->proxy->buffer->pos = s->proxy->buffer->start; s->proxy->buffer->last = s->proxy->buffer->start; } @@ -482,6 +502,11 @@ ngx_mail_proxy_smtp_handler(ngx_event_t rc = ngx_mail_proxy_read_response(s, s->mail_state); if (rc == NGX_AGAIN) { + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return; + } + return; } @@ -763,6 +788,11 @@ ngx_mail_proxy_smtp_handler(ngx_event_t return; } + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return; + } + s->proxy->buffer->pos = s->proxy->buffer->start; s->proxy->buffer->last = s->proxy->buffer->start; } diff -r ab6257dac2a8 -r da0a85e91587 src/mail/ngx_mail_smtp_handler.c --- a/src/mail/ngx_mail_smtp_handler.c Fri Mar 05 17:16:16 2021 +0300 +++ b/src/mail/ngx_mail_smtp_handler.c Fri Mar 05 17:16:17 2021 +0300 @@ -462,7 +462,16 @@ ngx_mail_smtp_auth_state(ngx_event_t *re rc = ngx_mail_read_command(s, c); - if (rc == NGX_AGAIN || rc == NGX_ERROR) { + if (rc == NGX_AGAIN) { + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_session_internal_server_error(s); + return; + } + + return; + } + + if (rc == NGX_ERROR) { return; } @@ -574,6 +583,11 @@ ngx_mail_smtp_auth_state(ngx_event_t *re s->arg_start = s->buffer->pos; } + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_session_internal_server_error(s); + return; + } + ngx_mail_send(c->write); } } From mdounin at mdounin.ru Fri Mar 5 15:31:45 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:31:45 +0000 Subject: [nginx] Mail: postponed session initialization under accept mutex. 
Message-ID: details: https://hg.nginx.org/nginx/rev/d84f13618277 branches: changeset: 7791:d84f13618277 user: Maxim Dounin date: Fri Mar 05 17:16:19 2021 +0300 description: Mail: postponed session initialization under accept mutex. Similarly to 40e8ce405859 in the stream module, this reduces the time accept mutex is held. This also simplifies following changes to introduce PROXY protocol support. diffstat: src/mail/ngx_mail.h | 1 + src/mail/ngx_mail_handler.c | 29 ++++++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletions(-) diffs (77 lines): diff -r da0a85e91587 -r d84f13618277 src/mail/ngx_mail.h --- a/src/mail/ngx_mail.h Fri Mar 05 17:16:17 2021 +0300 +++ b/src/mail/ngx_mail.h Fri Mar 05 17:16:19 2021 +0300 @@ -197,6 +197,7 @@ typedef struct { ngx_uint_t mail_state; + unsigned ssl:1; unsigned protocol:3; unsigned blocked:1; unsigned quit:1; diff -r da0a85e91587 -r d84f13618277 src/mail/ngx_mail_handler.c --- a/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:17 2021 +0300 +++ b/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:19 2021 +0300 @@ -11,6 +11,7 @@ #include +static void ngx_mail_init_session_handler(ngx_event_t *rev); static void ngx_mail_init_session(ngx_connection_t *c); #if (NGX_MAIL_SSL) @@ -26,6 +27,7 @@ ngx_mail_init_connection(ngx_connection_ { size_t len; ngx_uint_t i; + ngx_event_t *rev; ngx_mail_port_t *port; struct sockaddr *sa; struct sockaddr_in *sin; @@ -129,6 +131,10 @@ ngx_mail_init_connection(ngx_connection_ s->main_conf = addr_conf->ctx->main_conf; s->srv_conf = addr_conf->ctx->srv_conf; +#if (NGX_MAIL_SSL) + s->ssl = addr_conf->ssl; +#endif + s->addr_text = &addr_conf->addr_text; c->data = s; @@ -159,13 +165,34 @@ ngx_mail_init_connection(ngx_connection_ c->log_error = NGX_ERROR_INFO; + rev = c->read; + rev->handler = ngx_mail_init_session_handler; + + if (ngx_use_accept_mutex) { + ngx_post_event(rev, &ngx_posted_events); + return; + } + + rev->handler(rev); +} + + +static void +ngx_mail_init_session_handler(ngx_event_t 
*rev) +{ + ngx_connection_t *c; + ngx_mail_session_t *s; + + c = rev->data; + s = c->data; + #if (NGX_MAIL_SSL) { ngx_mail_ssl_conf_t *sslcf; sslcf = ngx_mail_get_module_srv_conf(s, ngx_mail_ssl_module); - if (sslcf->enable || addr_conf->ssl) { + if (sslcf->enable || s->ssl) { c->log->action = "SSL handshaking"; ngx_mail_ssl_init_connection(&sslcf->ssl, c); From mdounin at mdounin.ru Fri Mar 5 15:31:48 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:31:48 +0000 Subject: [nginx] Mail: fixed log action after SSL handshake. Message-ID: details: https://hg.nginx.org/nginx/rev/adee10c7fac8 branches: changeset: 7792:adee10c7fac8 user: Maxim Dounin date: Fri Mar 05 17:16:20 2021 +0300 description: Mail: fixed log action after SSL handshake. diffstat: src/mail/ngx_mail_handler.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diffs (12 lines): diff -r d84f13618277 -r adee10c7fac8 src/mail/ngx_mail_handler.c --- a/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:19 2021 +0300 +++ b/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:20 2021 +0300 @@ -365,6 +365,8 @@ ngx_mail_init_session(ngx_connection_t * s = c->data; + c->log->action = "sending client greeting line"; + cscf = ngx_mail_get_module_srv_conf(s, ngx_mail_core_module); s->protocol = cscf->protocol->type; From mdounin at mdounin.ru Fri Mar 5 15:31:50 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:31:50 +0000 Subject: [nginx] Mail: made auth http creating request easier to extend. Message-ID: details: https://hg.nginx.org/nginx/rev/44ebeeceb70e branches: changeset: 7793:44ebeeceb70e user: Maxim Dounin date: Fri Mar 05 17:16:23 2021 +0300 description: Mail: made auth http creating request easier to extend. 
diffstat: src/mail/ngx_mail_auth_http_module.c | 42 ++++++++++++++++++++++++----------- 1 files changed, 29 insertions(+), 13 deletions(-) diffs (55 lines): diff -r adee10c7fac8 -r 44ebeeceb70e src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c Fri Mar 05 17:16:20 2021 +0300 +++ b/src/mail/ngx_mail_auth_http_module.c Fri Mar 05 17:16:23 2021 +0300 @@ -1224,22 +1224,38 @@ ngx_mail_auth_http_create_request(ngx_ma + sizeof("Client-IP: ") - 1 + s->connection->addr_text.len + sizeof(CRLF) - 1 + sizeof("Client-Host: ") - 1 + s->host.len + sizeof(CRLF) - 1 - + sizeof("Auth-SMTP-Helo: ") - 1 + s->smtp_helo.len + sizeof(CRLF) - 1 - + sizeof("Auth-SMTP-From: ") - 1 + s->smtp_from.len + sizeof(CRLF) - 1 - + sizeof("Auth-SMTP-To: ") - 1 + s->smtp_to.len + sizeof(CRLF) - 1 -#if (NGX_MAIL_SSL) - + sizeof("Auth-SSL: on" CRLF) - 1 - + sizeof("Auth-SSL-Verify: ") - 1 + verify.len + sizeof(CRLF) - 1 - + sizeof("Auth-SSL-Subject: ") - 1 + subject.len + sizeof(CRLF) - 1 - + sizeof("Auth-SSL-Issuer: ") - 1 + issuer.len + sizeof(CRLF) - 1 - + sizeof("Auth-SSL-Serial: ") - 1 + serial.len + sizeof(CRLF) - 1 - + sizeof("Auth-SSL-Fingerprint: ") - 1 + fingerprint.len - + sizeof(CRLF) - 1 - + sizeof("Auth-SSL-Cert: ") - 1 + cert.len + sizeof(CRLF) - 1 -#endif + ahcf->header.len + sizeof(CRLF) - 1; + if (s->auth_method == NGX_MAIL_AUTH_NONE) { + len += sizeof("Auth-SMTP-Helo: ") - 1 + s->smtp_helo.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SMTP-From: ") - 1 + s->smtp_from.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SMTP-To: ") - 1 + s->smtp_to.len + + sizeof(CRLF) - 1; + } + +#if (NGX_MAIL_SSL) + + if (c->ssl) { + len += sizeof("Auth-SSL: on" CRLF) - 1 + + sizeof("Auth-SSL-Verify: ") - 1 + verify.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Subject: ") - 1 + subject.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Issuer: ") - 1 + issuer.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Serial: ") - 1 + serial.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Fingerprint: ") - 
1 + fingerprint.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Cert: ") - 1 + cert.len + + sizeof(CRLF) - 1; + } + +#endif + b = ngx_create_temp_buf(pool, len); if (b == NULL) { return NULL; From mdounin at mdounin.ru Fri Mar 5 15:31:54 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:31:54 +0000 Subject: [nginx] Mail: parsing of the PROXY protocol from clients. Message-ID: details: https://hg.nginx.org/nginx/rev/12ea1de7d87c branches: changeset: 7794:12ea1de7d87c user: Maxim Dounin date: Fri Mar 05 17:16:24 2021 +0300 description: Mail: parsing of the PROXY protocol from clients. Activated with the "proxy_protocol" parameter of the "listen" directive. Obtained information is passed to the auth_http script in Proxy-Protocol-Addr, Proxy-Protocol-Port, Proxy-Protocol-Server-Addr, and Proxy-Protocol-Server-Port headers. diffstat: src/mail/ngx_mail.c | 2 + src/mail/ngx_mail.h | 4 +- src/mail/ngx_mail_auth_http_module.c | 31 +++++++++++ src/mail/ngx_mail_core_module.c | 5 + src/mail/ngx_mail_handler.c | 94 ++++++++++++++++++++++++++++++++++- 5 files changed, 132 insertions(+), 4 deletions(-) diffs (228 lines): diff -r 44ebeeceb70e -r 12ea1de7d87c src/mail/ngx_mail.c --- a/src/mail/ngx_mail.c Fri Mar 05 17:16:23 2021 +0300 +++ b/src/mail/ngx_mail.c Fri Mar 05 17:16:24 2021 +0300 @@ -405,6 +405,7 @@ ngx_mail_add_addrs(ngx_conf_t *cf, ngx_m #if (NGX_MAIL_SSL) addrs[i].conf.ssl = addr[i].opt.ssl; #endif + addrs[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; addrs[i].conf.addr_text = addr[i].opt.addr_text; } @@ -439,6 +440,7 @@ ngx_mail_add_addrs6(ngx_conf_t *cf, ngx_ #if (NGX_MAIL_SSL) addrs6[i].conf.ssl = addr[i].opt.ssl; #endif + addrs6[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; addrs6[i].conf.addr_text = addr[i].opt.addr_text; } diff -r 44ebeeceb70e -r 12ea1de7d87c src/mail/ngx_mail.h --- a/src/mail/ngx_mail.h Fri Mar 05 17:16:23 2021 +0300 +++ b/src/mail/ngx_mail.h Fri Mar 05 17:16:24 2021 +0300 @@ -41,6 +41,7 @@ typedef struct { 
unsigned ipv6only:1; #endif unsigned so_keepalive:2; + unsigned proxy_protocol:1; #if (NGX_HAVE_KEEPALIVE_TUNABLE) int tcp_keepidle; int tcp_keepintvl; @@ -55,7 +56,8 @@ typedef struct { typedef struct { ngx_mail_conf_ctx_t *ctx; ngx_str_t addr_text; - ngx_uint_t ssl; /* unsigned ssl:1; */ + unsigned ssl:1; + unsigned proxy_protocol:1; } ngx_mail_addr_conf_t; typedef struct { diff -r 44ebeeceb70e -r 12ea1de7d87c src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c Fri Mar 05 17:16:23 2021 +0300 +++ b/src/mail/ngx_mail_auth_http_module.c Fri Mar 05 17:16:24 2021 +0300 @@ -1227,6 +1227,17 @@ ngx_mail_auth_http_create_request(ngx_ma + ahcf->header.len + sizeof(CRLF) - 1; + if (c->proxy_protocol) { + len += sizeof("Proxy-Protocol-Addr: ") - 1 + + c->proxy_protocol->src_addr.len + sizeof(CRLF) - 1 + + sizeof("Proxy-Protocol-Port: ") - 1 + + sizeof("65535") - 1 + sizeof(CRLF) - 1 + + sizeof("Proxy-Protocol-Server-Addr: ") - 1 + + c->proxy_protocol->dst_addr.len + sizeof(CRLF) - 1 + + sizeof("Proxy-Protocol-Server-Port: ") - 1 + + sizeof("65535") - 1 + sizeof(CRLF) - 1; + } + if (s->auth_method == NGX_MAIL_AUTH_NONE) { len += sizeof("Auth-SMTP-Helo: ") - 1 + s->smtp_helo.len + sizeof(CRLF) - 1 @@ -1314,6 +1325,26 @@ ngx_mail_auth_http_create_request(ngx_ma *b->last++ = CR; *b->last++ = LF; } + if (c->proxy_protocol) { + b->last = ngx_cpymem(b->last, "Proxy-Protocol-Addr: ", + sizeof("Proxy-Protocol-Addr: ") - 1); + b->last = ngx_copy(b->last, c->proxy_protocol->src_addr.data, + c->proxy_protocol->src_addr.len); + *b->last++ = CR; *b->last++ = LF; + + b->last = ngx_sprintf(b->last, "Proxy-Protocol-Port: %d" CRLF, + c->proxy_protocol->src_port); + + b->last = ngx_cpymem(b->last, "Proxy-Protocol-Server-Addr: ", + sizeof("Proxy-Protocol-Server-Addr: ") - 1); + b->last = ngx_copy(b->last, c->proxy_protocol->dst_addr.data, + c->proxy_protocol->dst_addr.len); + *b->last++ = CR; *b->last++ = LF; + + b->last = ngx_sprintf(b->last, 
"Proxy-Protocol-Server-Port: %d" CRLF, + c->proxy_protocol->dst_port); + } + if (s->auth_method == NGX_MAIL_AUTH_NONE) { /* HELO, MAIL FROM, and RCPT TO can't contain CRLF, no need to escape */ diff -r 44ebeeceb70e -r 12ea1de7d87c src/mail/ngx_mail_core_module.c --- a/src/mail/ngx_mail_core_module.c Fri Mar 05 17:16:23 2021 +0300 +++ b/src/mail/ngx_mail_core_module.c Fri Mar 05 17:16:24 2021 +0300 @@ -548,6 +548,11 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx #endif } + if (ngx_strcmp(value[i].data, "proxy_protocol") == 0) { + ls->proxy_protocol = 1; + continue; + } + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "the invalid \"%V\" parameter", &value[i]); return NGX_CONF_ERROR; diff -r 44ebeeceb70e -r 12ea1de7d87c src/mail/ngx_mail_handler.c --- a/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:23 2021 +0300 +++ b/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:24 2021 +0300 @@ -11,6 +11,7 @@ #include +static void ngx_mail_proxy_protocol_handler(ngx_event_t *rev); static void ngx_mail_init_session_handler(ngx_event_t *rev); static void ngx_mail_init_session(ngx_connection_t *c); @@ -168,6 +169,22 @@ ngx_mail_init_connection(ngx_connection_ rev = c->read; rev->handler = ngx_mail_init_session_handler; + if (addr_conf->proxy_protocol) { + c->log->action = "reading PROXY protocol"; + + rev->handler = ngx_mail_proxy_protocol_handler; + + if (!rev->ready) { + ngx_add_timer(rev, cscf->timeout); + + if (ngx_handle_read_event(rev, 0) != NGX_OK) { + ngx_mail_close_connection(c); + } + + return; + } + } + if (ngx_use_accept_mutex) { ngx_post_event(rev, &ngx_posted_events); return; @@ -178,6 +195,76 @@ ngx_mail_init_connection(ngx_connection_ static void +ngx_mail_proxy_protocol_handler(ngx_event_t *rev) +{ + u_char *p, buf[NGX_PROXY_PROTOCOL_MAX_HEADER]; + size_t size; + ssize_t n; + ngx_err_t err; + ngx_connection_t *c; + ngx_mail_session_t *s; + ngx_mail_core_srv_conf_t *cscf; + + c = rev->data; + s = c->data; + + ngx_log_debug0(NGX_LOG_DEBUG_MAIL, c->log, 0, + "mail PROXY protocol 
handler"); + + if (rev->timedout) { + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out"); + c->timedout = 1; + ngx_mail_close_connection(c); + return; + } + + n = recv(c->fd, (char *) buf, sizeof(buf), MSG_PEEK); + + err = ngx_socket_errno; + + ngx_log_debug1(NGX_LOG_DEBUG_MAIL, c->log, 0, "recv(): %z", n); + + if (n == -1) { + if (err == NGX_EAGAIN) { + rev->ready = 0; + + if (!rev->timer_set) { + cscf = ngx_mail_get_module_srv_conf(s, ngx_mail_core_module); + ngx_add_timer(rev, cscf->timeout); + } + + if (ngx_handle_read_event(rev, 0) != NGX_OK) { + ngx_mail_close_connection(c); + } + + return; + } + + ngx_connection_error(c, err, "recv() failed"); + + ngx_mail_close_connection(c); + return; + } + + p = ngx_proxy_protocol_read(c, buf, buf + n); + + if (p == NULL) { + ngx_mail_close_connection(c); + return; + } + + size = p - buf; + + if (c->recv(c, buf, size) != (ssize_t) size) { + ngx_mail_close_connection(c); + return; + } + + ngx_mail_init_session_handler(rev); +} + + +static void ngx_mail_init_session_handler(ngx_event_t *rev) { ngx_connection_t *c; @@ -242,9 +329,10 @@ ngx_mail_ssl_init_connection(ngx_ssl_t * s = c->data; - cscf = ngx_mail_get_module_srv_conf(s, ngx_mail_core_module); - - ngx_add_timer(c->read, cscf->timeout); + if (!c->read->timer_set) { + cscf = ngx_mail_get_module_srv_conf(s, ngx_mail_core_module); + ngx_add_timer(c->read, cscf->timeout); + } c->ssl->handler = ngx_mail_ssl_handshake_handler; From mdounin at mdounin.ru Fri Mar 5 15:31:58 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:31:58 +0000 Subject: [nginx] Mail: realip module. Message-ID: details: https://hg.nginx.org/nginx/rev/ef4bdbbce57e branches: changeset: 7795:ef4bdbbce57e user: Maxim Dounin date: Fri Mar 05 17:16:29 2021 +0300 description: Mail: realip module. When configured with the "set_real_ip_from", it can set client's IP address as visible in logs to the one obtained via the PROXY protocol. 
diffstat: auto/modules | 6 + src/mail/ngx_mail.h | 1 + src/mail/ngx_mail_handler.c | 5 + src/mail/ngx_mail_realip_module.c | 269 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 281 insertions(+), 0 deletions(-) diffs (315 lines): diff -r 12ea1de7d87c -r ef4bdbbce57e auto/modules --- a/auto/modules Fri Mar 05 17:16:24 2021 +0300 +++ b/auto/modules Fri Mar 05 17:16:29 2021 +0300 @@ -985,6 +985,12 @@ if [ $MAIL != NO ]; then ngx_module_srcs=src/mail/ngx_mail_proxy_module.c . auto/module + + ngx_module_name=ngx_mail_realip_module + ngx_module_deps= + ngx_module_srcs=src/mail/ngx_mail_realip_module.c + + . auto/module fi diff -r 12ea1de7d87c -r ef4bdbbce57e src/mail/ngx_mail.h --- a/src/mail/ngx_mail.h Fri Mar 05 17:16:24 2021 +0300 +++ b/src/mail/ngx_mail.h Fri Mar 05 17:16:29 2021 +0300 @@ -408,6 +408,7 @@ char *ngx_mail_capabilities(ngx_conf_t * /* STUB */ void ngx_mail_proxy_init(ngx_mail_session_t *s, ngx_addr_t *peer); void ngx_mail_auth_http_init(ngx_mail_session_t *s); +ngx_int_t ngx_mail_realip_handler(ngx_mail_session_t *s); /**/ diff -r 12ea1de7d87c -r ef4bdbbce57e src/mail/ngx_mail_handler.c --- a/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:24 2021 +0300 +++ b/src/mail/ngx_mail_handler.c Fri Mar 05 17:16:29 2021 +0300 @@ -260,6 +260,11 @@ ngx_mail_proxy_protocol_handler(ngx_even return; } + if (ngx_mail_realip_handler(s) != NGX_OK) { + ngx_mail_close_connection(c); + return; + } + ngx_mail_init_session_handler(rev); } diff -r 12ea1de7d87c -r ef4bdbbce57e src/mail/ngx_mail_realip_module.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/mail/ngx_mail_realip_module.c Fri Mar 05 17:16:29 2021 +0300 @@ -0,0 +1,269 @@ + +/* + * Copyright (C) Igor Sysoev + * Copyright (C) Nginx, Inc. 
+ */ + + +#include +#include +#include + + +typedef struct { + ngx_array_t *from; /* array of ngx_cidr_t */ +} ngx_mail_realip_srv_conf_t; + + +static ngx_int_t ngx_mail_realip_set_addr(ngx_mail_session_t *s, + ngx_addr_t *addr); +static char *ngx_mail_realip_from(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); +static void *ngx_mail_realip_create_srv_conf(ngx_conf_t *cf); +static char *ngx_mail_realip_merge_srv_conf(ngx_conf_t *cf, void *parent, + void *child); + + +static ngx_command_t ngx_mail_realip_commands[] = { + + { ngx_string("set_real_ip_from"), + NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_TAKE1, + ngx_mail_realip_from, + NGX_MAIL_SRV_CONF_OFFSET, + 0, + NULL }, + + ngx_null_command +}; + + +static ngx_mail_module_t ngx_mail_realip_module_ctx = { + NULL, /* protocol */ + + NULL, /* create main configuration */ + NULL, /* init main configuration */ + + ngx_mail_realip_create_srv_conf, /* create server configuration */ + ngx_mail_realip_merge_srv_conf /* merge server configuration */ +}; + + +ngx_module_t ngx_mail_realip_module = { + NGX_MODULE_V1, + &ngx_mail_realip_module_ctx, /* module context */ + ngx_mail_realip_commands, /* module directives */ + NGX_MAIL_MODULE, /* module type */ + NULL, /* init master */ + NULL, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + + +ngx_int_t +ngx_mail_realip_handler(ngx_mail_session_t *s) +{ + ngx_addr_t addr; + ngx_connection_t *c; + ngx_mail_realip_srv_conf_t *rscf; + + rscf = ngx_mail_get_module_srv_conf(s, ngx_mail_realip_module); + + if (rscf->from == NULL) { + return NGX_OK; + } + + c = s->connection; + + if (c->proxy_protocol == NULL) { + return NGX_OK; + } + + if (ngx_cidr_match(c->sockaddr, rscf->from) != NGX_OK) { + return NGX_OK; + } + + if (ngx_parse_addr(c->pool, &addr, c->proxy_protocol->src_addr.data, + c->proxy_protocol->src_addr.len) + != NGX_OK) + { + return NGX_OK; 
+ } + + ngx_inet_set_port(addr.sockaddr, c->proxy_protocol->src_port); + + return ngx_mail_realip_set_addr(s, &addr); +} + + +static ngx_int_t +ngx_mail_realip_set_addr(ngx_mail_session_t *s, ngx_addr_t *addr) +{ + size_t len; + u_char *p; + u_char text[NGX_SOCKADDR_STRLEN]; + ngx_connection_t *c; + + c = s->connection; + + len = ngx_sock_ntop(addr->sockaddr, addr->socklen, text, + NGX_SOCKADDR_STRLEN, 0); + if (len == 0) { + return NGX_ERROR; + } + + p = ngx_pnalloc(c->pool, len); + if (p == NULL) { + return NGX_ERROR; + } + + ngx_memcpy(p, text, len); + + c->sockaddr = addr->sockaddr; + c->socklen = addr->socklen; + c->addr_text.len = len; + c->addr_text.data = p; + + return NGX_OK; +} + + +static char * +ngx_mail_realip_from(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_mail_realip_srv_conf_t *rscf = conf; + + ngx_int_t rc; + ngx_str_t *value; + ngx_url_t u; + ngx_cidr_t c, *cidr; + ngx_uint_t i; + struct sockaddr_in *sin; +#if (NGX_HAVE_INET6) + struct sockaddr_in6 *sin6; +#endif + + value = cf->args->elts; + + if (rscf->from == NULL) { + rscf->from = ngx_array_create(cf->pool, 2, + sizeof(ngx_cidr_t)); + if (rscf->from == NULL) { + return NGX_CONF_ERROR; + } + } + +#if (NGX_HAVE_UNIX_DOMAIN) + + if (ngx_strcmp(value[1].data, "unix:") == 0) { + cidr = ngx_array_push(rscf->from); + if (cidr == NULL) { + return NGX_CONF_ERROR; + } + + cidr->family = AF_UNIX; + return NGX_CONF_OK; + } + +#endif + + rc = ngx_ptocidr(&value[1], &c); + + if (rc != NGX_ERROR) { + if (rc == NGX_DONE) { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "low address bits of %V are meaningless", + &value[1]); + } + + cidr = ngx_array_push(rscf->from); + if (cidr == NULL) { + return NGX_CONF_ERROR; + } + + *cidr = c; + + return NGX_CONF_OK; + } + + ngx_memzero(&u, sizeof(ngx_url_t)); + u.host = value[1]; + + if (ngx_inet_resolve_host(cf->pool, &u) != NGX_OK) { + if (u.err) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "%s in set_real_ip_from \"%V\"", + u.err, &u.host); + } + + return 
NGX_CONF_ERROR; + } + + cidr = ngx_array_push_n(rscf->from, u.naddrs); + if (cidr == NULL) { + return NGX_CONF_ERROR; + } + + ngx_memzero(cidr, u.naddrs * sizeof(ngx_cidr_t)); + + for (i = 0; i < u.naddrs; i++) { + cidr[i].family = u.addrs[i].sockaddr->sa_family; + + switch (cidr[i].family) { + +#if (NGX_HAVE_INET6) + case AF_INET6: + sin6 = (struct sockaddr_in6 *) u.addrs[i].sockaddr; + cidr[i].u.in6.addr = sin6->sin6_addr; + ngx_memset(cidr[i].u.in6.mask.s6_addr, 0xff, 16); + break; +#endif + + default: /* AF_INET */ + sin = (struct sockaddr_in *) u.addrs[i].sockaddr; + cidr[i].u.in.addr = sin->sin_addr.s_addr; + cidr[i].u.in.mask = 0xffffffff; + break; + } + } + + return NGX_CONF_OK; +} + + +static void * +ngx_mail_realip_create_srv_conf(ngx_conf_t *cf) +{ + ngx_mail_realip_srv_conf_t *conf; + + conf = ngx_pcalloc(cf->pool, sizeof(ngx_mail_realip_srv_conf_t)); + if (conf == NULL) { + return NULL; + } + + /* + * set by ngx_pcalloc(): + * + * conf->from = NULL; + */ + + return conf; +} + + +static char * +ngx_mail_realip_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) +{ + ngx_mail_realip_srv_conf_t *prev = parent; + ngx_mail_realip_srv_conf_t *conf = child; + + if (conf->from == NULL) { + conf->from = prev->from; + } + + return NGX_CONF_OK; +} From mdounin at mdounin.ru Fri Mar 5 15:32:01 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 05 Mar 2021 15:32:01 +0000 Subject: [nginx] Mail: sending of the PROXY protocol to backends. Message-ID: details: https://hg.nginx.org/nginx/rev/4b8f23a36ebf branches: changeset: 7796:4b8f23a36ebf user: Maxim Dounin date: Fri Mar 05 17:16:32 2021 +0300 description: Mail: sending of the PROXY protocol to backends. Activated with the "proxy_protocol" directive. Can be combined with "listen ... proxy_protocol;" and "set_real_ip_from ...;" to pass client address provided to nginx in the PROXY protocol header. 
diffstat: src/mail/ngx_mail.h | 1 + src/mail/ngx_mail_proxy_module.c | 139 +++++++++++++++++++++++++++++++++++++- 2 files changed, 133 insertions(+), 7 deletions(-) diffs (245 lines): diff -r ef4bdbbce57e -r 4b8f23a36ebf src/mail/ngx_mail.h --- a/src/mail/ngx_mail.h Fri Mar 05 17:16:29 2021 +0300 +++ b/src/mail/ngx_mail.h Fri Mar 05 17:16:32 2021 +0300 @@ -178,6 +178,7 @@ typedef enum { typedef struct { ngx_peer_connection_t upstream; ngx_buf_t *buffer; + ngx_uint_t proxy_protocol; /* unsigned proxy_protocol:1; */ } ngx_mail_proxy_ctx_t; diff -r ef4bdbbce57e -r 4b8f23a36ebf src/mail/ngx_mail_proxy_module.c --- a/src/mail/ngx_mail_proxy_module.c Fri Mar 05 17:16:29 2021 +0300 +++ b/src/mail/ngx_mail_proxy_module.c Fri Mar 05 17:16:32 2021 +0300 @@ -17,6 +17,7 @@ typedef struct { ngx_flag_t pass_error_message; ngx_flag_t xclient; ngx_flag_t smtp_auth; + ngx_flag_t proxy_protocol; size_t buffer_size; ngx_msec_t timeout; } ngx_mail_proxy_conf_t; @@ -26,7 +27,8 @@ static void ngx_mail_proxy_block_read(ng static void ngx_mail_proxy_pop3_handler(ngx_event_t *rev); static void ngx_mail_proxy_imap_handler(ngx_event_t *rev); static void ngx_mail_proxy_smtp_handler(ngx_event_t *rev); -static void ngx_mail_proxy_dummy_handler(ngx_event_t *ev); +static void ngx_mail_proxy_write_handler(ngx_event_t *wev); +static ngx_int_t ngx_mail_proxy_send_proxy_protocol(ngx_mail_session_t *s); static ngx_int_t ngx_mail_proxy_read_response(ngx_mail_session_t *s, ngx_uint_t state); static void ngx_mail_proxy_handler(ngx_event_t *ev); @@ -82,6 +84,13 @@ static ngx_command_t ngx_mail_proxy_com offsetof(ngx_mail_proxy_conf_t, smtp_auth), NULL }, + { ngx_string("proxy_protocol"), + NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_MAIL_SRV_CONF_OFFSET, + offsetof(ngx_mail_proxy_conf_t, proxy_protocol), + NULL }, + ngx_null_command }; @@ -156,7 +165,7 @@ ngx_mail_proxy_init(ngx_mail_session_t * p->upstream.connection->pool = s->connection->pool; 
s->connection->read->handler = ngx_mail_proxy_block_read; - p->upstream.connection->write->handler = ngx_mail_proxy_dummy_handler; + p->upstream.connection->write->handler = ngx_mail_proxy_write_handler; pcf = ngx_mail_get_module_srv_conf(s, ngx_mail_proxy_module); @@ -167,6 +176,8 @@ ngx_mail_proxy_init(ngx_mail_session_t * return; } + s->proxy->proxy_protocol = pcf->proxy_protocol; + s->out.len = 0; switch (s->protocol) { @@ -186,6 +197,12 @@ ngx_mail_proxy_init(ngx_mail_session_t * s->mail_state = ngx_smtp_start; break; } + + if (rc == NGX_AGAIN) { + return; + } + + ngx_mail_proxy_write_handler(p->upstream.connection->write); } @@ -230,6 +247,17 @@ ngx_mail_proxy_pop3_handler(ngx_event_t return; } + if (s->proxy->proxy_protocol) { + ngx_log_debug0(NGX_LOG_DEBUG_MAIL, c->log, 0, "mail proxy pop3 busy"); + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return; + } + + return; + } + rc = ngx_mail_proxy_read_response(s, 0); if (rc == NGX_AGAIN) { @@ -353,6 +381,17 @@ ngx_mail_proxy_imap_handler(ngx_event_t return; } + if (s->proxy->proxy_protocol) { + ngx_log_debug0(NGX_LOG_DEBUG_MAIL, c->log, 0, "mail proxy imap busy"); + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return; + } + + return; + } + rc = ngx_mail_proxy_read_response(s, s->mail_state); if (rc == NGX_AGAIN) { @@ -499,6 +538,17 @@ ngx_mail_proxy_smtp_handler(ngx_event_t return; } + if (s->proxy->proxy_protocol) { + ngx_log_debug0(NGX_LOG_DEBUG_MAIL, c->log, 0, "mail proxy smtp busy"); + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return; + } + + return; + } + rc = ngx_mail_proxy_read_response(s, s->mail_state); if (rc == NGX_AGAIN) { @@ -799,19 +849,92 @@ ngx_mail_proxy_smtp_handler(ngx_event_t static void -ngx_mail_proxy_dummy_handler(ngx_event_t *wev) +ngx_mail_proxy_write_handler(ngx_event_t *wev) { ngx_connection_t *c; ngx_mail_session_t *s; - 
ngx_log_debug0(NGX_LOG_DEBUG_MAIL, wev->log, 0, "mail proxy dummy handler"); + ngx_log_debug0(NGX_LOG_DEBUG_MAIL, wev->log, 0, "mail proxy write handler"); + + c = wev->data; + s = c->data; + + if (s->proxy->proxy_protocol) { + if (ngx_mail_proxy_send_proxy_protocol(s) != NGX_OK) { + return; + } + + s->proxy->proxy_protocol = 0; + } if (ngx_handle_write_event(wev, 0) != NGX_OK) { - c = wev->data; - s = c->data; + ngx_mail_proxy_internal_server_error(s); + } + + if (c->read->ready) { + ngx_post_event(c->read, &ngx_posted_events); + } +} + + +static ngx_int_t +ngx_mail_proxy_send_proxy_protocol(ngx_mail_session_t *s) +{ + u_char *p; + ssize_t n, size; + ngx_connection_t *c; + u_char buf[NGX_PROXY_PROTOCOL_MAX_HEADER]; + + s->connection->log->action = "sending PROXY protocol header to upstream"; + + ngx_log_debug0(NGX_LOG_DEBUG_MAIL, s->connection->log, 0, + "mail proxy send PROXY protocol header"); + + p = ngx_proxy_protocol_write(s->connection, buf, + buf + NGX_PROXY_PROTOCOL_MAX_HEADER); + if (p == NULL) { + ngx_mail_proxy_internal_server_error(s); + return NGX_ERROR; + } + + c = s->proxy->upstream.connection; + + size = p - buf; - ngx_mail_proxy_close_session(s); + n = c->send(c, buf, size); + + if (n == NGX_AGAIN) { + if (ngx_handle_write_event(c->write, 0) != NGX_OK) { + ngx_mail_proxy_internal_server_error(s); + return NGX_ERROR; + } + + return NGX_AGAIN; + } + + if (n == NGX_ERROR) { + ngx_mail_proxy_internal_server_error(s); + return NGX_ERROR; } + + if (n != size) { + + /* + * PROXY protocol specification: + * The sender must always ensure that the header + * is sent at once, so that the transport layer + * maintains atomicity along the path to the receiver. 
+ */ + + ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, + "could not send PROXY protocol header at once"); + + ngx_mail_proxy_internal_server_error(s); + + return NGX_ERROR; + } + + return NGX_OK; } @@ -1212,6 +1335,7 @@ ngx_mail_proxy_create_conf(ngx_conf_t *c pcf->pass_error_message = NGX_CONF_UNSET; pcf->xclient = NGX_CONF_UNSET; pcf->smtp_auth = NGX_CONF_UNSET; + pcf->proxy_protocol = NGX_CONF_UNSET; pcf->buffer_size = NGX_CONF_UNSET_SIZE; pcf->timeout = NGX_CONF_UNSET_MSEC; @@ -1229,6 +1353,7 @@ ngx_mail_proxy_merge_conf(ngx_conf_t *cf ngx_conf_merge_value(conf->pass_error_message, prev->pass_error_message, 0); ngx_conf_merge_value(conf->xclient, prev->xclient, 1); ngx_conf_merge_value(conf->smtp_auth, prev->smtp_auth, 0); + ngx_conf_merge_value(conf->proxy_protocol, prev->proxy_protocol, 0); ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, (size_t) ngx_pagesize); ngx_conf_merge_msec_value(conf->timeout, prev->timeout, 24 * 60 * 60000); From idinasabari at gmail.com Fri Mar 5 15:58:07 2021 From: idinasabari at gmail.com (idina sabari) Date: Fri, 5 Mar 2021 23:58:07 +0800 Subject: nginx-devel Digest, Vol 136, Issue 27 In-Reply-To: References: Message-ID: 37JE#OZS2HY6Y Pada 22 Feb 2021 16:27, menulis: > Send nginx-devel mailing list submissions to > nginx-devel at nginx.org > > To subscribe or unsubscribe via the World Wide Web, visit > http://mailman.nginx.org/mailman/listinfo/nginx-devel > or, via email, send a message with subject or body 'help' to > nginx-devel-request at nginx.org > > You can reach the person managing the list at > nginx-devel-owner at nginx.org > > When replying, please edit your Subject line so it is more specific > than "Re: Contents of nginx-devel digest..." > > > Today's Topics: > > 1. 
RE: [PATCH] Add io_uring support in AIO(async io) module > (Zhao, Ping) > > > ---------------------------------------------------------------------- > > Message: 1 > Date: Mon, 22 Feb 2021 08:27:10 +0000 > From: "Zhao, Ping" > To: Vadim Fedorenko , "nginx-devel at nginx.org" > > Subject: RE: [PATCH] Add io_uring support in AIO(async io) module > Message-ID: > namprd11.prod.outlook.com> > > Content-Type: text/plain; charset="utf-8" > > Hi Vadim, > > Thanks for your finding. The size is updated after previous io request. > This method is correct and better. > > BR, > Ping > > From: Vadim Fedorenko > Sent: Monday, February 22, 2021 5:55 AM > To: nginx-devel at nginx.org > Cc: Zhao, Ping > Subject: Re: [PATCH] Add io_uring support in AIO(async io) module > > Hi! > Looks like this small fix doesn't work in case when the total size of the > file is less than the size of the buffer and it was partly read. > In my case the size of the file is 16384 bytes and only one page of the > file was in page cache. This patch produces size = 8192 bytes > for my case and the next call reads 12288 bytes and generates errors like > below: > "[alert] 28441#28441: *20855 pread() read only 12288 of 8192 from > " > changing to > size = ngx_min(size, dst->end - dst->last); > fixes the problem > Thanks, > Vadim > > ??, 25 ???. 2021 ?. ? 08:25, Zhao, Ping ng.zhao at intel.com>>: > Hello, add a small update to correct the length when part of request > already received in previous. > This case may happen when using io_uring and throughput increased. > > # HG changeset patch > # User Ping Zhao > > # Date 1611566408 18000 > # Mon Jan 25 04:20:08 2021 -0500 > # Node ID f2c91860b7ac4b374fff4353a830cd9427e1d027 > # Parent 1372f9ee2e829b5de5d12c05713c307e325e0369 > Correct length calculation when part of request received. 
> > diff -r 1372f9ee2e82 -r f2c91860b7ac src/core/ngx_output_chain.c > --- a/src/core/ngx_output_chain.c Wed Jan 13 11:10:05 2021 -0500 > +++ b/src/core/ngx_output_chain.c Mon Jan 25 04:20:08 2021 -0500 > @@ -531,6 +531,14 @@ > > size = ngx_buf_size(src); > size = ngx_min(size, dst->end - dst->pos); > +#if (NGX_HAVE_FILE_IOURING) > + /* > + * check if already received part of the request in previous, > + * calculate the remain length > + */ > + if(dst->last > dst->pos && size > (dst->last - dst->pos)) > + size = size - (dst->last - dst->pos); > +#endif > > sendfile = ctx->sendfile && !ctx->directio; > > -----Original Message----- > From: nginx-devel nginx-devel-bounces at nginx.org>> On Behalf Of Zhao, Ping > Sent: Thursday, January 21, 2021 9:44 AM > To: nginx-devel at nginx.org > Subject: RE: [PATCH] Add io_uring support in AIO(async io) module > > Hi Vladimir, > > No special/extra configuration needed, but need check if 'aio on' and > 'sendfile off' is correctly set. This is my Nginx config for reference: > > user nobody; > daemon off; > worker_processes 1; > error_log error.log ; > events { > worker_connections 65535; > use epoll; > } > > http { > include mime.types; > default_type application/octet-stream; > access_log on; > aio on; > sendfile off; > directio 2k; > > # Cache Configurations > proxy_cache_path /mnt/cache0 levels=2 keys_zone=nginx-cache0:400m > max_size=1400g inactive=4d use_temp_path=off; ...... > > > To better measure the disk io performance data, I do the following steps: > 1. To exclude other impact, and focus on disk io part.(This patch only > impact disk aio read process) Use cgroup to limit Nginx memory usage. > Otherwise Nginx may also use memory as cache storage and this may cause > test result not so straight.(since most cache hit in memory, disk io bw is > low, like my previous mail found which didn't exclude the memory cache > impact) > echo 2G > memory.limit_in_bytes > use ' cgexec -g memory:nginx' to start Nginx. > > 2. 
use wrk -t 100 -c 1000, with random 25000 http requests. > My previous test used -t 200 connections, comparing with -t 1000, > libaio performance drop more when connections numbers increased from 200 to > 1000, but io_uring doesn't. It's another advantage of io_uring. > > 3. First clean the cache disk and run the test for 30 minutes to let Nginx > store the cache files to nvme disk as much as possible. > > 4. Rerun the test, this time Nginx will use ngx_file_aio_read to extract > the cache files in nvme cache disk. Use iostat to track the io data. The > data should be align with NIC bw since all data should be from cache > disk.(need exclude memory as cache storage impact) > > Following is the test result: > > Nginx worker_processes 1: > 4k 100k 1M > Io_uring 220MB/s 1GB/s 1.3GB/s > Libaio 70MB/s 250MB/s 600MB/s(with -c 200, 1.0GB/s) > > > Nginx worker_processes 4: > 4k 100k 1M > Io_uring 800MB/s 2.5GB/s 2.6GB/s(my nvme disk io maximum bw) > libaio 250MB/s 900MB/s 2.0GB/s > > So for small request, io_uring has huge improvement than libaio. In > previous mail, because I didn't exclude the memory cache storage impact, > most cache file is stored in memory, very few are from disk in case of > 4k/100k. The data is not correct.(for 1M, because the cache is too big to > store in memory, it wat in disk) Also I enabled directio option "directio > 2k" this time to avoid this. > > Regards, > Ping > > -----Original Message----- > From: nginx-devel nginx-devel-bounces at nginx.org>> On Behalf Of Vladimir Homutov > Sent: Wednesday, January 20, 2021 12:43 AM > To: nginx-devel at nginx.org > Subject: Re: [PATCH] Add io_uring support in AIO(async io) module > > On Tue, Jan 19, 2021 at 03:32:30AM +0000, Zhao, Ping wrote: > > It depends on if disk io is the performance hot spot or not. If yes, > > io_uring shows improvement than libaio. With 4KB/100KB length 1 Nginx > > thread it's hard to see performance difference because iostat is only > > around ~10MB/100MB per second. 
Disk io is not the performance bottle > > neck, both libaio and io_uring have the same performance. If you > > increase request size or Nginx threads number, for example 1MB length > > or Nginx thread number 4. In this case, disk io became the performance > > bottle neck, you will see io_uring performance improvement. > > Can you please provide full test results with specific nginx configuration? > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- > An HTML attachment was scrubbed... > URL: attachments/20210222/732ba7ef/attachment.htm> > > ------------------------------ > > Subject: Digest Footer > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > ------------------------------ > > End of nginx-devel Digest, Vol 136, Issue 27 > ******************************************** > -------------- next part -------------- An HTML attachment was scrubbed... URL: From xeioex at nginx.com Sat Mar 6 12:48:51 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Sat, 06 Mar 2021 12:48:51 +0000 Subject: [njs] Introduced njs.on('exit') callback support. Message-ID: details: https://hg.nginx.org/njs/rev/84af87035c4e branches: changeset: 1617:84af87035c4e user: Dmitry Volyntsev date: Sat Mar 06 12:42:30 2021 +0000 description: Introduced njs.on('exit') callback support. 
diffstat: src/njs_builtin.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++- src/njs_vm.c | 4 ++ src/njs_vm.h | 8 +++++ src/test/njs_unit_test.c | 17 ++++++++++++ 4 files changed, 94 insertions(+), 2 deletions(-) diffs (157 lines): diff -r 52ddc3a050be -r 84af87035c4e src/njs_builtin.c --- a/src/njs_builtin.c Wed Mar 03 18:28:00 2021 +0000 +++ b/src/njs_builtin.c Sat Mar 06 12:42:30 2021 +0000 @@ -851,7 +851,7 @@ found: static njs_int_t -njs_dump_value(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, +njs_ext_dump(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { uint32_t n; @@ -878,6 +878,62 @@ njs_dump_value(njs_vm_t *vm, njs_value_t static njs_int_t +njs_ext_on(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t unused) +{ + njs_str_t type; + njs_uint_t i, n; + njs_value_t *value; + + static const struct { + njs_str_t name; + njs_uint_t id; + } hooks[] = { + { + njs_str("exit"), + NJS_HOOK_EXIT + }, + }; + + value = njs_arg(args, nargs, 1); + + if (njs_slow_path(!njs_is_string(value))) { + njs_type_error(vm, "hook type is not a string"); + return NJS_ERROR; + } + + njs_string_get(value, &type); + + i = 0; + n = sizeof(hooks) / sizeof(hooks[0]); + + while (i < n) { + if (njs_strstr_eq(&type, &hooks[i].name)) { + break; + } + + i++; + } + + if (i == n) { + njs_type_error(vm, "unknown hook type \"%V\"", &type); + return NJS_ERROR; + } + + value = njs_arg(args, nargs, 2); + + if (njs_slow_path(!njs_is_function(value) && !njs_is_null(value))) { + njs_type_error(vm, "callback is not a function or null"); + return NJS_ERROR; + } + + vm->hooks[i] = njs_is_function(value) ? 
njs_function(value) : NULL; + + return NJS_OK; +} + + +static njs_int_t njs_global_this_prop_handler(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *global, njs_value_t *setval, njs_value_t *retval) { @@ -1629,7 +1685,14 @@ static const njs_object_prop_t njs_njs_ { .type = NJS_PROPERTY, .name = njs_string("dump"), - .value = njs_native_function(njs_dump_value, 0), + .value = njs_native_function(njs_ext_dump, 0), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("on"), + .value = njs_native_function(njs_ext_on, 0), .configurable = 1, }, }; diff -r 52ddc3a050be -r 84af87035c4e src/njs_vm.c --- a/src/njs_vm.c Wed Mar 03 18:28:00 2021 +0000 +++ b/src/njs_vm.c Sat Mar 06 12:42:30 2021 +0000 @@ -91,6 +91,10 @@ njs_vm_destroy(njs_vm_t *vm) njs_event_t *event; njs_lvlhsh_each_t lhe; + if (vm->hooks[NJS_HOOK_EXIT] != NULL) { + (void) njs_vm_call(vm, vm->hooks[NJS_HOOK_EXIT], NULL, 0); + } + if (njs_waiting_events(vm)) { njs_lvlhsh_each_init(&lhe, &njs_event_hash_proto); diff -r 52ddc3a050be -r 84af87035c4e src/njs_vm.h --- a/src/njs_vm.h Wed Mar 03 18:28:00 2021 +0000 +++ b/src/njs_vm.h Sat Mar 06 12:42:30 2021 +0000 @@ -175,6 +175,12 @@ enum njs_object_e { + njs_scope_offset(index))) +enum njs_hook_e { + NJS_HOOK_EXIT = 0, + NJS_HOOK_MAX +}; + + struct njs_vm_s { /* njs_vm_t must be aligned to njs_value_t due to scratch value. */ njs_value_t retval; @@ -210,6 +216,8 @@ struct njs_vm_s { njs_object_prototype_t prototypes[NJS_OBJ_TYPE_MAX]; njs_function_t constructors[NJS_OBJ_TYPE_MAX]; + njs_function_t *hooks[NJS_HOOK_MAX]; + njs_mp_t *mem_pool; u_char *start; diff -r 52ddc3a050be -r 84af87035c4e src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Wed Mar 03 18:28:00 2021 +0000 +++ b/src/test/njs_unit_test.c Sat Mar 06 12:42:30 2021 +0000 @@ -17197,6 +17197,23 @@ static njs_unit_test_t njs_test[] = "decodeURI.name = 'XXX'; njs.dump(decodeURI)"), njs_str("[Function: XXX]") }, + /* njs.on(). 
*/ + + { njs_str("njs.on(decodeURI)"), + njs_str("TypeError: hook type is not a string") }, + + { njs_str("njs.on('xxx')"), + njs_str("TypeError: unknown hook type \"xxx\"") }, + + { njs_str("njs.on('exit')"), + njs_str("TypeError: callback is not a function or null") }, + + { njs_str("njs.on('exit', null); 1"), + njs_str("1") }, + + { njs_str("njs.on('exit', ()=>{}); 1"), + njs_str("1") }, + /* Built-in methods name. */ { njs_str( From gaoyan09 at baidu.com Tue Mar 9 07:13:53 2021 From: gaoyan09 at baidu.com (Gao,Yan(ACG VCP)) Date: Tue, 9 Mar 2021 07:13:53 +0000 Subject: Is ngx_quic_select_socket_by_dcid good when len < 20 ? Message-ID: <94823454-6293-4810-90E5-33A725E1BBAC@baidu.com> ngx_quic_select_socket_by_dcid advance_data(sizeof(struct udphdr)); /* skip UDP header */ advance_data(1); /* QUIC flags */ if (data[0] & NGX_QUIC_PKT_LONG) { advance_data(4); /* skip QUIC version */ len = data[0]; /* read DCID length */ if (len < 8) { /* it's useless to search for key in such short DCID */ return SK_PASS; } advance_data(1); /* skip DCID len */ } else { len = NGX_QUIC_SERVER_CID_LEN; } dcid = &data[1]; advance_data(len); /* we expect the packet to have full DCID */ len = data[0]; /* read DCID length */ advance_data(1); /* skip DCID len */ dcid = &data[1]; len = data[0] and dcid = &data[1] should both move after advance_data(1) or before advance_data(1) is ngx_quic_parse_uint64(dcid) good when len < 20 ? -------------- next part -------------- An HTML attachment was scrubbed... URL: From xeioex at nginx.com Tue Mar 9 13:28:37 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 09 Mar 2021 13:28:37 +0000 Subject: [njs] Types: updated TS definitions. Message-ID: details: https://hg.nginx.org/njs/rev/715e1eeb9a4d branches: changeset: 1618:715e1eeb9a4d user: Dmitry Volyntsev date: Tue Mar 09 13:24:06 2021 +0000 description: Types: updated TS definitions. 
diffstat: test/ts/test.ts | 6 +++++ ts/ngx_http_js_module.d.ts | 35 +++++++++++++++++++++++++++++++- ts/ngx_stream_js_module.d.ts | 48 ++++++++++++++++++++++++++++++++++++++----- ts/njs_core.d.ts | 5 ++++ 4 files changed, 87 insertions(+), 7 deletions(-) diffs (194 lines): diff -r 84af87035c4e -r 715e1eeb9a4d test/ts/test.ts --- a/test/ts/test.ts Sat Mar 06 12:42:30 2021 +0000 +++ b/test/ts/test.ts Tue Mar 09 13:24:06 2021 +0000 @@ -93,6 +93,11 @@ function http_module(r: NginxHTTPRequest }) .then(body => r.return(200, body)) .catch(e => r.return(501, e.message)) + + // js_body_filter + r.sendBuffer(Buffer.from("xxx"), {last:true}); + r.sendBuffer("xxx", {flush: true}); + r.done(); } function fs_module() { @@ -150,6 +155,7 @@ function timers() { function njs_object() { njs.dump('asdf'); njs.version != process.argv[1]; + njs.on('exit', ()=> {}); } function ngx_object() { diff -r 84af87035c4e -r 715e1eeb9a4d ts/ngx_http_js_module.d.ts --- a/ts/ngx_http_js_module.d.ts Sat Mar 06 12:42:30 2021 +0000 +++ b/ts/ngx_http_js_module.d.ts Tue Mar 09 13:24:06 2021 +0000 @@ -262,12 +262,32 @@ interface NginxSubrequestOptions { detached?: boolean } +interface NginxHTTPSendBufferOptions { + /** + * True if data is a last buffer. + */ + last?: boolean + /** + * True if the buffer should have the flush flag. + */ + flush?: boolean +} + interface NginxHTTPRequest { /** * Request arguments object. */ readonly args: NginxHTTPArgs; /** + * After calling this function, next data chunks will be passed to + * the client without calling js_body_filter. + * + * **Warning:** May be called only from the js_body_filter function. + * + * @since 0.5.2 + */ + done(): void; + /** * Writes a string to the error log on the error level of logging. * @param message Message to log. */ @@ -374,10 +394,23 @@ interface NginxHTTPRequest { */ return(status: number, body?: NjsStringOrBuffer): void; /** - * Sends the HTTP headers to the client. + * Sends a part of the response body to the client. 
*/ send(part: NjsStringOrBuffer): void; /** + * Adds data to the chain of data chunks to be forwarded to the next body filter. + * The actual forwarding happens later, when the all the data chunks of the current + * chain are processed. + * + * **Warning:** May be called only from the js_body_filter function. + * + * @since 0.5.2 + * @param data Data to send. + * @param options Object used to override nginx buffer flags derived from + * an incoming data chunk buffer. + */ + sendBuffer(data: NjsStringOrBuffer, options?: NginxHTTPSendBufferOptions): void; + /** * Sends the HTTP headers to the client. */ sendHeader(): void; diff -r 84af87035c4e -r 715e1eeb9a4d ts/ngx_stream_js_module.d.ts --- a/ts/ngx_stream_js_module.d.ts Sat Mar 06 12:42:30 2021 +0000 +++ b/ts/ngx_stream_js_module.d.ts Tue Mar 09 13:24:06 2021 +0000 @@ -97,21 +97,44 @@ interface NginxStreamSendOptions { interface NginxStreamRequest { /** - * Successfully finalizes the phase handler. + * Successfully finalizes the phase handler. An alias to s.done(0). + * + * @since 0.2.4 + * @see done() */ allow(): void; /** - * Finalizes the phase handler and passes control to the next handler. + * Passing control to the next handler of the current phase (if any). + * An alias to s.done(-5). + * + * @since 0.2.4 + * @see done() */ decline(): void; /** * Finalizes the phase handler with the access error code. + * An alias to s.done(403). + * + * @since 0.2.4 + * @see done() */ deny(): void; /** - * Successfully finalizes the current phase handler - * or finalizes it with the specified numeric code. - * @param code Finalization code. + * Sets an exit code for the current phase handler to a code value. + * The actual finalization happens when the js handler is completed and + * all pending events, for example from ngx.fetch() or setTimeout(), + * are processed. + * + * @param code Finalization code, by default is 0. 
+ * Possible code values: + * 0 - successful finalization, passing control to the next phase + * -5 - undecided, passing control to the next handler of the current + * phase (if any) + * 403 - access is forbidden + * @since 0.2.4 + * @see allow() + * @see decline() + * @see deny() */ done(code?: number): void; /** @@ -127,6 +150,7 @@ interface NginxStreamRequest { /** * Unregisters the callback set by on() method. * @param event Event type to unregister. + * @see on() */ off(event: "upload" | "download" | "upstream" | "downstream"): void; /** @@ -138,6 +162,7 @@ interface NginxStreamRequest { * * **Warning:** For string data type bytes invalid in UTF-8 encoding may be * converted into the replacement character. + * @see off() */ on(event: "upload" | "download", callback: (data: NjsByteString, flags: NginxStreamCallbackFlags) => void): void; @@ -148,13 +173,24 @@ interface NginxStreamRequest { */ readonly remoteAddress: NjsByteString; /** - * Sends the data to the client. + * Adds data to the chain of data chunks that will be forwarded in + * the forward direction: in download callback to a client; in upload + * to an upstream server. The actual forwarding happens later, when the all + * the data chunks of the current chain are processed. + * + * @since 0.2.4 * @param data Data to send. * @param options Object used to override nginx buffer flags derived from * an incoming data chunk buffer. + * @see on() */ send(data: NjsStringOrBuffer, options?: NginxStreamSendOptions): void; /** + * The stream session exit status. It is an alias to the $status variable. + * @since 0.5.2 + */ + readonly status: number; + /** * nginx variables as Buffers. 
* * @since 0.5.0 diff -r 84af87035c4e -r 715e1eeb9a4d ts/njs_core.d.ts --- a/ts/njs_core.d.ts Sat Mar 06 12:42:30 2021 +0000 +++ b/ts/njs_core.d.ts Tue Mar 09 13:24:06 2021 +0000 @@ -591,6 +591,11 @@ type NjsStringOrBuffer = NjsStringLike | interface NjsGlobal { readonly version: string; dump(value: any, indent?: number): string; + /** + * Registers a callback for the "exit" event. The callback is called before + * the VM is destroyed. + */ + on(event: "exit", callback: () => void): void; } declare const njs: NjsGlobal; From xeioex at nginx.com Tue Mar 9 13:28:39 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 09 Mar 2021 13:28:39 +0000 Subject: [njs] Version 0.5.2. Message-ID: details: https://hg.nginx.org/njs/rev/e5de01378b1a branches: changeset: 1619:e5de01378b1a user: Dmitry Volyntsev date: Tue Mar 09 13:24:51 2021 +0000 description: Version 0.5.2. diffstat: CHANGES | 24 ++++++++++++++++++++++++ 1 files changed, 24 insertions(+), 0 deletions(-) diffs (31 lines): diff -r 715e1eeb9a4d -r e5de01378b1a CHANGES --- a/CHANGES Tue Mar 09 13:24:06 2021 +0000 +++ b/CHANGES Tue Mar 09 13:24:51 2021 +0000 @@ -1,3 +1,27 @@ + +Changes with njs 0.5.2 09 Mar 2021 + + nginx modules: + + *) Feature: added the "js_body_filter" directive. + + *) Feature: introduced the "status" property for stream session + object. + + *) Feature: added njs.on('exit') callback support. + + *) Bugfix: fixed property descriptor reuse for not extensible + objects. + Thanks to Artem S. Povalyukhin. + + *) Bugfix: fixed Object.freeze() and friends according to + the specification. + Thanks to Artem S. Povalyukhin. + + *) Bugfix: fixed Function() in CLI mode. + + *) Bugfix: fixed for-in iteration of typed array values. + Thanks to Artem S. Povalyukhin. 
Changes with njs 0.5.1 16 Feb 2021 From xeioex at nginx.com Tue Mar 9 13:28:41 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 09 Mar 2021 13:28:41 +0000 Subject: [njs] Added tag 0.5.2 for changeset e5de01378b1a Message-ID: details: https://hg.nginx.org/njs/rev/e8bb829af224 branches: changeset: 1620:e8bb829af224 user: Dmitry Volyntsev date: Tue Mar 09 13:28:31 2021 +0000 description: Added tag 0.5.2 for changeset e5de01378b1a diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r e5de01378b1a -r e8bb829af224 .hgtags --- a/.hgtags Tue Mar 09 13:24:51 2021 +0000 +++ b/.hgtags Tue Mar 09 13:28:31 2021 +0000 @@ -40,3 +40,4 @@ 1ada1061a040e5cd5ec55744bfa916dfc6744e4c fdfd580b0dd617a884ed9287d98341ebef03ee9f 0.4.4 69f07c6151628880bf7d5ac28bd8287ce96d8a36 0.5.0 d355071f55ef4612d89db0ba72e7aaeaa99deef7 0.5.1 +e5de01378b1a8ab0a94dd3a8c4c6bb7a235f4b9c 0.5.2 From gaoyan09 at baidu.com Tue Mar 9 14:43:11 2021 From: gaoyan09 at baidu.com (Gao,Yan(ACG VCP)) Date: Tue, 9 Mar 2021 14:43:11 +0000 Subject: [QUIC] When old worker listen fd detach ebpf reuseport group when reload? Message-ID: <1C05D407-E798-489E-A9C0-0A7D3E775830@baidu.com> We cannot close quic fd to let old session complete when reload. Can detach ebpf reuseport group manually when ngx_close_listening_sockets? Linux kernel commit e57892f50a07953053dcb1e0c9431197e569c258 Merge: bfdfa51702de 0ab5539f8584 Author: Alexei Starovoitov Date: Fri Jul 17 20:18:18 2020 -0700 Merge branch 'bpf-socket-lookup' Jakub Sitnicki says: BPF sk_lookup program runs when transport layer is looking up a listening socket for a new connection request (TCP), or when looking up an unconnected socket for a packet (UDP). To select a socket BPF program fetches it from a map holding socket references, like SOCKMAP or SOCKHASH, calls bpf_sk_assign(ctx, sk, ...) helper to record the selection, and returns SK_PASS code. Transport layer then uses the selected socket as a result of socket lookup. 
From mdounin at mdounin.ru Tue Mar 9 15:11:33 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 09 Mar 2021 15:11:33 +0000 Subject: [nginx] Updated OpenSSL used for win32 builds. Message-ID: details: https://hg.nginx.org/nginx/rev/a555ff8fdbb0 branches: changeset: 7797:a555ff8fdbb0 user: Maxim Dounin date: Tue Mar 09 16:38:55 2021 +0300 description: Updated OpenSSL used for win32 builds. diffstat: misc/GNUmakefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 4b8f23a36ebf -r a555ff8fdbb0 misc/GNUmakefile --- a/misc/GNUmakefile Fri Mar 05 17:16:32 2021 +0300 +++ b/misc/GNUmakefile Tue Mar 09 16:38:55 2021 +0300 @@ -6,7 +6,7 @@ TEMP = tmp CC = cl OBJS = objs.msvc8 -OPENSSL = openssl-1.1.1i +OPENSSL = openssl-1.1.1j ZLIB = zlib-1.2.11 PCRE = pcre-8.44 From mdounin at mdounin.ru Tue Mar 9 15:31:05 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 09 Mar 2021 15:31:05 +0000 Subject: [nginx] nginx-1.19.8-RELEASE Message-ID: details: https://hg.nginx.org/nginx/rev/8c65d21464aa branches: changeset: 7798:8c65d21464aa user: Maxim Dounin date: Tue Mar 09 18:27:50 2021 +0300 description: nginx-1.19.8-RELEASE diffstat: docs/xml/nginx/changes.xml | 62 ++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 62 insertions(+), 0 deletions(-) diffs (72 lines): diff -r a555ff8fdbb0 -r 8c65d21464aa docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml Tue Mar 09 16:38:55 2021 +0300 +++ b/docs/xml/nginx/changes.xml Tue Mar 09 18:27:50 2021 +0300 @@ -5,6 +5,68 @@ + + + + +? ????????? proxy_cookie_flags ?????? +????? ????? ???????? ? ??????? ??????????. + + +flags in the "proxy_cookie_flags" directive +can now contain variables. + + + + + +???????? proxy_protocol ? ????????? listen, +????????? proxy_protocol ? set_real_ip_from +? ???????? ??????-???????. + + +the "proxy_protocol" parameter of the "listen" directive, +the "proxy_protocol" and "set_real_ip_from" directives +in mail proxy. + + + + + +HTTP/2-?????????? ????? 
??????????? +??? ????????????? "keepalive_timeout 0"; +?????? ????????? ? 1.19.7. + + +HTTP/2 connections were immediately closed +when using "keepalive_timeout 0"; +the bug had appeared in 1.19.7. + + + + + +????????? ?????? ????????????? ??? ???????????, +???? nginx ??? ?????? ? glibc 2.32. + + +some errors were logged as unknown +if nginx was built with glibc 2.32. + + + + + +? ?????? ????????? ?????????? eventport. + + +in the eventport method. + + + + + + From mdounin at mdounin.ru Tue Mar 9 15:31:07 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 09 Mar 2021 15:31:07 +0000 Subject: [nginx] release-1.19.8 tag Message-ID: details: https://hg.nginx.org/nginx/rev/34e76ceabcec branches: changeset: 7799:34e76ceabcec user: Maxim Dounin date: Tue Mar 09 18:27:51 2021 +0300 description: release-1.19.8 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r 8c65d21464aa -r 34e76ceabcec .hgtags --- a/.hgtags Tue Mar 09 18:27:50 2021 +0300 +++ b/.hgtags Tue Mar 09 18:27:51 2021 +0300 @@ -457,3 +457,4 @@ dc0cc425fa63a80315f6efb68697cadb6626cdf2 8e5b068f761cd512d10c9671fbde0b568c1fd08b release-1.19.5 f618488eb769e0ed74ef0d93cd118d2ad79ef94d release-1.19.6 3fa6e2095a7a51acc630517e1c27a7b7ac41f7b3 release-1.19.7 +8c65d21464aaa5923775f80c32474adc7a320068 release-1.19.8 From vl at nginx.com Tue Mar 9 20:24:10 2021 From: vl at nginx.com (Vladimir Homutov) Date: Tue, 9 Mar 2021 23:24:10 +0300 Subject: Is ngx_quic_select_socket_by_dcid good when len < 20 ? In-Reply-To: <94823454-6293-4810-90E5-33A725E1BBAC@baidu.com> References: <94823454-6293-4810-90E5-33A725E1BBAC@baidu.com> Message-ID: <13fb86f0-256f-a3b6-8300-1c0490f4b1ad@nginx.com> 09.03.2021 10:13, Gao,Yan(ACG VCP) ?????: > ngx_quic_select_socket_by_dcid > > ??? advance_data(sizeof(struct udphdr)); /* skip UDP header */ > > ??? advance_data(1); /* QUIC flags */ > > ??? if (data[0] & NGX_QUIC_PKT_LONG) { > > ??????? advance_data(4); /* skip QUIC version */ > > ??????? 
len = data[0];?? /* read DCID length */ > > ??????? if (len < 8) { > > ??????????? /* it's useless to search for key in such short DCID */ > > ??????????? return SK_PASS; > > ??????? } > > ??????? advance_data(1); /* skip DCID len */ > > ??? } else { > > ??????? len = NGX_QUIC_SERVER_CID_LEN; > > ??? } > > dcid = &data[1]; > > advance_data(len); /* we expect the packet to have full DCID */ > > len = data[0];?? /* read DCID length */ > > advance_data(1); /* skip DCID len */ > > dcid = &data[1]; > > len = data[0] and dcid = &data[1] should both move after advance_data(1) > ?or before advance_data(1) > > is ngx_quic_parse_uint64(dcid) good when len < 20 ? > we always check that we have enough bytes to read before actually accessing data. Even more, this is enforced by BPF verifier, so the code won't compile without proper check. We have no idea if the packet contains garbage or proper quic header. we just extract key (if there is enough bytes) and then try to find a match in a map. From vl at nginx.com Tue Mar 9 20:27:01 2021 From: vl at nginx.com (Vladimir Homutov) Date: Tue, 9 Mar 2021 23:27:01 +0300 Subject: [QUIC] When old worker listen fd detach ebpf reuseport group when reload? In-Reply-To: <1C05D407-E798-489E-A9C0-0A7D3E775830@baidu.com> References: <1C05D407-E798-489E-A9C0-0A7D3E775830@baidu.com> Message-ID: <49a205ce-4f44-c33d-0acd-992f6e74281f@nginx.com> 09.03.2021 17:43, Gao,Yan(ACG VCP) ?????: > We cannot close quic fd to let old session complete when reload. > Can detach ebpf reuseport group manually when ngx_close_listening_sockets? Hello Gao,Yan, I'm not sure I understand what you are trying to do. Do you have some issues with existing quic implementations in nginx? 
> > Linxu kernel > commit e57892f50a07953053dcb1e0c9431197e569c258 > Merge: bfdfa51702de 0ab5539f8584 > Author: Alexei Starovoitov > Date: Fri Jul 17 20:18:18 2020 -0700 > > Merge branch 'bpf-socket-lookup' > > Jakub Sitnicki says: > > BPF sk_lookup program runs when transport layer is looking up a listening > socket for a new connection request (TCP), or when looking up an > unconnected socket for a packet (UDP). > > To select a socket BPF program fetches it from a map holding socket > references, like SOCKMAP or SOCKHASH, calls bpf_sk_assign(ctx, sk, ...) > helper to record the selection, and returns SK_PASS code. Transport layer > then uses the selected socket as a result of socket lookup. > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > From gaoyan09 at baidu.com Wed Mar 10 03:17:17 2021 From: gaoyan09 at baidu.com (Gao,Yan(ACG VCP)) Date: Wed, 10 Mar 2021 03:17:17 +0000 Subject: [QUIC] When old worker listen fd detach ebpf reuseport group when reload Message-ID: <3CAE3FCE-434A-4F12-A1AF-7E19ADE639D2@contoso.com> Hello Vladimir Homutov, >I'm not sure I understand what you are trying to do. >Do you have some issues with existing quic implementations in nginx? I just want to know how nginx handle old and new quic connections when reload. Nginx keep quic connections open when reload to complete old connections. But new connections can still be handled by old workers. Can the listening fd detach from reuseport group with keeping open, as kernel says, ebpf only look up an unconnected socket for a packet (UDP) Gao,Yan(ACG VCP) -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From gaoyan09 at baidu.com Wed Mar 10 06:11:46 2021 From: gaoyan09 at baidu.com (Gao,Yan(ACG VCP)) Date: Wed, 10 Mar 2021 06:11:46 +0000 Subject: [QUIC] When old worker listen fd detach ebpf reuseport group when reload In-Reply-To: <3CAE3FCE-434A-4F12-A1AF-7E19ADE639D2@contoso.com> References: <3CAE3FCE-434A-4F12-A1AF-7E19ADE639D2@contoso.com> Message-ID: I test nginx-quic with ngtcp2: ~/ngtcp2/examples/client 127.0.0.1 443 https://example.com:443/index.html --dcid=1a812bf8290b2bd5 And same dcid not always hash to same worker?as access.log: 127.0.0.1 - - [10/Mar/2021:14:04:00 +0800] "GET /index.html HTTP/3.0" 200 788 "-" "nghttp3/ngtcp2 client" "quic" "h3-29" 88 9149 127.0.0.1 - - [10/Mar/2021:14:04:05 +0800] "GET /index.html HTTP/3.0" 200 788 "-" "nghttp3/ngtcp2 client" "quic" "h3-29" 94 9149 127.0.0.1 - - [10/Mar/2021:14:04:09 +0800] "GET /index.html HTTP/3.0" 200 788 "-" "nghttp3/ngtcp2 client" "quic" "h3-29" 100 9147 The last column shows nginx worker pid Nginx.conf: worker_processes 4; error_log logs/error.log debug; events { worker_connections 1024; } quic_bpf on; http { log_format quic '$remote_addr - $remote_user [$time_local] ' '"$request" $status $body_bytes_sent ' '"$http_referer" "$http_user_agent" "$quic" "$http3" $connection $pid'; access_log logs/access.log quic; server { listen 443 http3 reuseport; listen 443 ssl; ssl_certificate server.crt; ssl_certificate_key server.key; ssl_protocols TLSv1.3; location / { add_header Alt-Svc '$http3=":443"; ma=86400'; } } } sbin/nginx -V nginx version: nginx/1.19.7 built by gcc 8.4.0 (Ubuntu 8.4.0-3ubuntu2) built with OpenSSL 1.1.1 (compatible; BoringSSL) (running with BoringSSL) TLS SNI support enabled configure arguments: --with-debug --with-http_v3_module --with-http_quic_module --with-cc-opt=-I../boringssl/include --with-ld-opt='-L../boringssl/build/ssl -L../boringssl/build/crypto' uname -a Linux nginx-quic 5.10.0-051000-generic #202012132330 SMP Sun Dec 13 23:33:36 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux 
Gao,Yan(ACG VCP) ???: "Gao,Yan(ACG VCP)" ??: 2021?3?10? ??? ??11:17 ???: "nginx-devel at nginx.org" , "vl at nginx.com" ??: Re: [QUIC] When old worker listen fd detach ebpf reuseport group when reload Hello Vladimir Homutov, >I'm not sure I understand what you are trying to do. >Do you have some issues with existing quic implementations in nginx? I just want to know how nginx handle old and new quic connections when reload. Nginx keep quic connections open when reload to complete old connections. But new connections can still be handled by old workers. Can the listening fd detach from reuseport group with keeping open, as kernel says, ebpf only look up an unconnected socket for a packet (UDP) Gao,Yan(ACG VCP) -------------- next part -------------- An HTML attachment was scrubbed... URL: From vl at nginx.com Wed Mar 10 07:00:46 2021 From: vl at nginx.com (Vladimir Homutov) Date: Wed, 10 Mar 2021 10:00:46 +0300 Subject: [QUIC] When old worker listen fd detach ebpf reuseport group when reload In-Reply-To: <3CAE3FCE-434A-4F12-A1AF-7E19ADE639D2@contoso.com> References: <3CAE3FCE-434A-4F12-A1AF-7E19ADE639D2@contoso.com> Message-ID: 10.03.2021 06:17, Gao,Yan(ACG VCP) ?????: > Hello Vladimir Homutov, > >>I'm not sure I understand what you are trying to do. > >>Do you have some issues with existing quic implementations in nginx? > > I just want to know how nginx handle old and new quic connections when > reload. > > Nginx keep quic connections open when reload to complete old connections. > > But new connections can still be handled by old workers. > > Can the listening fd detach from reuseport group with keeping open, as > kernel says, ebpf only look up an unconnected socket for a packet (UDP) > > Gao,Yan(ACG VCP) > Each worker process has it's own socket, identified by SO_COOKIE. Such sockets belong to same reuseport group. BPF is used to route packets with the same key (injected into DCID when connection is established) to the same socket. 
The reload part is not yet complete. New connections may reach old workers. Since worker knows it is terminating, it will not accept such connection. Client will retry, and next time it will probably reach new worker (or the old one again). You cannot touch old socket, since it is needed to work with existing connections in old worker (and it needs to be in reuseport group, so that packets could reach proper worker). As I said, there is still work to do in regard to reloads and upgrades. From ranier.vf at gmail.com Thu Mar 11 01:29:11 2021 From: ranier.vf at gmail.com (Ranier Vilela) Date: Wed, 10 Mar 2021 22:29:11 -0300 Subject: Windows build (nginx-19.8) Message-ID: Hi, The documentation at http://nginx.org/en/docs/howto_build_on_win32.html, it's a little out of date. But I can build nginx-1.10 successfully. However I am trying to build nginx-1.19.8 now, and I have encountered problems. Windows 10 64 bits Msvc 2019 64 bits ./configure --with-cc=cl --builddir=objs --prefix= \ > --conf-path=conf/nginx.conf --pid-path=logs/nginx.pid \ > --http-log-path=logs/access.log --error-log-path=logs/error.log \ > --sbin-path=nginx.exe --http-client-body-temp-path=temp/client_body_temp \ > --http-proxy-temp-path=temp/proxy_temp \ > --http-fastcgi-temp-path=temp/fastcgi_temp \ > --with-cc-opt=-DFD_SETSIZE=1024 --with-pcre=objs/lib/pcre-8.38 \ > --with-zlib=objs/lib/zlib-1.2.8 \ > --with-select_module --with-debug \ > --add-dynamic-module=ngx_rcpdv checking for OS + MINGW32_NT-6.2 1.0.11(0.46/3/2) i686 + using Microsoft Visual C++ compiler + cl version: auto/cc/msvc: line 117: [: : integer expression expected checking for MINGW32_NT-6.2 specific features configuring additional dynamic modules adding module in ngx_rcpdv ngx_rcpdv/config: line 19: pg_config: command not found ngx_rcpdv/config: line 20: pg_config: command not found ngx_http_rcpdv_module: using Sirius 1.0 checking for pg ... 
not found + ngx_http_rcpdv_module was configured creating objs/Makefile Configuration summary + using PCRE library: objs/lib/pcre-8.38 + OpenSSL library is not used + using zlib library: objs/lib/zlib-1.2.8 nginx path prefix: "" nginx binary file: "/nginx.exe" nginx modules path: "/modules" nginx configuration prefix: "/conf" nginx configuration file: "/conf/nginx.conf" nginx pid file: "/logs/nginx.pid" nginx error log file: "/logs/error.log" nginx http access log file: "/logs/access.log" nginx http client request body temporary files: "temp/client_body_temp" nginx http proxy temporary files: "temp/proxy_temp" nginx http fastcgi temporary files: "temp/fastcgi_temp" nginx http uwsgi temporary files: "uwsgi_temp" nginx http scgi temporary files: "scgi_temp" nmake Microsoft (R) Program Maintenance Utility Vers?o 14.28.29337.0 Direitos autorais da Microsoft Corporation. Todos os direitos reservados. "c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.28.29333\bin\HostX64\x64\nmake.exe" -f objs/Makefile Microsoft (R) Program Maintenance Utility Vers?o 14.28.29337.0 Direitos autorais da Microsoft Corporation. Todos os direitos reservados. NMAKE : fatal error U1073: n?o sabe como criar 'src/os/win32/ngx_win32_config.h' Stop. NMAKE : fatal error U1077: '"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.28.29333\bin\HostX64\x64\nmake.exe"' : c?digo de retorno '0x2' Stop. It seems to me that ngx_win32_config.h was not created. Question, is Windows build, is it supported yet? regards, Ranier Vilela -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Mar 11 01:48:13 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 11 Mar 2021 01:48:13 +0000 Subject: [nginx] Version bump. 
Message-ID: details: https://hg.nginx.org/nginx/rev/43d9c9c2981f branches: changeset: 7800:43d9c9c2981f user: Maxim Dounin date: Thu Mar 11 04:46:22 2021 +0300 description: Version bump. diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r 34e76ceabcec -r 43d9c9c2981f src/core/nginx.h --- a/src/core/nginx.h Tue Mar 09 18:27:51 2021 +0300 +++ b/src/core/nginx.h Thu Mar 11 04:46:22 2021 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1019008 -#define NGINX_VERSION "1.19.8" +#define nginx_version 1019009 +#define NGINX_VERSION "1.19.9" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From mdounin at mdounin.ru Thu Mar 11 01:48:16 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 11 Mar 2021 01:48:16 +0000 Subject: [nginx] Mail: fixed build without SSL. Message-ID: details: https://hg.nginx.org/nginx/rev/777373b5a169 branches: changeset: 7801:777373b5a169 user: Maxim Dounin date: Thu Mar 11 04:46:26 2021 +0300 description: Mail: fixed build without SSL. Broken by d84f13618277 and 12ea1de7d87c (1.19.8). Reported by Sergey Osokin. 
diffstat: src/mail/ngx_mail_auth_http_module.c | 5 +++-- src/mail/ngx_mail_handler.c | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diffs (51 lines): diff -r 43d9c9c2981f -r 777373b5a169 src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c Thu Mar 11 04:46:22 2021 +0300 +++ b/src/mail/ngx_mail_auth_http_module.c Thu Mar 11 04:46:26 2021 +0300 @@ -1135,10 +1135,10 @@ ngx_mail_auth_http_create_request(ngx_ma size_t len; ngx_buf_t *b; ngx_str_t login, passwd; + ngx_connection_t *c; #if (NGX_MAIL_SSL) ngx_str_t verify, subject, issuer, serial, fingerprint, raw_cert, cert; - ngx_connection_t *c; ngx_mail_ssl_conf_t *sslcf; #endif ngx_mail_core_srv_conf_t *cscf; @@ -1151,9 +1151,10 @@ ngx_mail_auth_http_create_request(ngx_ma return NULL; } + c = s->connection; + #if (NGX_MAIL_SSL) - c = s->connection; sslcf = ngx_mail_get_module_srv_conf(s, ngx_mail_ssl_module); if (c->ssl && sslcf->verify) { diff -r 43d9c9c2981f -r 777373b5a169 src/mail/ngx_mail_handler.c --- a/src/mail/ngx_mail_handler.c Thu Mar 11 04:46:22 2021 +0300 +++ b/src/mail/ngx_mail_handler.c Thu Mar 11 04:46:26 2021 +0300 @@ -272,16 +272,17 @@ ngx_mail_proxy_protocol_handler(ngx_even static void ngx_mail_init_session_handler(ngx_event_t *rev) { - ngx_connection_t *c; - ngx_mail_session_t *s; + ngx_connection_t *c; c = rev->data; - s = c->data; #if (NGX_MAIL_SSL) { + ngx_mail_session_t *s; ngx_mail_ssl_conf_t *sslcf; + s = c->data; + sslcf = ngx_mail_get_module_srv_conf(s, ngx_mail_ssl_module); if (sslcf->enable || s->ssl) { From mdounin at mdounin.ru Thu Mar 11 02:04:15 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 11 Mar 2021 05:04:15 +0300 Subject: Windows build (nginx-19.8) In-Reply-To: References: Message-ID: Hello! On Wed, Mar 10, 2021 at 10:29:11PM -0300, Ranier Vilela wrote: > The documentation at http://nginx.org/en/docs/howto_build_on_win32.html, > it's a little out of date. > But I can build nginx-1.10 successfully. 
> However I am trying to build nginx-1.19.8 now, and I have encountered > problems. > Windows 10 64 bits > Msvc 2019 64 bits > > ./configure --with-cc=cl --builddir=objs --prefix= \ [...] > nmake > > Microsoft (R) Program Maintenance Utility Vers?o 14.28.29337.0 > Direitos autorais da Microsoft Corporation. Todos os direitos reservados. > "c:\Program Files (x86)\Microsoft Visual > Studio\2019\Community\VC\Tools\MSVC\14.28.29333\bin\HostX64\x64\nmake.exe" > -f objs/Makefile > > Microsoft (R) Program Maintenance Utility Vers?o 14.28.29337.0 > Direitos autorais da Microsoft Corporation. Todos os direitos reservados. > NMAKE : fatal error U1073: n?o sabe como criar > 'src/os/win32/ngx_win32_config.h' > Stop. > NMAKE : fatal error U1077: '"c:\Program Files (x86)\Microsoft Visual > Studio\2019\Community\VC\Tools\MSVC\14.28.29333\bin\HostX64\x64\nmake.exe"' > : c?digo de retorno '0x2' > Stop. > > It seems to me that ngx_win32_config.h was not created. > Question, is Windows build, is it supported yet? >From build steps shown and the error message it looks like you are trying to use the source tarball from the site to build win32 version. This is not going to work, as source tarballs does not contain win32-specific sources. You have to obtain sources from the repository, as the howto article says: : Check out nginx sources from the hg.nginx.org repository. For : example: : : hg clone http://hg.nginx.org/nginx Hope this helps. -- Maxim Dounin http://mdounin.ru/ From ranier.vf at gmail.com Thu Mar 11 02:34:33 2021 From: ranier.vf at gmail.com (Ranier Vilela) Date: Wed, 10 Mar 2021 23:34:33 -0300 Subject: Windows build (nginx-19.8) In-Reply-To: References: Message-ID: Em qua., 10 de mar. de 2021 ?s 23:04, Maxim Dounin escreveu: > Hello! > > : Check out nginx sources from the hg.nginx.org repository. For > : example: > : > : hg clone http://hg.nginx.org/nginx > > Hope this helps. > Many thanks Maxim, worked. 
regards, Ranier Vilela -------------- next part -------------- An HTML attachment was scrubbed... URL: From gaoyan09 at baidu.com Thu Mar 11 07:08:55 2021 From: gaoyan09 at baidu.com (Gao,Yan(ACG VCP)) Date: Thu, 11 Mar 2021 07:08:55 +0000 Subject: [QUIC] Wrong dcid len in ngx_quic_select_socket_by_dcid Message-ID: <797307A9-5FE9-45B0-B6E1-E4D89BBE4489@baidu.com> Console client: ~/ngtcp2/examples/client 127.0.0.1 443 https://example.com:443/index.html --dcid=00000000000000166142cdef2ef2ca5e4be5 Bpf log: <...>-43083 [001] d.s1 94117.746924: bpf_trace_printk: nginx quic socket len 0x1d <...>-43083 [001] d.s1 94117.746936: bpf_trace_printk: nginx quic default route for key 0x16 <...>-43083 [000] d.s1 94117.764132: bpf_trace_printk: nginx quic socket len 0x1d <...>-43083 [000] d.s1 94117.764140: bpf_trace_printk: nginx quic socket selected by key 0x20 <...>-43083 [000] d.s1 94117.764709: bpf_trace_printk: nginx quic socket selected by key 0x20 <...>-43083 [000] d.s1 94117.769174: bpf_trace_printk: nginx quic socket selected by key 0x20 <...>-43083 [000] d.s1 94127.852160: bpf_trace_printk: nginx quic socket selected by key 0x20 The [nginx quic socket len] is added to show the case, it should be 0x12, as 18 bytes Len should be assigned after advance_data(1) Patch and pcap: see the attachment Gao,Yan(ACG VCP) -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: wrong_dcid_len.patch Type: application/octet-stream Size: 936 bytes Desc: wrong_dcid_len.patch URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: wrong dcid len.pcap Type: application/octet-stream Size: 8774 bytes Desc: wrong dcid len.pcap URL: From ru at nginx.com Thu Mar 11 07:20:44 2021 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 11 Mar 2021 07:20:44 +0000 Subject: [nginx] Removed "ch" argument from ngx_pass_open_channel(). 
Message-ID: details: https://hg.nginx.org/nginx/rev/0215ec9aaa8a branches: changeset: 7802:0215ec9aaa8a user: Ruslan Ermilov date: Thu Mar 11 09:58:45 2021 +0300 description: Removed "ch" argument from ngx_pass_open_channel(). diffstat: src/os/unix/ngx_process_cycle.c | 59 +++++++++++++--------------------------- 1 files changed, 19 insertions(+), 40 deletions(-) diffs (130 lines): diff -r 777373b5a169 -r 0215ec9aaa8a src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c Thu Mar 11 04:46:26 2021 +0300 +++ b/src/os/unix/ngx_process_cycle.c Thu Mar 11 09:58:45 2021 +0300 @@ -15,7 +15,7 @@ static void ngx_start_worker_processes(n ngx_int_t type); static void ngx_start_cache_manager_processes(ngx_cycle_t *cycle, ngx_uint_t respawn); -static void ngx_pass_open_channel(ngx_cycle_t *cycle, ngx_channel_t *ch); +static void ngx_pass_open_channel(ngx_cycle_t *cycle); static void ngx_signal_worker_processes(ngx_cycle_t *cycle, int signo); static ngx_uint_t ngx_reap_children(ngx_cycle_t *cycle); static void ngx_master_process_exit(ngx_cycle_t *cycle); @@ -335,25 +335,16 @@ ngx_single_process_cycle(ngx_cycle_t *cy static void ngx_start_worker_processes(ngx_cycle_t *cycle, ngx_int_t n, ngx_int_t type) { - ngx_int_t i; - ngx_channel_t ch; + ngx_int_t i; ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "start worker processes"); - ngx_memzero(&ch, sizeof(ngx_channel_t)); - - ch.command = NGX_CMD_OPEN_CHANNEL; - for (i = 0; i < n; i++) { ngx_spawn_process(cycle, ngx_worker_process_cycle, (void *) (intptr_t) i, "worker process", type); - ch.pid = ngx_processes[ngx_process_slot].pid; - ch.slot = ngx_process_slot; - ch.fd = ngx_processes[ngx_process_slot].channel[0]; - - ngx_pass_open_channel(cycle, &ch); + ngx_pass_open_channel(cycle); } } @@ -361,9 +352,8 @@ ngx_start_worker_processes(ngx_cycle_t * static void ngx_start_cache_manager_processes(ngx_cycle_t *cycle, ngx_uint_t respawn) { - ngx_uint_t i, manager, loader; - ngx_path_t **path; - ngx_channel_t ch; + ngx_uint_t 
i, manager, loader; + ngx_path_t **path; manager = 0; loader = 0; @@ -388,14 +378,7 @@ ngx_start_cache_manager_processes(ngx_cy &ngx_cache_manager_ctx, "cache manager process", respawn ? NGX_PROCESS_JUST_RESPAWN : NGX_PROCESS_RESPAWN); - ngx_memzero(&ch, sizeof(ngx_channel_t)); - - ch.command = NGX_CMD_OPEN_CHANNEL; - ch.pid = ngx_processes[ngx_process_slot].pid; - ch.slot = ngx_process_slot; - ch.fd = ngx_processes[ngx_process_slot].channel[0]; - - ngx_pass_open_channel(cycle, &ch); + ngx_pass_open_channel(cycle); if (loader == 0) { return; @@ -405,20 +388,21 @@ ngx_start_cache_manager_processes(ngx_cy &ngx_cache_loader_ctx, "cache loader process", respawn ? NGX_PROCESS_JUST_SPAWN : NGX_PROCESS_NORESPAWN); + ngx_pass_open_channel(cycle); +} + + +static void +ngx_pass_open_channel(ngx_cycle_t *cycle) +{ + ngx_int_t i; + ngx_channel_t ch; + ch.command = NGX_CMD_OPEN_CHANNEL; ch.pid = ngx_processes[ngx_process_slot].pid; ch.slot = ngx_process_slot; ch.fd = ngx_processes[ngx_process_slot].channel[0]; - ngx_pass_open_channel(cycle, &ch); -} - - -static void -ngx_pass_open_channel(ngx_cycle_t *cycle, ngx_channel_t *ch) -{ - ngx_int_t i; - for (i = 0; i < ngx_last_process; i++) { if (i == ngx_process_slot @@ -430,14 +414,14 @@ ngx_pass_open_channel(ngx_cycle_t *cycle ngx_log_debug6(NGX_LOG_DEBUG_CORE, cycle->log, 0, "pass channel s:%i pid:%P fd:%d to s:%i pid:%P fd:%d", - ch->slot, ch->pid, ch->fd, + ch.slot, ch.pid, ch.fd, i, ngx_processes[i].pid, ngx_processes[i].channel[0]); /* TODO: NGX_AGAIN */ ngx_write_channel(ngx_processes[i].channel[0], - ch, sizeof(ngx_channel_t), cycle->log); + &ch, sizeof(ngx_channel_t), cycle->log); } } @@ -621,12 +605,7 @@ ngx_reap_children(ngx_cycle_t *cycle) } - ch.command = NGX_CMD_OPEN_CHANNEL; - ch.pid = ngx_processes[ngx_process_slot].pid; - ch.slot = ngx_process_slot; - ch.fd = ngx_processes[ngx_process_slot].channel[0]; - - ngx_pass_open_channel(cycle, &ch); + ngx_pass_open_channel(cycle); live = 1; From vl at nginx.com Thu Mar 11 
11:20:57 2021 From: vl at nginx.com (Khomutov Vladimir) Date: Thu, 11 Mar 2021 14:20:57 +0300 Subject: [QUIC] Wrong dcid len in ngx_quic_select_socket_by_dcid In-Reply-To: <797307A9-5FE9-45B0-B6E1-E4D89BBE4489@baidu.com> References: <797307A9-5FE9-45B0-B6E1-E4D89BBE4489@baidu.com> Message-ID: <149d8cf4-20b5-7fd7-db50-3292e8cef4e9@nginx.com> 11.03.2021 10:08, Gao,Yan(ACG VCP) ?????: > > Console client: > > ~/ngtcp2/examples/client 127.0.0.1 443 > https://example.com:443/index.html > --dcid=00000000000000166142cdef2ef2ca5e4be5 > > Bpf log: > > <...>-43083?? [001] d.s1 94117.746924: bpf_trace_printk: nginx quic > socket len 0x1d > > <...>-43083?? [001] d.s1 94117.746936: bpf_trace_printk: nginx quic > default route for key 0x16 > > <...>-43083?? [000] d.s1 94117.764132: bpf_trace_printk: nginx quic > socket len 0x1d > > <...>-43083?? [000] d.s1 94117.764140: bpf_trace_printk: nginx quic > socket selected by key 0x20 > > <...>-43083?? [000] d.s1 94117.764709: bpf_trace_printk: nginx quic > socket selected by key 0x20 > > <...>-43083?? [000] d.s1 94117.769174: bpf_trace_printk: nginx quic > socket selected by key 0x20 > > <...>-43083?? [000] d.s1 94127.852160: bpf_trace_printk: nginx quic > socket selected by key 0x20 > > The ?[nginx quic socket len] is added to show the case, it shoud be > 0x12, as 18 bytes > > Len should assign after advance_data(1) > > Patch and pcap see the attachment > > Gao,Yan(ACG VCP) > > Thank you for reporting, now I see the problem, will fix it. -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From geniuss.dev at gmail.com Thu Mar 11 18:28:49 2021 From: geniuss.dev at gmail.com (geniuss99) Date: Thu, 11 Mar 2021 21:28:49 +0300 Subject: [PATCH] Keepalive: add new option "keepalive_ssl_respect_sni" Message-ID: src/http/modules/ngx_http_upstream_keepalive_module.c | 42 +++++++++++++++++++ 1 files changed, 42 insertions(+), 0 deletions(-) # HG changeset patch # User geniuss99 # Date 1615484979 -10800 # Thu Mar 11 20:49:39 2021 +0300 # Node ID ed1348e8e25381b3b1a2540289effcf7ccec6fd6 # Parent 0215ec9aaa8af6036c62e1db676c9b0cc1d5fca4 Keepalive: add new option "keepalive_ssl_respect_sni". This option allows handling the following usecase: 1. proxy https requests with different hostnames to server with same ip; 2. use cache of upstream connections via keepalive option in upstream module; 3. reuse connection from keepalive pool only if ip and servername used during handshake with upstream match hostname from downstream request. When this option is turned on not only the ip address of upstream server is taken into account upon connection search but also servername used during handshake procedure. 
diff -r 0215ec9aaa8a -r ed1348e8e253 src/http/modules/ngx_http_upstream_keepalive_module.c --- a/src/http/modules/ngx_http_upstream_keepalive_module.c Thu Mar 11 09:58:45 2021 +0300 +++ b/src/http/modules/ngx_http_upstream_keepalive_module.c Thu Mar 11 20:49:39 2021 +0300 @@ -15,6 +15,10 @@ ngx_uint_t requests; ngx_msec_t timeout; +#if (NGX_HTTP_SSL) + ngx_flag_t ssl_respect_sni; +#endif + ngx_queue_t cache; ngx_queue_t free; @@ -49,6 +53,7 @@ #if (NGX_HTTP_SSL) ngx_event_set_peer_session_pt original_set_session; ngx_event_save_peer_session_pt original_save_session; + ngx_str_t *http_request_server; #endif } ngx_http_upstream_keepalive_peer_data_t; @@ -100,6 +105,16 @@ offsetof(ngx_http_upstream_keepalive_srv_conf_t, requests), NULL }, +#if (NGX_HTTP_SSL) + + { ngx_string("keepalive_ssl_respect_sni"), + NGX_HTTP_UPS_CONF|NGX_CONF_TAKE1, + ngx_conf_set_flag_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_upstream_keepalive_srv_conf_t, ssl_respect_sni), + NULL }, + +#endif ngx_null_command }; @@ -152,6 +167,10 @@ ngx_conf_init_msec_value(kcf->timeout, 60000); ngx_conf_init_uint_value(kcf->requests, 100); +#if (NGX_HTTP_SSL) + ngx_conf_init_value(kcf->ssl_respect_sni, 0); +#endif + if (kcf->original_init_upstream(cf, us) != NGX_OK) { return NGX_ERROR; } @@ -217,6 +236,8 @@ kp->original_save_session = r->upstream->peer.save_session; r->upstream->peer.set_session = ngx_http_upstream_keepalive_set_session; r->upstream->peer.save_session = ngx_http_upstream_keepalive_save_session; + + kp->http_request_server = &r->headers_in.server; #endif return NGX_OK; @@ -232,6 +253,10 @@ ngx_int_t rc; ngx_queue_t *q, *cache; ngx_connection_t *c; +#if (NGX_HTTP_SSL) + ngx_int_t *ssl_respect_sni; + const char *ssl_server_name; +#endif ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, "get keepalive peer"); @@ -248,6 +273,10 @@ cache = &kp->conf->cache; +#if (NGX_HTTP_SSL) + ssl_respect_sni = &kp->conf->ssl_respect_sni; +#endif + for (q = ngx_queue_head(cache); q != 
ngx_queue_sentinel(cache); q = ngx_queue_next(q)) @@ -259,6 +288,15 @@ item->socklen, pc->socklen) == 0) { +#if (NGX_HTTP_SSL) + /* check if server_name from ssl connection corresponds to requested host */ + if (*ssl_respect_sni == 1 && c->ssl != NULL && c->ssl->connection != NULL && c->ssl->handshaked) { + ssl_server_name = SSL_get_servername(c->ssl->connection, TLSEXT_NAMETYPE_host_name); + if (ssl_server_name != NULL && ngx_strncasecmp(kp->http_request_server->data, (u_char*) ssl_server_name, ngx_strlen(ssl_server_name)) != 0) { + continue; + } + } +#endif ngx_queue_remove(q); ngx_queue_insert_head(&kp->conf->free, q); @@ -516,6 +554,10 @@ conf->timeout = NGX_CONF_UNSET_MSEC; conf->requests = NGX_CONF_UNSET_UINT; +#if (NGX_HTTP_SSL) + conf->ssl_respect_sni = NGX_CONF_UNSET; +#endif + return conf; } From mdounin at mdounin.ru Fri Mar 12 19:37:14 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 12 Mar 2021 22:37:14 +0300 Subject: [PATCH] Keepalive: add new option "keepalive_ssl_respect_sni" In-Reply-To: References: Message-ID: Hello! On Thu, Mar 11, 2021 at 09:28:49PM +0300, geniuss99 wrote: > src/http/modules/ngx_http_upstream_keepalive_module.c | 42 +++++++++++++++++++ > 1 files changed, 42 insertions(+), 0 deletions(-) > > > # HG changeset patch > # User geniuss99 > # Date 1615484979 -10800 > # Thu Mar 11 20:49:39 2021 +0300 > # Node ID ed1348e8e25381b3b1a2540289effcf7ccec6fd6 > # Parent 0215ec9aaa8af6036c62e1db676c9b0cc1d5fca4 > Keepalive: add new option "keepalive_ssl_respect_sni". > > This option allows handling the following usecase: > 1. proxy https requests with different hostnames to server with same ip; > 2. use cache of upstream connections via keepalive option in upstream module; > 3. reuse connection from keepalive pool only if ip and servername used during > handshake with upstream match hostname from downstream request. 
> > When this option is turned on not only the ip address of upstream server is > taken into account upon connection search but also servername used during > handshake procedure. Thank you for the patch. Please see the answer here: http://mailman.nginx.org/pipermail/nginx-devel/2019-August/012583.html -- Maxim Dounin http://mdounin.ru/ From geniuss.dev at gmail.com Tue Mar 16 14:42:01 2021 From: geniuss.dev at gmail.com (geniuss99) Date: Tue, 16 Mar 2021 17:42:01 +0300 Subject: [PATCH] Keepalive: add new option "keepalive_ssl_respect_sni" In-Reply-To: References: Message-ID: Hi. > SSL sessions are cached in the context of the upstream{} block (or an implicit upstream when using an IP address or a DNS name) Oh, I didn't think of that. I guess this can be solved by patching the ngx_http_upstream_round_robin module and saving many sessions per each upstream peer. > No, thank you. The issues as observed in the tickets linked should be resolved by using distinct upstream blocks instead. So what was the reason you rejected the previous patch? Was it because of breaking ssl sessions caching mechanism? Or you just didn't see it fit for nginx from the design (architectural) point of view? Thanks. From geniuss.dev at gmail.com Tue Mar 16 19:04:27 2021 From: geniuss.dev at gmail.com (geniuss99) Date: Tue, 16 Mar 2021 22:04:27 +0300 Subject: [PATCH] gRPC: fixed bug when padding is used in DATA frame Message-ID: <13552d5b785104f9d137.1615921467@raider> src/http/modules/ngx_http_grpc_module.c | 16 +++++++++++++--- 1 files changed, 13 insertions(+), 3 deletions(-) # HG changeset patch # User geniuss99 # Date 1615921026 -10800 # Tue Mar 16 21:57:06 2021 +0300 # Node ID 13552d5b785104f9d137d956a6cbef25ec09b345 # Parent ed1348e8e25381b3b1a2540289effcf7ccec6fd6 gRPC: fixed bug when padding is used in DATA frame. As per RFC 7540 DATA frame MAY contain padding if the PADDED flag is set in "Type" field (see clause 6.1). 
When such frame is sent from an upstream which previously defined payload length via "Content-Length" header nginx fails with an error: "upstream sent response body larger than indicated content length". This happens because padding is not taken into account while comparing expected and received payload length. diff -r ed1348e8e253 -r 13552d5b7851 src/http/modules/ngx_http_grpc_module.c --- a/src/http/modules/ngx_http_grpc_module.c Thu Mar 11 20:49:39 2021 +0300 +++ b/src/http/modules/ngx_http_grpc_module.c Tue Mar 16 21:57:06 2021 +0300 @@ -2075,14 +2075,24 @@ } if (ctx->length != -1) { - if ((off_t) ctx->rest > ctx->length) { + if (ctx->flags & NGX_HTTP_V2_PADDED_FLAG) { + if (b->pos < b->last) { + u_char pad_length = *b->pos; + size_t payload_data_length = ctx->rest - pad_length - 1; // frame_payload_size_bytes - pad_length_bytes - pad_length_field_1_byte + + ctx->length -= payload_data_length; + } + + } else { + ctx->length -= ctx->rest; + } + + if (ctx->length < 0) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "upstream sent response body larger " "than indicated content length"); return NGX_ERROR; } - - ctx->length -= ctx->rest; } if (ctx->rest > ctx->recv_window) { From mdounin at mdounin.ru Tue Mar 16 23:24:35 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 17 Mar 2021 02:24:35 +0300 Subject: [PATCH] Keepalive: add new option "keepalive_ssl_respect_sni" In-Reply-To: References: Message-ID: Hello! On Tue, Mar 16, 2021 at 05:42:01PM +0300, geniuss99 wrote: > > SSL sessions are cached in the context of the upstream{} block (or an implicit upstream when using an IP address or a DNS name) > Oh, I didn't think of that. I guess this can be solved by patching the > ngx_http_upstream_round_robin module and saving many sessions per each > upstream peer. > > > No, thank you. The issues as observed in the tickets linked should be resolved by using distinct upstream blocks instead. > So what was the reason you rejected the previous patch? 
Was it because > of breaking ssl sessions caching mechanism? > Or you just didn't see it fit for nginx from the design > (architectural) point of view? >From the design point of view, upstream{} blocks expect all connections to a peer to be equivalent. At the same time, these connections might be established with different connection-specific settings, such as: - proxy_bind - proxy_socket_keepalive - proxy_ssl_certificate - proxy_ssl_certificate_key - proxy_ssl_ciphers - proxy_ssl_conf_command - proxy_ssl_crl - proxy_ssl_name - proxy_ssl_protocols - proxy_ssl_server_name - proxy_ssl_trusted_certificate - proxy_ssl_verify - proxy_ssl_verify_depth Trying to conditionally "respect" some of these settings, such as proxy_ssl_name, by caching connections based on the name in addition to the peer's address, looks wrong. The same applies to configurable caching key, as suggested in the previous patch. I think that two principal approaches are possible here: 1. Respect all the existing connection-specific settings automatically, and avoid using cached connections and/or saved SSL sessions if any of the settings does not match. 2. Assume that the configuration is written in a way which prevents misuse of cached connections / saved SSL sessions. Current approach is (2). That is, connections to the same peer should be equivalent (in most cases this can be achieved by using distinct upstream blocks if you have to use different connection-specific settings), or keepalive connections shouldn't be enabled (and SSL session reuse should be disabled as appropriate). Switching to (1) is possible, but will require significant effort, and have no obvious benefits for common configurations. IMHO, the only compelling reason to implement (1) is introduction of some form of default keepalive connections cache, but this is not something nginx supports. 
-- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Mar 16 23:45:13 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 17 Mar 2021 02:45:13 +0300 Subject: [PATCH] gRPC: fixed bug when padding is used in DATA frame In-Reply-To: <13552d5b785104f9d137.1615921467@raider> References: <13552d5b785104f9d137.1615921467@raider> Message-ID: Hello! On Tue, Mar 16, 2021 at 10:04:27PM +0300, geniuss99 wrote: > src/http/modules/ngx_http_grpc_module.c | 16 +++++++++++++--- > 1 files changed, 13 insertions(+), 3 deletions(-) > > > # HG changeset patch > # User geniuss99 > # Date 1615921026 -10800 > # Tue Mar 16 21:57:06 2021 +0300 > # Node ID 13552d5b785104f9d137d956a6cbef25ec09b345 > # Parent ed1348e8e25381b3b1a2540289effcf7ccec6fd6 > gRPC: fixed bug when padding is used in DATA frame. > > As per RFC 7540 DATA frame MAY contain padding if the PADDED flag is set > in "Type" field (see clause 6.1). > > When such frame is sent from an upstream which previously defined payload > length via "Content-Length" header nginx fails with an error: "upstream sent > response body larger than indicated content length". > > This happens because padding is not taken into account while comparing expected > and received payload length. Thanks for reporting this, looks like a bug introduced by me in 7680:39501ce97e29 (nginx 1.19.1). Do you have any particular servers/gRPC libraries in mind which actually use padding on DATA frames, and so nginx cannot talk with them after introduction of the check in question? 
> > diff -r ed1348e8e253 -r 13552d5b7851 src/http/modules/ngx_http_grpc_module.c > --- a/src/http/modules/ngx_http_grpc_module.c Thu Mar 11 20:49:39 2021 +0300 > +++ b/src/http/modules/ngx_http_grpc_module.c Tue Mar 16 21:57:06 2021 +0300 > @@ -2075,14 +2075,24 @@ > } > > if (ctx->length != -1) { > - if ((off_t) ctx->rest > ctx->length) { > + if (ctx->flags & NGX_HTTP_V2_PADDED_FLAG) { > + if (b->pos < b->last) { > + u_char pad_length = *b->pos; > + size_t payload_data_length = ctx->rest - pad_length - 1; // frame_payload_size_bytes - pad_length_bytes - pad_length_field_1_byte > + > + ctx->length -= payload_data_length; > + } > + > + } else { > + ctx->length -= ctx->rest; > + } > + > + if (ctx->length < 0) { > ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > "upstream sent response body larger " > "than indicated content length"); > return NGX_ERROR; > } > - > - ctx->length -= ctx->rest; > } > > if (ctx->rest > ctx->recv_window) { The fix looks suboptimal. Rather, the check should be moved to the end of the function, where padding is already handled. I'll take a look how to fix this properly. (Note well that as per nginx style, local variables are defined at the start of the function, "//" comments are not used, and maximum line length is 80 characters. The "Contributing Changes" article[1] have additional hints and links to appropriate part of the development guide with the details.) 
[1] http://nginx.org/en/docs/contributing_changes.html -- Maxim Dounin http://mdounin.ru/ From vl at nginx.com Wed Mar 17 06:43:43 2021 From: vl at nginx.com (Khomutov Vladimir) Date: Wed, 17 Mar 2021 09:43:43 +0300 Subject: [QUIC] Wrong dcid len in ngx_quic_select_socket_by_dcid In-Reply-To: <149d8cf4-20b5-7fd7-db50-3292e8cef4e9@nginx.com> References: <797307A9-5FE9-45B0-B6E1-E4D89BBE4489@baidu.com> <149d8cf4-20b5-7fd7-db50-3292e8cef4e9@nginx.com> Message-ID: > 11.03.2021 10:08, Gao,Yan(ACG VCP) ?????: >> >> Console client: >> >> ~/ngtcp2/examples/client 127.0.0.1 443 >> https://example.com:443/index.html >> --dcid=00000000000000166142cdef2ef2ca5e4be5 >> >> Bpf log: >> >> ?????????? <...>-43083?? [001] d.s1 94117.746924: bpf_trace_printk: >> nginx quic socket len 0x1d >> >> ?????????? <...>-43083?? [001] d.s1 94117.746936: bpf_trace_printk: >> nginx quic default route for key 0x16 >> >> ?????????? <...>-43083?? [000] d.s1 94117.764132: bpf_trace_printk: >> nginx quic socket len 0x1d >> >> ?????????? <...>-43083?? [000] d.s1 94117.764140: bpf_trace_printk: >> nginx quic socket selected by key 0x20 >> >> ?????????? <...>-43083?? [000] d.s1 94117.764709: bpf_trace_printk: >> nginx quic socket selected by key 0x20 >> >> ?????????? <...>-43083?? [000] d.s1 94117.769174: bpf_trace_printk: >> nginx quic socket selected by key 0x20 >> >> ?????????? <...>-43083?? [000] d.s1 94127.852160: bpf_trace_printk: >> nginx quic socket selected by key 0x20 >> >> The ?[nginx quic socket len] is added to show the case, it shoud be >> 0x12, as 18 bytes >> >> Len should assign after advance_data(1) >> >> Patch and pcap see the attachment >> >> Gao,Yan(ACG VCP) >> >> > Thank you for reporting, now I see the problem, will fix it. > Fixed in http://hg.nginx.org/nginx-quic/rev/1a489587e1c8, thanks again! -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From geniuss.dev at gmail.com Wed Mar 17 15:27:17 2021 From: geniuss.dev at gmail.com (geniuss99) Date: Wed, 17 Mar 2021 18:27:17 +0300 Subject: [PATCH] gRPC: fixed bug when padding is used in DATA frame In-Reply-To: References: <13552d5b785104f9d137.1615921467@raider> Message-ID: > The fix looks suboptimal. Rather, the check should be moved to > the end of the function, where padding is already handled. I'll > take a look how to fix this properly. Feel free to modify the patch as you need. I merely fixed a bug at the place of origin. > (Note well that as per nginx style, local variables are defined at > the start of the function, "//" comments are not used, and maximum > line length is 80 characters. The "Contributing Changes" > article[1] have additional hints and links to appropriate part of > the development guide with the details.) Yeah, I know. I just wanted to make the code clear and compact so that you understand everything. Maybe I shouldn't have named it a patch. You may consider it a pseudocode then :) > Do you have any particular servers/gRPC libraries in mind which > actually use padding on DATA frames, and so nginx cannot talk with > them after introduction of the check in question? I use gRPC module to pass normal http requests to h2 upstreams, not gRPC ones. Works like a charm. So can't give you any gRPC servers for testing. You can observe the bug by connecting to developers.google.com h2 server for example. They use padding in DATA streams. On Wed, Mar 17, 2021 at 2:45 AM Maxim Dounin wrote: > > Hello! 
> > On Tue, Mar 16, 2021 at 10:04:27PM +0300, geniuss99 wrote: > > > src/http/modules/ngx_http_grpc_module.c | 16 +++++++++++++--- > > 1 files changed, 13 insertions(+), 3 deletions(-) > > > > > > # HG changeset patch > > # User geniuss99 > > # Date 1615921026 -10800 > > # Tue Mar 16 21:57:06 2021 +0300 > > # Node ID 13552d5b785104f9d137d956a6cbef25ec09b345 > > # Parent ed1348e8e25381b3b1a2540289effcf7ccec6fd6 > > gRPC: fixed bug when padding is used in DATA frame. > > > > As per RFC 7540 DATA frame MAY contain padding if the PADDED flag is set > > in "Type" field (see clause 6.1). > > > > When such frame is sent from an upstream which previously defined payload > > length via "Content-Length" header nginx fails with an error: "upstream sent > > response body larger than indicated content length". > > > > This happens because padding is not taken into account while comparing expected > > and received payload length. > > Thanks for reporting this, looks like a bug introduced by me in > 7680:39501ce97e29 (nginx 1.19.1). > > Do you have any particular servers/gRPC libraries in mind which > actually use padding on DATA frames, and so nginx cannot talk with > them after introduction of the check in question? 
> > > > > diff -r ed1348e8e253 -r 13552d5b7851 src/http/modules/ngx_http_grpc_module.c > > --- a/src/http/modules/ngx_http_grpc_module.c Thu Mar 11 20:49:39 2021 +0300 > > +++ b/src/http/modules/ngx_http_grpc_module.c Tue Mar 16 21:57:06 2021 +0300 > > @@ -2075,14 +2075,24 @@ > > } > > > > if (ctx->length != -1) { > > - if ((off_t) ctx->rest > ctx->length) { > > + if (ctx->flags & NGX_HTTP_V2_PADDED_FLAG) { > > + if (b->pos < b->last) { > > + u_char pad_length = *b->pos; > > + size_t payload_data_length = ctx->rest - pad_length - 1; // frame_payload_size_bytes - pad_length_bytes - pad_length_field_1_byte > > + > > + ctx->length -= payload_data_length; > > + } > > + > > + } else { > > + ctx->length -= ctx->rest; > > + } > > + > > + if (ctx->length < 0) { > > ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > > "upstream sent response body larger " > > "than indicated content length"); > > return NGX_ERROR; > > } > > - > > - ctx->length -= ctx->rest; > > } > > > > if (ctx->rest > ctx->recv_window) { > > The fix looks suboptimal. Rather, the check should be moved to > the end of the function, where padding is already handled. I'll > take a look how to fix this properly. > > (Note well that as per nginx style, local variables are defined at > the start of the function, "//" comments are not used, and maximum > line length is 80 characters. The "Contributing Changes" > article[1] have additional hints and links to appropriate part of > the development guide with the details.) 
> > [1] http://nginx.org/en/docs/contributing_changes.html > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From geniuss.dev at gmail.com Wed Mar 17 16:17:20 2021 From: geniuss.dev at gmail.com (geniuss99) Date: Wed, 17 Mar 2021 19:17:20 +0300 Subject: [PATCH] Keepalive: add new option "keepalive_ssl_respect_sni" In-Reply-To: References: Message-ID: > From the design point of view, upstream{} blocks expect all > connections to a peer to be equivalent. I see your point. One upstream block per static set of rules (connection-specific settings). What I need is one upstream block per dynamic set of rules. With current design this means creating as many upstream blocks as there are sets of rules. So if I have hundreds of such sets I will need to create hundreds of identical upstream blocks with different names. This is simple yet not flexible at all. Sounds like we need new upstream module for such usecases, something like "superupstream" :) From mdounin at mdounin.ru Thu Mar 18 00:55:59 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 18 Mar 2021 03:55:59 +0300 Subject: [PATCH] gRPC: fixed bug when padding is used in DATA frame In-Reply-To: References: <13552d5b785104f9d137.1615921467@raider> Message-ID: Hello! On Wed, Mar 17, 2021 at 06:27:17PM +0300, geniuss99 wrote: [...] > > Do you have any particular servers/gRPC libraries in mind which > > actually use padding on DATA frames, and so nginx cannot talk with > > them after introduction of the check in question? > > I use gRPC module to pass normal http requests to h2 upstreams, not gRPC ones. > Works like a charm. > > So can't give you any gRPC servers for testing. You can observe the bug by > connecting to developers.google.com h2 server for example. They use padding > in DATA streams. Thanks for the example. 
Interesting, it only use padding when returning compressed responses, likely as an attempt to mitigate BREACH attacks (https://en.wikipedia.org/wiki/BREACH). The following patch seem to fix this properly. Review and testing appreciated. # HG changeset patch # User Maxim Dounin # Date 1616027280 -10800 # Thu Mar 18 03:28:00 2021 +0300 # Node ID f8dbaa4ae09de125420e5a325b2ccac4a5636494 # Parent 0215ec9aaa8af6036c62e1db676c9b0cc1d5fca4 gRPC: fixed handling of padding on DATA frames. The response size check introduced in 39501ce97e29 did not take into account possible padding on DATA frames, resulting in incorrect "upstream sent response body larger than indicated content length" errors if upstream server used padding in responses with known length. Fix is to check the actual size of response buffers produced by the code, similarly to how it is done in other protocols, instead of checking the size of DATA frames. Reported at: http://mailman.nginx.org/pipermail/nginx-devel/2021-March/013907.html diff --git a/src/http/modules/ngx_http_grpc_module.c b/src/http/modules/ngx_http_grpc_module.c --- a/src/http/modules/ngx_http_grpc_module.c +++ b/src/http/modules/ngx_http_grpc_module.c @@ -2074,17 +2074,6 @@ ngx_http_grpc_filter(void *data, ssize_t return NGX_ERROR; } - if (ctx->length != -1) { - if ((off_t) ctx->rest > ctx->length) { - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "upstream sent response body larger " - "than indicated content length"); - return NGX_ERROR; - } - - ctx->length -= ctx->rest; - } - if (ctx->rest > ctx->recv_window) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "upstream violated stream flow control, " @@ -2450,6 +2439,18 @@ ngx_http_grpc_filter(void *data, ssize_t b->pos = b->last; buf->last = b->pos; + if (ctx->length != -1) { + + if (buf->last - buf->pos > ctx->length) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "upstream sent response body larger " + "than indicated content length"); + return NGX_ERROR; + } + + 
ctx->length -= buf->last - buf->pos; + } + return NGX_AGAIN; } @@ -2457,6 +2458,18 @@ ngx_http_grpc_filter(void *data, ssize_t buf->last = b->pos; ctx->rest = ctx->padding; + if (ctx->length != -1) { + + if (buf->last - buf->pos > ctx->length) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "upstream sent response body larger " + "than indicated content length"); + return NGX_ERROR; + } + + ctx->length -= buf->last - buf->pos; + } + done: if (ctx->padding) { -- Maxim Dounin http://mdounin.ru/ From geniuss.dev at gmail.com Fri Mar 19 16:32:31 2021 From: geniuss.dev at gmail.com (geniuss99) Date: Fri, 19 Mar 2021 19:32:31 +0300 Subject: [PATCH] gRPC: fixed bug when padding is used in DATA frame In-Reply-To: References: <13552d5b785104f9d137.1615921467@raider> Message-ID: Made some tests. Looks fine. Thanks! On Thu, Mar 18, 2021 at 3:56 AM Maxim Dounin wrote: > > Hello! > > On Wed, Mar 17, 2021 at 06:27:17PM +0300, geniuss99 wrote: > > [...] > > > > Do you have any particular servers/gRPC libraries in mind which > > > actually use padding on DATA frames, and so nginx cannot talk with > > > them after introduction of the check in question? > > > > I use gRPC module to pass normal http requests to h2 upstreams, not gRPC ones. > > Works like a charm. > > > > So can't give you any gRPC servers for testing. You can observe the bug by > > connecting to developers.google.com h2 server for example. They use padding > > in DATA streams. > > Thanks for the example. Interesting, it only use padding when > returning compressed responses, likely as an attempt to mitigate > BREACH attacks (https://en.wikipedia.org/wiki/BREACH). > > The following patch seem to fix this properly. Review and testing > appreciated. > > # HG changeset patch > # User Maxim Dounin > # Date 1616027280 -10800 > # Thu Mar 18 03:28:00 2021 +0300 > # Node ID f8dbaa4ae09de125420e5a325b2ccac4a5636494 > # Parent 0215ec9aaa8af6036c62e1db676c9b0cc1d5fca4 > gRPC: fixed handling of padding on DATA frames. 
> > The response size check introduced in 39501ce97e29 did not take into > account possible padding on DATA frames, resulting in incorrect > "upstream sent response body larger than indicated content length" errors > if upstream server used padding in responses with known length. > > Fix is to check the actual size of response buffers produced by the code, > similarly to how it is done in other protocols, instead of checking > the size of DATA frames. > > Reported at: > http://mailman.nginx.org/pipermail/nginx-devel/2021-March/013907.html > > diff --git a/src/http/modules/ngx_http_grpc_module.c b/src/http/modules/ngx_http_grpc_module.c > --- a/src/http/modules/ngx_http_grpc_module.c > +++ b/src/http/modules/ngx_http_grpc_module.c > @@ -2074,17 +2074,6 @@ ngx_http_grpc_filter(void *data, ssize_t > return NGX_ERROR; > } > > - if (ctx->length != -1) { > - if ((off_t) ctx->rest > ctx->length) { > - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > - "upstream sent response body larger " > - "than indicated content length"); > - return NGX_ERROR; > - } > - > - ctx->length -= ctx->rest; > - } > - > if (ctx->rest > ctx->recv_window) { > ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > "upstream violated stream flow control, " > @@ -2450,6 +2439,18 @@ ngx_http_grpc_filter(void *data, ssize_t > b->pos = b->last; > buf->last = b->pos; > > + if (ctx->length != -1) { > + > + if (buf->last - buf->pos > ctx->length) { > + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > + "upstream sent response body larger " > + "than indicated content length"); > + return NGX_ERROR; > + } > + > + ctx->length -= buf->last - buf->pos; > + } > + > return NGX_AGAIN; > } > > @@ -2457,6 +2458,18 @@ ngx_http_grpc_filter(void *data, ssize_t > buf->last = b->pos; > ctx->rest = ctx->padding; > > + if (ctx->length != -1) { > + > + if (buf->last - buf->pos > ctx->length) { > + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > + "upstream sent response body larger " > + "than indicated content 
length"); > + return NGX_ERROR; > + } > + > + ctx->length -= buf->last - buf->pos; > + } > + > done: > > if (ctx->padding) { > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Mon Mar 22 16:17:55 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 22 Mar 2021 19:17:55 +0300 Subject: [PATCH] Add io_uring support in AIO(async io) module In-Reply-To: References: <443b30a2-dbb7-f72f-da2e-b459e1619aa3@nginx.com> <1c2a277b-58d7-524a-23e1-3c193ccc7f7d@nginx.com> <3d737faa-a8f6-ae8f-fce3-54089fecd1c3@nginx.com> <89fb913c-1262-3419-88a3-feaad6f43deb@nginx.com> Message-ID: Hello! On Sat, Feb 27, 2021 at 12:48:04PM +0000, Zhao, Ping wrote: > Yes, io_uring can help Nginx achieve same performance with lower > resource cost. This is the key improvement of io_uring I think. > It can't break through the HW limitation. Thanks for the patch and testing. Potentially this looks interesting, despite the fact that improvements even in terms of resource costs seem to be minor compared to properly configured nginx using other I/O variants. There are, however, some concerns regarding the interface itself and tunings need to be applied in order for it to work, as well as regressions in various kernels out there. There are also some questions about the patch, notably retries in case of short reads, though this probably needs better understanding of the interface. As of now, the consensus is that we'll get back to this some time later, though probably we want to make this not a default aio method at first, but rather an alternative one, available with something like "aio io_uring;", similarly to "aio threads;". Thanks again for your work. We'll contact you once we return to this topic. 
-- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Tue Mar 23 12:25:48 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 23 Mar 2021 15:25:48 +0300 Subject: [PATCH] gRPC: fixed bug when padding is used in DATA frame In-Reply-To: References: <13552d5b785104f9d137.1615921467@raider> Message-ID: <41F2D893-52D4-4781-BD63-7C6609D21835@nginx.com> > On 18 Mar 2021, at 03:55, Maxim Dounin wrote: > > Hello! > > On Wed, Mar 17, 2021 at 06:27:17PM +0300, geniuss99 wrote: > > [...] > >>> Do you have any particular servers/gRPC libraries in mind which >>> actually use padding on DATA frames, and so nginx cannot talk with >>> them after introduction of the check in question? >> >> I use gRPC module to pass normal http requests to h2 upstreams, not gRPC ones. >> Works like a charm. >> >> So can't give you any gRPC servers for testing. You can observe the bug by >> connecting to developers.google.com h2 server for example. They use padding >> in DATA streams. > > Thanks for the example. Interesting, it only use padding when > returning compressed responses, likely as an attempt to mitigate > BREACH attacks (https://en.wikipedia.org/wiki/BREACH). > > The following patch seem to fix this properly. Review and testing > appreciated. > > # HG changeset patch > # User Maxim Dounin > # Date 1616027280 -10800 > # Thu Mar 18 03:28:00 2021 +0300 > # Node ID f8dbaa4ae09de125420e5a325b2ccac4a5636494 > # Parent 0215ec9aaa8af6036c62e1db676c9b0cc1d5fca4 > gRPC: fixed handling of padding on DATA frames. > > The response size check introduced in 39501ce97e29 did not take into > account possible padding on DATA frames, resulting in incorrect > "upstream sent response body larger than indicated content length" errors > if upstream server used padding in responses with known length. > > Fix is to check the actual size of response buffers produced by the code, > similarly to how it is done in other protocols, instead of checking > the size of DATA frames. 
> > Reported at: > http://mailman.nginx.org/pipermail/nginx-devel/2021-March/013907.html > > diff --git a/src/http/modules/ngx_http_grpc_module.c b/src/http/modules/ngx_http_grpc_module.c > --- a/src/http/modules/ngx_http_grpc_module.c > +++ b/src/http/modules/ngx_http_grpc_module.c > @@ -2074,17 +2074,6 @@ ngx_http_grpc_filter(void *data, ssize_t > return NGX_ERROR; > } > > - if (ctx->length != -1) { > - if ((off_t) ctx->rest > ctx->length) { > - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > - "upstream sent response body larger " > - "than indicated content length"); > - return NGX_ERROR; > - } > - > - ctx->length -= ctx->rest; > - } > - > if (ctx->rest > ctx->recv_window) { > ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > "upstream violated stream flow control, " > @@ -2450,6 +2439,18 @@ ngx_http_grpc_filter(void *data, ssize_t > b->pos = b->last; > buf->last = b->pos; > > + if (ctx->length != -1) { > + > + if (buf->last - buf->pos > ctx->length) { > + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > + "upstream sent response body larger " > + "than indicated content length"); > + return NGX_ERROR; > + } > + > + ctx->length -= buf->last - buf->pos; > + } > + > return NGX_AGAIN; > } > > @@ -2457,6 +2458,18 @@ ngx_http_grpc_filter(void *data, ssize_t > buf->last = b->pos; > ctx->rest = ctx->padding; > > + if (ctx->length != -1) { > + > + if (buf->last - buf->pos > ctx->length) { > + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > + "upstream sent response body larger " > + "than indicated content length"); > + return NGX_ERROR; > + } > + > + ctx->length -= buf->last - buf->pos; > + } > + > done: > > if (ctx->padding) { > Looks good. -- Sergey Kandaurov From mdounin at mdounin.ru Tue Mar 23 15:11:41 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 23 Mar 2021 15:11:41 +0000 Subject: [nginx] gRPC: fixed handling of padding on DATA frames. 
Message-ID: details: https://hg.nginx.org/nginx/rev/6df9d7df2784 branches: changeset: 7803:6df9d7df2784 user: Maxim Dounin date: Tue Mar 23 16:52:23 2021 +0300 description: gRPC: fixed handling of padding on DATA frames. The response size check introduced in 39501ce97e29 did not take into account possible padding on DATA frames, resulting in incorrect "upstream sent response body larger than indicated content length" errors if upstream server used padding in responses with known length. Fix is to check the actual size of response buffers produced by the code, similarly to how it is done in other protocols, instead of checking the size of DATA frames. Reported at: http://mailman.nginx.org/pipermail/nginx-devel/2021-March/013907.html diffstat: src/http/modules/ngx_http_grpc_module.c | 35 ++++++++++++++++++++++---------- 1 files changed, 24 insertions(+), 11 deletions(-) diffs (59 lines): diff -r 0215ec9aaa8a -r 6df9d7df2784 src/http/modules/ngx_http_grpc_module.c --- a/src/http/modules/ngx_http_grpc_module.c Thu Mar 11 09:58:45 2021 +0300 +++ b/src/http/modules/ngx_http_grpc_module.c Tue Mar 23 16:52:23 2021 +0300 @@ -2074,17 +2074,6 @@ ngx_http_grpc_filter(void *data, ssize_t return NGX_ERROR; } - if (ctx->length != -1) { - if ((off_t) ctx->rest > ctx->length) { - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "upstream sent response body larger " - "than indicated content length"); - return NGX_ERROR; - } - - ctx->length -= ctx->rest; - } - if (ctx->rest > ctx->recv_window) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "upstream violated stream flow control, " @@ -2450,6 +2439,18 @@ ngx_http_grpc_filter(void *data, ssize_t b->pos = b->last; buf->last = b->pos; + if (ctx->length != -1) { + + if (buf->last - buf->pos > ctx->length) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "upstream sent response body larger " + "than indicated content length"); + return NGX_ERROR; + } + + ctx->length -= buf->last - buf->pos; + } + return NGX_AGAIN; } @@ 
-2457,6 +2458,18 @@ ngx_http_grpc_filter(void *data, ssize_t buf->last = b->pos; ctx->rest = ctx->padding; + if (ctx->length != -1) { + + if (buf->last - buf->pos > ctx->length) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "upstream sent response body larger " + "than indicated content length"); + return NGX_ERROR; + } + + ctx->length -= buf->last - buf->pos; + } + done: if (ctx->padding) { From mdounin at mdounin.ru Tue Mar 23 15:13:45 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 23 Mar 2021 18:13:45 +0300 Subject: [PATCH] gRPC: fixed bug when padding is used in DATA frame In-Reply-To: References: <13552d5b785104f9d137.1615921467@raider> Message-ID: Hello! On Fri, Mar 19, 2021 at 07:32:31PM +0300, geniuss99 wrote: > Made some tests. Looks fine. > Thanks! Thanks for reporting this, committed: https://hg.nginx.org/nginx/rev/6df9d7df2784 -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Wed Mar 24 11:17:11 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 24 Mar 2021 11:17:11 +0000 Subject: [nginx] Cancel keepalive and lingering close on EOF better (ticket #2145). Message-ID: details: https://hg.nginx.org/nginx/rev/4a9d28f8f39e branches: changeset: 7804:4a9d28f8f39e user: Sergey Kandaurov date: Wed Mar 24 14:03:33 2021 +0300 description: Cancel keepalive and lingering close on EOF better (ticket #2145). Unlike in 75e908236701, which added the logic to ngx_http_finalize_request(), this change moves it to a more generic routine ngx_http_finalize_connection() to cover cases when a request is finalized with NGX_DONE. In particular, this fixes unwanted connection transition into the keepalive state after receiving EOF while discarding request body. With edge-triggered event methods that means the connection will last for extra seconds as set in the keepalive_timeout directive. 
diffstat: src/http/ngx_http_request.c | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diffs (27 lines): diff -r 6df9d7df2784 -r 4a9d28f8f39e src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Tue Mar 23 16:52:23 2021 +0300 +++ b/src/http/ngx_http_request.c Wed Mar 24 14:03:33 2021 +0300 @@ -2643,11 +2643,6 @@ ngx_http_finalize_request(ngx_http_reque ngx_del_timer(c->write); } - if (c->read->eof) { - ngx_http_close_request(r, 0); - return; - } - ngx_http_finalize_connection(r); } @@ -2746,6 +2741,11 @@ ngx_http_finalize_connection(ngx_http_re r = r->main; + if (r->connection->read->eof) { + ngx_http_close_request(r, 0); + return; + } + if (r->reading_body) { r->keepalive = 0; r->lingering_close = 1; From mdounin at mdounin.ru Thu Mar 25 22:45:51 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 25 Mar 2021 22:45:51 +0000 Subject: [nginx] HTTP/2: improved handling of "keepalive_timeout 0". Message-ID: details: https://hg.nginx.org/nginx/rev/ade8160120c1 branches: changeset: 7805:ade8160120c1 user: Maxim Dounin date: Fri Mar 26 01:44:57 2021 +0300 description: HTTP/2: improved handling of "keepalive_timeout 0". Without explicit handling, a zero timer was actually added, leading to multiple unneeded syscalls. Further, sending GOAWAY frame early might be beneficial for clients. Reported by Sergey Kandaurov. 
diffstat: src/http/v2/ngx_http_v2.c | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diffs (14 lines): diff -r 4a9d28f8f39e -r ade8160120c1 src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Wed Mar 24 14:03:33 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Fri Mar 26 01:44:57 2021 +0300 @@ -1368,7 +1368,9 @@ ngx_http_v2_state_headers(ngx_http_v2_co clcf = ngx_http_get_module_loc_conf(h2c->http_connection->conf_ctx, ngx_http_core_module); - if (h2c->connection->requests >= clcf->keepalive_requests) { + if (clcf->keepalive_timeout == 0 + || h2c->connection->requests >= clcf->keepalive_requests) + { h2c->goaway = 1; if (ngx_http_v2_send_goaway(h2c, NGX_HTTP_V2_NO_ERROR) == NGX_ERROR) { From mdounin at mdounin.ru Thu Mar 25 22:45:54 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 25 Mar 2021 22:45:54 +0000 Subject: [nginx] Events: fixed expiration of timers in the past. Message-ID: details: https://hg.nginx.org/nginx/rev/2ed5d03c2d90 branches: changeset: 7806:2ed5d03c2d90 user: Maxim Dounin date: Fri Mar 26 01:44:59 2021 +0300 description: Events: fixed expiration of timers in the past. If, at the start of an event loop iteration, there are any timers in the past (including timers expiring now), the ngx_process_events() function is called with zero timeout, and returns immediately even if there are no events. But the following code only calls ngx_event_expire_timers() if time actually changed, so this results in nginx spinning in the event loop till current time changes. While such timers are not expected to appear under normal conditions, as all such timers should be removed on previous event loop iterations, they still can appear due to bugs, zero timeouts set in the configuration (if this is not explicitly handled by the code), or due to external time changes on systems without clock_gettime(CLOCK_MONOTONIC). Fix is to call ngx_event_expire_timers() unconditionally. 
Calling it on each event loop iteration is not expected to be significant from performance point of view, especially compared to a syscall in ngx_process_events(). diffstat: src/event/ngx_event.c | 4 +--- 1 files changed, 1 insertions(+), 3 deletions(-) diffs (14 lines): diff -r ade8160120c1 -r 2ed5d03c2d90 src/event/ngx_event.c --- a/src/event/ngx_event.c Fri Mar 26 01:44:57 2021 +0300 +++ b/src/event/ngx_event.c Fri Mar 26 01:44:59 2021 +0300 @@ -257,9 +257,7 @@ ngx_process_events_and_timers(ngx_cycle_ ngx_shmtx_unlock(&ngx_accept_mutex); } - if (delta) { - ngx_event_expire_timers(); - } + ngx_event_expire_timers(); ngx_event_process_posted(cycle, &ngx_posted_events); } From alfred at huji.fr Fri Mar 26 17:05:27 2021 From: alfred at huji.fr (Alfred Sawaya) Date: Fri, 26 Mar 2021 18:05:27 +0100 Subject: Send headers in output body filter ? Message-ID: <26982118-de02-d3a8-15b7-f61f78812018@huji.fr> Hello, I am developping a nginx C module that aims to modify the backend response (handled by the proxy module) in order to redact specified strings. I have a working module, but I am stuck on a case where the module modfy the length of the backend response. As the output header filter is called before the output body filter, the content-length is already sent when the module get the backend response and modifies it. Is there any normal way to workaround this? I would like to avoid writing a complete upstream handler... I wonder if it is a known use case? Thanks ! Alfred From mdounin at mdounin.ru Fri Mar 26 19:02:29 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 26 Mar 2021 22:02:29 +0300 Subject: Send headers in output body filter ? In-Reply-To: <26982118-de02-d3a8-15b7-f61f78812018@huji.fr> References: <26982118-de02-d3a8-15b7-f61f78812018@huji.fr> Message-ID: Hello! 
On Fri, Mar 26, 2021 at 06:05:27PM +0100, Alfred Sawaya wrote: > I am developping a nginx C module that aims to modify the backend > response (handled by the proxy module) in order to redact specified strings. > > I have a working module, but I am stuck on a case where the module modfy > the length of the backend response. As the output header filter is > called before the output body filter, the content-length is already sent > when the module get the backend response and modifies it. > > Is there any normal way to workaround this? I would like to avoid > writing a complete upstream handler... I wonder if it is a known use case? The usual approach is to remove the Content-Length header if you expect that the response length might be changed by your body filter. You also have to either remove ETag or change it to a weak one, if present. For examples see SSI filter, sub filter, addition filter, and many others. -- Maxim Dounin http://mdounin.ru/ From alfred at huji.fr Fri Mar 26 20:05:50 2021 From: alfred at huji.fr (Alfred Sawaya) Date: Fri, 26 Mar 2021 21:05:50 +0100 Subject: Send headers in output body filter ? In-Reply-To: References: <26982118-de02-d3a8-15b7-f61f78812018@huji.fr> Message-ID: <27824d58-a673-248b-4275-49c7c540a4b0@huji.fr> Thank you for your reply. Yes, ngx_http_clear_content_length(r) works as the response becomes chunked, but actually I wonder if there is a possibility to set the Content-Length correctly in this case ? Maybe it is not possible? I could do a patch but I think that if it is not implemented yet, it should be because I have a wrong architecture or something like that. On 26/03/2021 20:02, Maxim Dounin wrote: > Hello! > > On Fri, Mar 26, 2021 at 06:05:27PM +0100, Alfred Sawaya wrote: > >> I am developping a nginx C module that aims to modify the backend >> response (handled by the proxy module) in order to redact specified strings. 
>> >> I have a working module, but I am stuck on a case where the module modfy >> the length of the backend response. As the output header filter is >> called before the output body filter, the content-length is already sent >> when the module get the backend response and modifies it. >> >> Is there any normal way to workaround this? I would like to avoid >> writing a complete upstream handler... I wonder if it is a known use case? > The usual approach is to remove the Content-Length header if you > expect that the response length might be changed by your body > filter. You also have to either remove ETag or change it to a > weak one, if present. For examples see SSI filter, sub filter, > addition filter, and many others. > From alfred at huji.fr Fri Mar 26 23:56:16 2021 From: alfred at huji.fr (Alfred Sawaya) Date: Sat, 27 Mar 2021 00:56:16 +0100 Subject: Send headers in output body filter ? In-Reply-To: <27824d58-a673-248b-4275-49c7c540a4b0@huji.fr> References: <26982118-de02-d3a8-15b7-f61f78812018@huji.fr> <27824d58-a673-248b-4275-49c7c540a4b0@huji.fr> Message-ID: <364b35f7-cb8a-9dd9-e964-0b6b44bfa6c9@huji.fr> Actually, I will need to modify the http status code depending on the output body contents, so it is not just a question of? content-length, it is more general than that : I need to have the full backend response (headers + body) in order to produce the full client response. It does not seem possible as I understand that the read upstream - write downstream loop is hardcoded into nginx, and a module does not have any chance to change that, isn't it ? ngx_http_upstream_send_request -> ??? ngx_http_upstream_process_header -> ??? ??? ngx_http_upstream_send_response -> ??? ??? ??? ngx_http_send_header and then loop read upstream / write downstream. Thank again. On 26/03/2021 21:05, Alfred Sawaya wrote: > Thank you for your reply. 
> > > Yes, ngx_http_clear_content_length(r) works as the response becomes > chunked, but actually I wonder if there is a possibility to set the > Content-Length correctly in this case ? > > Maybe it is not possible? I could do a patch but I think that if it is > not implemented yet, it should be because I have a wrong architecture or > something like that. > > > > On 26/03/2021 20:02, Maxim Dounin wrote: >> Hello! >> >> On Fri, Mar 26, 2021 at 06:05:27PM +0100, Alfred Sawaya wrote: >> >>> I am developping a nginx C module that aims to modify the backend >>> response (handled by the proxy module) in order to redact specified strings. >>> >>> I have a working module, but I am stuck on a case where the module modfy >>> the length of the backend response. As the output header filter is >>> called before the output body filter, the content-length is already sent >>> when the module get the backend response and modifies it. >>> >>> Is there any normal way to workaround this? I would like to avoid >>> writing a complete upstream handler... I wonder if it is a known use case? >> The usual approach is to remove the Content-Length header if you >> expect that the response length might be changed by your body >> filter. You also have to either remove ETag or change it to a >> weak one, if present. For examples see SSI filter, sub filter, >> addition filter, and many others. >> From mdounin at mdounin.ru Sat Mar 27 01:48:52 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 27 Mar 2021 04:48:52 +0300 Subject: Send headers in output body filter ? In-Reply-To: <364b35f7-cb8a-9dd9-e964-0b6b44bfa6c9@huji.fr> References: <26982118-de02-d3a8-15b7-f61f78812018@huji.fr> <27824d58-a673-248b-4275-49c7c540a4b0@huji.fr> <364b35f7-cb8a-9dd9-e964-0b6b44bfa6c9@huji.fr> Message-ID: Hello! On Sat, Mar 27, 2021 at 12:56:16AM +0100, Alfred Sawaya wrote: > Actually, I will need to modify the http status code depending on the > output body contents, so it is not just a question of? 
content-length, > it is more general than that : I need to have the full backend response > (headers + body) in order to produce the full client response. > > > It does not seem possible as I understand that the read upstream - write > downstream loop is hardcoded into nginx, and a module does not have any > chance to change that, isn't it ? If you want to change the response status and headers based on the response body content, the only way is to buffer the response body and send the response headers after you've done processsing the response body. Note that since this implies buffering, this is not going to work for large responses. Examples of how to implement this can be seen in the image filter and xslt filter modules. -- Maxim Dounin http://mdounin.ru/ From alfred at huji.fr Sat Mar 27 02:39:39 2021 From: alfred at huji.fr (Alfred Sawaya) Date: Sat, 27 Mar 2021 03:39:39 +0100 Subject: Send headers in output body filter ? In-Reply-To: References: <26982118-de02-d3a8-15b7-f61f78812018@huji.fr> <27824d58-a673-248b-4275-49c7c540a4b0@huji.fr> <364b35f7-cb8a-9dd9-e964-0b6b44bfa6c9@huji.fr> Message-ID: <38911199-8456-52b4-ebba-112373b22ae2@huji.fr> Hello again, Thank you very much Maxim! It works perfectly. Alfred On 27/03/2021 02:48, Maxim Dounin wrote: > Hello! > > On Sat, Mar 27, 2021 at 12:56:16AM +0100, Alfred Sawaya wrote: > >> Actually, I will need to modify the http status code depending on the >> output body contents, so it is not just a question of? content-length, >> it is more general than that : I need to have the full backend response >> (headers + body) in order to produce the full client response. >> >> >> It does not seem possible as I understand that the read upstream - write >> downstream loop is hardcoded into nginx, and a module does not have any >> chance to change that, isn't it ? 
> If you want to change the response status and headers based on the
> response body content, the only way is to buffer the response body
> and send the response headers after you've done processing the
> response body.
diffstat: nginx/ngx_http_js_module.c | 99 ++++++++++++++++++++++++++++++++++++++++- nginx/ngx_stream_js_module.c | 102 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 194 insertions(+), 7 deletions(-) diffs (304 lines): diff -r 309e073c873d -r 63f8aa50e03a nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Sat Mar 27 16:45:52 2021 +0000 +++ b/nginx/ngx_http_js_module.c Sat Mar 27 16:21:21 2021 +0000 @@ -86,7 +86,9 @@ static void ngx_http_js_content_write_ev static void ngx_http_js_content_finalize(ngx_http_request_t *r, ngx_http_js_ctx_t *ctx); static ngx_int_t ngx_http_js_header_filter(ngx_http_request_t *r); -static ngx_int_t ngx_http_js_variable(ngx_http_request_t *r, +static ngx_int_t ngx_http_js_variable_set(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_http_js_variable_var(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_js_init_vm(ngx_http_request_t *r); static void ngx_http_js_cleanup_ctx(void *data); @@ -209,6 +211,7 @@ static char *ngx_http_js_include(ngx_con static char *ngx_http_js_import(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static char *ngx_http_js_set(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +static char *ngx_http_js_var(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static char *ngx_http_js_content(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static char *ngx_http_js_body_filter_set(ngx_conf_t *cf, ngx_command_t *cmd, @@ -250,6 +253,13 @@ static ngx_command_t ngx_http_js_comman 0, NULL }, + { ngx_string("js_var"), + NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE12, + ngx_http_js_var, + 0, + 0, + NULL }, + { ngx_string("js_content"), NGX_HTTP_LOC_CONF|NGX_HTTP_LMT_CONF|NGX_CONF_TAKE1, ngx_http_js_content, @@ -968,7 +978,7 @@ ngx_http_js_body_filter(ngx_http_request static ngx_int_t -ngx_http_js_variable(ngx_http_request_t *r, ngx_http_variable_value_t *v, +ngx_http_js_variable_set(ngx_http_request_t *r, 
ngx_http_variable_value_t *v, uintptr_t data) { ngx_str_t *fname = (ngx_str_t *) data; @@ -1024,6 +1034,33 @@ ngx_http_js_variable(ngx_http_request_t static ngx_int_t +ngx_http_js_variable_var(ngx_http_request_t *r, ngx_http_variable_value_t *v, + uintptr_t data) +{ + ngx_http_complex_value_t *cv = (ngx_http_complex_value_t *) data; + + ngx_str_t value; + + if (cv != NULL) { + if (ngx_http_complex_value(r, cv, &value) != NGX_OK) { + return NGX_ERROR; + } + + } else { + ngx_str_null(&value); + } + + v->len = value.len; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = value.data; + + return NGX_OK; +} + + +static ngx_int_t ngx_http_js_init_vm(ngx_http_request_t *r) { njs_int_t rc; @@ -3735,7 +3772,7 @@ ngx_http_js_set(ngx_conf_t *cf, ngx_comm *fname = value[2]; - v->get_handler = ngx_http_js_variable; + v->get_handler = ngx_http_js_variable_set; v->data = (uintptr_t) fname; return NGX_CONF_OK; @@ -3743,6 +3780,62 @@ ngx_http_js_set(ngx_conf_t *cf, ngx_comm static char * +ngx_http_js_var(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_str_t *value; + ngx_int_t index; + ngx_http_variable_t *v; + ngx_http_complex_value_t *cv; + ngx_http_compile_complex_value_t ccv; + + value = cf->args->elts; + + if (value[1].data[0] != '$') { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid variable name \"%V\"", &value[1]); + return NGX_CONF_ERROR; + } + + value[1].len--; + value[1].data++; + + v = ngx_http_add_variable(cf, &value[1], NGX_HTTP_VAR_CHANGEABLE); + if (v == NULL) { + return NGX_CONF_ERROR; + } + + index = ngx_http_get_variable_index(cf, &value[1]); + if (index == NGX_ERROR) { + return NGX_CONF_ERROR; + } + + cv = NULL; + + if (cf->args->nelts == 3) { + cv = ngx_palloc(cf->pool, sizeof(ngx_http_complex_value_t)); + if (cv == NULL) { + return NGX_CONF_ERROR; + } + + ngx_memzero(&ccv, sizeof(ngx_http_compile_complex_value_t)); + + ccv.cf = cf; + ccv.value = &value[2]; + ccv.complex_value = cv; + + if (ngx_http_compile_complex_value(&ccv) 
!= NGX_OK) { + return NGX_CONF_ERROR; + } + } + + v->get_handler = ngx_http_js_variable_var; + v->data = (uintptr_t) cv; + + return NGX_CONF_OK; +} + + +static char * ngx_http_js_content(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { ngx_http_js_loc_conf_t *jlcf = conf; diff -r 309e073c873d -r 63f8aa50e03a nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Sat Mar 27 16:45:52 2021 +0000 +++ b/nginx/ngx_stream_js_module.c Sat Mar 27 16:21:21 2021 +0000 @@ -75,7 +75,9 @@ static ngx_int_t ngx_stream_js_phase_han ngx_str_t *name); static ngx_int_t ngx_stream_js_body_filter(ngx_stream_session_t *s, ngx_chain_t *in, ngx_uint_t from_upstream); -static ngx_int_t ngx_stream_js_variable(ngx_stream_session_t *s, +static ngx_int_t ngx_stream_js_variable_set(ngx_stream_session_t *s, + ngx_stream_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_stream_js_variable_var(ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_js_init_vm(ngx_stream_session_t *s); static void ngx_stream_js_drop_events(ngx_stream_js_ctx_t *ctx); @@ -123,6 +125,8 @@ static char *ngx_stream_js_import(ngx_co void *conf); static char *ngx_stream_js_set(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +static char *ngx_stream_js_var(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); static void *ngx_stream_js_create_main_conf(ngx_conf_t *cf); static char *ngx_stream_js_init_main_conf(ngx_conf_t *cf, void *conf); static void *ngx_stream_js_create_srv_conf(ngx_conf_t *cf); @@ -161,6 +165,13 @@ static ngx_command_t ngx_stream_js_comm 0, NULL }, + { ngx_string("js_var"), + NGX_STREAM_MAIN_CONF|NGX_CONF_TAKE12, + ngx_stream_js_var, + 0, + 0, + NULL }, + { ngx_string("js_access"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_slot, @@ -612,8 +623,8 @@ ngx_stream_js_body_filter(ngx_stream_ses static ngx_int_t -ngx_stream_js_variable(ngx_stream_session_t *s, ngx_stream_variable_value_t *v, - uintptr_t data) 
+ngx_stream_js_variable_set(ngx_stream_session_t *s, + ngx_stream_variable_value_t *v, uintptr_t data) { ngx_str_t *fname = (ngx_str_t *) data; @@ -668,6 +679,33 @@ ngx_stream_js_variable(ngx_stream_sessio static ngx_int_t +ngx_stream_js_variable_var(ngx_stream_session_t *s, + ngx_stream_variable_value_t *v, uintptr_t data) +{ + ngx_stream_complex_value_t *cv = (ngx_stream_complex_value_t *) data; + + ngx_str_t value; + + if (cv != NULL) { + if (ngx_stream_complex_value(s, cv, &value) != NGX_OK) { + return NGX_ERROR; + } + + } else { + ngx_str_null(&value); + } + + v->len = value.len; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = value.data; + + return NGX_OK; +} + + +static ngx_int_t ngx_stream_js_init_vm(ngx_stream_session_t *s) { njs_int_t rc; @@ -1737,13 +1775,69 @@ ngx_stream_js_set(ngx_conf_t *cf, ngx_co *fname = value[2]; - v->get_handler = ngx_stream_js_variable; + v->get_handler = ngx_stream_js_variable_set; v->data = (uintptr_t) fname; return NGX_CONF_OK; } +static char * +ngx_stream_js_var(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_str_t *value; + ngx_int_t index; + ngx_stream_variable_t *v; + ngx_stream_complex_value_t *cv; + ngx_stream_compile_complex_value_t ccv; + + value = cf->args->elts; + + if (value[1].data[0] != '$') { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid variable name \"%V\"", &value[1]); + return NGX_CONF_ERROR; + } + + value[1].len--; + value[1].data++; + + v = ngx_stream_add_variable(cf, &value[1], NGX_STREAM_VAR_CHANGEABLE); + if (v == NULL) { + return NGX_CONF_ERROR; + } + + index = ngx_stream_get_variable_index(cf, &value[1]); + if (index == NGX_ERROR) { + return NGX_CONF_ERROR; + } + + cv = NULL; + + if (cf->args->nelts == 3) { + cv = ngx_palloc(cf->pool, sizeof(ngx_stream_complex_value_t)); + if (cv == NULL) { + return NGX_CONF_ERROR; + } + + ngx_memzero(&ccv, sizeof(ngx_stream_compile_complex_value_t)); + + ccv.cf = cf; + ccv.value = &value[2]; + ccv.complex_value = cv; + + if 
(ngx_stream_compile_complex_value(&ccv) != NGX_OK) { + return NGX_CONF_ERROR; + } + } + + v->get_handler = ngx_stream_js_variable_var; + v->data = (uintptr_t) cv; + + return NGX_CONF_OK; +} + + static void * ngx_stream_js_create_main_conf(ngx_conf_t *cf) { From mdounin at mdounin.ru Sun Mar 28 14:47:06 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 28 Mar 2021 14:47:06 +0000 Subject: [nginx] Events: fixed "port_dissociate() failed" alerts with eventport. Message-ID: details: https://hg.nginx.org/nginx/rev/e0844646099b branches: changeset: 7807:e0844646099b user: Maxim Dounin date: Sun Mar 28 17:45:29 2021 +0300 description: Events: fixed "port_dissociate() failed" alerts with eventport. If an attempt is made to delete an event which was already reported, port_dissociate() returns an error. Fix is avoid doing anything if ev->active is not set. Possible alternative approach would be to avoid calling ngx_del_event() at all if ev->active is not set. This approach, however, will require something else to re-add the other event of the connection, since both read and write events are dissociated if an event is reported on a file descriptor. Currently ngx_eventport_del_event() re-associates write event if called to delete read event, and vice versa. 
diffstat: src/event/modules/ngx_eventport_module.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 2ed5d03c2d90 -r e0844646099b src/event/modules/ngx_eventport_module.c --- a/src/event/modules/ngx_eventport_module.c Fri Mar 26 01:44:59 2021 +0300 +++ b/src/event/modules/ngx_eventport_module.c Sun Mar 28 17:45:29 2021 +0300 @@ -399,7 +399,7 @@ ngx_eventport_del_event(ngx_event_t *ev, return NGX_ERROR; } - } else { + } else if (ev->active) { ngx_log_debug1(NGX_LOG_DEBUG_EVENT, ev->log, 0, "eventport del event: fd:%d", c->fd); From mdounin at mdounin.ru Sun Mar 28 14:47:08 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 28 Mar 2021 14:47:08 +0000 Subject: [nginx] Resolver: added missing event handling after reading. Message-ID: details: https://hg.nginx.org/nginx/rev/eb54227110f0 branches: changeset: 7808:eb54227110f0 user: Maxim Dounin date: Sun Mar 28 17:45:31 2021 +0300 description: Resolver: added missing event handling after reading. If we need to be notified about further events, ngx_handle_read_event() needs to be called after a read event is processed. Without this, an event can be removed from the kernel and won't be reported again, notably when using oneshot event methods, such as eventport on Solaris. While here, error handling is also added, similar to one present in ngx_resolver_tcp_read(). This is not expected to make a difference and mostly added for consistency. 
diffstat: src/core/ngx_resolver.c | 19 +++++++++++++++++-- 1 files changed, 17 insertions(+), 2 deletions(-) diffs (34 lines): diff -r e0844646099b -r eb54227110f0 src/core/ngx_resolver.c --- a/src/core/ngx_resolver.c Sun Mar 28 17:45:29 2021 +0300 +++ b/src/core/ngx_resolver.c Sun Mar 28 17:45:31 2021 +0300 @@ -1563,13 +1563,28 @@ ngx_resolver_udp_read(ngx_event_t *rev) do { n = ngx_udp_recv(c, buf, NGX_RESOLVER_UDP_SIZE); - if (n < 0) { - return; + if (n == NGX_AGAIN) { + break; + } + + if (n == NGX_ERROR) { + goto failed; } ngx_resolver_process_response(rec->resolver, buf, n, 0); } while (rev->ready); + + if (ngx_handle_read_event(rev, 0) != NGX_OK) { + goto failed; + } + + return; + +failed: + + ngx_close_connection(rec->udp); + rec->udp = NULL; } From mdounin at mdounin.ru Sun Mar 28 14:47:11 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 28 Mar 2021 14:47:11 +0000 Subject: [nginx] Upstream: fixed non-buffered proxying with eventport. Message-ID: details: https://hg.nginx.org/nginx/rev/b1302f1dd2f6 branches: changeset: 7809:b1302f1dd2f6 user: Maxim Dounin date: Sun Mar 28 17:45:35 2021 +0300 description: Upstream: fixed non-buffered proxying with eventport. For new data to be reported with eventport on Solaris, ngx_handle_read_event() needs to be called after reading response headers. To do so, ngx_http_upstream_process_non_buffered_upstream() now called unconditionally if there are no prepread data. This won't cause any read() syscalls as long as upstream connection is not ready for reading (c->read->ready is not set), but will result in proper handling of all events. 
diffstat: src/http/ngx_http_upstream.c | 4 +--- 1 files changed, 1 insertions(+), 3 deletions(-) diffs (14 lines): diff -r eb54227110f0 -r b1302f1dd2f6 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Sun Mar 28 17:45:31 2021 +0300 +++ b/src/http/ngx_http_upstream.c Sun Mar 28 17:45:35 2021 +0300 @@ -3011,9 +3011,7 @@ ngx_http_upstream_send_response(ngx_http return; } - if (u->peer.connection->read->ready || u->length == 0) { - ngx_http_upstream_process_non_buffered_upstream(r, u); - } + ngx_http_upstream_process_non_buffered_upstream(r, u); } return; From mdounin at mdounin.ru Sun Mar 28 14:47:16 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 28 Mar 2021 14:47:16 +0000 Subject: [nginx] Upstream: fixed broken connection check with eventport. Message-ID: details: https://hg.nginx.org/nginx/rev/1bf8ab7063de branches: changeset: 7810:1bf8ab7063de user: Maxim Dounin date: Sun Mar 28 17:45:37 2021 +0300 description: Upstream: fixed broken connection check with eventport. For connection close to be reported with eventport on Solaris, ngx_handle_read_event() needs to be called. 
diffstat: src/http/ngx_http_upstream.c | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diffs (16 lines): diff -r b1302f1dd2f6 -r 1bf8ab7063de src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Sun Mar 28 17:45:35 2021 +0300 +++ b/src/http/ngx_http_upstream.c Sun Mar 28 17:45:37 2021 +0300 @@ -607,6 +607,12 @@ ngx_http_upstream_init_request(ngx_http_ u->store = u->conf->store; if (!u->store && !r->post_action && !u->conf->ignore_client_abort) { + + if (ngx_handle_read_event(r->connection->read, 0) != NGX_OK) { + ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + r->read_event_handler = ngx_http_upstream_rd_check_broken_connection; r->write_event_handler = ngx_http_upstream_wr_check_broken_connection; } From mdounin at mdounin.ru Sun Mar 28 14:47:20 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 28 Mar 2021 14:47:20 +0000 Subject: [nginx] Fixed handling of already closed connections. Message-ID: details: https://hg.nginx.org/nginx/rev/1ebd78df4ce7 branches: changeset: 7811:1ebd78df4ce7 user: Maxim Dounin date: Sun Mar 28 17:45:39 2021 +0300 description: Fixed handling of already closed connections. In limit_req, auth_delay, and upstream code to check for broken connections, tests for possible connection close by the client did not work if the connection was already closed when relevant event handler was set. This happened because there were no additional events in case of edge-triggered event methods, and read events were disabled in case of level-triggered ones. Fix is to explicitly post a read event if the c->read->ready flag is set. 
diffstat: src/http/modules/ngx_http_limit_req_module.c | 9 +++++++-- src/http/ngx_http_core_module.c | 9 +++++++-- src/http/ngx_http_upstream.c | 11 ++++++++--- 3 files changed, 22 insertions(+), 7 deletions(-) diffs (59 lines): diff -r 1bf8ab7063de -r 1ebd78df4ce7 src/http/modules/ngx_http_limit_req_module.c --- a/src/http/modules/ngx_http_limit_req_module.c Sun Mar 28 17:45:37 2021 +0300 +++ b/src/http/modules/ngx_http_limit_req_module.c Sun Mar 28 17:45:39 2021 +0300 @@ -310,8 +310,13 @@ ngx_http_limit_req_handler(ngx_http_requ r->main->limit_req_status = NGX_HTTP_LIMIT_REQ_DELAYED; - if (ngx_handle_read_event(r->connection->read, 0) != NGX_OK) { - return NGX_HTTP_INTERNAL_SERVER_ERROR; + if (r->connection->read->ready) { + ngx_post_event(r->connection->read, &ngx_posted_events); + + } else { + if (ngx_handle_read_event(r->connection->read, 0) != NGX_OK) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } } r->read_event_handler = ngx_http_test_reading; diff -r 1bf8ab7063de -r 1ebd78df4ce7 src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c Sun Mar 28 17:45:37 2021 +0300 +++ b/src/http/ngx_http_core_module.c Sun Mar 28 17:45:39 2021 +0300 @@ -1190,8 +1190,13 @@ ngx_http_core_auth_delay(ngx_http_reques ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, "delaying unauthorized request"); - if (ngx_handle_read_event(r->connection->read, 0) != NGX_OK) { - return NGX_HTTP_INTERNAL_SERVER_ERROR; + if (r->connection->read->ready) { + ngx_post_event(r->connection->read, &ngx_posted_events); + + } else { + if (ngx_handle_read_event(r->connection->read, 0) != NGX_OK) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } } r->read_event_handler = ngx_http_test_reading; diff -r 1bf8ab7063de -r 1ebd78df4ce7 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Sun Mar 28 17:45:37 2021 +0300 +++ b/src/http/ngx_http_upstream.c Sun Mar 28 17:45:39 2021 +0300 @@ -608,9 +608,14 @@ ngx_http_upstream_init_request(ngx_http_ if (!u->store && !r->post_action && 
!u->conf->ignore_client_abort) { - if (ngx_handle_read_event(r->connection->read, 0) != NGX_OK) { - ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); - return; + if (r->connection->read->ready) { + ngx_post_event(r->connection->read, &ngx_posted_events); + + } else { + if (ngx_handle_read_event(r->connection->read, 0) != NGX_OK) { + ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } } r->read_event_handler = ngx_http_upstream_rd_check_broken_connection; From xeioex at nginx.com Tue Mar 30 13:57:39 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 30 Mar 2021 13:57:39 +0000 Subject: [njs] Version 0.5.3. Message-ID: details: https://hg.nginx.org/njs/rev/282b9412976c branches: changeset: 1623:282b9412976c user: Dmitry Volyntsev date: Tue Mar 30 13:57:17 2021 +0000 description: Version 0.5.3. diffstat: CHANGES | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (12 lines): diff -r 63f8aa50e03a -r 282b9412976c CHANGES --- a/CHANGES Sat Mar 27 16:21:21 2021 +0000 +++ b/CHANGES Tue Mar 30 13:57:17 2021 +0000 @@ -1,3 +1,8 @@ +Changes with njs 0.5.3 30 Mar 2021 + + nginx modules: + + *) Feature: added the "js_var" directive. 
Changes with njs 0.5.2 09 Mar 2021 From xeioex at nginx.com Tue Mar 30 13:58:36 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 30 Mar 2021 13:58:36 +0000 Subject: [njs] Added tag 0.5.3 for changeset 282b9412976c Message-ID: details: https://hg.nginx.org/njs/rev/d25d92370bfd branches: changeset: 1624:d25d92370bfd user: Dmitry Volyntsev date: Tue Mar 30 13:58:27 2021 +0000 description: Added tag 0.5.3 for changeset 282b9412976c diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r 282b9412976c -r d25d92370bfd .hgtags --- a/.hgtags Tue Mar 30 13:57:17 2021 +0000 +++ b/.hgtags Tue Mar 30 13:58:27 2021 +0000 @@ -41,3 +41,4 @@ fdfd580b0dd617a884ed9287d98341ebef03ee9f 69f07c6151628880bf7d5ac28bd8287ce96d8a36 0.5.0 d355071f55ef4612d89db0ba72e7aaeaa99deef7 0.5.1 e5de01378b1a8ab0a94dd3a8c4c6bb7a235f4b9c 0.5.2 +282b9412976ceee31eb12876f1499fe975e6f08c 0.5.3 From mdounin at mdounin.ru Tue Mar 30 14:52:52 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 30 Mar 2021 14:52:52 +0000 Subject: [nginx] Updated OpenSSL used for win32 builds. Message-ID: details: https://hg.nginx.org/nginx/rev/2fe9ee63ddcf branches: changeset: 7812:2fe9ee63ddcf user: Maxim Dounin date: Tue Mar 30 17:44:36 2021 +0300 description: Updated OpenSSL used for win32 builds. 
diffstat: misc/GNUmakefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 1ebd78df4ce7 -r 2fe9ee63ddcf misc/GNUmakefile --- a/misc/GNUmakefile Sun Mar 28 17:45:39 2021 +0300 +++ b/misc/GNUmakefile Tue Mar 30 17:44:36 2021 +0300 @@ -6,7 +6,7 @@ TEMP = tmp CC = cl OBJS = objs.msvc8 -OPENSSL = openssl-1.1.1j +OPENSSL = openssl-1.1.1k ZLIB = zlib-1.2.11 PCRE = pcre-8.44 From mdounin at mdounin.ru Tue Mar 30 14:52:55 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 30 Mar 2021 14:52:55 +0000 Subject: [nginx] nginx-1.19.9-RELEASE Message-ID: details: https://hg.nginx.org/nginx/rev/da571b8eaf8f branches: changeset: 7813:da571b8eaf8f user: Maxim Dounin date: Tue Mar 30 17:47:11 2021 +0300 description: nginx-1.19.9-RELEASE diffstat: docs/xml/nginx/changes.xml | 62 ++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 62 insertions(+), 0 deletions(-) diffs (72 lines): diff -r 2fe9ee63ddcf -r da571b8eaf8f docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml Tue Mar 30 17:44:36 2021 +0300 +++ b/docs/xml/nginx/changes.xml Tue Mar 30 17:47:11 2021 +0300 @@ -5,6 +5,68 @@ + + + + +nginx ?? ????????? ? ???????? ??????-????????, +?? ??? ?????? ngx_mail_ssl_module; +?????? ????????? ? 1.19.8. + + +nginx could not be built with the mail proxy module, +but without the ngx_mail_ssl_module; +the bug had appeared in 1.19.8. + + + + + +??? ?????? ? gRPC-????????? ????? ????????? ?????? +"upstream sent response body larger than indicated content length"; +?????? ????????? ? 1.19.1. + + +"upstream sent response body larger than indicated content length" +errors might occur when working with gRPC backends; +the bug had appeared in 1.19.1. + + + + + +???? ?????? ???????? ?????????? ? ?????? ???????????? ???? ???????, +nginx ??? ?? ??????? ?????????? ?? ????????? keepalive-????????. + + +nginx might not close a connection till keepalive timeout expiration +if the connection was closed by the client while discarding the request body. 
+ + + + + +??? ???????? ???????? limit_req ??? auth_delay, ? ????? ??? ?????? ? ????????? +nginx ??? ?? ??????????, ??? ?????????? ??? ??????? ????????. + + +nginx might not detect that a connection was already closed by the client +when waiting for auth_delay or limit_req delay, or when working with backends. + + + + + +? ?????? ????????? ?????????? eventport. + + +in the eventport method. + + + + + + From mdounin at mdounin.ru Tue Mar 30 14:52:58 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 30 Mar 2021 14:52:58 +0000 Subject: [nginx] release-1.19.9 tag Message-ID: details: https://hg.nginx.org/nginx/rev/eb23d58bfd6b branches: changeset: 7814:eb23d58bfd6b user: Maxim Dounin date: Tue Mar 30 17:47:11 2021 +0300 description: release-1.19.9 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r da571b8eaf8f -r eb23d58bfd6b .hgtags --- a/.hgtags Tue Mar 30 17:47:11 2021 +0300 +++ b/.hgtags Tue Mar 30 17:47:11 2021 +0300 @@ -458,3 +458,4 @@ 8e5b068f761cd512d10c9671fbde0b568c1fd08b f618488eb769e0ed74ef0d93cd118d2ad79ef94d release-1.19.6 3fa6e2095a7a51acc630517e1c27a7b7ac41f7b3 release-1.19.7 8c65d21464aaa5923775f80c32474adc7a320068 release-1.19.8 +da571b8eaf8f30f36c43b3c9b25e01e31f47149c release-1.19.9