From mdounin at mdounin.ru Mon Dec 4 03:16:26 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 4 Dec 2023 06:16:26 +0300 Subject: [PATCH 1 of 2] Core: avoid calling memcpy() in edge cases In-Reply-To: References: Message-ID: Hello! On Fri, Oct 27, 2023 at 02:58:44PM +0300, Vladimir Homutov via nginx-devel wrote: > Patch subject is complete summary. > > > src/core/ngx_cycle.c | 10 ++++++---- > src/core/ngx_resolver.c | 2 +- > src/core/ngx_string.c | 15 +++++++++++++++ > src/http/modules/ngx_http_proxy_module.c | 4 ++-- > src/http/ngx_http_file_cache.c | 4 +++- > src/http/ngx_http_variables.c | 3 +++ > src/mail/ngx_mail_auth_http_module.c | 12 +++++++++--- > src/stream/ngx_stream_script.c | 4 +++- > 8 files changed, 42 insertions(+), 12 deletions(-) > > > # HG changeset patch > # User Vladimir Khomutov > # Date 1698407658 -10800 > # Fri Oct 27 14:54:18 2023 +0300 > # Node ID ef9f124b156aff0e9f66057e438af835bd7a60d2 > # Parent ea1f29c2010cda4940b741976f103d547308815a > Core: avoid calling memcpy() in edge cases. > > diff --git a/src/core/ngx_cycle.c b/src/core/ngx_cycle.c > --- a/src/core/ngx_cycle.c > +++ b/src/core/ngx_cycle.c > @@ -115,10 +115,12 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) > old_cycle->conf_file.len + 1); > > cycle->conf_param.len = old_cycle->conf_param.len; > - cycle->conf_param.data = ngx_pstrdup(pool, &old_cycle->conf_param); > - if (cycle->conf_param.data == NULL) { > - ngx_destroy_pool(pool); > - return NULL; > + if (cycle->conf_param.len) { > + cycle->conf_param.data = ngx_pstrdup(pool, &old_cycle->conf_param); > + if (cycle->conf_param.data == NULL) { > + ngx_destroy_pool(pool); > + return NULL; > + } > } > > > diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c > --- a/src/core/ngx_resolver.c > +++ b/src/core/ngx_resolver.c > @@ -4206,7 +4206,7 @@ ngx_resolver_dup(ngx_resolver_t *r, void > > dst = ngx_resolver_alloc(r, size); > > - if (dst == NULL) { > + if (dst == NULL || size == 0 || src == NULL) { > return dst; > } > This looks simply wrong: ngx_resolver_dup() with src == NULL and with size != 0 should dereference the NULL pointer and segfault. Also, I can't say I'm happy with allocation error handling mixed with (size == 0) special case handling. > diff --git a/src/core/ngx_string.c b/src/core/ngx_string.c > --- a/src/core/ngx_string.c > +++ b/src/core/ngx_string.c > @@ -252,6 +252,11 @@ ngx_vslprintf(u_char *buf, u_char *last, > case 'V': > v = va_arg(args, ngx_str_t *); > > + if (v->len == 0 || v->data == NULL) { > + fmt++; > + continue; > + } > + > buf = ngx_sprintf_str(buf, last, v->data, v->len, hex); > fmt++; > > @@ -260,6 +265,11 @@ ngx_vslprintf(u_char *buf, u_char *last, > case 'v': > vv = va_arg(args, ngx_variable_value_t *); > > + if (vv->len == 0 || vv->data == NULL) { > + fmt++; > + continue; > + } > + > buf = ngx_sprintf_str(buf, last, vv->data, vv->len, hex); > fmt++; > > @@ -268,6 +278,11 @@ ngx_vslprintf(u_char *buf, u_char *last, > case 's': > p = va_arg(args, u_char *); > > + if (slen == 0 || p == NULL) { > + fmt++; > + continue; > + } > + > buf = ngx_sprintf_str(buf, last, p, slen, hex); > fmt++; > I tend to think that these should be handled in ngx_sprintf_str() or in ngx_cpymem(), if at all, see below. 
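For illustration, a single check inside ngx_sprintf_str() would cover the %V, %v and %s cases at once. A rough sketch only, assuming the current static helper in src/core/ngx_string.c keeps its present arguments, and with its existing body elided:

    static u_char *
    ngx_sprintf_str(u_char *buf, u_char *last, u_char *data, size_t len,
        ngx_uint_t hex)
    {
        /* sketch: the per-format checks from the patch, hoisted into one
           place, so zero-length or NULL input never reaches the copying
           code below */

        if (len == 0 || data == NULL) {
            return buf;
        }

        /* ... existing plain and hex copying logic unchanged ... */
    }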
> diff --git a/src/http/modules/ngx_http_proxy_module.c b/src/http/modules/ngx_http_proxy_module.c > --- a/src/http/modules/ngx_http_proxy_module.c > +++ b/src/http/modules/ngx_http_proxy_module.c > @@ -1205,7 +1205,7 @@ ngx_http_proxy_create_key(ngx_http_reque > > key->data = p; > > - if (r->valid_location) { > + if (r->valid_location && ctx->vars.uri.len) { > p = ngx_copy(p, ctx->vars.uri.data, ctx->vars.uri.len); > } > > @@ -1422,7 +1422,7 @@ ngx_http_proxy_create_request(ngx_http_r > b->last = ngx_copy(b->last, r->unparsed_uri.data, r->unparsed_uri.len); > > } else { > - if (r->valid_location) { > + if (r->valid_location && ctx->vars.uri.len) { > b->last = ngx_copy(b->last, ctx->vars.uri.data, ctx->vars.uri.len); > } > > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c > --- a/src/http/ngx_http_file_cache.c > +++ b/src/http/ngx_http_file_cache.c > @@ -1270,7 +1270,9 @@ ngx_http_file_cache_set_header(ngx_http_ > > if (c->etag.len <= NGX_HTTP_CACHE_ETAG_LEN) { > h->etag_len = (u_char) c->etag.len; > - ngx_memcpy(h->etag, c->etag.data, c->etag.len); > + if (c->etag.len) { > + ngx_memcpy(h->etag, c->etag.data, c->etag.len); > + } > } > > if (c->vary.len) { > diff --git a/src/http/ngx_http_variables.c b/src/http/ngx_http_variables.c > --- a/src/http/ngx_http_variables.c > +++ b/src/http/ngx_http_variables.c > @@ -2157,6 +2157,9 @@ ngx_http_variable_request_body(ngx_http_ > > for ( /* void */ ; cl; cl = cl->next) { > buf = cl->buf; > + if (buf->last == buf->pos) { > + continue; > + } > p = ngx_cpymem(p, buf->pos, buf->last - buf->pos); > } > > diff --git a/src/mail/ngx_mail_auth_http_module.c b/src/mail/ngx_mail_auth_http_module.c > --- a/src/mail/ngx_mail_auth_http_module.c > +++ b/src/mail/ngx_mail_auth_http_module.c > @@ -1314,11 +1314,15 @@ ngx_mail_auth_http_create_request(ngx_ma > *b->last++ = CR; *b->last++ = LF; > > b->last = ngx_cpymem(b->last, "Auth-User: ", sizeof("Auth-User: ") - 1); > - b->last = ngx_copy(b->last, login.data, login.len); > + if (login.len) { > + b->last = ngx_copy(b->last, login.data, login.len); > + } > *b->last++ = CR; *b->last++ = LF; > > b->last = ngx_cpymem(b->last, "Auth-Pass: ", sizeof("Auth-Pass: ") - 1); > - b->last = ngx_copy(b->last, passwd.data, passwd.len); > + if (passwd.len) { > + b->last = ngx_copy(b->last, passwd.data, passwd.len); > + } > *b->last++ = CR; *b->last++ = LF; > > if (s->auth_method != NGX_MAIL_AUTH_PLAIN && s->salt.len) { > @@ -1375,7 +1379,9 @@ ngx_mail_auth_http_create_request(ngx_ma > > b->last = ngx_cpymem(b->last, "Auth-SMTP-Helo: ", > sizeof("Auth-SMTP-Helo: ") - 1); > - b->last = ngx_copy(b->last, s->smtp_helo.data, s->smtp_helo.len); > + if (s->smtp_helo.len) { > + b->last = ngx_copy(b->last, s->smtp_helo.data, s->smtp_helo.len); > + } > *b->last++ = CR; *b->last++ = LF; > > b->last = ngx_cpymem(b->last, "Auth-SMTP-From: ", If at all, these probably should check for login.len, passwd.len, and s->smtp_helo.len, similarly to other cases nearby, and avoid sending the headers altogether. 
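For comparison, dropping the header entirely when the value is empty would look roughly like this (a sketch based on the Auth-User hunk quoted above, not a tested change; the Auth-Pass and Auth-SMTP-Helo cases would follow the same pattern):

    if (login.len) {
        b->last = ngx_cpymem(b->last, "Auth-User: ",
                             sizeof("Auth-User: ") - 1);
        b->last = ngx_copy(b->last, login.data, login.len);
        *b->last++ = CR; *b->last++ = LF;
    }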
> diff --git a/src/stream/ngx_stream_script.c b/src/stream/ngx_stream_script.c > --- a/src/stream/ngx_stream_script.c > +++ b/src/stream/ngx_stream_script.c > @@ -842,7 +842,9 @@ ngx_stream_script_copy_var_code(ngx_stre > > if (value && !value->not_found) { > p = e->pos; > - e->pos = ngx_copy(p, value->data, value->len); > + if (value->len) { > + e->pos = ngx_copy(p, value->data, value->len); > + } > > ngx_log_debug2(NGX_LOG_DEBUG_STREAM, > e->session->connection->log, 0, Obviously enough, there should be a corresponding change in ngx_http_script_copy_var_code(). Overall, similarly to the other patch in the series, I'm highly sceptical about doing such scattered changes based on the UB sanitizer reports from some test runs. Rather, we should use UB sanitizer reports to identify problematic patterns, and fix these patterns all over the code (if at all). Further, in this particular case I tend to think that the problem is not with nginx code, but rather with the memcpy() interface UB sanitizer tries to enforce. It should be completely safe to call memcpy(p, NULL, 0), and if it isn't, we might consider adding appropriate guards at the interface level, such as in the ngx_memcpy() / ngx_cpymem() wrappers, and not in each call. Trying to check the length everywhere is just ugly and unreadable. Also, while recent versions of gcc are known to miscompile some code which uses memcpy(p, NULL, 0) (see [1], with "-O2" or with "-O1 -ftree-vrp" optimization flags in my tests), I don't think this affects nginx code. If it does, we might also consider force-switching off the relevant optimizations (if enabled, as we use "-O1" by default). [1] https://stackoverflow.com/questions/5243012/is-it-guaranteed-to-be-safe-to-perform-memcpy0-0-0 -- Maxim Dounin http://mdounin.ru/
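As an illustration of the interface-level guards suggested above, the check could live in the ngx_memcpy() / ngx_cpymem() wrappers themselves. A sketch only (the unguarded definitions are in src/core/ngx_string.h; the exact form below, including the extra evaluation of n, is an assumption and would need review):

    /* sketch: skip the libc call entirely for zero-length copies, so that
       memcpy(p, NULL, 0) is never evaluated; note that n is evaluated twice */

    #define ngx_memcpy(dst, src, n)                                           \
        (void) ((n) == 0 ? (void *) (dst) : memcpy(dst, src, n))

    #define ngx_cpymem(dst, src, n)                                           \
        ((n) == 0 ? (u_char *) (dst)                                          \
                  : ((u_char *) memcpy(dst, src, n)) + (n))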
From xeioex at nginx.com Tue Dec 5 16:55:02 2023 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 05 Dec 2023 16:55:02 +0000 Subject: [njs] Refactored asynchronous events. Message-ID: details: https://hg.nginx.org/njs/rev/bc80bcb3102c branches: changeset: 2245:bc80bcb3102c user: Dmitry Volyntsev date: Tue Dec 05 08:54:18 2023 -0800 description: Refactored asynchronous events. To align njs with other JS engines, async events are removed from njs core. The following functions were removed: njs_vm_add_event(), njs_vm_del_event(), njs_vm_waiting(). Instead, the host is expected to manage async events by itself. In addition, the posted events are renamed to jobs, to better align with the ECMA specs. The following function was removed: njs_vm_run(). Instead, the host is expected to call njs_vm_execute_pending_job() in a loop to execute pending jobs. The following function was added: njs_vm_enqueue_job().
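For reference, the pending-job loop a host is now expected to drive looks roughly like this (condensed from the external/njs_shell.c and nginx/ngx_js.c hunks below; error reporting elided):

    for ( ;; ) {
        ret = njs_vm_execute_pending_job(vm);

        if (ret <= NJS_OK) {
            if (ret == NJS_ERROR || njs_vm_unhandled_rejection(vm)) {
                /* report the exception or unhandled rejection here */
            }

            break;
        }

        /* otherwise keep draining the job queue */
    }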
diffstat: auto/sources | 1 - external/njs_fs_module.c | 17 +-- external/njs_shell.c | 16 ++- external/njs_webcrypto_module.c | 8 +- nginx/ngx_http_js_module.c | 99 +++++++----------- nginx/ngx_js.c | 107 ++++++++++++-------- nginx/ngx_js.h | 33 ++++- nginx/ngx_js_fetch.c | 59 ++++++---- nginx/ngx_stream_js_module.c | 133 +++++++++---------------- src/njs.h | 59 +---------- src/njs_event.c | 95 ------------------ src/njs_event.h | 24 ---- src/njs_promise.c | 39 +------ src/njs_vm.c | 208 ++++++++++----------------------------- src/njs_vm.h | 4 +- src/test/njs_externals_test.c | 35 ++++-- src/test/njs_externals_test.h | 2 +- src/test/njs_unit_test.c | 24 ++- 18 files changed, 329 insertions(+), 634 deletions(-) diffs (truncated from 1774 to 1000 lines): diff -r 439ea33e531c -r bc80bcb3102c auto/sources --- a/auto/sources Wed Nov 29 20:46:36 2023 -0800 +++ b/auto/sources Tue Dec 05 08:54:18 2023 -0800 @@ -34,7 +34,6 @@ NJS_LIB_SRCS=" \ src/njs_generator.c \ src/njs_disassembler.c \ src/njs_module.c \ - src/njs_event.c \ src/njs_extern.c \ src/njs_boolean.c \ src/njs_number.c \ diff -r 439ea33e531c -r bc80bcb3102c external/njs_fs_module.c --- a/external/njs_fs_module.c Wed Nov 29 20:46:36 2023 -0800 +++ b/external/njs_fs_module.c Tue Dec 05 08:54:18 2023 -0800 @@ -3264,7 +3264,6 @@ njs_fs_result(njs_vm_t *vm, njs_opaque_v const njs_value_t *callback, njs_uint_t nargs, njs_value_t *retval) { njs_int_t ret; - njs_vm_event_t vm_event; njs_function_t *cb; njs_opaque_value_t promise, callbacks[2], arguments[2]; @@ -3290,16 +3289,11 @@ njs_fs_result(njs_vm_t *vm, njs_opaque_v return NJS_ERROR; } - vm_event = njs_vm_add_event(vm, cb, 1, NULL, NULL); - if (njs_slow_path(vm_event == NULL)) { - return NJS_ERROR; - } - njs_value_assign(&arguments[0], &callbacks[njs_value_is_error(njs_value_arg(result))]); njs_value_assign(&arguments[1], result); - ret = njs_vm_post_event(vm, vm_event, njs_value_arg(&arguments), 2); + ret = njs_vm_enqueue_job(vm, cb, njs_value_arg(&arguments), 2); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } @@ -3318,13 +3312,8 @@ njs_fs_result(njs_vm_t *vm, njs_opaque_v njs_value_assign(&arguments[1], result); } - vm_event = njs_vm_add_event(vm, njs_value_function(callback), 1, NULL, - NULL); - if (njs_slow_path(vm_event == NULL)) { - return NJS_ERROR; - } - - ret = njs_vm_post_event(vm, vm_event, njs_value_arg(&arguments), 2); + ret = njs_vm_enqueue_job(vm, njs_value_function(callback), + njs_value_arg(&arguments), 2); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } diff -r 439ea33e531c -r bc80bcb3102c external/njs_shell.c --- a/external/njs_shell.c Wed Nov 29 20:46:36 2023 -0800 +++ b/external/njs_shell.c Tue Dec 05 08:54:18 2023 -0800 @@ -1096,12 +1096,18 @@ njs_process_script(njs_vm_t *vm, void *r } for ( ;; ) { - ret = njs_vm_run(vm); - if (ret == NJS_ERROR) { - njs_process_output(vm, njs_value_arg(&retval), ret); + for ( ;; ) { + ret = njs_vm_execute_pending_job(vm); + if (ret <= NJS_OK) { + if (ret == NJS_ERROR || njs_vm_unhandled_rejection(vm)) { + njs_process_output(vm, NULL, ret); - if (!njs_vm_options(vm)->interactive) { - return NJS_ERROR; + if (!njs_vm_options(vm)->interactive) { + return NJS_ERROR; + } + } + + break; } } diff -r 439ea33e531c -r bc80bcb3102c external/njs_webcrypto_module.c --- a/external/njs_webcrypto_module.c Wed Nov 29 20:46:36 2023 -0800 +++ b/external/njs_webcrypto_module.c Tue Dec 05 08:54:18 2023 -0800 @@ -4771,7 +4771,6 @@ njs_webcrypto_result(njs_vm_t *vm, njs_o { njs_int_t ret; njs_function_t *callback; - njs_vm_event_t 
vm_event; njs_opaque_value_t promise, arguments[2]; ret = njs_vm_promise_create(vm, njs_value_arg(&promise), @@ -4785,11 +4784,6 @@ njs_webcrypto_result(njs_vm_t *vm, njs_o goto error; } - vm_event = njs_vm_add_event(vm, callback, 1, NULL, NULL); - if (vm_event == NULL) { - goto error; - } - njs_value_assign(&arguments[0], &arguments[(rc != NJS_OK)]); if (rc != NJS_OK) { @@ -4799,7 +4793,7 @@ njs_webcrypto_result(njs_vm_t *vm, njs_o njs_value_assign(&arguments[1], result); } - ret = njs_vm_post_event(vm, vm_event, njs_value_arg(&arguments), 2); + ret = njs_vm_enqueue_job(vm, callback, njs_value_arg(&arguments), 2); if (ret == NJS_ERROR) { goto error; } diff -r 439ea33e531c -r bc80bcb3102c nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Wed Nov 29 20:46:36 2023 -0800 +++ b/nginx/ngx_http_js_module.c Tue Dec 05 08:54:18 2023 -0800 @@ -266,9 +266,7 @@ static ngx_msec_t ngx_http_js_fetch_time static size_t ngx_http_js_buffer_size(njs_vm_t *vm, ngx_http_request_t *r); static size_t ngx_http_js_max_response_buffer_size(njs_vm_t *vm, ngx_http_request_t *r); -static void ngx_http_js_handle_vm_event(ngx_http_request_t *r, - njs_vm_event_t vm_event, njs_value_t *args, njs_uint_t nargs); -static void ngx_http_js_event_finalize(ngx_http_request_t *r, njs_int_t rc); +static void ngx_http_js_event_finalize(ngx_http_request_t *r, ngx_int_t rc); static ngx_js_ctx_t *ngx_http_js_ctx(njs_vm_t *vm, ngx_http_request_t *r); static void ngx_http_js_periodic_handler(ngx_event_t *ev); @@ -852,14 +850,13 @@ static uintptr_t ngx_http_js_uptr[] = { (uintptr_t) ngx_http_js_pool, (uintptr_t) ngx_http_js_resolver, (uintptr_t) ngx_http_js_resolver_timeout, - (uintptr_t) ngx_http_js_handle_vm_event, + (uintptr_t) ngx_http_js_event_finalize, (uintptr_t) ngx_http_js_ssl, (uintptr_t) ngx_http_js_ssl_verify, (uintptr_t) ngx_http_js_fetch_timeout, (uintptr_t) ngx_http_js_buffer_size, (uintptr_t) ngx_http_js_max_response_buffer_size, (uintptr_t) 0 /* main_conf ptr */, - (uintptr_t) ngx_http_js_event_finalize, (uintptr_t) ngx_http_js_ctx, }; @@ -950,8 +947,8 @@ ngx_http_js_content_event_handler(ngx_ht ctx->status = NGX_HTTP_INTERNAL_SERVER_ERROR; - rc = ngx_js_call(ctx->vm, &jlcf->content, r->connection->log, - &ctx->request, 1); + rc = ngx_js_name_call(ctx->vm, &jlcf->content, r->connection->log, + &ctx->request, 1); if (rc == NGX_ERROR) { ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); @@ -1081,8 +1078,8 @@ ngx_http_js_header_filter(ngx_http_reque ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http js header call \"%V\"", &jlcf->header_filter); - rc = ngx_js_call(ctx->vm, &jlcf->header_filter, r->connection->log, - &ctx->request, 1); + rc = ngx_js_name_call(ctx->vm, &jlcf->header_filter, r->connection->log, + &ctx->request, 1); if (rc == NGX_ERROR) { return NGX_ERROR; @@ -1184,8 +1181,8 @@ ngx_http_js_body_filter(ngx_http_request ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, "http js body call \"%V\"", &jlcf->body_filter); - rc = ngx_js_call(ctx->vm, &jlcf->body_filter, c->log, &arguments[0], - 3); + rc = ngx_js_name_call(ctx->vm, &jlcf->body_filter, c->log, + &arguments[0], 3); if (rc == NGX_ERROR) { return NGX_ERROR; @@ -1260,8 +1257,8 @@ ngx_http_js_variable_set(ngx_http_reques pending = ngx_vm_pending(ctx); - rc = ngx_js_invoke(ctx->vm, fname, r->connection->log, &ctx->request, 1, - &ctx->retval); + rc = ngx_js_name_invoke(ctx->vm, fname, r->connection->log, &ctx->request, + 1, &ctx->retval); if (rc == NGX_ERROR) { v->not_found = 1; @@ -3389,7 +3386,7 @@ 
ngx_http_js_subrequest(ngx_http_request_ { ngx_int_t flags; ngx_str_t uri, args; - njs_vm_event_t vm_event; + ngx_js_event_t *event; ngx_http_js_ctx_t *ctx; ngx_http_post_subrequest_t *ps; @@ -3404,20 +3401,25 @@ ngx_http_js_subrequest(ngx_http_request_ return NJS_ERROR; } - vm_event = njs_vm_add_event(ctx->vm, callback, 1, NULL, NULL); - if (vm_event == NULL) { - njs_vm_error(ctx->vm, "internal error"); + event = njs_mp_zalloc(njs_vm_memory_pool(ctx->vm), + sizeof(ngx_js_event_t)); + if (njs_slow_path(event == NULL)) { + njs_vm_memory_error(ctx->vm); return NJS_ERROR; } + event->vm = ctx->vm; + event->function = callback; + event->fd = ctx->event_id++; + ps->handler = ngx_http_js_subrequest_done; - ps->data = vm_event; + ps->data = event; flags |= NGX_HTTP_SUBREQUEST_IN_MEMORY; } else { ps = NULL; - vm_event = NULL; + event = NULL; } uri.len = uri_arg->length; @@ -3429,14 +3431,14 @@ ngx_http_js_subrequest(ngx_http_request_ if (ngx_http_subrequest(r, &uri, args.len ? &args : NULL, sr, ps, flags) != NGX_OK) { - if (vm_event != NULL) { - njs_vm_del_event(ctx->vm, vm_event); - } - njs_vm_error(ctx->vm, "subrequest creation failed"); return NJS_ERROR; } + if (event != NULL) { + ngx_js_add_event(ctx, event); + } + return NJS_OK; } @@ -3444,7 +3446,7 @@ ngx_http_js_subrequest(ngx_http_request_ static ngx_int_t ngx_http_js_subrequest_done(ngx_http_request_t *r, void *data, ngx_int_t rc) { - njs_vm_event_t vm_event = data; + ngx_js_event_t *event = data; njs_int_t ret; ngx_http_js_ctx_t *ctx; @@ -3493,7 +3495,11 @@ ngx_http_js_subrequest_done(ngx_http_req return NGX_ERROR; } - ngx_http_js_handle_vm_event(r->parent, vm_event, njs_value_arg(&reply), 1); + rc = ngx_js_call(ctx->vm, event->function, njs_value_arg(&reply), 1); + + ngx_js_del_event(ctx, event); + + ngx_http_js_event_finalize(r->parent, rc); return NGX_OK; } @@ -4248,8 +4254,8 @@ ngx_http_js_periodic_handler(ngx_event_t r->count++; - rc = ngx_js_invoke(ctx->vm, &periodic->method, &periodic->log, - &ctx->request, 1, &ctx->retval); + rc = ngx_js_name_invoke(ctx->vm, &periodic->method, &periodic->log, + &ctx->request, 1, &ctx->retval); if (rc == NGX_AGAIN) { rc = NGX_OK; @@ -4440,41 +4446,12 @@ ngx_http_js_max_response_buffer_size(njs static void -ngx_http_js_handle_vm_event(ngx_http_request_t *r, njs_vm_event_t vm_event, - njs_value_t *args, njs_uint_t nargs) -{ - njs_int_t rc; - ngx_str_t exception; - ngx_http_js_ctx_t *ctx; - - ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); - - njs_vm_post_event(ctx->vm, vm_event, args, nargs); - - rc = njs_vm_run(ctx->vm); - - ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "http js post event handler rc: %i event: %p", - (ngx_int_t) rc, vm_event); - - if (rc == NJS_ERROR) { - ngx_js_exception(ctx->vm, &exception); - - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "js exception: %V", &exception); - } - - ngx_http_js_event_finalize(r, rc); -} - - -static void -ngx_http_js_event_finalize(ngx_http_request_t *r, njs_int_t rc) +ngx_http_js_event_finalize(ngx_http_request_t *r, ngx_int_t rc) { ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "http js event finalize rc: %i", (ngx_int_t) rc); - - if (rc == NJS_ERROR) { + "http js event finalize rc: %i", rc); + + if (rc == NGX_ERROR) { if (r->health_check) { ngx_http_js_periodic_finalize(r, NGX_ERROR); return; @@ -4484,7 +4461,7 @@ ngx_http_js_event_finalize(ngx_http_requ return; } - if (rc == NJS_OK) { + if (rc == NGX_OK) { ngx_http_post_request(r, NULL); } diff -r 439ea33e531c -r bc80bcb3102c nginx/ngx_js.c --- 
a/nginx/ngx_js.c Wed Nov 29 20:46:36 2023 -0800 +++ b/nginx/ngx_js.c Tue Dec 05 08:54:18 2023 -0800 @@ -334,17 +334,57 @@ static njs_int_t ngx_js_console_pro ngx_int_t -ngx_js_call(njs_vm_t *vm, ngx_str_t *fname, ngx_log_t *log, +ngx_js_call(njs_vm_t *vm, njs_function_t *func, njs_value_t *args, + njs_uint_t nargs) +{ + njs_int_t ret; + ngx_str_t exception; + ngx_connection_t *c; + + ret = njs_vm_call(vm, func, args, nargs); + if (ret == NJS_ERROR) { + ngx_js_exception(vm, &exception); + + c = ngx_external_connection(vm, njs_vm_external_ptr(vm)); + + ngx_log_error(NGX_LOG_ERR, c->log, 0, + "js exception: %V", &exception); + return NGX_ERROR; + } + + for ( ;; ) { + ret = njs_vm_execute_pending_job(vm); + if (ret <= NJS_OK) { + c = ngx_external_connection(vm, njs_vm_external_ptr(vm)); + + if (ret == NJS_ERROR || njs_vm_unhandled_rejection(vm)) { + ngx_js_exception(vm, &exception); + + ngx_log_error(NGX_LOG_ERR, c->log, 0, + "js job exception: %V", &exception); + return NGX_ERROR; + } + + break; + } + } + + return NGX_OK; +} + + +ngx_int_t +ngx_js_name_call(njs_vm_t *vm, ngx_str_t *fname, ngx_log_t *log, njs_opaque_value_t *args, njs_uint_t nargs) { njs_opaque_value_t unused; - return ngx_js_invoke(vm, fname, log, args, nargs, &unused); + return ngx_js_name_invoke(vm, fname, log, args, nargs, &unused); } ngx_int_t -ngx_js_invoke(njs_vm_t *vm, ngx_str_t *fname, ngx_log_t *log, +ngx_js_name_invoke(njs_vm_t *vm, ngx_str_t *fname, ngx_log_t *log, njs_opaque_value_t *args, njs_uint_t nargs, njs_opaque_value_t *retval) { njs_int_t ret; @@ -374,18 +414,19 @@ ngx_js_invoke(njs_vm_t *vm, ngx_str_t *f return NGX_ERROR; } - ret = njs_vm_run(vm); - if (ret == NJS_ERROR) { - ngx_js_exception(vm, &exception); + for ( ;; ) { + ret = njs_vm_execute_pending_job(vm); + if (ret <= NJS_OK) { + if (ret == NJS_ERROR || njs_vm_unhandled_rejection(vm)) { + ngx_js_exception(vm, &exception); - ngx_log_error(NGX_LOG_ERR, log, 0, - "js exception: %V", &exception); + ngx_log_error(NGX_LOG_ERR, log, 0, + "js job exception: %V", &exception); + return NGX_ERROR; + } - return NGX_ERROR; - } - - if (ret == NJS_AGAIN) { - return NGX_AGAIN; + break; + } } ctx = ngx_external_ctx(vm, njs_vm_external_ptr(vm)); @@ -966,34 +1007,21 @@ not_found: static void ngx_js_timer_handler(ngx_event_t *ev) { - njs_vm_t *vm; - njs_int_t ret; - ngx_str_t exception; - ngx_js_ctx_t *ctx; - ngx_js_event_t *event; - ngx_connection_t *c; - njs_external_ptr_t external; + njs_vm_t *vm; + ngx_int_t rc; + ngx_js_ctx_t *ctx; + ngx_js_event_t *event; event = (ngx_js_event_t *) ((u_char *) ev - offsetof(ngx_js_event_t, ev)); vm = event->vm; - ret = njs_vm_call(vm, event->function, event->args, event->nargs); - - external = njs_vm_external_ptr(vm); - ctx = ngx_external_ctx(vm, external); - njs_rbtree_delete(&ctx->waiting_events, &event->node); + rc = ngx_js_call(vm, event->function, event->args, event->nargs); - if (ret == NJS_ERROR) { - ngx_js_exception(vm, &exception); - - c = ngx_external_connection(vm, njs_vm_external_ptr(vm)); + ctx = ngx_external_ctx(vm, njs_vm_external_ptr(vm)); + ngx_js_del_event(ctx, event); - ngx_log_error(NGX_LOG_ERR, c->log, 0, - "js exception: %V", &exception); - } - - ngx_external_event_finalize(vm)(external, ret); + ngx_external_event_finalize(vm)(njs_vm_external_ptr(vm), rc); } @@ -1065,7 +1093,7 @@ njs_set_timer(njs_vm_t *vm, njs_value_t sizeof(njs_opaque_value_t) * event->nargs); } - njs_rbtree_insert(&ctx->waiting_events, &event->node); + ngx_js_add_event(ctx, event); ngx_add_timer(&event->ev, delay); @@ -1113,14 +1141,9 @@ 
njs_clear_timeout(njs_vm_t *vm, njs_valu return NJS_ERROR; } - event = (ngx_js_event_t *) ((u_char *) rb - - offsetof(ngx_js_event_t, node)); + event = (ngx_js_event_t *) ((u_char *) rb - offsetof(ngx_js_event_t, node)); - if (event->ev.timer_set) { - ngx_del_timer(&event->ev); - } - - njs_rbtree_delete(&ctx->waiting_events, (njs_rbtree_part_t *) rb); + ngx_js_del_event(ctx, event); njs_value_undefined_set(retval); diff -r 439ea33e531c -r bc80bcb3102c nginx/ngx_js.h --- a/nginx/ngx_js.h Wed Nov 29 20:46:36 2023 -0800 +++ b/nginx/ngx_js.h Tue Dec 05 08:54:18 2023 -0800 @@ -39,9 +39,7 @@ typedef struct ngx_js_ctx_s ngx_js_ctx_ typedef ngx_pool_t *(*ngx_external_pool_pt)(njs_vm_t *vm, njs_external_ptr_t e); -typedef void (*ngx_js_event_handler_pt)(njs_external_ptr_t e, - njs_vm_event_t vm_event, njs_value_t *args, njs_uint_t nargs); -typedef void (*ngx_js_event_finalize_pt)(njs_external_ptr_t e, njs_int_t rc); +typedef void (*ngx_js_event_finalize_pt)(njs_external_ptr_t e, ngx_int_t rc); typedef ngx_resolver_t *(*ngx_external_resolver_pt)(njs_vm_t *vm, njs_external_ptr_t e); typedef ngx_msec_t (*ngx_external_timeout_pt)(njs_vm_t *vm, @@ -73,6 +71,7 @@ struct ngx_js_event_s { void (*destructor)(njs_external_ptr_t external, ngx_js_event_t *event); ngx_event_t ev; + void *data; }; @@ -116,6 +115,20 @@ struct ngx_js_event_s { ngx_socket_t event_id +#define ngx_js_add_event(ctx, event) \ + njs_rbtree_insert(&(ctx)->waiting_events, &(event)->node) + + +#define ngx_js_del_event(ctx, event) \ + do { \ + if ((event)->destructor) { \ + (event)->destructor(njs_vm_external_ptr((event)->vm), event); \ + } \ + \ + njs_rbtree_delete(&(ctx)->waiting_events, &(event)->node); \ + } while (0) + + typedef struct { NGX_JS_COMMON_MAIN_CONF; } ngx_js_main_conf_t; @@ -139,8 +152,8 @@ struct ngx_js_ctx_s { ((ngx_external_resolver_pt) njs_vm_meta(vm, 2))(vm, e) #define ngx_external_resolver_timeout(vm, e) \ ((ngx_external_timeout_pt) njs_vm_meta(vm, 3))(vm, e) -#define ngx_external_event_handler(vm, e) \ - ((ngx_js_event_handler_pt) njs_vm_meta(vm, 4)) +#define ngx_external_event_finalize(vm) \ + ((ngx_js_event_finalize_pt) njs_vm_meta(vm, 4)) #define ngx_external_ssl(vm, e) \ ((ngx_external_ssl_pt) njs_vm_meta(vm, 5))(vm, e) #define ngx_external_ssl_verify(vm, e) \ @@ -154,10 +167,8 @@ struct ngx_js_ctx_s { #define NGX_JS_MAIN_CONF_INDEX 10 #define ngx_main_conf(vm) \ ((ngx_js_main_conf_t *) njs_vm_meta(vm, NGX_JS_MAIN_CONF_INDEX)) -#define ngx_external_event_finalize(vm) \ - ((ngx_js_event_finalize_pt) njs_vm_meta(vm, 11)) #define ngx_external_ctx(vm, e) \ - ((ngx_js_external_ctx_pt) njs_vm_meta(vm, 12))(vm, e) + ((ngx_js_external_ctx_pt) njs_vm_meta(vm, 11))(vm, e) #define ngx_js_prop(vm, type, value, start, len) \ @@ -171,9 +182,11 @@ struct ngx_js_ctx_s { void ngx_js_ctx_init(ngx_js_ctx_t *ctx); void ngx_js_ctx_destroy(ngx_js_ctx_t *ctx); -ngx_int_t ngx_js_call(njs_vm_t *vm, ngx_str_t *fname, ngx_log_t *log, +ngx_int_t ngx_js_call(njs_vm_t *vm, njs_function_t *func, njs_value_t *args, + njs_uint_t nargs); +ngx_int_t ngx_js_name_call(njs_vm_t *vm, ngx_str_t *fname, ngx_log_t *log, njs_opaque_value_t *args, njs_uint_t nargs); -ngx_int_t ngx_js_invoke(njs_vm_t *vm, ngx_str_t *fname, ngx_log_t *log, +ngx_int_t ngx_js_name_invoke(njs_vm_t *vm, ngx_str_t *fname, ngx_log_t *log, njs_opaque_value_t *args, njs_uint_t nargs, njs_opaque_value_t *retval); ngx_int_t ngx_js_exception(njs_vm_t *vm, ngx_str_t *s); diff -r 439ea33e531c -r bc80bcb3102c nginx/ngx_js_fetch.c --- a/nginx/ngx_js_fetch.c Wed Nov 29 20:46:36 2023 -0800 
+++ b/nginx/ngx_js_fetch.c Tue Dec 05 08:54:18 2023 -0800 @@ -116,9 +116,7 @@ struct ngx_js_http_s { ngx_pool_t *pool; njs_vm_t *vm; - njs_external_ptr_t external; - njs_vm_event_t vm_event; - ngx_js_event_handler_pt event_handler; + ngx_js_event_t *event; ngx_resolver_ctx_t *ctx; ngx_addr_t addr; @@ -177,7 +175,7 @@ static njs_int_t ngx_js_headers_fill(njs static ngx_js_http_t *ngx_js_http_alloc(njs_vm_t *vm, ngx_pool_t *pool, ngx_log_t *log); static void njs_js_http_destructor(njs_external_ptr_t external, - njs_host_event_t host); + ngx_js_event_t *event); static void ngx_js_resolve_handler(ngx_resolver_ctx_t *ctx); static njs_int_t ngx_js_fetch_promissified_result(njs_vm_t *vm, njs_value_t *result, njs_int_t rc, njs_value_t *retval); @@ -695,9 +693,6 @@ ngx_js_ext_fetch(njs_vm_t *vm, njs_value return NJS_ERROR; } - http->external = external; - http->event_handler = ngx_external_event_handler(vm, external); - ret = ngx_js_request_constructor(vm, &request, &u, external, args, nargs); if (ret != NJS_OK) { goto fail; @@ -1273,8 +1268,9 @@ static ngx_js_http_t * ngx_js_http_alloc(njs_vm_t *vm, ngx_pool_t *pool, ngx_log_t *log) { njs_int_t ret; + ngx_js_ctx_t *ctx; ngx_js_http_t *http; - njs_vm_event_t vm_event; + ngx_js_event_t *event; njs_function_t *callback; http = ngx_pcalloc(pool, sizeof(ngx_js_http_t)); @@ -1301,12 +1297,22 @@ ngx_js_http_alloc(njs_vm_t *vm, ngx_pool goto failed; } - vm_event = njs_vm_add_event(vm, callback, 1, http, njs_js_http_destructor); - if (vm_event == NULL) { + event = njs_mp_zalloc(njs_vm_memory_pool(vm), sizeof(ngx_js_event_t)); + if (njs_slow_path(event == NULL)) { goto failed; } - http->vm_event = vm_event; + ctx = ngx_external_ctx(vm, njs_vm_external_ptr(vm)); + + event->vm = vm; + event->function = callback; + event->destructor = njs_js_http_destructor; + event->fd = ctx->event_id++; + event->data = http; + + ngx_js_add_event(ctx, event); + + http->event = event; ngx_log_debug1(NGX_LOG_DEBUG_EVENT, log, 0, "js fetch alloc:%p", http); @@ -1428,11 +1434,11 @@ ngx_js_http_close_connection(ngx_connect static void -njs_js_http_destructor(njs_external_ptr_t external, njs_host_event_t host) +njs_js_http_destructor(njs_external_ptr_t external, ngx_js_event_t *event) { ngx_js_http_t *http; - http = host; + http = event->data; ngx_log_debug1(NGX_LOG_DEBUG_EVENT, http->log, 0, "js fetch destructor:%p", http); @@ -1455,7 +1461,6 @@ ngx_js_fetch_promissified_result(njs_vm_ { njs_int_t ret; njs_function_t *callback; - njs_vm_event_t vm_event; njs_opaque_value_t promise, arguments[2]; ret = njs_vm_promise_create(vm, njs_value_arg(&promise), @@ -1469,11 +1474,6 @@ ngx_js_fetch_promissified_result(njs_vm_ goto error; } - vm_event = njs_vm_add_event(vm, callback, 1, NULL, NULL); - if (vm_event == NULL) { - goto error; - } - njs_value_assign(&arguments[0], &arguments[(rc != NJS_OK)]); if (rc != NJS_OK) { @@ -1483,7 +1483,7 @@ ngx_js_fetch_promissified_result(njs_vm_ njs_value_assign(&arguments[1], result); } - ret = njs_vm_post_event(vm, vm_event, njs_value_arg(&arguments), 2); + ret = njs_vm_enqueue_job(vm, callback, njs_value_arg(&arguments), 2); if (ret == NJS_ERROR) { goto error; } @@ -1504,7 +1504,10 @@ static void ngx_js_http_fetch_done(ngx_js_http_t *http, njs_opaque_value_t *retval, njs_int_t rc) { - njs_opaque_value_t arguments[2], *action; + njs_vm_t *vm; + ngx_js_ctx_t *ctx; + ngx_js_event_t *event; + njs_opaque_value_t arguments[2], *action; ngx_log_debug2(NGX_LOG_DEBUG_EVENT, http->log, 0, "js fetch done http:%p rc:%i", http, (ngx_int_t) rc); @@ -1514,12 
+1517,20 @@ ngx_js_http_fetch_done(ngx_js_http_t *ht http->peer.connection = NULL; } - if (http->vm_event != NULL) { + if (http->event != NULL) { action = &http->promise_callbacks[(rc != NJS_OK)]; njs_value_assign(&arguments[0], action); njs_value_assign(&arguments[1], retval); - http->event_handler(http->external, http->vm_event, - njs_value_arg(&arguments), 2); + + vm = http->vm; + event = http->event; + + rc = ngx_js_call(vm, event->function, njs_value_arg(&arguments), 2); + + ctx = ngx_external_ctx(vm, njs_vm_external_ptr(vm)); + ngx_js_del_event(ctx, event); + + ngx_external_event_finalize(vm)(njs_vm_external_ptr(vm), rc); } } diff -r 439ea33e531c -r bc80bcb3102c nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Wed Nov 29 20:46:36 2023 -0800 +++ b/nginx/ngx_stream_js_module.c Tue Dec 05 08:54:18 2023 -0800 @@ -22,7 +22,7 @@ typedef struct { typedef struct { - njs_vm_event_t ev; + njs_function_t *function; ngx_uint_t data_type; } ngx_stream_js_ev_t; @@ -67,6 +67,12 @@ typedef struct { } ngx_stream_js_ctx_t; +#define ngx_stream_pending(ctx) \ + (ngx_vm_pending(ctx) \ + || (ctx)->events[NGX_JS_EVENT_UPLOAD].function != NULL \ + || (ctx)->events[NGX_JS_EVENT_DOWNLOAD].function != NULL) + + static ngx_int_t ngx_stream_js_access_handler(ngx_stream_session_t *s); static ngx_int_t ngx_stream_js_preread_handler(ngx_stream_session_t *s); static ngx_int_t ngx_stream_js_phase_handler(ngx_stream_session_t *s, @@ -86,7 +92,7 @@ static void ngx_stream_js_cleanup(void * static njs_int_t ngx_stream_js_run_event(ngx_stream_session_t *s, ngx_stream_js_ctx_t *ctx, ngx_stream_js_ev_t *event, ngx_uint_t from_upstream); -static njs_vm_event_t *ngx_stream_js_event(ngx_stream_session_t *s, +static njs_function_t **ngx_stream_js_event(ngx_stream_session_t *s, njs_str_t *event); static njs_int_t ngx_stream_js_ext_get_remote_address(njs_vm_t *vm, @@ -123,9 +129,7 @@ static ngx_msec_t ngx_stream_js_fetch_ti static size_t ngx_stream_js_buffer_size(njs_vm_t *vm, ngx_stream_session_t *s); static size_t ngx_stream_js_max_response_buffer_size(njs_vm_t *vm, ngx_stream_session_t *s); -static void ngx_stream_js_handle_event(ngx_stream_session_t *s, - njs_vm_event_t vm_event, njs_value_t *args, njs_uint_t nargs); -static void ngx_stream_js_event_finalize(ngx_stream_session_t *s, njs_int_t rc); +static void ngx_stream_js_event_finalize(ngx_stream_session_t *s, ngx_int_t rc); static ngx_js_ctx_t *ngx_stream_js_ctx(njs_vm_t *vm, ngx_stream_session_t *s); static void ngx_stream_js_periodic_handler(ngx_event_t *ev); @@ -615,14 +619,13 @@ static uintptr_t ngx_stream_js_uptr[] = (uintptr_t) ngx_stream_js_pool, (uintptr_t) ngx_stream_js_resolver, (uintptr_t) ngx_stream_js_resolver_timeout, - (uintptr_t) ngx_stream_js_handle_event, + (uintptr_t) ngx_stream_js_event_finalize, (uintptr_t) ngx_stream_js_ssl, (uintptr_t) ngx_stream_js_ssl_verify, (uintptr_t) ngx_stream_js_fetch_timeout, (uintptr_t) ngx_stream_js_buffer_size, (uintptr_t) ngx_stream_js_max_response_buffer_size, (uintptr_t) 0 /* main_conf ptr */, - (uintptr_t) ngx_stream_js_event_finalize, (uintptr_t) ngx_stream_js_ctx, }; @@ -734,7 +737,7 @@ ngx_stream_js_phase_handler(ngx_stream_s ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream js phase call \"%V\"", name); - rc = ngx_js_call(ctx->vm, name, c->log, &ctx->args[0], 1); + rc = ngx_js_name_call(ctx->vm, name, c->log, &ctx->args[0], 1); if (rc == NGX_ERROR) { return rc; @@ -751,9 +754,9 @@ ngx_stream_js_phase_handler(ngx_stream_s return NGX_ERROR; } - if (ngx_vm_pending(ctx)) { + if 
(ngx_stream_pending(ctx)) { ctx->in_progress = 1; - rc = ctx->events[NGX_JS_EVENT_UPLOAD].ev ? NGX_AGAIN : NGX_DONE; + rc = ctx->events[NGX_JS_EVENT_UPLOAD].function ? NGX_AGAIN : NGX_DONE; } else { ctx->in_progress = 0; @@ -811,7 +814,7 @@ ngx_stream_js_body_filter(ngx_stream_ses ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream js filter call \"%V\"" , &jscf->filter); - rc = ngx_js_call(ctx->vm, &jscf->filter, c->log, &ctx->args[0], 1); + rc = ngx_js_name_call(ctx->vm, &jscf->filter, c->log, &ctx->args[0], 1); if (rc == NGX_ERROR) { return rc; @@ -827,7 +830,7 @@ ngx_stream_js_body_filter(ngx_stream_ses event = ngx_stream_event(from_upstream); - if (event->ev != NULL) { + if (event->function != NULL) { ret = ngx_stream_js_run_event(s, ctx, event, from_upstream); if (ret != NJS_OK) { ngx_js_exception(ctx->vm, &exception); @@ -922,10 +925,10 @@ ngx_stream_js_variable_set(ngx_stream_se ctx = ngx_stream_get_module_ctx(s, ngx_stream_js_module); - pending = ngx_vm_pending(ctx); - - rc = ngx_js_invoke(ctx->vm, fname, s->connection->log, &ctx->args[0], 1, - &ctx->retval); + pending = ngx_stream_pending(ctx); + + rc = ngx_js_name_invoke(ctx->vm, fname, s->connection->log, &ctx->args[0], + 1, &ctx->retval); if (rc == NGX_ERROR) { v->not_found = 1; @@ -1078,9 +1081,8 @@ ngx_stream_js_drop_events(ngx_stream_js_ ngx_uint_t i; for (i = 0; i < NGX_JS_EVENT_MAX; i++) { - if (ctx->events[i].ev != NULL) { - njs_vm_del_event(ctx->vm, ctx->events[i].ev); - ctx->events[i].ev = NULL; + if (ctx->events[i].function != NULL) { + ctx->events[i].function = NULL; } } } @@ -1119,7 +1121,7 @@ ngx_stream_js_run_event(ngx_stream_sessi uintptr_t flags; ngx_connection_t *c; - if (event->ev == NULL) { + if (event->function == NULL) { return NJS_OK; } @@ -1152,18 +1154,12 @@ ngx_stream_js_run_event(ngx_stream_sessi return NGX_ERROR; } - njs_vm_post_event(ctx->vm, event->ev, njs_value_arg(&ctx->args[1]), 2); - - ret = njs_vm_run(ctx->vm); - if (ret == NJS_ERROR) { - return ret; - } - - return NJS_OK; + return ngx_js_call(ctx->vm, event->function, njs_value_arg(&ctx->args[1]), + 2); } -static njs_vm_event_t * +static njs_function_t ** ngx_stream_js_event(ngx_stream_session_t *s, njs_str_t *event) { ngx_uint_t i, n, type; @@ -1231,7 +1227,7 @@ ngx_stream_js_event(ngx_stream_session_t } } - return &ctx->events[events[i].id].ev; + return &ctx->events[events[i].id].function; } @@ -1317,10 +1313,10 @@ static njs_int_t ngx_stream_js_ext_on(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) { - njs_str_t name; - njs_value_t *callback; - njs_vm_event_t *event; - ngx_stream_session_t *s; + njs_str_t name; + njs_value_t *callback; + njs_function_t **cb; + ngx_stream_session_t *s; s = njs_vm_external(vm, ngx_stream_js_session_proto_id, njs_argument(args, 0)); @@ -1340,21 +1336,17 @@ ngx_stream_js_ext_on(njs_vm_t *vm, njs_v return NJS_ERROR; } - event = ngx_stream_js_event(s, &name); - if (event == NULL) { + cb = ngx_stream_js_event(s, &name); + if (cb == NULL) { return NJS_ERROR; } - if (*event != NULL) { + if (*cb != NULL) { njs_vm_error(vm, "event handler \"%V\" is already set", &name); return NJS_ERROR; } - *event = njs_vm_add_event(vm, njs_value_function(callback), 0, NULL, NULL); - if (*event == NULL) { - njs_vm_error(vm, "internal error"); - return NJS_ERROR; - } + *cb = njs_value_function(callback); njs_value_undefined_set(retval); @@ -1366,9 +1358,9 @@ static njs_int_t ngx_stream_js_ext_off(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) 
{ - njs_str_t name; - njs_vm_event_t *event; - ngx_stream_session_t *s; + njs_str_t name; + njs_function_t **callback; + ngx_stream_session_t *s; s = njs_vm_external(vm, ngx_stream_js_session_proto_id, njs_argument(args, 0)); @@ -1382,14 +1374,12 @@ ngx_stream_js_ext_off(njs_vm_t *vm, njs_ return NJS_ERROR; } - event = ngx_stream_js_event(s, &name); - if (event == NULL) { + callback = ngx_stream_js_event(s, &name); + if (callback == NULL) { return NJS_ERROR; } - njs_vm_del_event(vm, *event); - - *event = NULL; + *callback = NULL; njs_value_undefined_set(retval); @@ -1726,41 +1716,12 @@ ngx_stream_js_max_response_buffer_size(n static void -ngx_stream_js_handle_event(ngx_stream_session_t *s, njs_vm_event_t vm_event, - njs_value_t *args, njs_uint_t nargs) -{ - njs_int_t rc; - ngx_str_t exception; - ngx_stream_js_ctx_t *ctx; - - ctx = ngx_stream_get_module_ctx(s, ngx_stream_js_module); - - njs_vm_post_event(ctx->vm, vm_event, args, nargs); - - rc = njs_vm_run(ctx->vm); - - ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, - "stream js post event handler rc: %i event: %p", - (ngx_int_t) rc, vm_event); - - if (rc == NJS_ERROR) { - ngx_js_exception(ctx->vm, &exception); - - ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, - "js exception: %V", &exception); - } - - ngx_stream_js_event_finalize(s, rc); -} - - -static void From julio.suarez at foss.arm.com Wed Dec 6 16:06:57 2023 From: julio.suarez at foss.arm.com (julio.suarez at foss.arm.com) Date: Wed, 06 Dec 2023 10:06:57 -0600 Subject: [PATCH] Added asm ISB as asm pause for ngx_cpu_pause() for aarch64 Message-ID: <53d289b8676fc678ca90.1701878817@u101385.austin.arm.com> # HG changeset patch # User Julio Suarez # Date 1701877879 21600 # Wed Dec 06 09:51:19 2023 -0600 # Node ID 53d289b8676fc678ca90e02ece174300a3631f79 # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade Added asm ISB as asm pause for ngx_cpu_pause() for aarch64 For aarch64 (A.K.A. Arm64), ngx_cpu_pause() evaluates to empty by the GCC preprocessor. This results in an empty for loop in the lock check code in ngx_rwlock.c/ngx_spinlock.c (a bug). Thus, on Arm CPUs, there is no wait between checks of a lock. When a delay like the PAUSE that is there for x86 is added, there is a 2-5% increase in the number of requests/sec Arm CPUs can achieve under high load. Currently, options for a spin lock delay element are YIELD and ISB. YIELD is assembled into a NOP for most Arm CPUs. YIELD is for Arm implementations with SMT. Most Arm implementations are single core - single thread (non-SMT). Thus, this commit uses ISB (Instruction Barrier Sync) since it will provide a slightly longer delay than a NOP. Other projects that implement spin locks have used ISB as the delay element which has yielded performance gains as well. Last, ISB is not an architecturally defined solution for short spin lock delays. A candidate for a short delay solution is the WFET instruction (Wait For Event with Timeout). However, there aren't any Arm implementations in the market that support this instruction yet. When that occurs, WFET can be tested as a replacement for ISB. Until then, ISB will do. 
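For context, the wait loop the commit message refers to has roughly this shape (paraphrased from src/core/ngx_spinlock.c; ngx_rwlock.c uses a similar pattern), so an empty ngx_cpu_pause() leaves no delay at all between re-checks of the lock:

    if (ngx_ncpu > 1) {

        for (n = 1; n < spin; n <<= 1) {

            for (i = 0; i < n; i++) {
                ngx_cpu_pause();    /* currently expands to nothing on aarch64 */
            }

            if (*lock == 0 && ngx_atomic_cmp_set(lock, 0, value)) {
                return;
            }
        }
    }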
diff -r f366007dd23a -r 53d289b8676f src/os/unix/ngx_atomic.h --- a/src/os/unix/ngx_atomic.h Tue Nov 14 15:26:02 2023 +0400 +++ b/src/os/unix/ngx_atomic.h Wed Dec 06 09:51:19 2023 -0600 @@ -66,6 +66,8 @@ #if ( __i386__ || __i386 || __amd64__ || __amd64 ) #define ngx_cpu_pause() __asm__ ("pause") +#elif ( __aarch64__ ) +#define ngx_cpu_pause() __asm__ ("isb" ::: "memory") #else #define ngx_cpu_pause() #endif From teward at thomas-ward.net Thu Dec 7 02:32:16 2023 From: teward at thomas-ward.net (Thomas Ward) Date: Wed, 6 Dec 2023 21:32:16 -0500 Subject: nginx 1.20.0 coverity errors In-Reply-To: References: Message-ID: You should probably be sending development related questions like this one to nginx-devel at nginx.org. Additionally, what I've learned as a developer and a Security person is that **coverity static testing issues** are not always actual *issues* that need addressed.  Without details specifically on what tests're run, and in what circumstances, etc. static code analysis is never an all-holy solution that absolutely needs everything *fixed*. It's been stated in the past on the nginx-devel list (CC'd) that these Coverity reports, etc. are usually false-positives or non-issues and therefore don't need to be constantly sent to NGINX for their awareness.  And additionally, one should not blindly trust Coverity testing/output to be 100% accurate/correct with their analyses. Thomas On 12/6/23 20:34, BILL wrote (to nginx at nginx.org): > Hi, > > We have a coverity testing on nginx 1.20.0 and we got some errors. > Have any plan to resolve these errors? > > > Checker Number > ARRAY_VS_SINGLETON 3 > BAD_FREE 3 > BUFFER_SIZE 1 > CHECKED_RETURN 10 > COPY_PASTE_ERROR 1 > DC.WEAK_CRYPTO 18 > DEADCODE 8 > FORWARD_NULL 49 > MISSING_RESTORE 1 > NO_EFFECT 8 > NULL_RETURNS 8 > OVERRUN 12 > PW.INCLUDE_RECURSION 8 > RESOURCE_LEAK 5 > REVERSE_INULL 5 > SIGN_EXTENSION 1 > SIZEOF_MISMATCH 8 > STACK_USE 1 > STRING_NULL 1 > TAINTED_SCALAR 1 > TOCTOU 12 > UNINIT 10 > UNREACHABLE 63 > UNUSED_VALUE 4 > USE_AFTER_FREE 1 > Total 242 > > > _______________________________________________ > nginx mailing list > nginx at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx -------------- next part -------------- An HTML attachment was scrubbed... URL: From osa at freebsd.org.ru Thu Dec 7 14:34:23 2023 From: osa at freebsd.org.ru (Sergey A. Osokin) Date: Thu, 7 Dec 2023 17:34:23 +0300 Subject: Error: nginx: [emerg] unknown directive "stream" in /etc/nginx/nginx.conf:11 In-Reply-To: References: Message-ID: Hi, thanks for your question. On Thu, Dec 07, 2023 at 09:17:37AM +0000, Gandla, Kesavardhana via nginx wrote: > Dear NGINX community, > > I am Kesavardhana Gandla from Medtronic R&D, Bangalore. I am trying to evaluate the nginx on YOCTO Linux based embedded product. > My Linux version: 5.15.71 aarch64 GNU/Linux. > While using the stream option on target the below error is coming. > > Error: nginx: [emerg] unknown directive "stream" in /etc/nginx/nginx.conf:11 Please avoid cross-posting to multiple mailing lists. Since this question is related to nginx development, it can be addressed to nginx-devel mailing list. Thank you. -- Sergey A. Osokin From mdounin at mdounin.ru Thu Dec 7 15:58:35 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 7 Dec 2023 18:58:35 +0300 Subject: Error: nginx: [emerg] unknown directive "stream" in /etc/nginx/nginx.conf:11 In-Reply-To: References: Message-ID: Hello! On Thu, Dec 07, 2023 at 05:34:23PM +0300, Sergey A. 
Osokin wrote: > On Thu, Dec 07, 2023 at 09:17:37AM +0000, Gandla, Kesavardhana via nginx wrote: > > Dear NGINX community, > > > > I am Kesavardhana Gandla from Medtronic R&D, Bangalore. I am trying to evaluate the nginx on YOCTO Linux based embedded product. > > My Linux version: 5.15.71 aarch64 GNU/Linux. > > While using the stream option on target the below error is coming. > > > > Error: nginx: [emerg] unknown directive "stream" in /etc/nginx/nginx.conf:11 > > Please avoid cross-posting to multiple mailing lists. Since this > question is related to nginx development, it can be addressed to > nginx-devel mailing list. Certainly this is not related to nginx development. For user-level questions, please use the nginx@ mailing list. Alternatively, reading the docs might be a good idea. Thanks for understanding. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Sat Dec 9 04:26:48 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 9 Dec 2023 07:26:48 +0300 Subject: [PATCH] Added asm ISB as asm pause for ngx_cpu_pause() for aarch64 In-Reply-To: <53d289b8676fc678ca90.1701878817@u101385.austin.arm.com> References: <53d289b8676fc678ca90.1701878817@u101385.austin.arm.com> Message-ID: Hello! Thank you for the patch. Some comments and questions below. On Wed, Dec 06, 2023 at 10:06:57AM -0600, julio.suarez at foss.arm.com wrote: > # HG changeset patch > # User Julio Suarez > # Date 1701877879 21600 > # Wed Dec 06 09:51:19 2023 -0600 > # Node ID 53d289b8676fc678ca90e02ece174300a3631f79 > # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade > Added asm ISB as asm pause for ngx_cpu_pause() for aarch64 Nitpicking: Added ISB as ngx_cpu_pause() for aarch64. > > For aarch64 (A.K.A. Arm64), ngx_cpu_pause() evaluates to empty by the > GCC preprocessor. This results in an empty for loop in the lock > check code in ngx_rwlock.c/ngx_spinlock.c (a bug). > Thus, on Arm CPUs, there is no wait between checks of a lock. Could you please clarify what do you mean by "a bug"? An empty ngx_cpu_pause() is certainly not a bug, it's just a lack of a more optimal solution for the particular architecture. > When a delay like the PAUSE that is there for x86 is added, > there is a 2-5% increase in the number of requests/sec Arm CPUs > can achieve under high load. Could you please provide some details on how did you get these numbers? > Currently, options for a spin lock delay element > are YIELD and ISB. YIELD is assembled into a NOP for most Arm CPUs. > YIELD is for Arm implementations with SMT. > Most Arm implementations are single core - single thread (non-SMT). > Thus, this commit uses ISB (Instruction Barrier Sync) since it > will provide a slightly longer delay than a NOP. > > Other projects that implement spin locks have used ISB as > the delay element which has yielded performance gains as well. Looking through various open source projects, I'm seeing the following uses on arm64 CPUs: FreeBSD, sys/arm64/include/cpu.h: #define cpu_spinwait() __asm __volatile("yield" ::: "memory") FreeBSD, lib/libthr/arch/aarch64/include/pthread_md.h: #define CPU_SPINWAIT Linux, arch/arm64/include/asm/vdso/processor.h: static inline void cpu_relax(void) { asm volatile("yield" ::: "memory"); } The only popular project I was able to find which uses ISB is Rust: https://github.com/rust-lang/rust/commit/c064b6560b7ce0adeb9bbf5d7dcf12b1acb0c807 >From the commit log it looks like it mostly focuses on the delay introduced by the instruction, ignoring other effects. 
In particular, YIELD is expected to be more friendly for various emulation environments, see Linux commit here: https://github.com/torvalds/linux/commit/1baa82f48030f38d1895301f1ec93acbcb3d15db Overall, the YIELD instruction seems to be better suited and specifically designed for the task in question, as per the documentation (https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/YIELD--YIELD-): : YIELD is a hint instruction. Software with a multithreading : capability can use a YIELD instruction to indicate to the PE that : it is performing a task, for example a spin-lock, that could be : swapped out to improve overall system performance. The PE can use : this hint to suspend and resume multiple software threads if it : supports the capability. Please also note that ngx_cpu_pause() is only used on multiprocessor systems: the code checks if (ngx_ncpu > 1) before using it. > > Last, ISB is not an architecturally defined solution > for short spin lock delays. > A candidate for a short delay solution is the WFET > instruction (Wait For Event with Timeout). > However, there aren't any Arm implementations in the market that > support this instruction yet. When that occurs, > WFET can be tested as a replacement for ISB. Until then, > ISB will do. > > diff -r f366007dd23a -r 53d289b8676f src/os/unix/ngx_atomic.h > --- a/src/os/unix/ngx_atomic.h Tue Nov 14 15:26:02 2023 +0400 > +++ b/src/os/unix/ngx_atomic.h Wed Dec 06 09:51:19 2023 -0600 > @@ -66,6 +66,8 @@ > > #if ( __i386__ || __i386 || __amd64__ || __amd64 ) > #define ngx_cpu_pause() __asm__ ("pause") > +#elif ( __aarch64__ ) > +#define ngx_cpu_pause() __asm__ ("isb" ::: "memory") Could you please clarify the reasons for the "memory" clobber here? > #else > #define ngx_cpu_pause() > #endif -- Maxim Dounin http://mdounin.ru/ From jordanc.carter at outlook.com Sat Dec 9 07:46:08 2023 From: jordanc.carter at outlook.com (J Carter) Date: Sat, 09 Dec 2023 07:46:08 +0000 Subject: [PATCH 0 of 2] Debug_random directive & win32 ngx_random fix Message-ID: This patch series reworks the previous proposal for the debug_random directive, with (re)consideration for advice on unnecessary complexity. (see mailing list, August 2nd 2023) https://mailman.nginx.org/pipermail/nginx-devel/2023-August/BQNBXJFIIHFLHRE2ASU4QHPTMWPJUP5D.html Resubmitting as a series due to a later-discovered issue with win32's ngx_random. - First patch implements the debug_random directive. - Second patch provides a dependent fix for the win32 implementation of ngx_random. Any feedback would be much appreciated. From jordanc.carter at outlook.com Sat Dec 9 07:46:09 2023 From: jordanc.carter at outlook.com (J Carter) Date: Sat, 09 Dec 2023 07:46:09 +0000 Subject: [PATCH 1 of 2] Events: debug_random directive In-Reply-To: References: Message-ID: # HG changeset patch # User J Carter # Date 1702098547 0 # Sat Dec 09 05:09:07 2023 +0000 # Node ID d9275e982a7188a1ea7855295ffa93362ea9830f # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade Events: debug_random directive This directive enables debug logging for only a percentage of the connections that match debug_connection directives. It is useful for sample-style debugging of nginx under high-load scenarios, where logging all connections would incur excessive overhead. The directive takes a value between 0.01% and 99.99%, inclusive.
Example usage: events { worker_connections 1024; debug_connection 0.0.0.0/0; debug_connection ::0; debug_random 1%; } diff -r f366007dd23a -r d9275e982a71 src/event/ngx_event.c --- a/src/event/ngx_event.c Tue Nov 14 15:26:02 2023 +0400 +++ b/src/event/ngx_event.c Sat Dec 09 05:09:07 2023 +0000 @@ -30,6 +30,8 @@ static char *ngx_event_use(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static char *ngx_event_debug_connection(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +static char *ngx_event_debug_random(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); static void *ngx_event_core_create_conf(ngx_cycle_t *cycle); static char *ngx_event_core_init_conf(ngx_cycle_t *cycle, void *conf); @@ -162,6 +164,13 @@ 0, NULL }, + { ngx_string("debug_random"), + NGX_EVENT_CONF|NGX_CONF_TAKE1, + ngx_event_debug_random, + 0, + 0, + NULL }, + ngx_null_command }; @@ -1254,6 +1263,51 @@ } +static char * +ngx_event_debug_random(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ +#if (NGX_DEBUG) + ngx_event_conf_t *ecf = conf; + + ngx_int_t n; + ngx_str_t *value; + + if (ecf->debug_percent != NGX_CONF_UNSET_UINT) { + return "is duplicate"; + } + + value = cf->args->elts; + + if (value[1].len == 0 || value[1].data[value[1].len - 1] != '%') { + goto invalid; + } + + n = ngx_atofp(value[1].data, value[1].len - 1, 2); + if (n == NGX_ERROR || n == 0 || n > 9999) { + goto invalid; + } + + ecf->debug_percent = n; + + return NGX_CONF_OK; + +invalid: + + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid percent value \"%V\"", &value[1]); + +#else + + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "\"debug_random\" is ignored, you need to rebuild " + "nginx using --with-debug option to enable it"); + +#endif + + return NGX_CONF_ERROR; +} + + static void * ngx_event_core_create_conf(ngx_cycle_t *cycle) { @@ -1279,6 +1333,8 @@ return NULL; } + ecf->debug_percent = NGX_CONF_UNSET_UINT; + #endif return ecf; @@ -1369,5 +1425,9 @@ ngx_conf_init_value(ecf->accept_mutex, 0); ngx_conf_init_msec_value(ecf->accept_mutex_delay, 500); +#if (NGX_DEBUG) + ngx_conf_init_uint_value(ecf->debug_percent, 0); +#endif + return NGX_CONF_OK; } diff -r f366007dd23a -r d9275e982a71 src/event/ngx_event.h --- a/src/event/ngx_event.h Tue Nov 14 15:26:02 2023 +0400 +++ b/src/event/ngx_event.h Sat Dec 09 05:09:07 2023 +0000 @@ -438,6 +438,7 @@ u_char *name; #if (NGX_DEBUG) + ngx_uint_t debug_percent; ngx_array_t debug_connection; #endif } ngx_event_conf_t; diff -r f366007dd23a -r d9275e982a71 src/event/ngx_event_accept.c --- a/src/event/ngx_event_accept.c Tue Nov 14 15:26:02 2023 +0400 +++ b/src/event/ngx_event_accept.c Sat Dec 09 05:09:07 2023 +0000 @@ -561,6 +561,12 @@ break; } + if (ecf->debug_percent + && ecf->debug_percent <= (ngx_uint_t) ngx_random() % 10000) + { + break; + } + c->log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL; break; From jordanc.carter at outlook.com Sat Dec 9 07:46:10 2023 From: jordanc.carter at outlook.com (=?iso-8859-1?q?J_Carter?=) Date: Sat, 09 Dec 2023 07:46:10 +0000 Subject: [PATCH 2 of 2] Win32: extended ngx_random range to 0x7fffffff In-Reply-To: References: Message-ID: # HG changeset patch # User J Carter # Date 1702101635 0 # Sat Dec 09 06:00:35 2023 +0000 # Node ID 1a77698f82d2580aa8b8f62ce89b4dbb6d678c5d # Parent d9275e982a7188a1ea7855295ffa93362ea9830f Win32: extended ngx_random range to 0x7fffffff rand() is used on win32. RAND_MAX is implementation defined. win32's is 0x7fff. Existing uses of ngx_random rely upon 0x7fffffff range provided by posix implementations of random(). 
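A note on the arithmetic of the replacement macro in the diff below (my own reading, not part of the patch): with RAND_MAX being 0x7fff, each rand() call supplies 15 random bits, and the three calls are packed into non-overlapping bit positions to build a 31-bit value matching POSIX random()'s 0..0x7fffffff range. A small standalone illustration, where rand15() simulates win32's 15-bit rand() so the example behaves the same on other platforms:

    #include <stdio.h>
    #include <stdlib.h>

    /* rand() << 16  -> bits 16..30
     * rand() <<  1  -> bits  1..15
     * rand() >> 14  -> bit   0  (top bit of the third 15-bit draw)
     */

    static int
    rand15(void)
    {
        return rand() & 0x7fff;    /* emulate win32's RAND_MAX == 0x7fff */
    }

    int
    main(void)
    {
        int  i, r;

        for (i = 0; i < 5; i++) {
            r = (rand15() << 16) | (rand15() << 1) | (rand15() >> 14);
            printf("%08x\n", (unsigned) r);   /* always within 0..0x7fffffff */
        }

        return 0;
    }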
diff -r d9275e982a71 -r 1a77698f82d2 src/os/win32/ngx_win32_config.h --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 06:00:35 2023 +0000 @@ -280,7 +280,9 @@ #define NGX_HAVE_GETADDRINFO 1 -#define ngx_random rand +#define ngx_random \ + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) + #define ngx_debug_init() From jordanc.carter at outlook.com Sat Dec 9 08:42:11 2023 From: jordanc.carter at outlook.com (J Carter) Date: Sat, 9 Dec 2023 08:42:11 +0000 Subject: [PATCH 2 of 2] Win32: extended ngx_random range to 0x7fffffff In-Reply-To: References: Message-ID: On Sat, 09 Dec 2023 07:46:10 +0000 J Carter wrote: > # HG changeset patch > # User J Carter > # Date 1702101635 0 > # Sat Dec 09 06:00:35 2023 +0000 > # Node ID 1a77698f82d2580aa8b8f62ce89b4dbb6d678c5d > # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > Win32: extended ngx_random range to 0x7fffffff > > rand() is used on win32. RAND_MAX is implementation defined. win32's is > 0x7fff. > > Existing uses of ngx_random rely upon 0x7fffffff range provided by > posix implementations of random(). > > diff -r d9275e982a71 -r 1a77698f82d2 src/os/win32/ngx_win32_config.h > --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 06:00:35 2023 +0000 > @@ -280,7 +280,9 @@ > > #define NGX_HAVE_GETADDRINFO 1 > > -#define ngx_random rand > +#define ngx_random \ > + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > + > #define ngx_debug_init() > > ^ my mistake - copying error.. # HG changeset patch # User J Carter # Date 1702111094 0 # Sat Dec 09 08:38:14 2023 +0000 # Node ID 10ef59a412a330872fc6d46de64f42e32e997b3a # Parent d9275e982a7188a1ea7855295ffa93362ea9830f Win32: extended ngx_random range to 0x7fffffff rand() is used on win32. RAND_MAX is implementation defined. win32's is 0x7fff. Existing uses of ngx_random rely upon 0x7fffffff range provided by posix implementations of random(). diff -r d9275e982a71 -r 10ef59a412a3 src/os/win32/ngx_win32_config.h --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 @@ -280,7 +280,9 @@ #define NGX_HAVE_GETADDRINFO 1 -#define ngx_random rand +#define ngx_random() \ + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) + #define ngx_debug_init() From pluknet at nginx.com Mon Dec 11 09:40:06 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 11 Dec 2023 13:40:06 +0400 Subject: [PATCH 0 of 6] QUIC PATH_CHALLENGE-related series In-Reply-To: References: Message-ID: <9E408E60-FCFC-43D0-85DA-3576EE49251C@nginx.com> > On 30 Nov 2023, at 15:05, Roman Arutyunyan wrote: > > Hi, > > A number of patches discussed previously. > A couple more patches to follow. Last two weeks I spent pondering if we have to migrate to per-path congestion control and RTT estimator (as inspired by Alibaba patch https://mailman.nginx.org/pipermail/nginx-devel/2023-January/PNDV4MEDZPN5QJ6RZ2FQCWA7NGQ2ILSX.html), in order to alleviate severe congestion controller bugs in in-flight accounting that may result in connection stalls due to a type underflow. Previously existed, they become more visible within this series (that makes PATH_CHALLENGE frames congestion aware), such that any migration may end up in connection stalls, easily seen in tests. After discussion with Roman, I came to conclusion that it's possible to fix those bugs without touching a single context. 
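To illustrate the type underflow referred to above, a simplified standalone sketch, under the assumption that the in-flight counter is an unsigned size_t that is zeroed when the congestion state is reset (not the actual nginx code):

    #include <stdio.h>
    #include <stddef.h>

    int
    main(void)
    {
        size_t  in_flight = 0;    /* congestion state was just reset */
        size_t  window = 14720;
        size_t  plen = 1200;      /* packet sent before the reset is now acked or lost */

        in_flight -= plen;        /* unsigned wrap-around to a huge value */

        printf("in_flight=%zu blocked=%d\n", in_flight,
               in_flight >= window ? 1 : 0);

        return 0;
    }

Once the counter has wrapped, in_flight >= window stays true and the connection stalls, which is what the per-path packet number check in the series below is meant to prevent.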
Other than that below are various optimization I happened to see while testing in-flight accounting during connection migration. # HG changeset patch # User Sergey Kandaurov # Date 1702282940 -14400 # Mon Dec 11 12:22:20 2023 +0400 # Node ID 34311bcfd27d3cc420771b9349108e839d6a532e # Parent 3518c40102b2a0f4a5be00fef00becdff70921cb QUIC: reset RTT estimator for the new path. RTT is a property of the path, it must be reset on confirming a peer's ownership of its new address. diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -260,14 +260,7 @@ ngx_quic_new_connection(ngx_connection_t ngx_queue_init(&qc->free_frames); - qc->avg_rtt = NGX_QUIC_INITIAL_RTT; - qc->rttvar = NGX_QUIC_INITIAL_RTT / 2; - qc->min_rtt = NGX_TIMER_INFINITE; - qc->first_rtt = NGX_TIMER_INFINITE; - - /* - * qc->latest_rtt = 0 - */ + ngx_quic_init_rtt(qc); qc->pto.log = c->log; qc->pto.data = c; diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h --- a/src/event/quic/ngx_event_quic_connection.h +++ b/src/event/quic/ngx_event_quic_connection.h @@ -65,6 +65,13 @@ typedef struct ngx_quic_keys_s ng #define ngx_quic_get_socket(c) ((ngx_quic_socket_t *)((c)->udp)) +#define ngx_quic_init_rtt(qc) \ + (qc)->avg_rtt = NGX_QUIC_INITIAL_RTT; \ + (qc)->rttvar = NGX_QUIC_INITIAL_RTT / 2; \ + (qc)->min_rtt = NGX_TIMER_INFINITE; \ + (qc)->first_rtt = NGX_TIMER_INFINITE; \ + (qc)->latest_rtt = 0; + typedef enum { NGX_QUIC_PATH_IDLE = 0, diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c +++ b/src/event/quic/ngx_event_quic_migration.c @@ -181,6 +181,8 @@ valid: 14720)); qc->congestion.ssthresh = (size_t) -1; qc->congestion.recovery_start = ngx_current_msec; + + ngx_quic_init_rtt(qc); } path->validated = 1; # HG changeset patch # User Sergey Kandaurov # Date 1702285561 -14400 # Mon Dec 11 13:06:01 2023 +0400 # Node ID 2e747e7b203e9b62455fc6b8457bd10879a88bec # Parent 34311bcfd27d3cc420771b9349108e839d6a532e QUIC: path aware in-flight bytes accounting. On-packet acknowledgement is made path aware, as per RFC 9000, Section 9.4: Packets sent on the old path MUST NOT contribute to congestion control or RTT estimation for the new path. To make it possible over a single congestion control context, the first packet to be sent after the new path has been validated, which includes resetting the congestion controller and RTT estimator, is now remembered in the connection. Packets sent previously, such as on the old path, are not taken into account. diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -325,6 +325,10 @@ ngx_quic_congestion_ack(ngx_connection_t qc = ngx_quic_get_connection(c); cg = &qc->congestion; + if (f->pnum < qc->rst_pnum) { + return; + } + blocked = (cg->in_flight >= cg->window) ? 1 : 0; cg->in_flight -= f->plen; @@ -667,6 +671,10 @@ ngx_quic_congestion_lost(ngx_connection_ qc = ngx_quic_get_connection(c); cg = &qc->congestion; + if (f->pnum < qc->rst_pnum) { + return; + } + blocked = (cg->in_flight >= cg->window) ? 
1 : 0; cg->in_flight -= f->plen; diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h --- a/src/event/quic/ngx_event_quic_connection.h +++ b/src/event/quic/ngx_event_quic_connection.h @@ -266,6 +266,8 @@ struct ngx_quic_connection_s { ngx_quic_streams_t streams; ngx_quic_congestion_t congestion; + uint64_t rst_pnum; /* first on validated path */ + off_t received; ngx_uint_t error; diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c +++ b/src/event/quic/ngx_event_quic_migration.c @@ -110,6 +110,7 @@ ngx_quic_handle_path_response_frame(ngx_ ngx_uint_t rst; ngx_queue_t *q; ngx_quic_path_t *path, *prev; + ngx_quic_send_ctx_t *ctx; ngx_quic_connection_t *qc; qc = ngx_quic_get_connection(c); @@ -174,6 +175,11 @@ valid: } if (rst) { + /* prevent old path packets contribution to congestion control */ + + ctx = ngx_quic_get_send_ctx(qc, ssl_encryption_application); + qc->rst_pnum = ctx->pnum; + ngx_memzero(&qc->congestion, sizeof(ngx_quic_congestion_t)); qc->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size, # HG changeset patch # User Sergey Kandaurov # Date 1702287236 -14400 # Mon Dec 11 13:33:56 2023 +0400 # Node ID 561791598a9610e79ea4bc24788ff887d032b3b3 # Parent 2e747e7b203e9b62455fc6b8457bd10879a88bec QUIC: abandoned PMTU discovery on the old path. It is seemingly a useless work to probe MTU on the old path, which may be gone. This also goes in line with RFC 9000, Section 8.2.4, Failed Path Validation, which prescribes to abandon old path validation if switching to a new path. diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c +++ b/src/event/quic/ngx_event_quic_migration.c @@ -497,6 +497,7 @@ ngx_quic_handle_migration(ngx_connection } } + qc->path->state = NGX_QUIC_PATH_IDLE; qc->path->tag = NGX_QUIC_PATH_BACKUP; ngx_quic_path_dbg(c, "is now backup", qc->path); # HG changeset patch # User Sergey Kandaurov # Date 1702287237 -14400 # Mon Dec 11 13:33:57 2023 +0400 # Node ID 2f65a579a235d08902e47cf796db3c1bc1ca8790 # Parent 561791598a9610e79ea4bc24788ff887d032b3b3 QUIC: do not arm PTO timer for path validation. As per RFC 9000, Section 9.4, a separate timer is set when a PATH_CHALLENGE is sent. Further, PATH_CHALLENGE frames have a distinct retransmission policy. Thus, it makes no sense to arm the PTO timer for PATH_CHALLENGE, which may result in spurious PTO events. 
diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -752,7 +752,23 @@ ngx_quic_set_lost_timer(ngx_connection_t } } - q = ngx_queue_last(&ctx->sent); + /* PATH_CHALLENGE timer is separate (see RFC 9000, 9.4) */ + + for (q = ngx_queue_last(&ctx->sent); + q != ngx_queue_sentinel(&ctx->sent); + q = ngx_queue_prev(q)) + { + f = ngx_queue_data(q, ngx_quic_frame_t, queue); + + if (f->type != NGX_QUIC_FT_PATH_CHALLENGE) { + break; + } + } + + if (q == ngx_queue_sentinel(&ctx->sent)) { + continue; + } + f = ngx_queue_data(q, ngx_quic_frame_t, queue); w = (ngx_msec_int_t) (f->send_time + (ngx_quic_pto(c, ctx) << qc->pto_count) - now); # HG changeset patch # User Sergey Kandaurov # Date 1702287238 -14400 # Mon Dec 11 13:33:58 2023 +0400 # Node ID 4e1706b406978b8546f68a72e873c8579b8bc1d5 # Parent 2f65a579a235d08902e47cf796db3c1bc1ca8790 QUIC: do not consider PATH_CHALLENGE acknowledgment an RTT sample. Since PATH_CHALLENGE frames use a separate retransmission timer (other than PTO), it makes no sense to use PATH_CHALLENGE ACKs for RTT samples as well. This extends RFC 9002, Section 6.2.2 to ACK frames. Previously, through influence on RTT estimator, it could be possible to affect PTO duration, which is not used for resending PATH_CHALLENGE frames. diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c --- a/src/event/quic/ngx_event_quic_ack.c +++ b/src/event/quic/ngx_event_quic_ack.c @@ -264,7 +264,9 @@ ngx_quic_handle_ack_frame_range(ngx_conn break; } - if (f->pnum == max) { + /* PATH_CHALLENGE timer is separate (see RFC 9000, 9.4) */ + + if (f->pnum == max && f->type != NGX_QUIC_FT_PATH_CHALLENGE) { st->max_pn = f->send_time; } -- Sergey Kandaurov From arut at nginx.com Mon Dec 11 13:49:24 2023 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 11 Dec 2023 17:49:24 +0400 Subject: [PATCH 0 of 6] QUIC PATH_CHALLENGE-related series In-Reply-To: <9E408E60-FCFC-43D0-85DA-3576EE49251C@nginx.com> References: <9E408E60-FCFC-43D0-85DA-3576EE49251C@nginx.com> Message-ID: <20231211134924.fxhyrglyuaw6ggui@N00W24XTQX> Hi, On Mon, Dec 11, 2023 at 01:40:06PM +0400, Sergey Kandaurov wrote: > > > On 30 Nov 2023, at 15:05, Roman Arutyunyan wrote: > > > > Hi, > > > > A number of patches discussed previously. > > > > A couple more patches to follow. > > Last two weeks I spent pondering if we have to migrate to per-path > congestion control and RTT estimator (as inspired by Alibaba patch > https://mailman.nginx.org/pipermail/nginx-devel/2023-January/PNDV4MEDZPN5QJ6RZ2FQCWA7NGQ2ILSX.html), > in order to alleviate severe congestion controller bugs in in-flight > accounting that may result in connection stalls due to a type underflow. > Previously existed, they become more visible within this series (that > makes PATH_CHALLENGE frames congestion aware), such that any migration > may end up in connection stalls, easily seen in tests. > > After discussion with Roman, I came to conclusion that it's possible > to fix those bugs without touching a single context. > > Other than that below are various optimization I happened to see > while testing in-flight accounting during connection migration. > > # HG changeset patch > # User Sergey Kandaurov > # Date 1702282940 -14400 > # Mon Dec 11 12:22:20 2023 +0400 > # Node ID 34311bcfd27d3cc420771b9349108e839d6a532e > # Parent 3518c40102b2a0f4a5be00fef00becdff70921cb > QUIC: reset RTT estimator for the new path. 
> > RTT is a property of the path, it must be reset on confirming a peer's > ownership of its new address. > > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > --- a/src/event/quic/ngx_event_quic.c > +++ b/src/event/quic/ngx_event_quic.c > @@ -260,14 +260,7 @@ ngx_quic_new_connection(ngx_connection_t > > ngx_queue_init(&qc->free_frames); > > - qc->avg_rtt = NGX_QUIC_INITIAL_RTT; > - qc->rttvar = NGX_QUIC_INITIAL_RTT / 2; > - qc->min_rtt = NGX_TIMER_INFINITE; > - qc->first_rtt = NGX_TIMER_INFINITE; > - > - /* > - * qc->latest_rtt = 0 > - */ > + ngx_quic_init_rtt(qc); > > qc->pto.log = c->log; > qc->pto.data = c; > diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h > --- a/src/event/quic/ngx_event_quic_connection.h > +++ b/src/event/quic/ngx_event_quic_connection.h > @@ -65,6 +65,13 @@ typedef struct ngx_quic_keys_s ng > > #define ngx_quic_get_socket(c) ((ngx_quic_socket_t *)((c)->udp)) > > +#define ngx_quic_init_rtt(qc) \ > + (qc)->avg_rtt = NGX_QUIC_INITIAL_RTT; \ > + (qc)->rttvar = NGX_QUIC_INITIAL_RTT / 2; \ > + (qc)->min_rtt = NGX_TIMER_INFINITE; \ > + (qc)->first_rtt = NGX_TIMER_INFINITE; \ > + (qc)->latest_rtt = 0; > + > > typedef enum { > NGX_QUIC_PATH_IDLE = 0, > diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c > --- a/src/event/quic/ngx_event_quic_migration.c > +++ b/src/event/quic/ngx_event_quic_migration.c > @@ -181,6 +181,8 @@ valid: > 14720)); > qc->congestion.ssthresh = (size_t) -1; > qc->congestion.recovery_start = ngx_current_msec; > + > + ngx_quic_init_rtt(qc); > } > > path->validated = 1; Looks fine. > # HG changeset patch > # User Sergey Kandaurov > # Date 1702285561 -14400 > # Mon Dec 11 13:06:01 2023 +0400 > # Node ID 2e747e7b203e9b62455fc6b8457bd10879a88bec > # Parent 34311bcfd27d3cc420771b9349108e839d6a532e > QUIC: path aware in-flight bytes accounting. > > On-packet acknowledgement is made path aware, as per RFC 9000, Section 9.4: > Packets sent on the old path MUST NOT contribute to congestion control > or RTT estimation for the new path. > > To make it possible over a single congestion control context, the first packet > to be sent after the new path has been validated, which includes resetting the > congestion controller and RTT estimator, is now remembered in the connection. > Packets sent previously, such as on the old path, are not taken into account. > > diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c > --- a/src/event/quic/ngx_event_quic_ack.c > +++ b/src/event/quic/ngx_event_quic_ack.c > @@ -325,6 +325,10 @@ ngx_quic_congestion_ack(ngx_connection_t > qc = ngx_quic_get_connection(c); > cg = &qc->congestion; > > + if (f->pnum < qc->rst_pnum) { > + return; > + } While this condition looks simple, there's something more complex going on. The value for pnum corresponds to the first application level packet that is send over the new path. Here however there's no check for packet level because other levels can only show up while rst_pnum is zero. I think this should be explained in the commit log. > + > blocked = (cg->in_flight >= cg->window) ? 1 : 0; > > cg->in_flight -= f->plen; > @@ -667,6 +671,10 @@ ngx_quic_congestion_lost(ngx_connection_ > qc = ngx_quic_get_connection(c); > cg = &qc->congestion; > > + if (f->pnum < qc->rst_pnum) { > + return; > + } The above applies here as well. > + > blocked = (cg->in_flight >= cg->window) ? 
1 : 0; > > cg->in_flight -= f->plen; > diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h > --- a/src/event/quic/ngx_event_quic_connection.h > +++ b/src/event/quic/ngx_event_quic_connection.h > @@ -266,6 +266,8 @@ struct ngx_quic_connection_s { > ngx_quic_streams_t streams; > ngx_quic_congestion_t congestion; > > + uint64_t rst_pnum; /* first on validated path */ > + > off_t received; > > ngx_uint_t error; > diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c > --- a/src/event/quic/ngx_event_quic_migration.c > +++ b/src/event/quic/ngx_event_quic_migration.c > @@ -110,6 +110,7 @@ ngx_quic_handle_path_response_frame(ngx_ > ngx_uint_t rst; > ngx_queue_t *q; > ngx_quic_path_t *path, *prev; > + ngx_quic_send_ctx_t *ctx; > ngx_quic_connection_t *qc; > > qc = ngx_quic_get_connection(c); > @@ -174,6 +175,11 @@ valid: > } > > if (rst) { > + /* prevent old path packets contribution to congestion control */ > + > + ctx = ngx_quic_get_send_ctx(qc, ssl_encryption_application); > + qc->rst_pnum = ctx->pnum; > + > ngx_memzero(&qc->congestion, sizeof(ngx_quic_congestion_t)); > > qc->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size, Here we call a reset function for rtt below, but there's no such function for congestion. As discussed in private, this will be addresses later while improving congestion algorithm. The patch looks ok. > # HG changeset patch > # User Sergey Kandaurov > # Date 1702287236 -14400 > # Mon Dec 11 13:33:56 2023 +0400 > # Node ID 561791598a9610e79ea4bc24788ff887d032b3b3 > # Parent 2e747e7b203e9b62455fc6b8457bd10879a88bec > QUIC: abandoned PMTU discovery on the old path. > > It is seemingly a useless work to probe MTU on the old path, which may be gone. > This also goes in line with RFC 9000, Section 8.2.4, Failed Path Validation, > which prescribes to abandon old path validation if switching to a new path. > > diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c > --- a/src/event/quic/ngx_event_quic_migration.c > +++ b/src/event/quic/ngx_event_quic_migration.c > @@ -497,6 +497,7 @@ ngx_quic_handle_migration(ngx_connection > } > } > > + qc->path->state = NGX_QUIC_PATH_IDLE; > qc->path->tag = NGX_QUIC_PATH_BACKUP; > ngx_quic_path_dbg(c, "is now backup", qc->path); When new path validation fails, we return to the backup path with potentially unfinished MTU discovery. We need to restart MTU discovery in this case by calling ngx_quic_discover_path_mtu(). If the discovery was finished, the function will do nothing. A separate case is when the path was validated, but initial MTU (1200) was not verified due to amplification limit. This should be addressed as well. > # HG changeset patch > # User Sergey Kandaurov > # Date 1702287237 -14400 > # Mon Dec 11 13:33:57 2023 +0400 > # Node ID 2f65a579a235d08902e47cf796db3c1bc1ca8790 > # Parent 561791598a9610e79ea4bc24788ff887d032b3b3 > QUIC: do not arm PTO timer for path validation. > > As per RFC 9000, Section 9.4, a separate timer is set when a PATH_CHALLENGE > is sent. Further, PATH_CHALLENGE frames have a distinct retransmission policy. > Thus, it makes no sense to arm the PTO timer for PATH_CHALLENGE, which may > result in spurious PTO events. 
> > diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c > --- a/src/event/quic/ngx_event_quic_ack.c > +++ b/src/event/quic/ngx_event_quic_ack.c > @@ -752,7 +752,23 @@ ngx_quic_set_lost_timer(ngx_connection_t > } > } > > - q = ngx_queue_last(&ctx->sent); > + /* PATH_CHALLENGE timer is separate (see RFC 9000, 9.4) */ > + > + for (q = ngx_queue_last(&ctx->sent); > + q != ngx_queue_sentinel(&ctx->sent); > + q = ngx_queue_prev(q)) > + { > + f = ngx_queue_data(q, ngx_quic_frame_t, queue); > + > + if (f->type != NGX_QUIC_FT_PATH_CHALLENGE) { > + break; > + } > + } > + > + if (q == ngx_queue_sentinel(&ctx->sent)) { > + continue; > + } > + > f = ngx_queue_data(q, ngx_quic_frame_t, queue); > w = (ngx_msec_int_t) > (f->send_time + (ngx_quic_pto(c, ctx) << qc->pto_count) - now); > # HG changeset patch > # User Sergey Kandaurov > # Date 1702287238 -14400 > # Mon Dec 11 13:33:58 2023 +0400 > # Node ID 4e1706b406978b8546f68a72e873c8579b8bc1d5 > # Parent 2f65a579a235d08902e47cf796db3c1bc1ca8790 > QUIC: do not consider PATH_CHALLENGE acknowledgment an RTT sample. > > Since PATH_CHALLENGE frames use a separate retransmission timer (other than > PTO), it makes no sense to use PATH_CHALLENGE ACKs for RTT samples as well. > This extends RFC 9002, Section 6.2.2 to ACK frames. Previously, through > influence on RTT estimator, it could be possible to affect PTO duration, > which is not used for resending PATH_CHALLENGE frames. > > diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c > --- a/src/event/quic/ngx_event_quic_ack.c > +++ b/src/event/quic/ngx_event_quic_ack.c > @@ -264,7 +264,9 @@ ngx_quic_handle_ack_frame_range(ngx_conn > break; > } > > - if (f->pnum == max) { > + /* PATH_CHALLENGE timer is separate (see RFC 9000, 9.4) */ > + > + if (f->pnum == max && f->type != NGX_QUIC_FT_PATH_CHALLENGE) { > st->max_pn = f->send_time; > } The last two patches will be addressed separately. -- Roman Arutyunyan From pluknet at nginx.com Mon Dec 11 13:51:23 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Mon, 11 Dec 2023 13:51:23 +0000 Subject: [nginx] HTTP: removed unused r->port_start and r->port_end. Message-ID: details: https://hg.nginx.org/nginx/rev/dacad3a9c7b8 branches: changeset: 9187:dacad3a9c7b8 user: Vladimir Khomutov date: Tue Nov 28 12:57:14 2023 +0300 description: HTTP: removed unused r->port_start and r->port_end. Neither r->port_start nor r->port_end were ever used. The r->port_end is set by the parser, though it was never used by the following code (and was never usable, since not copied by the ngx_http_alloc_large_header_buffer() without r->port_start set). 
diffstat: src/http/ngx_http_parse.c | 3 --- src/http/ngx_http_request.c | 5 ----- src/http/ngx_http_request.h | 2 -- 3 files changed, 0 insertions(+), 10 deletions(-) diffs (50 lines): diff -r f366007dd23a -r dacad3a9c7b8 src/http/ngx_http_parse.c --- a/src/http/ngx_http_parse.c Tue Nov 14 15:26:02 2023 +0400 +++ b/src/http/ngx_http_parse.c Tue Nov 28 12:57:14 2023 +0300 @@ -451,19 +451,16 @@ ngx_http_parse_request_line(ngx_http_req switch (ch) { case '/': - r->port_end = p; r->uri_start = p; state = sw_after_slash_in_uri; break; case '?': - r->port_end = p; r->uri_start = p; r->args_start = p + 1; r->empty_path_in_uri = 1; state = sw_uri; break; case ' ': - r->port_end = p; /* * use single "/" from request line to preserve pointers, * if request line will be copied to large client buffer diff -r f366007dd23a -r dacad3a9c7b8 src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Tue Nov 14 15:26:02 2023 +0400 +++ b/src/http/ngx_http_request.c Tue Nov 28 12:57:14 2023 +0300 @@ -1735,11 +1735,6 @@ ngx_http_alloc_large_header_buffer(ngx_h } } - if (r->port_start) { - r->port_start = new + (r->port_start - old); - r->port_end = new + (r->port_end - old); - } - if (r->uri_ext) { r->uri_ext = new + (r->uri_ext - old); } diff -r f366007dd23a -r dacad3a9c7b8 src/http/ngx_http_request.h --- a/src/http/ngx_http_request.h Tue Nov 14 15:26:02 2023 +0400 +++ b/src/http/ngx_http_request.h Tue Nov 28 12:57:14 2023 +0300 @@ -597,8 +597,6 @@ struct ngx_http_request_s { u_char *schema_end; u_char *host_start; u_char *host_end; - u_char *port_start; - u_char *port_end; unsigned http_minor:16; unsigned http_major:16; From pluknet at nginx.com Mon Dec 11 13:51:26 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Mon, 11 Dec 2023 13:51:26 +0000 Subject: [nginx] HTTP: uniform checks in ngx_http_alloc_large_header_buffer(). Message-ID: details: https://hg.nginx.org/nginx/rev/b05c622715fa branches: changeset: 9188:b05c622715fa user: Vladimir Khomutov date: Wed Nov 29 11:13:05 2023 +0300 description: HTTP: uniform checks in ngx_http_alloc_large_header_buffer(). If URI is not fully parsed yet, some pointers are not set. As a result, the calculation of "new + (ptr - old)" expression is flawed. According to C11, 6.5.6 Additive operators, p.9: : When two pointers are subtracted, both shall point to elements : of the same array object, or one past the last element of the : array object Since "ptr" is not set, subtraction leads to undefined behaviour, because "ptr" and "old" are not in the same buffer (i.e. array objects). Prodded by GCC undefined behaviour sanitizer. 
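For illustration, a minimal standalone example of the pointer arithmetic problem described above (made-up names, not the nginx code):

    #include <stddef.h>
    #include <stdio.h>

    /* Subtracting pointers that do not point into the same object is
     * undefined behaviour per C11 6.5.6, even if the result is never
     * dereferenced.
     */
    static char *
    relocate(char *ptr, char *old, char *new)
    {
        if (ptr == NULL) {
            return NULL;              /* the kind of guard this change adds */
        }

        return new + (ptr - old);     /* defined only while ptr points into old[] */
    }

    int
    main(void)
    {
        char   old[16], new[32];
        char  *header_start = NULL;   /* e.g. a header not parsed yet */

        printf("%p\n", (void *) relocate(header_start, old, new));

        return 0;
    }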
diffstat: src/http/ngx_http_request.c | 34 ++++++++++++++++++++++++++-------- 1 files changed, 26 insertions(+), 8 deletions(-) diffs (54 lines): diff -r dacad3a9c7b8 -r b05c622715fa src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Tue Nov 28 12:57:14 2023 +0300 +++ b/src/http/ngx_http_request.c Wed Nov 29 11:13:05 2023 +0300 @@ -1718,14 +1718,23 @@ ngx_http_alloc_large_header_buffer(ngx_h r->request_end = new + (r->request_end - old); } - r->method_end = new + (r->method_end - old); - - r->uri_start = new + (r->uri_start - old); - r->uri_end = new + (r->uri_end - old); + if (r->method_end) { + r->method_end = new + (r->method_end - old); + } + + if (r->uri_start) { + r->uri_start = new + (r->uri_start - old); + } + + if (r->uri_end) { + r->uri_end = new + (r->uri_end - old); + } if (r->schema_start) { r->schema_start = new + (r->schema_start - old); - r->schema_end = new + (r->schema_end - old); + if (r->schema_end) { + r->schema_end = new + (r->schema_end - old); + } } if (r->host_start) { @@ -1749,9 +1758,18 @@ ngx_http_alloc_large_header_buffer(ngx_h } else { r->header_name_start = new; - r->header_name_end = new + (r->header_name_end - old); - r->header_start = new + (r->header_start - old); - r->header_end = new + (r->header_end - old); + + if (r->header_name_end) { + r->header_name_end = new + (r->header_name_end - old); + } + + if (r->header_start) { + r->header_start = new + (r->header_start - old); + } + + if (r->header_end) { + r->header_end = new + (r->header_end - old); + } } r->header_in = b; From pluknet at nginx.com Mon Dec 11 14:45:45 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 11 Dec 2023 18:45:45 +0400 Subject: [PATCH 0 of 6] QUIC PATH_CHALLENGE-related series In-Reply-To: <20231211134924.fxhyrglyuaw6ggui@N00W24XTQX> References: <9E408E60-FCFC-43D0-85DA-3576EE49251C@nginx.com> <20231211134924.fxhyrglyuaw6ggui@N00W24XTQX> Message-ID: <436A418D-3B4A-4826-BFC9-580104AA2A47@nginx.com> > On 11 Dec 2023, at 17:49, Roman Arutyunyan wrote: > > Hi, > > On Mon, Dec 11, 2023 at 01:40:06PM +0400, Sergey Kandaurov wrote: >> >>> On 30 Nov 2023, at 15:05, Roman Arutyunyan wrote: >>> >>> Hi, >>> >>> A number of patches discussed previously. >>> >> >> A couple more patches to follow. >> >> Last two weeks I spent pondering if we have to migrate to per-path >> congestion control and RTT estimator (as inspired by Alibaba patch >> https://mailman.nginx.org/pipermail/nginx-devel/2023-January/PNDV4MEDZPN5QJ6RZ2FQCWA7NGQ2ILSX.html), >> in order to alleviate severe congestion controller bugs in in-flight >> accounting that may result in connection stalls due to a type underflow. >> Previously existed, they become more visible within this series (that >> makes PATH_CHALLENGE frames congestion aware), such that any migration >> may end up in connection stalls, easily seen in tests. >> >> After discussion with Roman, I came to conclusion that it's possible >> to fix those bugs without touching a single context. >> >> Other than that below are various optimization I happened to see >> while testing in-flight accounting during connection migration. >> >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1702282940 -14400 >> # Mon Dec 11 12:22:20 2023 +0400 >> # Node ID 34311bcfd27d3cc420771b9349108e839d6a532e >> # Parent 3518c40102b2a0f4a5be00fef00becdff70921cb >> QUIC: reset RTT estimator for the new path. >> >> RTT is a property of the path, it must be reset on confirming a peer's >> ownership of its new address. 
>> >> diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c >> --- a/src/event/quic/ngx_event_quic.c >> +++ b/src/event/quic/ngx_event_quic.c >> @@ -260,14 +260,7 @@ ngx_quic_new_connection(ngx_connection_t >> >> ngx_queue_init(&qc->free_frames); >> >> - qc->avg_rtt = NGX_QUIC_INITIAL_RTT; >> - qc->rttvar = NGX_QUIC_INITIAL_RTT / 2; >> - qc->min_rtt = NGX_TIMER_INFINITE; >> - qc->first_rtt = NGX_TIMER_INFINITE; >> - >> - /* >> - * qc->latest_rtt = 0 >> - */ >> + ngx_quic_init_rtt(qc); >> >> qc->pto.log = c->log; >> qc->pto.data = c; >> diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h >> --- a/src/event/quic/ngx_event_quic_connection.h >> +++ b/src/event/quic/ngx_event_quic_connection.h >> @@ -65,6 +65,13 @@ typedef struct ngx_quic_keys_s ng >> >> #define ngx_quic_get_socket(c) ((ngx_quic_socket_t *)((c)->udp)) >> >> +#define ngx_quic_init_rtt(qc) \ >> + (qc)->avg_rtt = NGX_QUIC_INITIAL_RTT; \ >> + (qc)->rttvar = NGX_QUIC_INITIAL_RTT / 2; \ >> + (qc)->min_rtt = NGX_TIMER_INFINITE; \ >> + (qc)->first_rtt = NGX_TIMER_INFINITE; \ >> + (qc)->latest_rtt = 0; >> + >> >> typedef enum { >> NGX_QUIC_PATH_IDLE = 0, >> diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c >> --- a/src/event/quic/ngx_event_quic_migration.c >> +++ b/src/event/quic/ngx_event_quic_migration.c >> @@ -181,6 +181,8 @@ valid: >> 14720)); >> qc->congestion.ssthresh = (size_t) -1; >> qc->congestion.recovery_start = ngx_current_msec; >> + >> + ngx_quic_init_rtt(qc); >> } >> >> path->validated = 1; > > Looks fine. > >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1702285561 -14400 >> # Mon Dec 11 13:06:01 2023 +0400 >> # Node ID 2e747e7b203e9b62455fc6b8457bd10879a88bec >> # Parent 34311bcfd27d3cc420771b9349108e839d6a532e >> QUIC: path aware in-flight bytes accounting. >> >> On-packet acknowledgement is made path aware, as per RFC 9000, Section 9.4: >> Packets sent on the old path MUST NOT contribute to congestion control >> or RTT estimation for the new path. >> >> To make it possible over a single congestion control context, the first packet >> to be sent after the new path has been validated, which includes resetting the >> congestion controller and RTT estimator, is now remembered in the connection. >> Packets sent previously, such as on the old path, are not taken into account. >> >> diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c >> --- a/src/event/quic/ngx_event_quic_ack.c >> +++ b/src/event/quic/ngx_event_quic_ack.c >> @@ -325,6 +325,10 @@ ngx_quic_congestion_ack(ngx_connection_t >> qc = ngx_quic_get_connection(c); >> cg = &qc->congestion; >> >> + if (f->pnum < qc->rst_pnum) { >> + return; >> + } > > While this condition looks simple, there's something more complex going on. > The value for pnum corresponds to the first application level packet that is > send over the new path. Here however there's no check for packet level because > other levels can only show up while rst_pnum is zero. I think this should be > explained in the commit log. Thanks, added this para at the end for clarity: Note that although the packet number is saved per-connection, the added checks affect application level packets only. For non-application level packets, which are only processed prior to the handshake is complete, the remembered packet number remains set to zero. > >> + >> blocked = (cg->in_flight >= cg->window) ? 
1 : 0; >> >> cg->in_flight -= f->plen; >> @@ -667,6 +671,10 @@ ngx_quic_congestion_lost(ngx_connection_ >> qc = ngx_quic_get_connection(c); >> cg = &qc->congestion; >> >> + if (f->pnum < qc->rst_pnum) { >> + return; >> + } > > The above applies here as well. > >> + >> blocked = (cg->in_flight >= cg->window) ? 1 : 0; >> >> cg->in_flight -= f->plen; >> diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h >> --- a/src/event/quic/ngx_event_quic_connection.h >> +++ b/src/event/quic/ngx_event_quic_connection.h >> @@ -266,6 +266,8 @@ struct ngx_quic_connection_s { >> ngx_quic_streams_t streams; >> ngx_quic_congestion_t congestion; >> >> + uint64_t rst_pnum; /* first on validated path */ >> + >> off_t received; >> >> ngx_uint_t error; >> diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c >> --- a/src/event/quic/ngx_event_quic_migration.c >> +++ b/src/event/quic/ngx_event_quic_migration.c >> @@ -110,6 +110,7 @@ ngx_quic_handle_path_response_frame(ngx_ >> ngx_uint_t rst; >> ngx_queue_t *q; >> ngx_quic_path_t *path, *prev; >> + ngx_quic_send_ctx_t *ctx; >> ngx_quic_connection_t *qc; >> >> qc = ngx_quic_get_connection(c); >> @@ -174,6 +175,11 @@ valid: >> } >> >> if (rst) { >> + /* prevent old path packets contribution to congestion control */ >> + >> + ctx = ngx_quic_get_send_ctx(qc, ssl_encryption_application); >> + qc->rst_pnum = ctx->pnum; >> + >> ngx_memzero(&qc->congestion, sizeof(ngx_quic_congestion_t)); >> >> qc->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size, > > Here we call a reset function for rtt below, but there's no such function for > congestion. As discussed in private, this will be addresses later while > improving congestion algorithm. > > The patch looks ok. > >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1702287236 -14400 >> # Mon Dec 11 13:33:56 2023 +0400 >> # Node ID 561791598a9610e79ea4bc24788ff887d032b3b3 >> # Parent 2e747e7b203e9b62455fc6b8457bd10879a88bec >> QUIC: abandoned PMTU discovery on the old path. >> >> It is seemingly a useless work to probe MTU on the old path, which may be gone. >> This also goes in line with RFC 9000, Section 8.2.4, Failed Path Validation, >> which prescribes to abandon old path validation if switching to a new path. >> >> diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c >> --- a/src/event/quic/ngx_event_quic_migration.c >> +++ b/src/event/quic/ngx_event_quic_migration.c >> @@ -497,6 +497,7 @@ ngx_quic_handle_migration(ngx_connection >> } >> } >> >> + qc->path->state = NGX_QUIC_PATH_IDLE; >> qc->path->tag = NGX_QUIC_PATH_BACKUP; >> ngx_quic_path_dbg(c, "is now backup", qc->path); > > When new path validation fails, we return to the backup path with potentially > unfinished MTU discovery. We need to restart MTU discovery in this case by > calling ngx_quic_discover_path_mtu(). If the discovery was finished, the > function will do nothing. A separate case is when the path was validated, but > initial MTU (1200) was not verified due to amplification limit. This should be > addressed as well. While restarting PMTUD sounds doable, restoring MTU path validation may need more work. I think we can postpone this patch, since it is not directly related to this series. 
> >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1702287237 -14400 >> # Mon Dec 11 13:33:57 2023 +0400 >> # Node ID 2f65a579a235d08902e47cf796db3c1bc1ca8790 >> # Parent 561791598a9610e79ea4bc24788ff887d032b3b3 >> QUIC: do not arm PTO timer for path validation. >> >> As per RFC 9000, Section 9.4, a separate timer is set when a PATH_CHALLENGE >> is sent. Further, PATH_CHALLENGE frames have a distinct retransmission policy. >> Thus, it makes no sense to arm the PTO timer for PATH_CHALLENGE, which may >> result in spurious PTO events. >> >> diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c >> --- a/src/event/quic/ngx_event_quic_ack.c >> +++ b/src/event/quic/ngx_event_quic_ack.c >> @@ -752,7 +752,23 @@ ngx_quic_set_lost_timer(ngx_connection_t >> } >> } >> >> - q = ngx_queue_last(&ctx->sent); >> + /* PATH_CHALLENGE timer is separate (see RFC 9000, 9.4) */ >> + >> + for (q = ngx_queue_last(&ctx->sent); >> + q != ngx_queue_sentinel(&ctx->sent); >> + q = ngx_queue_prev(q)) >> + { >> + f = ngx_queue_data(q, ngx_quic_frame_t, queue); >> + >> + if (f->type != NGX_QUIC_FT_PATH_CHALLENGE) { >> + break; >> + } >> + } >> + >> + if (q == ngx_queue_sentinel(&ctx->sent)) { >> + continue; >> + } >> + >> f = ngx_queue_data(q, ngx_quic_frame_t, queue); >> w = (ngx_msec_int_t) >> (f->send_time + (ngx_quic_pto(c, ctx) << qc->pto_count) - now); >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1702287238 -14400 >> # Mon Dec 11 13:33:58 2023 +0400 >> # Node ID 4e1706b406978b8546f68a72e873c8579b8bc1d5 >> # Parent 2f65a579a235d08902e47cf796db3c1bc1ca8790 >> QUIC: do not consider PATH_CHALLENGE acknowledgment an RTT sample. >> >> Since PATH_CHALLENGE frames use a separate retransmission timer (other than >> PTO), it makes no sense to use PATH_CHALLENGE ACKs for RTT samples as well. >> This extends RFC 9002, Section 6.2.2 to ACK frames. Previously, through >> influence on RTT estimator, it could be possible to affect PTO duration, >> which is not used for resending PATH_CHALLENGE frames. >> >> diff --git a/src/event/quic/ngx_event_quic_ack.c b/src/event/quic/ngx_event_quic_ack.c >> --- a/src/event/quic/ngx_event_quic_ack.c >> +++ b/src/event/quic/ngx_event_quic_ack.c >> @@ -264,7 +264,9 @@ ngx_quic_handle_ack_frame_range(ngx_conn >> break; >> } >> >> - if (f->pnum == max) { >> + /* PATH_CHALLENGE timer is separate (see RFC 9000, 9.4) */ >> + >> + if (f->pnum == max && f->type != NGX_QUIC_FT_PATH_CHALLENGE) { >> st->max_pn = f->send_time; >> } > > The last two patches will be addressed separately. -- Sergey Kandaurov From julio.suarez at foss.arm.com Mon Dec 11 23:09:17 2023 From: julio.suarez at foss.arm.com (Julio Suarez) Date: Mon, 11 Dec 2023 17:09:17 -0600 Subject: [PATCH] Added asm ISB as asm pause for ngx_cpu_pause() for aarch64 Message-ID: Hi Maxim, Nitpicking: Added ISB as ngx_cpu_pause() for aarch64. Yes, we can make that change. Could you please clarify what do you mean by "a bug"? An empty ngx_cpu_pause() is certainly not a bug, it's just a lack of a more optimal solution for the particular architecture. Agree, not a bug. I'm in a team that focuses on performance, so sub-optimal performance is a "bug" to us. This is not a functional bug. Replacing the word bug with "sub-optimal" is more appropriate. When a delay like the PAUSE that is there for x86 is added, there is a 2-5% increase in the number of requests/sec Arm CPUs can achieve under high load. 
Yes, the test setup is very similar to what's described here (note, those particular instances in the blog isn't what I tested): https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/nginx-performance-on-graviton-3 Also, we tested on Nginx Open Source (without JWT), not Nginx-Plus like in the blog. We tested for the max RPS of a 512B file that can be pulled through a reverse proxy. We select the number of upstreams to be large (8 to be exact), they are also high in core count (16+ CPU). The load generator node is also large (64 CPUs). This ensures the bottleneck is at the reverse proxy. We test small files because large files make the test network bounded, while smaller files make the test CPU bounded. I tested both ISB and YIELD (will talk about YIELD further below). Results of these tests are something like this: ISB uplift from no delay across 3 runs: - 2 CPU: 1.03 - 1.22% - 4 CPU: 2.70 - 10.75% (I'm treating the 10.75% here as an outlier, dropping that 10.75% gets ~5% on the high end of the range, hence why I'm just saying ~2-5% in change log, I don't want to overstate the perf improvement) - 8 CPU: 1.1 -2.33% YIELD uplift from no delay across 3 runs: - 2 CPU: 0 - 0.51% - 4 CPU: 0 - 1.41% - 8 CPU: 1.05 - 2.31% ISB produced the highest uplift, particularly for a 4 CPU reverse proxy. Hence why I submitted with ISB. Still, we see benefit with YIELD too. Variation comes from tearing down cloud infrastructure and redeploying. Results can vary depending on where you land in the data center. I'm intentionally leaving out exactly which HW/cloud I used in this data, but I can say we see similar uplift across a variety of Arm systems. With respect to using YIELD and other projects that use alternatively use ISB: With respect to ISB Vs YIELD. Yes, as documented, YIELD is the conceptually right thing to use. However, in practice, it's a NOP which produces a shorter delay than ISB. Hence why ISB appears to work better. Also, YIELD is intended for SMT systems (uncommon on Arm), and hence, it's going to be a NOP for any current Arm system you'll find in the cloud. That said, YIELD produces uplift in RPS as well because even a small delay is better than no delay. I'm 100% good with using YIELD if you want to stay true to what is currently documented. I was going for max perf uplift which is also why some other projects are also using ISB. Whether it's YIELD or ISB, a revisit with WFET would be in order in the more distant future. For today, YIELD or ISB would work better than nothing (as it currently is). If YIELD is more acceptable, then I can do YIELD. Projects that previously used YIELD and switched to ISB after noting performance improvement (I don't think these projects shared data anywhere, we just have to take their word): MongoDB: https://github.com/mongodb/mongo/blob/b7a92e4194cca52665e01d81dd7f9b037b59b362/src/mongo/platform/pause.h#L61 MySQL: https://github.com/mysql/mysql-server/blob/87307d4ddd88405117e3f1e51323836d57ab1f57/storage/innobase/include/ut0ut.h#L108 Jemalloc: https://github.com/jemalloc/jemalloc/blob/e4817c8d89a2a413e835c4adeab5c5c4412f9235/configure.ac#L436 Could you please clarify reasons for the "memory" clobber here? Putting in the memory clobber for ISB is redundant because ISB is a barrier itself, but it's probably the GCC appropriate thing to do. I also like it as a hint for someone not familiar with ISB. ISB will pause the frontend (fetch-decode) to allow the CPU backend (execute-retire) to finish whatever operations are in flight. 
It's possible that some of those operations are writes to memory. Hence why we should tell the compiler "this instruction may update memory". __________________________________________________________________ Hello! Thank you for the patch. Some comments and questions below. On Wed, Dec 06, 2023 at 10:06:57AM -0600,julio.suarez at foss.arm.com wrote: > # HG changeset patch > # User Julio Suarez > # Date 1701877879 21600 > # Wed Dec 06 09:51:19 2023 -0600 > # Node ID 53d289b8676fc678ca90e02ece174300a3631f79 > # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade > Added asm ISB as asm pause for ngx_cpu_pause() for aarch64 Nitpicking: Added ISB as ngx_cpu_pause() for aarch64. > For aarch64 (A.K.A. Arm64), ngx_cpu_pause() evaluates to empty by the > GCC preprocessor. This results in an empty for loop in the lock > check code in ngx_rwlock.c/ngx_spinlock.c (a bug). > Thus, on Arm CPUs, there is no wait between checks of a lock. Could you please clarify what do you mean by "a bug"? An empty ngx_cpu_pause() is certainly not a bug, it's just a lack of a more optimal solution for the particular architecture. > When a delay like the PAUSE that is there for x86 is added, > there is a 2-5% increase in the number of requests/sec Arm CPUs > can achieve under high load. Could you please provide some details on how did you get these numbers? > Currently, options for a spin lock delay element > are YIELD and ISB. YIELD is assembled into a NOP for most Arm CPUs. > YIELD is for Arm implementations with SMT. > Most Arm implementations are single core - single thread (non-SMT). > Thus, this commit uses ISB (Instruction Barrier Sync) since it > will provide a slightly longer delay than a NOP. > > Other projects that implement spin locks have used ISB as > the delay element which has yielded performance gains as well. Looking through various open source projects, I'm seeing the following uses on arm64 CPUs: FreeBSD, sys/arm64/include/cpu.h: #define cpu_spinwait() __asm __volatile("yield" ::: "memory") FreeBSD, lib/libthr/arch/aarch64/include/pthread_md.h: #define CPU_SPINWAIT Linux, arch/arm64/include/asm/vdso/processor.h: static inline void cpu_relax(void) { asm volatile("yield" ::: "memory"); } The only popular project I was able to find which uses ISB is Rust: https://github.com/rust-lang/rust/commit/c064b6560b7ce0adeb9bbf5d7dcf12b1acb0c807 >From the commit log it looks like it mostly focuses on the delay introduced by the instruction, ignoring other effects. In particular, YIELD is expected to be more friendly for various emulation environments, see Linux commit here: https://github.com/torvalds/linux/commit/1baa82f48030f38d1895301f1ec93acbcb3d15db Overall, the YIELD instruction seems to be better suited and specifically designed for the task in question, as per the documentation (https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/YIELD--YIELD-): : YIELD is a hint instruction. Software with a multithreading : capability can use a YIELD instruction to indicate to the PE that : it is performing a task, for example a spin-lock, that could be : swapped out to improve overall system performance. The PE can use : this hint to suspend and resume multiple software threads if it : supports the capability. Please also note that ngx_cpu_pause() is only used on multiprocessor systems: the code checks if (ngx_ncpu > 1) before using it. > Last, ISB is not an architecturally defined solution > for short spin lock delays. 
> A candidate for a short delay solution is the WFET > instruction (Wait For Event with Timeout). > However, there aren't any Arm implementations in the market that > support this instruction yet. When that occurs, > WFET can be tested as a replacement for ISB. Until then, > ISB will do. > > diff -r f366007dd23a -r 53d289b8676f src/os/unix/ngx_atomic.h > --- a/src/os/unix/ngx_atomic.h Tue Nov 14 15:26:02 2023 +0400 > +++ b/src/os/unix/ngx_atomic.h Wed Dec 06 09:51:19 2023 -0600 > @@ -66,6 +66,8 @@ > > #if ( __i386__ || __i386 || __amd64__ || __amd64 ) > #define ngx_cpu_pause() __asm__ ("pause") > +#elif ( __aarch64__ ) > +#define ngx_cpu_pause() __asm__ ("isb" ::: "memory") Could you please clarify reasons for the "memory" clobber here? > #else > #define ngx_cpu_pause() > #endif -- Maxim Dounin http://mdounin.ru/ -------------- next part -------------- An HTML attachment was scrubbed... URL: From thresh at nginx.com Tue Dec 12 01:45:03 2023 From: thresh at nginx.com (=?iso-8859-1?q?Konstantin_Pavlov?=) Date: Mon, 11 Dec 2023 17:45:03 -0800 Subject: [PATCH] Linux packages: actualized supported Alpine Linux versions Message-ID: <55f8ce8a8cb0acf9b360.1702345503@qgcd7xg9r9.olympus.f5net.com> # HG changeset patch # User Konstantin Pavlov # Date 1702345379 28800 # Mon Dec 11 17:42:59 2023 -0800 # Node ID 55f8ce8a8cb0acf9b360e47fd5d0023f16451a80 # Parent 08533e33d0744bd27bc42d87c47607399903eae5 Linux packages: actualized supported Alpine Linux versions. diff -r 08533e33d074 -r 55f8ce8a8cb0 xml/en/linux_packages.xml --- a/xml/en/linux_packages.xml Mon Nov 27 21:30:25 2023 +0000 +++ b/xml/en/linux_packages.xml Mon Dec 11 17:42:59 2023 -0800 @@ -7,7 +7,7 @@
+ rev="93">
@@ -134,11 +134,6 @@ versions: -3.15 -x86_64, aarch64/arm64 - - - 3.16 x86_64, aarch64/arm64 @@ -153,6 +148,11 @@ versions: x86_64, aarch64/arm64 + +3.19 +x86_64, aarch64/arm64 + + diff -r 08533e33d074 -r 55f8ce8a8cb0 xml/ru/linux_packages.xml --- a/xml/ru/linux_packages.xml Mon Nov 27 21:30:25 2023 +0000 +++ b/xml/ru/linux_packages.xml Mon Dec 11 17:42:59 2023 -0800 @@ -7,7 +7,7 @@
+ rev="93">
@@ -134,11 +134,6 @@ -3.15 -x86_64, aarch64/arm64 - - - 3.16 x86_64, aarch64/arm64 @@ -153,6 +148,11 @@ x86_64, aarch64/arm64 + +3.19 +x86_64, aarch64/arm64 + + From v.zhestikov at f5.com Tue Dec 12 03:13:01 2023 From: v.zhestikov at f5.com (=?utf-8?q?Vadim_Zhestikov?=) Date: Tue, 12 Dec 2023 03:13:01 +0000 Subject: [njs] Removed not used arg from njs_get_own_ordered_keys(). Message-ID: details: https://hg.nginx.org/njs/rev/59bd78b060c9 branches: changeset: 2246:59bd78b060c9 user: Vadim Zhestikov date: Mon Dec 11 19:10:36 2023 -0800 description: Removed not used arg from njs_get_own_ordered_keys(). diffstat: src/njs_object.c | 9 ++++----- 1 files changed, 4 insertions(+), 5 deletions(-) diffs (33 lines): diff -r bc80bcb3102c -r 59bd78b060c9 src/njs_object.c --- a/src/njs_object.c Tue Dec 05 08:54:18 2023 -0800 +++ b/src/njs_object.c Mon Dec 11 19:10:36 2023 -0800 @@ -897,7 +897,7 @@ njs_object_enumerate_object(njs_vm_t *vm static njs_int_t njs_get_own_ordered_keys(njs_vm_t *vm, const njs_object_t *object, - const njs_object_t *parent, njs_array_t *items, njs_object_enum_t kind, + const njs_object_t *parent, njs_array_t *items, njs_object_enum_type_t type, njs_bool_t all) { double num; @@ -1163,8 +1163,7 @@ njs_object_own_enumerate_object(njs_vm_t switch (kind) { case NJS_ENUM_KEYS: - ret = njs_get_own_ordered_keys(vm, object, parent, items, kind, type, - all); + ret = njs_get_own_ordered_keys(vm, object, parent, items, type, all); if (ret != NJS_OK) { return NJS_ERROR; } @@ -1178,8 +1177,8 @@ njs_object_own_enumerate_object(njs_vm_t return NJS_ERROR; } - ret = njs_get_own_ordered_keys(vm, object, parent, items_sorted, kind, - type, all); + ret = njs_get_own_ordered_keys(vm, object, parent, items_sorted, type, + all); if (ret != NJS_OK) { return NJS_ERROR; } From v.zhestikov at f5.com Tue Dec 12 03:13:03 2023 From: v.zhestikov at f5.com (=?utf-8?q?Vadim_Zhestikov?=) Date: Tue, 12 Dec 2023 03:13:03 +0000 Subject: [njs] Removed not required argument for JSON functions. Message-ID: details: https://hg.nginx.org/njs/rev/34df3f0796cf branches: changeset: 2247:34df3f0796cf user: Vadim Zhestikov date: Mon Dec 11 19:10:38 2023 -0800 description: Removed not required argument for JSON functions. 
diffstat: src/njs_json.c | 59 ++++++++++++++++++++++++++++----------------------------- 1 files changed, 29 insertions(+), 30 deletions(-) diffs (234 lines): diff -r 59bd78b060c9 -r 34df3f0796cf src/njs_json.c --- a/src/njs_json.c Mon Dec 11 19:10:36 2023 -0800 +++ b/src/njs_json.c Mon Dec 11 19:10:38 2023 -0800 @@ -68,16 +68,15 @@ static njs_int_t njs_json_internalize_pr static void njs_json_parse_exception(njs_json_parse_ctx_t *ctx, const char *msg, const u_char *pos); -static njs_int_t njs_json_stringify_iterator(njs_vm_t *vm, - njs_json_stringify_t *stringify, njs_value_t *value, njs_value_t *retval); +static njs_int_t njs_json_stringify_iterator(njs_json_stringify_t *stringify, + njs_value_t *value, njs_value_t *retval); static njs_function_t *njs_object_to_json_function(njs_vm_t *vm, njs_value_t *value); static njs_int_t njs_json_stringify_to_json(njs_json_stringify_t* stringify, njs_json_state_t *state, njs_value_t *key, njs_value_t *value); static njs_int_t njs_json_stringify_replacer(njs_json_stringify_t* stringify, njs_json_state_t *state, njs_value_t *key, njs_value_t *value); -static njs_int_t njs_json_stringify_array(njs_vm_t *vm, - njs_json_stringify_t *stringify); +static njs_int_t njs_json_stringify_array(njs_json_stringify_t *stringify); static njs_int_t njs_json_append_value(njs_vm_t *vm, njs_chb_t *chain, njs_value_t *value); @@ -197,7 +196,7 @@ njs_json_stringify(njs_vm_t *vm, njs_val if (njs_is_function(replacer) || njs_is_array(replacer)) { stringify->replacer = *replacer; if (njs_is_array(replacer)) { - ret = njs_json_stringify_array(vm, stringify); + ret = njs_json_stringify_array(stringify); if (njs_slow_path(ret != NJS_OK)) { goto memory_error; } @@ -263,7 +262,7 @@ njs_json_stringify(njs_vm_t *vm, njs_val break; } - return njs_json_stringify_iterator(vm, stringify, njs_arg(args, nargs, 1), + return njs_json_stringify_iterator(stringify, njs_arg(args, nargs, 1), retval); memory_error: @@ -960,14 +959,14 @@ njs_json_parse_exception(njs_json_parse_ static njs_json_state_t * -njs_json_push_stringify_state(njs_vm_t *vm, njs_json_stringify_t *stringify, +njs_json_push_stringify_state(njs_json_stringify_t *stringify, njs_value_t *value) { njs_int_t ret; njs_json_state_t *state; if (njs_slow_path(stringify->depth >= NJS_JSON_MAX_DEPTH)) { - njs_type_error(vm, "Nested too deep or a cyclic structure"); + njs_type_error(stringify->vm, "Nested too deep or a cyclic structure"); return NULL; } @@ -988,18 +987,18 @@ njs_json_push_stringify_state(njs_vm_t * state->keys = njs_array(&stringify->replacer); } else if (state->array) { - state->keys = njs_array_keys(vm, value, 1); + state->keys = njs_array_keys(stringify->vm, value, 1); if (njs_slow_path(state->keys == NULL)) { return NULL; } - ret = njs_object_length(vm, &state->value, &state->length); + ret = njs_object_length(stringify->vm, &state->value, &state->length); if (njs_slow_path(ret == NJS_ERROR)) { return NULL; } } else { - state->keys = njs_value_own_enumerate(vm, value, NJS_ENUM_KEYS, + state->keys = njs_value_own_enumerate(stringify->vm, value, NJS_ENUM_KEYS, stringify->keys_type, 0); if (njs_slow_path(state->keys == NULL)) { @@ -1085,7 +1084,7 @@ njs_json_stringify_done(njs_json_state_t static njs_int_t -njs_json_stringify_iterator(njs_vm_t *vm, njs_json_stringify_t *stringify, +njs_json_stringify_iterator(njs_json_stringify_t *stringify, njs_value_t *object, njs_value_t *retval) { int64_t size; @@ -1095,17 +1094,17 @@ njs_json_stringify_iterator(njs_vm_t *vm njs_object_t *obj; njs_json_state_t *state; - obj = 
njs_json_wrap_value(vm, &wrapper, object); + obj = njs_json_wrap_value(stringify->vm, &wrapper, object); if (njs_slow_path(obj == NULL)) { goto memory_error; } - state = njs_json_push_stringify_state(vm, stringify, &wrapper); + state = njs_json_push_stringify_state(stringify, &wrapper); if (njs_slow_path(state == NULL)) { goto memory_error; } - njs_chb_init(&chain, vm->mem_pool); + njs_chb_init(&chain, stringify->vm->mem_pool); for ( ;; ) { if (state->index == 0) { @@ -1135,7 +1134,7 @@ njs_json_stringify_iterator(njs_vm_t *vm key = &state->keys->start[state->index]; } - ret = njs_value_property(vm, &state->value, key, value); + ret = njs_value_property(stringify->vm, &state->value, key, value); if (njs_slow_path(ret == NJS_ERROR)) { return ret; } @@ -1181,7 +1180,7 @@ njs_json_stringify_iterator(njs_vm_t *vm } if (njs_json_is_object(value)) { - state = njs_json_push_stringify_state(vm, stringify, value); + state = njs_json_push_stringify_state(stringify, value); if (njs_slow_path(state == NULL)) { return NJS_ERROR; } @@ -1189,7 +1188,7 @@ njs_json_stringify_iterator(njs_vm_t *vm continue; } - ret = njs_json_append_value(vm, &chain, value); + ret = njs_json_append_value(stringify->vm, &chain, value); if (njs_slow_path(ret != NJS_OK)) { return ret; } @@ -1221,7 +1220,7 @@ done: goto release; } - ret = njs_string_create_chb(vm, retval, &chain); + ret = njs_string_create_chb(stringify->vm, retval, &chain); if (njs_slow_path(ret != NJS_OK)) { njs_chb_destroy(&chain); goto memory_error; @@ -1235,7 +1234,7 @@ release: memory_error: - njs_memory_error(vm); + njs_memory_error(stringify->vm); return NJS_ERROR; } @@ -1322,28 +1321,28 @@ njs_json_stringify_replacer(njs_json_str static njs_int_t -njs_json_stringify_array(njs_vm_t *vm, njs_json_stringify_t *stringify) +njs_json_stringify_array(njs_json_stringify_t *stringify) { njs_int_t ret; int64_t i, k, length; njs_value_t *value, *item; njs_array_t *properties; - ret = njs_object_length(vm, &stringify->replacer, &length); + ret = njs_object_length(stringify->vm, &stringify->replacer, &length); if (njs_slow_path(ret != NJS_OK)) { return ret; } - properties = njs_array_alloc(vm, 1, 0, NJS_ARRAY_SPARE); + properties = njs_array_alloc(stringify->vm, 1, 0, NJS_ARRAY_SPARE); if (njs_slow_path(properties == NULL)) { return NJS_ERROR; } - item = njs_array_push(vm, properties); + item = njs_array_push(stringify->vm, properties); njs_value_assign(item, &njs_string_empty); for (i = 0; i < length; i++) { - ret = njs_value_property_i64(vm, &stringify->replacer, i, + ret = njs_value_property_i64(stringify->vm, &stringify->replacer, i, &stringify->retval); if (njs_slow_path(ret == NJS_ERROR)) { return ret; @@ -1356,7 +1355,7 @@ njs_json_stringify_array(njs_vm_t *vm, n break; case NJS_NUMBER: - ret = njs_number_to_string(vm, value, value); + ret = njs_number_to_string(stringify->vm, value, value); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } @@ -1367,7 +1366,7 @@ njs_json_stringify_array(njs_vm_t *vm, n switch (njs_object_value(value)->type) { case NJS_NUMBER: case NJS_STRING: - ret = njs_value_to_string(vm, value, value); + ret = njs_value_to_string(stringify->vm, value, value); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } @@ -1391,7 +1390,7 @@ njs_json_stringify_array(njs_vm_t *vm, n } if (k == properties->length) { - item = njs_array_push(vm, properties); + item = njs_array_push(stringify->vm, properties); if (njs_slow_path(item == NULL)) { return NJS_ERROR; } @@ -2006,7 +2005,7 @@ njs_vm_value_dump(njs_vm_t *vm, njs_str_ 
njs_memset(stringify->space.start, ' ', indent); - state = njs_json_push_stringify_state(vm, stringify, value); + state = njs_json_push_stringify_state(stringify, value); if (njs_slow_path(state == NULL)) { goto memory_error; } @@ -2127,7 +2126,7 @@ njs_vm_value_dump(njs_vm_t *vm, njs_str_ continue; } - state = njs_json_push_stringify_state(vm, stringify, val); + state = njs_json_push_stringify_state(stringify, val); if (njs_slow_path(state == NULL)) { goto exception; } From pluknet at nginx.com Tue Dec 12 13:17:31 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 12 Dec 2023 17:17:31 +0400 Subject: [PATCH 1 of 3] Stream: socket peek in preread phase In-Reply-To: <966331bb4936888ef2f0.1699610839@arut-laptop> References: <966331bb4936888ef2f0.1699610839@arut-laptop> Message-ID: > On 10 Nov 2023, at 14:07, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1699456644 -14400 > # Wed Nov 08 19:17:24 2023 +0400 > # Node ID 966331bb4936888ef2f034aa2700c130514d0b57 > # Parent 7ec761f0365f418511e30b82e9adf80bc56681df > Stream: socket peek in preread phase. > > Previously, preread buffer was always read out from socket, which made it > impossible to terminate SSL on the connection without introducing additional > SSL BIOs. The following patches will rely on this. > > Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. > It's called if SSL is not already terminated and if an egde-triggered event > method is used. For epoll, EPOLLRDHUP support is also required. Not sure if it is a good way to introduce new functionality that depends on connection processing methods. > > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -10,6 +10,10 @@ > #include > > > +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > + ngx_stream_phase_handler_t *ph); > +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > + ngx_stream_phase_handler_t *ph); > static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); > static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); > static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); > @@ -203,8 +207,6 @@ ngx_int_t > ngx_stream_core_preread_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph) > { > - size_t size; > - ssize_t n; > ngx_int_t rc; > ngx_connection_t *c; > ngx_stream_core_srv_conf_t *cscf; > @@ -217,56 +219,40 @@ ngx_stream_core_preread_phase(ngx_stream > > if (c->read->timedout) { > rc = NGX_STREAM_OK; > + goto done; > + } > > - } else if (c->read->timer_set) { > - rc = NGX_AGAIN; > + if (!c->read->timer_set) { > + rc = ph->handler(s); > > - } else { > - rc = ph->handler(s); > + if (rc != NGX_AGAIN) { > + goto done; > + } > } > > - while (rc == NGX_AGAIN) { > - > + if (c->buffer == NULL) { > + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > if (c->buffer == NULL) { > - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > - if (c->buffer == NULL) { > - rc = NGX_ERROR; > - break; > - } > + rc = NGX_ERROR; > + goto done; > } > - > - size = c->buffer->end - c->buffer->last; > - > - if (size == 0) { > - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > - rc = NGX_STREAM_BAD_REQUEST; > - break; > - } > + } > > - if (c->read->eof) { > - rc = NGX_STREAM_OK; > - break; > - } > - > - if (!c->read->ready) { > - break; > - } > - > - n = c->recv(c, 
c->buffer->last, size); > + if (c->ssl == NULL > + && (ngx_event_flags & NGX_USE_CLEAR_EVENT) > + && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 > +#if (NGX_HAVE_EPOLLRDHUP) > + || ngx_use_epoll_rdhup > +#endif > + )) > + { > + rc = ngx_stream_preread_peek(s, ph); > > - if (n == NGX_ERROR || n == 0) { > - rc = NGX_STREAM_OK; > - break; > - } > + } else { > + rc = ngx_stream_preread(s, ph); > + } > > - if (n == NGX_AGAIN) { > - break; > - } > - > - c->buffer->last += n; > - > - rc = ph->handler(s); > - } > +done: > > if (rc == NGX_AGAIN) { > if (ngx_handle_read_event(c->read, 0) != NGX_OK) { > @@ -311,6 +297,95 @@ ngx_stream_core_preread_phase(ngx_stream > } > > > +static ngx_int_t > +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > +{ > + ssize_t n; > + ngx_int_t rc; > + ngx_err_t err; > + ngx_connection_t *c; > + > + c = s->connection; > + > + n = recv(c->fd, (char *) c->buffer->last, > + c->buffer->end - c->buffer->last, MSG_PEEK); > + > + err = ngx_socket_errno; > + > + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, typo: NGX_LOG_DEBUG_STREAM > + "stream recv(MSG_PEEK): %z", n); Nitpicking: I couldn't find precedence to log "MSG_PEEK", e.g.: src/mail/ngx_mail_handler.c: n = recv(c->fd, (char *) buf, sizeof(buf), MSG_PEEK); src/mail/ngx_mail_handler.c- src/mail/ngx_mail_handler.c- err = ngx_socket_errno; src/mail/ngx_mail_handler.c- src/mail/ngx_mail_handler.c- ngx_log_debug1(NGX_LOG_DEBUG_MAIL, c->log, 0, "recv(): %z", n); src/mail/ngx_mail_handler.c- -- src/stream/ngx_stream_handler.c: n = recv(c->fd, (char *) buf, sizeof(buf), MSG_PEEK); src/stream/ngx_stream_handler.c- src/stream/ngx_stream_handler.c- err = ngx_socket_errno; src/stream/ngx_stream_handler.c- src/stream/ngx_stream_handler.c- ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "recv(): %z", n); src/stream/ngx_stream_handler.c- Might be "stream recv(): %z" or just "recv(): %z" is enough. > + > + if (n == -1) { > + if (err == NGX_EAGAIN) { You don't reset c->read->ready, which introduces a bad pattern. > + return NGX_AGAIN; > + } > + > + ngx_connection_error(c, err, "recv() failed"); > + return NGX_STREAM_OK; > + } > + > + if (n == 0) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last += n; > + > + rc = ph->handler(s); > + > + if (rc == NGX_AGAIN && c->buffer->last == c->buffer->end) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > + return NGX_STREAM_BAD_REQUEST; > + } > + > + if (rc == NGX_AGAIN && c->read->pending_eof) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last = c->buffer->pos; > + > + return rc; > +} Don't you want to make ngx_stream_preread_peek() more similar to ngx_stream_preread() ? 
Something like this: rc = ph->handler(s); if (rc != NGX_AGAIN) { c->buffer->last = c->buffer->pos; return rc; } if (c->buffer->last == c->buffer->end) { ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); return NGX_STREAM_BAD_REQUEST; } if (c->read->pending_eof) { return NGX_STREAM_OK; } c->buffer->last = c->buffer->pos; return NGX_AGAIN; > + > + > +static ngx_int_t > +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > +{ > + ssize_t n; > + ngx_int_t rc; > + ngx_connection_t *c; > + > + c = s->connection; > + > + while (c->read->ready) { > + > + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); > + > + if (n == NGX_AGAIN) { > + return NGX_AGAIN; > + } > + > + if (n == NGX_ERROR || n == 0) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last += n; > + > + rc = ph->handler(s); > + > + if (rc != NGX_AGAIN) { > + return rc; > + } > + > + if (c->buffer->last == c->buffer->end) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > + return NGX_STREAM_BAD_REQUEST; > + } > + } > + > + return NGX_AGAIN; > +} > + > + > ngx_int_t > ngx_stream_core_content_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph) > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Sergey Kandaurov From arut at nginx.com Tue Dec 12 13:47:43 2023 From: arut at nginx.com (=?utf-8?q?Roman_Arutyunyan?=) Date: Tue, 12 Dec 2023 13:47:43 +0000 Subject: [nginx] QUIC: avoid partial expansion of PATH_CHALLENGE/PATH_RESPONSE. Message-ID: details: https://hg.nginx.org/nginx/rev/fcec773dd249 branches: changeset: 9189:fcec773dd249 user: Roman Arutyunyan date: Wed Nov 29 18:13:25 2023 +0400 description: QUIC: avoid partial expansion of PATH_CHALLENGE/PATH_RESPONSE. By default packets with these frames are expanded to 1200 bytes. Previously, if anti-amplification limit did not allow this expansion, it was limited to whatever size was allowed. However RFC 9000 clearly states no partial expansion should happen in both cases. Section 8.2.1. Initiating Path Validation: An endpoint MUST expand datagrams that contain a PATH_CHALLENGE frame to at least the smallest allowed maximum datagram size of 1200 bytes, unless the anti-amplification limit for the path does not permit sending a datagram of this size. Section 8.2.2. Path Validation Responses: An endpoint MUST expand datagrams that contain a PATH_RESPONSE frame to at least the smallest allowed maximum datagram size of 1200 bytes. ... However, an endpoint MUST NOT expand the datagram containing the PATH_RESPONSE if the resulting data exceeds the anti-amplification limit. 
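
As an illustration of the rule above (standalone C, not code from this changeset, with made-up names): before a path is validated a server may send at most three times the bytes received on it, and a datagram carrying PATH_CHALLENGE/PATH_RESPONSE is either padded to the full 1200 bytes or left unpadded, never partially expanded.

    #include <stdio.h>

    /* amplification quota left on an unvalidated path (RFC 9000, Section 8) */
    static size_t
    path_quota(size_t received, size_t sent)
    {
        size_t  max = 3 * received;

        return (sent >= max) ? 0 : max - sent;
    }

    int
    main(void)
    {
        size_t  quota = path_quota(500, 400);       /* 1100 bytes left */
        size_t  pad = (quota < 1200) ? 0 : 1200;    /* all or nothing  */

        printf("quota:%zu pad:%zu\n", quota, pad);
        return 0;
    }
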
diffstat: src/event/quic/ngx_event_quic_connection.h | 3 +- src/event/quic/ngx_event_quic_migration.c | 53 +++++++++++++++-------------- src/event/quic/ngx_event_quic_output.c | 4 +- src/event/quic/ngx_event_quic_output.h | 2 + 4 files changed, 32 insertions(+), 30 deletions(-) diffs (147 lines): diff -r b05c622715fa -r fcec773dd249 src/event/quic/ngx_event_quic_connection.h --- a/src/event/quic/ngx_event_quic_connection.h Wed Nov 29 11:13:05 2023 +0300 +++ b/src/event/quic/ngx_event_quic_connection.h Wed Nov 29 18:13:25 2023 +0400 @@ -106,8 +106,7 @@ struct ngx_quic_path_s { size_t max_mtu; off_t sent; off_t received; - u_char challenge1[8]; - u_char challenge2[8]; + u_char challenge[2][8]; uint64_t seqnum; uint64_t mtu_pnum[NGX_QUIC_PATH_RETRIES]; ngx_str_t addr_text; diff -r b05c622715fa -r fcec773dd249 src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c Wed Nov 29 11:13:05 2023 +0300 +++ b/src/event/quic/ngx_event_quic_migration.c Wed Nov 29 18:13:25 2023 +0400 @@ -36,6 +36,7 @@ ngx_int_t ngx_quic_handle_path_challenge_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_path_challenge_frame_t *f) { + size_t min; ngx_quic_frame_t frame, *fp; ngx_quic_connection_t *qc; @@ -57,8 +58,14 @@ ngx_quic_handle_path_challenge_frame(ngx /* * An endpoint MUST expand datagrams that contain a PATH_RESPONSE frame * to at least the smallest allowed maximum datagram size of 1200 bytes. + * ... + * However, an endpoint MUST NOT expand the datagram containing the + * PATH_RESPONSE if the resulting data exceeds the anti-amplification limit. */ - if (ngx_quic_frame_sendto(c, &frame, 1200, pkt->path) == NGX_ERROR) { + + min = (ngx_quic_path_limit(c, pkt->path, 1200) < 1200) ? 0 : 1200; + + if (ngx_quic_frame_sendto(c, &frame, min, pkt->path) == NGX_ERROR) { return NGX_ERROR; } @@ -113,8 +120,8 @@ ngx_quic_handle_path_response_frame(ngx_ continue; } - if (ngx_memcmp(path->challenge1, f->data, sizeof(f->data)) == 0 - || ngx_memcmp(path->challenge2, f->data, sizeof(f->data)) == 0) + if (ngx_memcmp(path->challenge[0], f->data, sizeof(f->data)) == 0 + || ngx_memcmp(path->challenge[1], f->data, sizeof(f->data)) == 0) { goto valid; } @@ -510,11 +517,7 @@ ngx_quic_validate_path(ngx_connection_t path->tries = 0; - if (RAND_bytes(path->challenge1, 8) != 1) { - return NGX_ERROR; - } - - if (RAND_bytes(path->challenge2, 8) != 1) { + if (RAND_bytes((u_char *) path->challenge, sizeof(path->challenge)) != 1) { return NGX_ERROR; } @@ -535,6 +538,8 @@ ngx_quic_validate_path(ngx_connection_t static ngx_int_t ngx_quic_send_path_challenge(ngx_connection_t *c, ngx_quic_path_t *path) { + size_t min; + ngx_uint_t n; ngx_quic_frame_t frame; ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, @@ -546,26 +551,24 @@ ngx_quic_send_path_challenge(ngx_connect frame.level = ssl_encryption_application; frame.type = NGX_QUIC_FT_PATH_CHALLENGE; - ngx_memcpy(frame.u.path_challenge.data, path->challenge1, 8); + for (n = 0; n < 2; n++) { + + ngx_memcpy(frame.u.path_challenge.data, path->challenge[n], 8); - /* - * RFC 9000, 8.2.1. Initiating Path Validation - * - * An endpoint MUST expand datagrams that contain a PATH_CHALLENGE frame - * to at least the smallest allowed maximum datagram size of 1200 bytes, - * unless the anti-amplification limit for the path does not permit - * sending a datagram of this size. - */ + /* + * RFC 9000, 8.2.1. 
Initiating Path Validation + * + * An endpoint MUST expand datagrams that contain a PATH_CHALLENGE frame + * to at least the smallest allowed maximum datagram size of 1200 bytes, + * unless the anti-amplification limit for the path does not permit + * sending a datagram of this size. + */ - /* same applies to PATH_RESPONSE frames */ - if (ngx_quic_frame_sendto(c, &frame, 1200, path) == NGX_ERROR) { - return NGX_ERROR; - } + min = (ngx_quic_path_limit(c, path, 1200) < 1200) ? 0 : 1200; - ngx_memcpy(frame.u.path_challenge.data, path->challenge2, 8); - - if (ngx_quic_frame_sendto(c, &frame, 1200, path) == NGX_ERROR) { - return NGX_ERROR; + if (ngx_quic_frame_sendto(c, &frame, min, path) == NGX_ERROR) { + return NGX_ERROR; + } } return NGX_OK; diff -r b05c622715fa -r fcec773dd249 src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c Wed Nov 29 11:13:05 2023 +0300 +++ b/src/event/quic/ngx_event_quic_output.c Wed Nov 29 18:13:25 2023 +0400 @@ -63,8 +63,6 @@ static ssize_t ngx_quic_send(ngx_connect struct sockaddr *sockaddr, socklen_t socklen); static void ngx_quic_set_packet_number(ngx_quic_header_t *pkt, ngx_quic_send_ctx_t *ctx); -static size_t ngx_quic_path_limit(ngx_connection_t *c, ngx_quic_path_t *path, - size_t size); ngx_int_t @@ -1250,7 +1248,7 @@ ngx_quic_frame_sendto(ngx_connection_t * } -static size_t +size_t ngx_quic_path_limit(ngx_connection_t *c, ngx_quic_path_t *path, size_t size) { off_t max; diff -r b05c622715fa -r fcec773dd249 src/event/quic/ngx_event_quic_output.h --- a/src/event/quic/ngx_event_quic_output.h Wed Nov 29 11:13:05 2023 +0300 +++ b/src/event/quic/ngx_event_quic_output.h Wed Nov 29 18:13:25 2023 +0400 @@ -34,5 +34,7 @@ ngx_int_t ngx_quic_send_ack_range(ngx_co ngx_int_t ngx_quic_frame_sendto(ngx_connection_t *c, ngx_quic_frame_t *frame, size_t min, ngx_quic_path_t *path); +size_t ngx_quic_path_limit(ngx_connection_t *c, ngx_quic_path_t *path, + size_t size); #endif /* _NGX_EVENT_QUIC_OUTPUT_H_INCLUDED_ */ From arut at nginx.com Tue Dec 12 13:47:46 2023 From: arut at nginx.com (=?utf-8?q?Roman_Arutyunyan?=) Date: Tue, 12 Dec 2023 13:47:46 +0000 Subject: [nginx] QUIC: fixed anti-amplification with explicit send. Message-ID: details: https://hg.nginx.org/nginx/rev/3a67dd34b6cc branches: changeset: 9190:3a67dd34b6cc user: Roman Arutyunyan date: Wed Nov 22 14:52:21 2023 +0400 description: QUIC: fixed anti-amplification with explicit send. Previously, when using ngx_quic_frame_sendto() to explicitly send a packet with a single frame, anti-amplification limit was not properly enforced. Even when there was no quota left for the packet, it was sent anyway, but with no padding. Now the packet is not sent at all. This function is called to send PATH_CHALLENGE/PATH_RESPONSE, PMTUD and probe packets. For all these cases packet send is retried later in case the send was not successful. 
diffstat: src/event/quic/ngx_event_quic_migration.c | 6 ++++++ src/event/quic/ngx_event_quic_output.c | 24 ++++++++++++++++-------- 2 files changed, 22 insertions(+), 8 deletions(-) diffs (85 lines): diff -r fcec773dd249 -r 3a67dd34b6cc src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c Wed Nov 29 18:13:25 2023 +0400 +++ b/src/event/quic/ngx_event_quic_migration.c Wed Nov 22 14:52:21 2023 +0400 @@ -872,6 +872,7 @@ ngx_quic_expire_path_mtu_discovery(ngx_c static ngx_int_t ngx_quic_send_path_mtu_probe(ngx_connection_t *c, ngx_quic_path_t *path) { + size_t mtu; ngx_int_t rc; ngx_uint_t log_error; ngx_quic_frame_t frame; @@ -895,7 +896,12 @@ ngx_quic_send_path_mtu_probe(ngx_connect log_error = c->log_error; c->log_error = NGX_ERROR_IGNORE_EMSGSIZE; + mtu = path->mtu; + path->mtu = path->mtud; + rc = ngx_quic_frame_sendto(c, &frame, path->mtud, path); + + path->mtu = mtu; c->log_error = log_error; if (rc == NGX_ERROR) { diff -r fcec773dd249 -r 3a67dd34b6cc src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c Wed Nov 29 18:13:25 2023 +0400 +++ b/src/event/quic/ngx_event_quic_output.c Wed Nov 22 14:52:21 2023 +0400 @@ -1181,7 +1181,7 @@ ngx_int_t ngx_quic_frame_sendto(ngx_connection_t *c, ngx_quic_frame_t *frame, size_t min, ngx_quic_path_t *path) { - size_t min_payload, pad; + size_t max, max_payload, min_payload, pad; ssize_t len, sent; ngx_str_t res; ngx_quic_header_t pkt; @@ -1194,15 +1194,25 @@ ngx_quic_frame_sendto(ngx_connection_t * qc = ngx_quic_get_connection(c); ctx = ngx_quic_get_send_ctx(qc, frame->level); + max = ngx_quic_path_limit(c, path, path->mtu); + + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic sendto %s packet max:%uz min:%uz", + ngx_quic_level_name(ctx->level), max, min); + ngx_quic_init_packet(c, ctx, &pkt, path); - min = ngx_quic_path_limit(c, path, min); + min_payload = ngx_quic_payload_size(&pkt, min); + max_payload = ngx_quic_payload_size(&pkt, max); - min_payload = min ? ngx_quic_payload_size(&pkt, min) : 0; - + /* RFC 9001, 5.4.2. Header Protection Sample */ pad = 4 - pkt.num_len; min_payload = ngx_max(min_payload, pad); + if (min_payload > max_payload) { + return NGX_AGAIN; + } + #if (NGX_DEBUG) frame->pnum = pkt.number; #endif @@ -1210,8 +1220,8 @@ ngx_quic_frame_sendto(ngx_connection_t * ngx_quic_log_frame(c->log, frame, 1); len = ngx_quic_create_frame(NULL, frame); - if (len > NGX_QUIC_MAX_UDP_PAYLOAD_SIZE) { - return NGX_ERROR; + if ((size_t) len > max_payload) { + return NGX_AGAIN; } len = ngx_quic_create_frame(src, frame); @@ -1258,8 +1268,6 @@ ngx_quic_path_limit(ngx_connection_t *c, max = (path->sent >= max) ? 0 : max - path->sent; if ((off_t) size > max) { - ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic path limit %uz - %O", size, max); return max; } } From arut at nginx.com Tue Dec 12 13:47:49 2023 From: arut at nginx.com (=?utf-8?q?Roman_Arutyunyan?=) Date: Tue, 12 Dec 2023 13:47:49 +0000 Subject: [nginx] QUIC: ignore duplicate PATH_CHALLENGE frames. Message-ID: details: https://hg.nginx.org/nginx/rev/618132842e7c branches: changeset: 9191:618132842e7c user: Roman Arutyunyan date: Wed Nov 22 14:48:12 2023 +0400 description: QUIC: ignore duplicate PATH_CHALLENGE frames. According to RFC 9000, an endpoint SHOULD NOT send multiple PATH_CHALLENGE frames in a single packet. The change adds a check to enforce this claim to optimize server behavior. Previously each PATH_CHALLENGE always resulted in a single response datagram being sent to client. 
The effect of this was however limited by QUIC flood protection. Also, PATH_CHALLENGE is explicitly disabled in Initial and Handshake levels, see RFC 9000, Table 3. However, technically it may be sent by client in 0-RTT over a new path without actual migration, even though the migration itself is prohibited during handshake. This allows client to coalesce multiple 0-RTT packets each carrying a PATH_CHALLENGE and end up with multiple PATH_CHALLENGEs per datagram. This again leads to suboptimal behavior, see above. Since the purpose of sending PATH_CHALLENGE frames in 0-RTT is unclear, these frames are now only allowed in 1-RTT. For 0-RTT they are silently ignored. diffstat: src/event/quic/ngx_event_quic_migration.c | 8 ++++++++ src/event/quic/ngx_event_quic_transport.h | 1 + 2 files changed, 9 insertions(+), 0 deletions(-) diffs (29 lines): diff -r 3a67dd34b6cc -r 618132842e7c src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c Wed Nov 22 14:52:21 2023 +0400 +++ b/src/event/quic/ngx_event_quic_migration.c Wed Nov 22 14:48:12 2023 +0400 @@ -40,6 +40,14 @@ ngx_quic_handle_path_challenge_frame(ngx ngx_quic_frame_t frame, *fp; ngx_quic_connection_t *qc; + if (pkt->level != ssl_encryption_application || pkt->path_challenged) { + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic ignoring PATH_CHALLENGE"); + return NGX_OK; + } + + pkt->path_challenged = 1; + qc = ngx_quic_get_connection(c); ngx_memzero(&frame, sizeof(ngx_quic_frame_t)); diff -r 3a67dd34b6cc -r 618132842e7c src/event/quic/ngx_event_quic_transport.h --- a/src/event/quic/ngx_event_quic_transport.h Wed Nov 22 14:52:21 2023 +0400 +++ b/src/event/quic/ngx_event_quic_transport.h Wed Nov 22 14:48:12 2023 +0400 @@ -336,6 +336,7 @@ typedef struct { unsigned retried:1; unsigned first:1; unsigned rebound:1; + unsigned path_challenged:1; } ngx_quic_header_t; From arut at nginx.com Tue Dec 12 13:47:52 2023 From: arut at nginx.com (=?utf-8?q?Roman_Arutyunyan?=) Date: Tue, 12 Dec 2023 13:47:52 +0000 Subject: [nginx] QUIC: congestion control in ngx_quic_frame_sendto(). Message-ID: details: https://hg.nginx.org/nginx/rev/efcdaa66df2e branches: changeset: 9192:efcdaa66df2e user: Roman Arutyunyan date: Wed Nov 29 21:41:29 2023 +0400 description: QUIC: congestion control in ngx_quic_frame_sendto(). Previously ngx_quic_frame_sendto() ignored congestion control and did not contribute to in_flight counter. Now congestion control window is checked unless ignore_congestion flag is set. Also, in_flight counter is incremented and the frame is stored in ctx->sent queue if it's ack-eliciting. This behavior is now similar to ngx_quic_output_packet(). 
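
A minimal sketch of the accounting described above (simplified standalone C with hypothetical types; the actual logic is in the diff below):

    #include <stdio.h>

    typedef struct {
        size_t  in_flight;    /* bytes sent but not yet acknowledged */
        size_t  window;       /* congestion window                   */
    } cc_t;

    /* Returns 0 if the frame may be sent now, -1 if the send must be
     * deferred.  Ack-eliciting frames are counted as in flight until
     * acknowledged or declared lost. */
    static int
    send_frame(cc_t *cg, size_t plen, int ack_eliciting, int ignore_congestion)
    {
        if (cg->in_flight >= cg->window && !ignore_congestion) {
            return -1;
        }

        /* ... build, encrypt and send the single-frame packet here ... */

        if (ack_eliciting) {
            cg->in_flight += plen;
        }

        return 0;
    }

    int
    main(void)
    {
        cc_t  cg = { 0, 14720 };    /* initial window value used elsewhere */

        printf("rc:%d in_flight:%zu\n",
               send_frame(&cg, 1200, 1, 0), cg.in_flight);
        return 0;
    }
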
diffstat: src/event/quic/ngx_event_quic_ack.c | 32 ++++++++++---- src/event/quic/ngx_event_quic_migration.c | 51 ++++++++++++++---------- src/event/quic/ngx_event_quic_output.c | 65 ++++++++++++++++++++++++++---- src/event/quic/ngx_event_quic_transport.h | 1 + 4 files changed, 108 insertions(+), 41 deletions(-) diffs (344 lines): diff -r 618132842e7c -r efcdaa66df2e src/event/quic/ngx_event_quic_ack.c --- a/src/event/quic/ngx_event_quic_ack.c Wed Nov 22 14:48:12 2023 +0400 +++ b/src/event/quic/ngx_event_quic_ack.c Wed Nov 29 21:41:29 2023 +0400 @@ -593,6 +593,7 @@ ngx_quic_resend_frames(ngx_connection_t break; case NGX_QUIC_FT_PING: + case NGX_QUIC_FT_PATH_CHALLENGE: case NGX_QUIC_FT_PATH_RESPONSE: case NGX_QUIC_FT_CONNECTION_CLOSE: ngx_quic_free_frame(c, f); @@ -824,11 +825,11 @@ void ngx_quic_lost_handler(ngx_event_t * void ngx_quic_pto_handler(ngx_event_t *ev) { - ngx_uint_t i; + ngx_uint_t i, n; ngx_msec_t now; ngx_queue_t *q; ngx_connection_t *c; - ngx_quic_frame_t *f, frame; + ngx_quic_frame_t *f; ngx_quic_send_ctx_t *ctx; ngx_quic_connection_t *qc; @@ -865,16 +866,20 @@ ngx_quic_pto_handler(ngx_event_t *ev) "quic pto %s pto_count:%ui", ngx_quic_level_name(ctx->level), qc->pto_count); - ngx_memzero(&frame, sizeof(ngx_quic_frame_t)); + for (n = 0; n < 2; n++) { - frame.level = ctx->level; - frame.type = NGX_QUIC_FT_PING; + f = ngx_quic_alloc_frame(c); + if (f == NULL) { + goto failed; + } - if (ngx_quic_frame_sendto(c, &frame, 0, qc->path) != NGX_OK - || ngx_quic_frame_sendto(c, &frame, 0, qc->path) != NGX_OK) - { - ngx_quic_close_connection(c, NGX_ERROR); - return; + f->level = ctx->level; + f->type = NGX_QUIC_FT_PING; + f->ignore_congestion = 1; + + if (ngx_quic_frame_sendto(c, f, 0, qc->path) == NGX_ERROR) { + goto failed; + } } } @@ -883,6 +888,13 @@ ngx_quic_pto_handler(ngx_event_t *ev) ngx_quic_set_lost_timer(c); ngx_quic_connstate_dbg(c); + + return; + +failed: + + ngx_quic_close_connection(c, NGX_ERROR); + return; } diff -r 618132842e7c -r efcdaa66df2e src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c Wed Nov 22 14:48:12 2023 +0400 +++ b/src/event/quic/ngx_event_quic_migration.c Wed Nov 29 21:41:29 2023 +0400 @@ -37,7 +37,7 @@ ngx_quic_handle_path_challenge_frame(ngx ngx_quic_header_t *pkt, ngx_quic_path_challenge_frame_t *f) { size_t min; - ngx_quic_frame_t frame, *fp; + ngx_quic_frame_t *fp; ngx_quic_connection_t *qc; if (pkt->level != ssl_encryption_application || pkt->path_challenged) { @@ -50,11 +50,14 @@ ngx_quic_handle_path_challenge_frame(ngx qc = ngx_quic_get_connection(c); - ngx_memzero(&frame, sizeof(ngx_quic_frame_t)); + fp = ngx_quic_alloc_frame(c); + if (fp == NULL) { + return NGX_ERROR; + } - frame.level = ssl_encryption_application; - frame.type = NGX_QUIC_FT_PATH_RESPONSE; - frame.u.path_response = *f; + fp->level = ssl_encryption_application; + fp->type = NGX_QUIC_FT_PATH_RESPONSE; + fp->u.path_response = *f; /* * RFC 9000, 8.2.2. Path Validation Responses @@ -73,7 +76,7 @@ ngx_quic_handle_path_challenge_frame(ngx min = (ngx_quic_path_limit(c, pkt->path, 1200) < 1200) ? 
0 : 1200; - if (ngx_quic_frame_sendto(c, &frame, min, pkt->path) == NGX_ERROR) { + if (ngx_quic_frame_sendto(c, fp, min, pkt->path) == NGX_ERROR) { return NGX_ERROR; } @@ -546,22 +549,25 @@ ngx_quic_validate_path(ngx_connection_t static ngx_int_t ngx_quic_send_path_challenge(ngx_connection_t *c, ngx_quic_path_t *path) { - size_t min; - ngx_uint_t n; - ngx_quic_frame_t frame; + size_t min; + ngx_uint_t n; + ngx_quic_frame_t *frame; ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic path seq:%uL send path_challenge tries:%ui", path->seqnum, path->tries); - ngx_memzero(&frame, sizeof(ngx_quic_frame_t)); - - frame.level = ssl_encryption_application; - frame.type = NGX_QUIC_FT_PATH_CHALLENGE; - for (n = 0; n < 2; n++) { - ngx_memcpy(frame.u.path_challenge.data, path->challenge[n], 8); + frame = ngx_quic_alloc_frame(c); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_PATH_CHALLENGE; + + ngx_memcpy(frame->u.path_challenge.data, path->challenge[n], 8); /* * RFC 9000, 8.2.1. Initiating Path Validation @@ -574,7 +580,7 @@ ngx_quic_send_path_challenge(ngx_connect min = (ngx_quic_path_limit(c, path, 1200) < 1200) ? 0 : 1200; - if (ngx_quic_frame_sendto(c, &frame, min, path) == NGX_ERROR) { + if (ngx_quic_frame_sendto(c, frame, min, path) == NGX_ERROR) { return NGX_ERROR; } } @@ -883,14 +889,17 @@ ngx_quic_send_path_mtu_probe(ngx_connect size_t mtu; ngx_int_t rc; ngx_uint_t log_error; - ngx_quic_frame_t frame; + ngx_quic_frame_t *frame; ngx_quic_send_ctx_t *ctx; ngx_quic_connection_t *qc; - ngx_memzero(&frame, sizeof(ngx_quic_frame_t)); + frame = ngx_quic_alloc_frame(c); + if (frame == NULL) { + return NGX_ERROR; + } - frame.level = ssl_encryption_application; - frame.type = NGX_QUIC_FT_PING; + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_PING; qc = ngx_quic_get_connection(c); ctx = ngx_quic_get_send_ctx(qc, ssl_encryption_application); @@ -907,7 +916,7 @@ ngx_quic_send_path_mtu_probe(ngx_connect mtu = path->mtu; path->mtu = path->mtud; - rc = ngx_quic_frame_sendto(c, &frame, path->mtud, path); + rc = ngx_quic_frame_sendto(c, frame, path->mtud, path); path->mtu = mtu; c->log_error = log_error; diff -r 618132842e7c -r efcdaa66df2e src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c Wed Nov 22 14:48:12 2023 +0400 +++ b/src/event/quic/ngx_event_quic_output.c Wed Nov 29 21:41:29 2023 +0400 @@ -844,7 +844,7 @@ ngx_quic_send_stateless_reset(ngx_connec ngx_int_t ngx_quic_send_cc(ngx_connection_t *c) { - ngx_quic_frame_t frame; + ngx_quic_frame_t *frame; ngx_quic_connection_t *qc; qc = ngx_quic_get_connection(c); @@ -860,22 +860,27 @@ ngx_quic_send_cc(ngx_connection_t *c) return NGX_OK; } - ngx_memzero(&frame, sizeof(ngx_quic_frame_t)); + frame = ngx_quic_alloc_frame(c); + if (frame == NULL) { + return NGX_ERROR; + } - frame.level = qc->error_level; - frame.type = qc->error_app ? NGX_QUIC_FT_CONNECTION_CLOSE_APP - : NGX_QUIC_FT_CONNECTION_CLOSE; - frame.u.close.error_code = qc->error; - frame.u.close.frame_type = qc->error_ftype; + frame->level = qc->error_level; + frame->type = qc->error_app ? 
NGX_QUIC_FT_CONNECTION_CLOSE_APP + : NGX_QUIC_FT_CONNECTION_CLOSE; + frame->u.close.error_code = qc->error; + frame->u.close.frame_type = qc->error_ftype; if (qc->error_reason) { - frame.u.close.reason.len = ngx_strlen(qc->error_reason); - frame.u.close.reason.data = (u_char *) qc->error_reason; + frame->u.close.reason.len = ngx_strlen(qc->error_reason); + frame->u.close.reason.data = (u_char *) qc->error_reason; } + frame->ignore_congestion = 1; + qc->last_cc = ngx_current_msec; - return ngx_quic_frame_sendto(c, &frame, 0, qc->path); + return ngx_quic_frame_sendto(c, frame, 0, qc->path); } @@ -1184,22 +1189,32 @@ ngx_quic_frame_sendto(ngx_connection_t * size_t max, max_payload, min_payload, pad; ssize_t len, sent; ngx_str_t res; + ngx_msec_t now; ngx_quic_header_t pkt; ngx_quic_send_ctx_t *ctx; + ngx_quic_congestion_t *cg; ngx_quic_connection_t *qc; static u_char src[NGX_QUIC_MAX_UDP_PAYLOAD_SIZE]; static u_char dst[NGX_QUIC_MAX_UDP_PAYLOAD_SIZE]; qc = ngx_quic_get_connection(c); + cg = &qc->congestion; ctx = ngx_quic_get_send_ctx(qc, frame->level); + now = ngx_current_msec; + max = ngx_quic_path_limit(c, path, path->mtu); ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic sendto %s packet max:%uz min:%uz", ngx_quic_level_name(ctx->level), max, min); + if (cg->in_flight >= cg->window && !frame->ignore_congestion) { + ngx_quic_free_frame(c, frame); + return NGX_AGAIN; + } + ngx_quic_init_packet(c, ctx, &pkt, path); min_payload = ngx_quic_payload_size(&pkt, min); @@ -1210,6 +1225,7 @@ ngx_quic_frame_sendto(ngx_connection_t * min_payload = ngx_max(min_payload, pad); if (min_payload > max_payload) { + ngx_quic_free_frame(c, frame); return NGX_AGAIN; } @@ -1221,11 +1237,13 @@ ngx_quic_frame_sendto(ngx_connection_t * len = ngx_quic_create_frame(NULL, frame); if ((size_t) len > max_payload) { + ngx_quic_free_frame(c, frame); return NGX_AGAIN; } len = ngx_quic_create_frame(src, frame); if (len == -1) { + ngx_quic_free_frame(c, frame); return NGX_ERROR; } @@ -1242,18 +1260,45 @@ ngx_quic_frame_sendto(ngx_connection_t * ngx_quic_log_packet(c->log, &pkt); if (ngx_quic_encrypt(&pkt, &res) != NGX_OK) { + ngx_quic_free_frame(c, frame); return NGX_ERROR; } + frame->pnum = ctx->pnum; + frame->first = now; + frame->last = now; + frame->plen = res.len; + ctx->pnum++; sent = ngx_quic_send(c, res.data, res.len, path->sockaddr, path->socklen); if (sent < 0) { + ngx_quic_free_frame(c, frame); return sent; } path->sent += sent; + if (frame->need_ack && !qc->closing) { + ngx_queue_insert_tail(&ctx->sent, &frame->queue); + + cg->in_flight += frame->plen; + + } else { + ngx_quic_free_frame(c, frame); + return NGX_OK; + } + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic congestion send if:%uz", cg->in_flight); + + if (!qc->send_timer_set) { + qc->send_timer_set = 1; + ngx_add_timer(c->read, qc->tp.max_idle_timeout); + } + + ngx_quic_set_lost_timer(c); + return NGX_OK; } diff -r 618132842e7c -r efcdaa66df2e src/event/quic/ngx_event_quic_transport.h --- a/src/event/quic/ngx_event_quic_transport.h Wed Nov 22 14:48:12 2023 +0400 +++ b/src/event/quic/ngx_event_quic_transport.h Wed Nov 29 21:41:29 2023 +0400 @@ -271,6 +271,7 @@ struct ngx_quic_frame_s { ssize_t len; unsigned need_ack:1; unsigned pkt_need_ack:1; + unsigned ignore_congestion:1; ngx_chain_t *data; union { From arut at nginx.com Tue Dec 12 13:47:55 2023 From: arut at nginx.com (=?utf-8?q?Roman_Arutyunyan?=) Date: Tue, 12 Dec 2023 13:47:55 +0000 Subject: [nginx] QUIC: ngx_quic_frame_t time fields cleanup. 
Message-ID: details: https://hg.nginx.org/nginx/rev/ce1ff81e9b92 branches: changeset: 9193:ce1ff81e9b92 user: Roman Arutyunyan date: Thu Nov 30 15:03:06 2023 +0400 description: QUIC: ngx_quic_frame_t time fields cleanup. The field "first" is removed. It's unused since 909b989ec088. The field "last" is renamed to "send_time". It holds frame send time. diffstat: src/event/quic/ngx_event_quic_ack.c | 40 ++++++++++++++++-------------- src/event/quic/ngx_event_quic_output.c | 6 +--- src/event/quic/ngx_event_quic_transport.h | 3 +- 3 files changed, 24 insertions(+), 25 deletions(-) diffs (157 lines): diff -r efcdaa66df2e -r ce1ff81e9b92 src/event/quic/ngx_event_quic_ack.c --- a/src/event/quic/ngx_event_quic_ack.c Wed Nov 29 21:41:29 2023 +0400 +++ b/src/event/quic/ngx_event_quic_ack.c Thu Nov 30 15:03:06 2023 +0400 @@ -265,16 +265,16 @@ ngx_quic_handle_ack_frame_range(ngx_conn } if (f->pnum == max) { - st->max_pn = f->last; + st->max_pn = f->send_time; } /* save earliest and latest send times of frames ack'ed */ - if (st->oldest == NGX_TIMER_INFINITE || f->last < st->oldest) { - st->oldest = f->last; + if (st->oldest == NGX_TIMER_INFINITE || f->send_time < st->oldest) { + st->oldest = f->send_time; } - if (st->newest == NGX_TIMER_INFINITE || f->last > st->newest) { - st->newest = f->last; + if (st->newest == NGX_TIMER_INFINITE || f->send_time > st->newest) { + st->newest = f->send_time; } ngx_queue_remove(&f->queue); @@ -329,7 +329,7 @@ ngx_quic_congestion_ack(ngx_connection_t cg->in_flight -= f->plen; - timer = f->last - cg->recovery_start; + timer = f->send_time - cg->recovery_start; if ((ngx_msec_int_t) timer <= 0) { ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, @@ -465,7 +465,7 @@ ngx_quic_detect_lost(ngx_connection_t *c break; } - wait = start->last + thr - now; + wait = start->send_time + thr - now; ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic detect_lost pnum:%uL thr:%M wait:%i level:%d", @@ -477,14 +477,14 @@ ngx_quic_detect_lost(ngx_connection_t *c break; } - if (start->last > qc->first_rtt) { + if (start->send_time > qc->first_rtt) { - if (oldest == NGX_TIMER_INFINITE || start->last < oldest) { - oldest = start->last; + if (oldest == NGX_TIMER_INFINITE || start->send_time < oldest) { + oldest = start->send_time; } - if (newest == NGX_TIMER_INFINITE || start->last > newest) { - newest = start->last; + if (newest == NGX_TIMER_INFINITE || start->send_time > newest) { + newest = start->send_time; } nlost++; @@ -672,7 +672,7 @@ ngx_quic_congestion_lost(ngx_connection_ cg->in_flight -= f->plen; f->plen = 0; - timer = f->last - cg->recovery_start; + timer = f->send_time - cg->recovery_start; if ((ngx_msec_int_t) timer <= 0) { ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, @@ -730,7 +730,8 @@ ngx_quic_set_lost_timer(ngx_connection_t if (ctx->largest_ack != NGX_QUIC_UNSET_PN) { q = ngx_queue_head(&ctx->sent); f = ngx_queue_data(q, ngx_quic_frame_t, queue); - w = (ngx_msec_int_t) (f->last + ngx_quic_lost_threshold(qc) - now); + w = (ngx_msec_int_t) + (f->send_time + ngx_quic_lost_threshold(qc) - now); if (f->pnum <= ctx->largest_ack) { if (w < 0 || ctx->largest_ack - f->pnum >= NGX_QUIC_PKT_THR) { @@ -745,8 +746,8 @@ ngx_quic_set_lost_timer(ngx_connection_t q = ngx_queue_last(&ctx->sent); f = ngx_queue_data(q, ngx_quic_frame_t, queue); - w = (ngx_msec_int_t) (f->last + (ngx_quic_pto(c, ctx) << qc->pto_count) - - now); + w = (ngx_msec_int_t) + (f->send_time + (ngx_quic_pto(c, ctx) << qc->pto_count) - now); if (w < 0) { w = 0; @@ -828,6 +829,7 @@ ngx_quic_pto_handler(ngx_event_t *ev) 
ngx_uint_t i, n; ngx_msec_t now; ngx_queue_t *q; + ngx_msec_int_t w; ngx_connection_t *c; ngx_quic_frame_t *f; ngx_quic_send_ctx_t *ctx; @@ -849,6 +851,8 @@ ngx_quic_pto_handler(ngx_event_t *ev) q = ngx_queue_last(&ctx->sent); f = ngx_queue_data(q, ngx_quic_frame_t, queue); + w = (ngx_msec_int_t) + (f->send_time + (ngx_quic_pto(c, ctx) << qc->pto_count) - now); if (f->pnum <= ctx->largest_ack && ctx->largest_ack != NGX_QUIC_UNSET_PN) @@ -856,9 +860,7 @@ ngx_quic_pto_handler(ngx_event_t *ev) continue; } - if ((ngx_msec_int_t) (f->last + (ngx_quic_pto(c, ctx) << qc->pto_count) - - now) > 0) - { + if (w > 0) { continue; } diff -r efcdaa66df2e -r ce1ff81e9b92 src/event/quic/ngx_event_quic_output.c --- a/src/event/quic/ngx_event_quic_output.c Wed Nov 29 21:41:29 2023 +0400 +++ b/src/event/quic/ngx_event_quic_output.c Thu Nov 30 15:03:06 2023 +0400 @@ -586,8 +586,7 @@ ngx_quic_output_packet(ngx_connection_t } f->pnum = ctx->pnum; - f->first = now; - f->last = now; + f->send_time = now; f->plen = 0; ngx_quic_log_frame(c->log, f, 1); @@ -1265,8 +1264,7 @@ ngx_quic_frame_sendto(ngx_connection_t * } frame->pnum = ctx->pnum; - frame->first = now; - frame->last = now; + frame->send_time = now; frame->plen = res.len; ctx->pnum++; diff -r efcdaa66df2e -r ce1ff81e9b92 src/event/quic/ngx_event_quic_transport.h --- a/src/event/quic/ngx_event_quic_transport.h Wed Nov 29 21:41:29 2023 +0400 +++ b/src/event/quic/ngx_event_quic_transport.h Thu Nov 30 15:03:06 2023 +0400 @@ -266,8 +266,7 @@ struct ngx_quic_frame_s { ngx_queue_t queue; uint64_t pnum; size_t plen; - ngx_msec_t first; - ngx_msec_t last; + ngx_msec_t send_time; ssize_t len; unsigned need_ack:1; unsigned pkt_need_ack:1; From arut at nginx.com Tue Dec 12 13:47:58 2023 From: arut at nginx.com (=?utf-8?q?Roman_Arutyunyan?=) Date: Tue, 12 Dec 2023 13:47:58 +0000 Subject: [nginx] QUIC: path revalidation after expansion failure. Message-ID: details: https://hg.nginx.org/nginx/rev/a6f79f044de5 branches: changeset: 9194:a6f79f044de5 user: Roman Arutyunyan date: Wed Nov 29 10:58:21 2023 +0400 description: QUIC: path revalidation after expansion failure. As per RFC 9000, Section 8.2.1: When an endpoint is unable to expand the datagram size to 1200 bytes due to the anti-amplification limit, the path MTU will not be validated. To ensure that the path MTU is large enough, the endpoint MUST perform a second path validation by sending a PATH_CHALLENGE frame in a datagram of at least 1200 bytes. 
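
Reduced to its essentials (a standalone sketch with hypothetical names, not the patch itself): when the amplification quota does not allow a full-size PATH_CHALLENGE datagram, the path is flagged so that a second, full-sized validation follows the first successful one.

    #include <stddef.h>

    typedef struct {
        unsigned  validated:1;
        unsigned  mtu_unvalidated:1;
    } path_t;

    /* size the next PATH_CHALLENGE datagram is padded to */
    static size_t
    challenge_padding(path_t *path, size_t quota)
    {
        if (path->mtu_unvalidated || quota < 1200) {
            path->mtu_unvalidated = 1;   /* revalidate with 1200 bytes later */
            return 0;
        }

        return 1200;
    }
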
diffstat: src/event/quic/ngx_event_quic_connection.h | 3 ++- src/event/quic/ngx_event_quic_migration.c | 20 +++++++++++++++++--- 2 files changed, 19 insertions(+), 4 deletions(-) diffs (64 lines): diff -r ce1ff81e9b92 -r a6f79f044de5 src/event/quic/ngx_event_quic_connection.h --- a/src/event/quic/ngx_event_quic_connection.h Thu Nov 30 15:03:06 2023 +0400 +++ b/src/event/quic/ngx_event_quic_connection.h Wed Nov 29 10:58:21 2023 +0400 @@ -111,7 +111,8 @@ struct ngx_quic_path_s { uint64_t mtu_pnum[NGX_QUIC_PATH_RETRIES]; ngx_str_t addr_text; u_char text[NGX_SOCKADDR_STRLEN]; - ngx_uint_t validated; /* unsigned validated:1; */ + unsigned validated:1; + unsigned mtu_unvalidated:1; }; diff -r ce1ff81e9b92 -r a6f79f044de5 src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c Thu Nov 30 15:03:06 2023 +0400 +++ b/src/event/quic/ngx_event_quic_migration.c Wed Nov 29 10:58:21 2023 +0400 @@ -169,6 +169,7 @@ valid: path->mtu = prev->mtu; path->max_mtu = prev->max_mtu; + path->mtu_unvalidated = 0; } } @@ -182,6 +183,13 @@ valid: qc->congestion.recovery_start = ngx_current_msec; } + path->validated = 1; + + if (path->mtu_unvalidated) { + path->mtu_unvalidated = 0; + return ngx_quic_validate_path(c, path); + } + /* * RFC 9000, 9.3. Responding to Connection Migration * @@ -199,8 +207,6 @@ valid: ngx_quic_path_dbg(c, "is validated", path); - path->validated = 1; - ngx_quic_discover_path_mtu(c, path); return NGX_OK; @@ -578,7 +584,15 @@ ngx_quic_send_path_challenge(ngx_connect * sending a datagram of this size. */ - min = (ngx_quic_path_limit(c, path, 1200) < 1200) ? 0 : 1200; + if (path->mtu_unvalidated + || ngx_quic_path_limit(c, path, 1200) < 1200) + { + min = 0; + path->mtu_unvalidated = 1; + + } else { + min = 1200; + } if (ngx_quic_frame_sendto(c, frame, min, path) == NGX_ERROR) { return NGX_ERROR; From pluknet at nginx.com Tue Dec 12 14:52:15 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 12 Dec 2023 18:52:15 +0400 Subject: [PATCH] Linux packages: actualized supported Alpine Linux versions In-Reply-To: <55f8ce8a8cb0acf9b360.1702345503@qgcd7xg9r9.olympus.f5net.com> References: <55f8ce8a8cb0acf9b360.1702345503@qgcd7xg9r9.olympus.f5net.com> Message-ID: <038D9E82-CB9B-435B-8378-FA823F1F5FF4@nginx.com> > On 12 Dec 2023, at 05:45, Konstantin Pavlov wrote: > > # HG changeset patch > # User Konstantin Pavlov > # Date 1702345379 28800 > # Mon Dec 11 17:42:59 2023 -0800 > # Node ID 55f8ce8a8cb0acf9b360e47fd5d0023f16451a80 > # Parent 08533e33d0744bd27bc42d87c47607399903eae5 > Linux packages: actualized supported Alpine Linux versions. > Looks good. -- Sergey Kandaurov From pluknet at nginx.com Tue Dec 12 16:25:51 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 12 Dec 2023 16:25:51 +0000 Subject: [nginx] QUIC: reset RTT estimator for the new path. Message-ID: details: https://hg.nginx.org/nginx/rev/ff452f283aa9 branches: changeset: 9195:ff452f283aa9 user: Sergey Kandaurov date: Tue Dec 12 20:20:51 2023 +0400 description: QUIC: reset RTT estimator for the new path. RTT is a property of the path, it must be reset on confirming a peer's ownership of its new address. 
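
For context, the estimator being reset is the usual exponentially weighted one from RFC 9002, Section 5.3; a standalone sketch (ack delay handling omitted, not nginx code):

    #include <math.h>

    typedef struct {
        double  latest_rtt;
        double  min_rtt;
        double  smoothed_rtt;
        double  rttvar;
        int     has_sample;
    } rtt_t;

    static void
    rtt_update(rtt_t *r, double sample)
    {
        r->latest_rtt = sample;

        if (!r->has_sample) {
            /* the first sample on a path seeds the estimator */
            r->min_rtt = sample;
            r->smoothed_rtt = sample;
            r->rttvar = sample / 2;
            r->has_sample = 1;
            return;
        }

        if (sample < r->min_rtt) {
            r->min_rtt = sample;
        }

        r->rttvar = 0.75 * r->rttvar + 0.25 * fabs(r->smoothed_rtt - sample);
        r->smoothed_rtt = 0.875 * r->smoothed_rtt + 0.125 * sample;
    }

Samples taken on the old path would keep polluting these averages after migration, hence the estimator is reinitialized together with the congestion controller.
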
diffstat: src/event/quic/ngx_event_quic.c | 9 +-------- src/event/quic/ngx_event_quic_connection.h | 7 +++++++ src/event/quic/ngx_event_quic_migration.c | 2 ++ 3 files changed, 10 insertions(+), 8 deletions(-) diffs (48 lines): diff -r a6f79f044de5 -r ff452f283aa9 src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c Wed Nov 29 10:58:21 2023 +0400 +++ b/src/event/quic/ngx_event_quic.c Tue Dec 12 20:20:51 2023 +0400 @@ -260,14 +260,7 @@ ngx_quic_new_connection(ngx_connection_t ngx_queue_init(&qc->free_frames); - qc->avg_rtt = NGX_QUIC_INITIAL_RTT; - qc->rttvar = NGX_QUIC_INITIAL_RTT / 2; - qc->min_rtt = NGX_TIMER_INFINITE; - qc->first_rtt = NGX_TIMER_INFINITE; - - /* - * qc->latest_rtt = 0 - */ + ngx_quic_init_rtt(qc); qc->pto.log = c->log; qc->pto.data = c; diff -r a6f79f044de5 -r ff452f283aa9 src/event/quic/ngx_event_quic_connection.h --- a/src/event/quic/ngx_event_quic_connection.h Wed Nov 29 10:58:21 2023 +0400 +++ b/src/event/quic/ngx_event_quic_connection.h Tue Dec 12 20:20:51 2023 +0400 @@ -65,6 +65,13 @@ typedef struct ngx_quic_keys_s ng #define ngx_quic_get_socket(c) ((ngx_quic_socket_t *)((c)->udp)) +#define ngx_quic_init_rtt(qc) \ + (qc)->avg_rtt = NGX_QUIC_INITIAL_RTT; \ + (qc)->rttvar = NGX_QUIC_INITIAL_RTT / 2; \ + (qc)->min_rtt = NGX_TIMER_INFINITE; \ + (qc)->first_rtt = NGX_TIMER_INFINITE; \ + (qc)->latest_rtt = 0; + typedef enum { NGX_QUIC_PATH_IDLE = 0, diff -r a6f79f044de5 -r ff452f283aa9 src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c Wed Nov 29 10:58:21 2023 +0400 +++ b/src/event/quic/ngx_event_quic_migration.c Tue Dec 12 20:20:51 2023 +0400 @@ -181,6 +181,8 @@ valid: 14720)); qc->congestion.ssthresh = (size_t) -1; qc->congestion.recovery_start = ngx_current_msec; + + ngx_quic_init_rtt(qc); } path->validated = 1; From pluknet at nginx.com Tue Dec 12 16:25:54 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Tue, 12 Dec 2023 16:25:54 +0000 Subject: [nginx] QUIC: path aware in-flight bytes accounting. Message-ID: details: https://hg.nginx.org/nginx/rev/6c8595b77e66 branches: changeset: 9196:6c8595b77e66 user: Sergey Kandaurov date: Tue Dec 12 20:21:12 2023 +0400 description: QUIC: path aware in-flight bytes accounting. On-packet acknowledgement is made path aware, as per RFC 9000, Section 9.4: Packets sent on the old path MUST NOT contribute to congestion control or RTT estimation for the new path. To make this possible in a single congestion control context, the first packet to be sent after the new path has been validated, which includes resetting the congestion controller and RTT estimator, is now remembered in the connection. Packets sent previously, such as on the old path, are not taken into account. Note that although the packet number is saved per-connection, the added checks affect application level packets only. For non-application level packets, which are only processed prior to the handshake is complete, the remembered packet number remains set to zero. 
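
The added gate itself is tiny; a simplified standalone sketch (hypothetical names):

    #include <stdint.h>
    #include <stddef.h>

    /* Packets numbered before the first packet sent on the newly validated
     * path were sent on the old path and must not feed the freshly reset
     * congestion controller. */
    static void
    on_packet_acked(uint64_t pnum, uint64_t rst_pnum, size_t plen,
        size_t *in_flight)
    {
        if (pnum < rst_pnum) {
            return;
        }

        *in_flight -= plen;
        /* ... congestion window growth, RTT sampling, etc. ... */
    }
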
diffstat: src/event/quic/ngx_event_quic_ack.c | 8 ++++++++ src/event/quic/ngx_event_quic_connection.h | 2 ++ src/event/quic/ngx_event_quic_migration.c | 6 ++++++ 3 files changed, 16 insertions(+), 0 deletions(-) diffs (60 lines): diff -r ff452f283aa9 -r 6c8595b77e66 src/event/quic/ngx_event_quic_ack.c --- a/src/event/quic/ngx_event_quic_ack.c Tue Dec 12 20:20:51 2023 +0400 +++ b/src/event/quic/ngx_event_quic_ack.c Tue Dec 12 20:21:12 2023 +0400 @@ -325,6 +325,10 @@ ngx_quic_congestion_ack(ngx_connection_t qc = ngx_quic_get_connection(c); cg = &qc->congestion; + if (f->pnum < qc->rst_pnum) { + return; + } + blocked = (cg->in_flight >= cg->window) ? 1 : 0; cg->in_flight -= f->plen; @@ -667,6 +671,10 @@ ngx_quic_congestion_lost(ngx_connection_ qc = ngx_quic_get_connection(c); cg = &qc->congestion; + if (f->pnum < qc->rst_pnum) { + return; + } + blocked = (cg->in_flight >= cg->window) ? 1 : 0; cg->in_flight -= f->plen; diff -r ff452f283aa9 -r 6c8595b77e66 src/event/quic/ngx_event_quic_connection.h --- a/src/event/quic/ngx_event_quic_connection.h Tue Dec 12 20:20:51 2023 +0400 +++ b/src/event/quic/ngx_event_quic_connection.h Tue Dec 12 20:21:12 2023 +0400 @@ -266,6 +266,8 @@ struct ngx_quic_connection_s { ngx_quic_streams_t streams; ngx_quic_congestion_t congestion; + uint64_t rst_pnum; /* first on validated path */ + off_t received; ngx_uint_t error; diff -r ff452f283aa9 -r 6c8595b77e66 src/event/quic/ngx_event_quic_migration.c --- a/src/event/quic/ngx_event_quic_migration.c Tue Dec 12 20:20:51 2023 +0400 +++ b/src/event/quic/ngx_event_quic_migration.c Tue Dec 12 20:21:12 2023 +0400 @@ -110,6 +110,7 @@ ngx_quic_handle_path_response_frame(ngx_ ngx_uint_t rst; ngx_queue_t *q; ngx_quic_path_t *path, *prev; + ngx_quic_send_ctx_t *ctx; ngx_quic_connection_t *qc; qc = ngx_quic_get_connection(c); @@ -174,6 +175,11 @@ valid: } if (rst) { + /* prevent old path packets contribution to congestion control */ + + ctx = ngx_quic_get_send_ctx(qc, ssl_encryption_application); + qc->rst_pnum = ctx->pnum; + ngx_memzero(&qc->congestion, sizeof(ngx_quic_congestion_t)); qc->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size, From mdounin at mdounin.ru Wed Dec 13 00:16:46 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 13 Dec 2023 03:16:46 +0300 Subject: [PATCH] Added asm ISB as asm pause for ngx_cpu_pause() for aarch64 In-Reply-To: References: Message-ID: Hello! On Mon, Dec 11, 2023 at 05:09:17PM -0600, Julio Suarez wrote: > Hi Maxim, > > > Nitpicking: Added ISB as ngx_cpu_pause() for aarch64. > > Yes, we can make that change. > > > Could you please clarify what do you mean by "a bug"? An empty > > ngx_cpu_pause() is certainly not a bug, it's just a lack of a more > > optimal solution for the particular architecture. > > Agree, not a bug. I'm in a team that focuses on performance, so > sub-optimal performance is a "bug" to us. This is not a functional bug. > Replacing the word bug with "sub-optimal" is more appropriate. > > When a delay like the PAUSE that is there for x86 is added, there is a > 2-5% increase in the number of requests/sec Arm CPUs can achieve under > high load. > > Yes, the test setup is very similar to what's described here (note, > those particular instances in the blog isn't what I tested): > > https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/nginx-performance-on-graviton-3 > > Also, we tested on Nginx Open Source (without JWT), not Nginx-Plus like > in the blog. 
The only nginx configs I see there (or, rather, in linked articles) do not use neither shared memory zones nor thread pools. That is, ngx_cpu_pause() is not used in these configurations at all. If you think it was actually used in your tests, could you please provide nginx configuration you've used for testing? If nginx configs you've used do not actually contain shared memory zones or thread pools, the performance changes you are seeing, even if these are statistically significant (see below about ministat(1)), might be the result of code alignment changes. MySQL bug mentioned below uses -falign-{functions,jumps}=64 to minimize effects of the code alignment changes (https://bugs.mysql.com/bug.php?id=100664), it might worth to do the same. > We tested for the max RPS of a 512B file that can be pulled through a > reverse proxy. We select the number of upstreams to be large (8 to be > exact), they are also high in core count (16+ CPU). The load generator > node is also large (64 CPUs). This ensures the bottleneck is at the > reverse proxy. We test small files because large files make the test > network bounded, while smaller files make the test CPU bounded. > > I tested both ISB and YIELD (will talk about YIELD further below). > > Results of these tests are something like this: > > ISB uplift from no delay across 3 runs: > > - 2 CPU: 1.03 - 1.22% > > - 4 CPU: 2.70 - 10.75% (I'm treating the 10.75% here as an outlier, > dropping that 10.75% gets ~5% on the high end of the range, hence why > I'm just saying ~2-5% in change log, I don't want to overstate the perf > improvement) > > - 8 CPU: 1.1 -2.33% > > > YIELD uplift from no delay across 3 runs: > > - 2 CPU: 0 - 0.51% > > - 4 CPU: 0 - 1.41% > > - 8 CPU: 1.05 - 2.31% > > ISB produced the highest uplift, particularly for a 4 CPU reverse proxy. > Hence why I submitted with ISB. Still, we see benefit with YIELD too. > > Variation comes from tearing down cloud infrastructure and redeploying. > Results can vary depending on where you land in the data center. I'm > intentionally leaving out exactly which HW/cloud I used in this data, > but I can say we see similar uplift across a variety of Arm systems. Could you please share some raw numbers from different tests? It would be interesting to see ministat(1) results. > With respect to using YIELD and other projects that use alternatively > use ISB: > > > With respect to ISB Vs YIELD. Yes, as documented, YIELD is the > conceptually right thing to use. However, in practice, it's a NOP which > produces a shorter delay than ISB. Hence why ISB appears to work better. > Also, YIELD is intended for SMT systems (uncommon on Arm), and hence, > it's going to be a NOP for any current Arm system you'll find in the > cloud. That said, YIELD produces uplift in RPS as well because even a > small delay is better than no delay. I'm 100% good with using YIELD if > you want to stay true to what is currently documented. I was going for > max perf uplift which is also why some other projects are also using > ISB. Whether it's YIELD or ISB, a revisit with WFET would be in order in > the more distant future. For today, YIELD or ISB would work better than > nothing (as it currently is). If YIELD is more acceptable, then I can do > YIELD. 
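
For completeness, a build with code alignment pinned, as in the MySQL bug report referenced in this thread, could be produced along these lines (a sketch; the remaining configure arguments are up to the tester):

    ./configure --with-cc-opt="-O2 -falign-functions=64 -falign-jumps=64"
    make
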
> > Projects that previously used YIELD and switched to ISB after noting > performance improvement (I don't think these projects shared data > anywhere, we just have to take their word): > > MongoDB: > https://github.com/mongodb/mongo/blob/b7a92e4194cca52665e01d81dd7f9b037b59b362/src/mongo/platform/pause.h#L61 > > MySQL: > https://github.com/mysql/mysql-server/blob/87307d4ddd88405117e3f1e51323836d57ab1f57/storage/innobase/include/ut0ut.h#L108 > > Jemalloc: > https://github.com/jemalloc/jemalloc/blob/e4817c8d89a2a413e835c4adeab5c5c4412f9235/configure.ac#L436 Thanks for the links. For the record, here are relevant commits / pull requests: https://github.com/wiredtiger/wiredtiger/pull/6080 https://github.com/mongodb/mongo/commit/6979525674af67405984c58585766dd4d0c3f2a8 https://bugs.mysql.com/bug.php?id=100664 https://github.com/mysql/mysql-server/commit/f2a4ed5b65a6c03ee1bea60b8c3bb4db64dbed10 https://github.com/jemalloc/jemalloc/pull/2205 https://github.com/jemalloc/jemalloc/commit/89fe8ee6bf7a23556350d883a310c0224a171879 At least MySQL bug seems to have some interesting details. > > Could you please clarify reasons for the "memory" clobber here? > > Putting in the memory clobber for ISB is redundant because ISB is a > barrier itself, but it's probably the GCC appropriate thing to do. I > also like it as a hint for someone not familiar with ISB. ISB will pause > the frontend (fetch-decode) to allow the CPU backend (execute-retire) to > finish whatever operations are in flight. It's possible that some of > those operations are writes to memory. Hence why we should tell the > compiler "this instruction may update memory". I don't think this interpretation is correct - memory is updated by other instructions, not ISB. And it's the compiler who emitted these instructions, so it perfectly knows that these will eventually update memory. Note that the ngx_cpu_pause() call does not need to be a barrier, neither a hardware barrier nor a compiler barrier. Instead, nginx relies on using volatile variables for locks, so these are always loaded from memory by the compiler on pre-lock checks (and will test the updated value if it's changed by other CPUs), and proper barrier semantics of ngx_atomic_cmp_set(). The "memory" clobber essentially tells compiler that it cannot optimize stores and loads across the call, and must reload anything from memory after the call. While this might be expected in other uses (for example, cpu_relax() in Linux is expected to be a compiler barrier), this is not something needed in ngx_cpu_pause(). [...] -- Maxim Dounin http://mdounin.ru/ From zaihan at unrealasia.net Wed Dec 13 07:55:56 2023 From: zaihan at unrealasia.net (Muhammad Nuzaihan) Date: Wed, 13 Dec 2023 15:55:56 +0800 Subject: processing a request without body Message-ID: Hi, I need to process requests with only URI path (without body) for a module. It seems ngx_http_request_body_filter_pt is *not* executed whenever there is a request without a body (it looked like it bypassed without request body) and only ngx_http_output_body_filter_pt part of the code is executed. For example i do a request curl curl like this: curl -vvvv -X POST http://localhost:8080/proxy/profile/alice/comment and i need to validate /proxy/profile/alice/comment in my module and there is no http headers and no body. Only URI path. Is there something similar done before? 
Thank you, Muhammad Nuzaihan From zaihan at unrealasia.net Wed Dec 13 08:32:15 2023 From: zaihan at unrealasia.net (Muhammad Nuzaihan) Date: Wed, 13 Dec 2023 16:32:15 +0800 Subject: processing a request without body In-Reply-To: References: Message-ID: Hi, Sorry about the previous email. I meant that ngx_http_top_request_body_filter is not executed whenever there is no body. Thank you, Muhammad Nuzaihan On Wed, Dec 13, 2023 at 3:55 PM Muhammad Nuzaihan wrote: > > Hi, > > I need to process requests with only a URI path (without a body) for a module. > > It seems ngx_http_request_body_filter_pt is *not* executed whenever > there is a request without a body (it looks like it is bypassed when > there is no request body) and only the ngx_http_output_body_filter_pt part of the > code is executed. > > For example, I do a request with curl like this: > > curl -vvvv -X POST http://localhost:8080/proxy/profile/alice/comment > > and I need to validate /proxy/profile/alice/comment in my module and > there are no HTTP headers and no body, only the URI path. > > Is there something similar done before? > > Thank you, > Muhammad Nuzaihan From pluknet at nginx.com Wed Dec 13 13:40:09 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 13 Dec 2023 17:40:09 +0400 Subject: [PATCH 2 of 3] Stream: virtual servers In-Reply-To: <1d3464283405a4d8ac54.1699610840@arut-laptop> References: <1d3464283405a4d8ac54.1699610840@arut-laptop> Message-ID: <979C0B23-C12F-4BDE-83B1-963126729EF9@nginx.com> > On 10 Nov 2023, at 14:07, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1699035295 -14400 > # Fri Nov 03 22:14:55 2023 +0400 > # Node ID 1d3464283405a4d8ac54caae9bf1815c723f04c5 > # Parent 966331bb4936888ef2f034aa2700c130514d0b57 > Stream: virtual servers. > > Server name is taken either from ngx_stream_ssl_module or > ngx_stream_ssl_preread_module. > You may want to consider mentioning the various directives introduced in this change here, for reference.
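Judging by the patch below, the directives in question are "server_name", "server_names_hash_max_size" and "server_names_hash_bucket_size", plus the "default_server" listen parameter. A minimal configuration sketch of what the change enables (addresses and certificate names are made up here, and the semantics are assumed to mirror the http module):

    stream {
        server {
            listen              127.0.0.1:12345 ssl default_server;
            server_name         example.com;
            ssl_certificate     example.com.crt;
            ssl_certificate_key example.com.key;
            proxy_pass          127.0.0.1:8001;
        }

        server {
            listen              127.0.0.1:12345 ssl;
            server_name         *.example.org;
            ssl_certificate     example.org.crt;
            ssl_certificate_key example.org.key;
            proxy_pass          127.0.0.1:8002;
        }
    }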
> diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c > --- a/src/stream/ngx_stream.c > +++ b/src/stream/ngx_stream.c > @@ -16,16 +16,34 @@ static ngx_int_t ngx_stream_init_phases( > ngx_stream_core_main_conf_t *cmcf); > static ngx_int_t ngx_stream_init_phase_handlers(ngx_conf_t *cf, > ngx_stream_core_main_conf_t *cmcf); > -static ngx_int_t ngx_stream_add_ports(ngx_conf_t *cf, ngx_array_t *ports, > - ngx_stream_listen_t *listen); > -static char *ngx_stream_optimize_servers(ngx_conf_t *cf, ngx_array_t *ports); > + > +static ngx_int_t ngx_stream_add_addresses(ngx_conf_t *cf, > + ngx_stream_core_srv_conf_t *cscf, ngx_stream_conf_port_t *port, > + ngx_stream_listen_opt_t *lsopt); > +static ngx_int_t ngx_stream_add_address(ngx_conf_t *cf, > + ngx_stream_core_srv_conf_t *cscf, ngx_stream_conf_port_t *port, > + ngx_stream_listen_opt_t *lsopt); > +static ngx_int_t ngx_stream_add_server(ngx_conf_t *cf, > + ngx_stream_core_srv_conf_t *cscf, ngx_stream_conf_addr_t *addr); > + > +static ngx_int_t ngx_stream_optimize_servers(ngx_conf_t *cf, > + ngx_stream_core_main_conf_t *cmcf, ngx_array_t *ports); > +static ngx_int_t ngx_stream_server_names(ngx_conf_t *cf, > + ngx_stream_core_main_conf_t *cmcf, ngx_stream_conf_addr_t *addr); > +static ngx_int_t ngx_stream_cmp_conf_addrs(const void *one, const void *two); > +static int ngx_libc_cdecl ngx_stream_cmp_dns_wildcards(const void *one, > + const void *two); > + > +static ngx_int_t ngx_stream_init_listening(ngx_conf_t *cf, > + ngx_stream_conf_port_t *port); > +static ngx_listening_t *ngx_stream_add_listening(ngx_conf_t *cf, > + ngx_stream_conf_addr_t *addr); > static ngx_int_t ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *stport, > ngx_stream_conf_addr_t *addr); > #if (NGX_HAVE_INET6) > static ngx_int_t ngx_stream_add_addrs6(ngx_conf_t *cf, > ngx_stream_port_t *stport, ngx_stream_conf_addr_t *addr); > #endif > -static ngx_int_t ngx_stream_cmp_conf_addrs(const void *one, const void *two); > > > ngx_uint_t ngx_stream_max_module; > @@ -74,10 +92,8 @@ static char * > ngx_stream_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > { > char *rv; > - ngx_uint_t i, m, mi, s; > + ngx_uint_t m, mi, s; Nitpicking: virtual servers support is largely based on the existing code in http module, and we'd like to keep it as similar as possible to reduce maintenance costs. Still, it has subtle differences. For example, http has a different declaration order in a similar code for ngx_http_block(). As part of this change, you may want to re-align this in stream to how it's done in http, up to you. 
> ngx_conf_t pcf; > - ngx_array_t ports; > - ngx_stream_listen_t *listen; > ngx_stream_module_t *module; > ngx_stream_conf_ctx_t *ctx; > ngx_stream_core_srv_conf_t **cscfp; > @@ -251,21 +267,13 @@ ngx_stream_block(ngx_conf_t *cf, ngx_com > return NGX_CONF_ERROR; > } > > - if (ngx_array_init(&ports, cf->temp_pool, 4, sizeof(ngx_stream_conf_port_t)) > - != NGX_OK) > - { > + /* optimize the lists of ports, addresses and server names */ > + > + if (ngx_stream_optimize_servers(cf, cmcf, cmcf->ports) != NGX_OK) { > return NGX_CONF_ERROR; > } > > - listen = cmcf->listen.elts; > - > - for (i = 0; i < cmcf->listen.nelts; i++) { > - if (ngx_stream_add_ports(cf, &ports, &listen[i]) != NGX_OK) { > - return NGX_CONF_ERROR; > - } > - } > - > - return ngx_stream_optimize_servers(cf, &ports); > + return NGX_CONF_OK; > } > > > @@ -377,73 +385,295 @@ ngx_stream_init_phase_handlers(ngx_conf_ > } > > > -static ngx_int_t > -ngx_stream_add_ports(ngx_conf_t *cf, ngx_array_t *ports, > - ngx_stream_listen_t *listen) > +ngx_int_t > +ngx_stream_add_listen(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, > + ngx_stream_listen_opt_t *lsopt) > { > - in_port_t p; > - ngx_uint_t i; > - struct sockaddr *sa; > - ngx_stream_conf_port_t *port; > - ngx_stream_conf_addr_t *addr; > + in_port_t p; > + ngx_uint_t i; > + struct sockaddr *sa; > + ngx_stream_conf_port_t *port; > + ngx_stream_core_main_conf_t *cmcf; > + > + cmcf = ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); > > - sa = listen->sockaddr; > + if (cmcf->ports == NULL) { > + cmcf->ports = ngx_array_create(cf->temp_pool, 2, > + sizeof(ngx_stream_conf_port_t)); > + if (cmcf->ports == NULL) { > + return NGX_ERROR; > + } > + } > + > + sa = lsopt->sockaddr; > p = ngx_inet_get_port(sa); > > - port = ports->elts; > - for (i = 0; i < ports->nelts; i++) { > + port = cmcf->ports->elts; > + for (i = 0; i < cmcf->ports->nelts; i++) { > > - if (p == port[i].port > - && listen->type == port[i].type > - && sa->sa_family == port[i].family) > + if (p != port[i].port > + || lsopt->type != port[i].type > + || sa->sa_family != port[i].family) > { > - /* a port is already in the port list */ > + continue; > + } > > - port = &port[i]; > - goto found; > - } > + /* a port is already in the port list */ > + > + return ngx_stream_add_addresses(cf, cscf, &port[i], lsopt); > } > > /* add a port to the port list */ > > - port = ngx_array_push(ports); > + port = ngx_array_push(cmcf->ports); > if (port == NULL) { > return NGX_ERROR; > } > > port->family = sa->sa_family; > - port->type = listen->type; > + port->type = lsopt->type; > port->port = p; > + port->addrs.elts = NULL; > + > + return ngx_stream_add_address(cf, cscf, port, lsopt); > +} > + > + > +static ngx_int_t > +ngx_stream_add_addresses(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, > + ngx_stream_conf_port_t *port, ngx_stream_listen_opt_t *lsopt) > +{ > + ngx_uint_t i, default_server, proxy_protocol, > + protocols, protocols_prev; > + ngx_stream_conf_addr_t *addr; > +#if (NGX_STREAM_SSL) > + ngx_uint_t ssl; > +#endif > + > + /* > + * we cannot compare whole sockaddr struct's as kernel > + * may fill some fields in inherited sockaddr struct's > + */ > + > + addr = port->addrs.elts; > + > + for (i = 0; i < port->addrs.nelts; i++) { > + > + if (ngx_cmp_sockaddr(lsopt->sockaddr, lsopt->socklen, > + addr[i].opt.sockaddr, > + addr[i].opt.socklen, 0) > + != NGX_OK) > + { > + continue; > + } > + > + /* the address is already in the address list */ > + > + if (ngx_stream_add_server(cf, cscf, &addr[i]) != NGX_OK) { > + return 
NGX_ERROR; > + } > + > + /* preserve default_server bit during listen options overwriting */ > + default_server = addr[i].opt.default_server; > + > + proxy_protocol = lsopt->proxy_protocol || addr[i].opt.proxy_protocol; > + protocols = lsopt->proxy_protocol; > + protocols_prev = addr[i].opt.proxy_protocol; > + > +#if (NGX_STREAM_SSL) > + ssl = lsopt->ssl || addr[i].opt.ssl; > + protocols |= lsopt->ssl << 1; > + protocols_prev |= addr[i].opt.ssl << 1; > +#endif > + > + if (lsopt->set) { > + > + if (addr[i].opt.set) { > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "duplicate listen options for %V", > + &addr[i].opt.addr_text); > + return NGX_ERROR; > + } > + > + addr[i].opt = *lsopt; > + } > + > + /* check the duplicate "default" server for this address:port */ > > - if (ngx_array_init(&port->addrs, cf->temp_pool, 2, > - sizeof(ngx_stream_conf_addr_t)) > - != NGX_OK) > - { > - return NGX_ERROR; > + if (lsopt->default_server) { > + > + if (default_server) { > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "a duplicate default server for %V", > + &addr[i].opt.addr_text); > + return NGX_ERROR; > + } > + > + default_server = 1; > + addr[i].default_server = cscf; > + } > + > + /* check for conflicting protocol options */ > + > + if ((protocols | protocols_prev) != protocols_prev) { > + > + /* options added */ > + > + if ((addr[i].opt.set && !lsopt->set) > + || addr[i].protocols_changed > + || (protocols | protocols_prev) != protocols) > + { > + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, > + "protocol options redefined for %V", > + &addr[i].opt.addr_text); > + } > + > + addr[i].protocols = protocols_prev; > + addr[i].protocols_set = 1; > + addr[i].protocols_changed = 1; > + > + } else if ((protocols_prev | protocols) != protocols) { > + > + /* options removed */ > + > + if (lsopt->set > + || (addr[i].protocols_set && protocols != addr[i].protocols)) > + { > + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, > + "protocol options redefined for %V", > + &addr[i].opt.addr_text); > + } > + > + addr[i].protocols = protocols; > + addr[i].protocols_set = 1; > + addr[i].protocols_changed = 1; > + > + } else { > + > + /* the same options */ > + > + if ((lsopt->set && addr[i].protocols_changed) > + || (addr[i].protocols_set && protocols != addr[i].protocols)) > + { > + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, > + "protocol options redefined for %V", > + &addr[i].opt.addr_text); > + } > + > + addr[i].protocols = protocols; > + addr[i].protocols_set = 1; > + } > + > + addr[i].opt.default_server = default_server; > + addr[i].opt.proxy_protocol = proxy_protocol; > +#if (NGX_STREAM_SSL) > + addr[i].opt.ssl = ssl; > +#endif > + > + return NGX_OK; > } > > -found: > + /* add the address to the addresses list that bound to this port */ > + > + return ngx_stream_add_address(cf, cscf, port, lsopt); > +} > + > + > +/* > + * add the server address, the server names and the server core module > + * configurations to the port list > + */ > + > +static ngx_int_t > +ngx_stream_add_address(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, > + ngx_stream_conf_port_t *port, ngx_stream_listen_opt_t *lsopt) > +{ > + ngx_stream_conf_addr_t *addr; > + > + if (port->addrs.elts == NULL) { > + if (ngx_array_init(&port->addrs, cf->temp_pool, 4, > + sizeof(ngx_stream_conf_addr_t)) > + != NGX_OK) > + { > + return NGX_ERROR; > + } > + } > > addr = ngx_array_push(&port->addrs); > if (addr == NULL) { > return NGX_ERROR; > } > > - addr->opt = *listen; > + addr->opt = *lsopt; > + addr->protocols = 0; > + addr->protocols_set = 0; > + 
addr->protocols_changed = 0; > + addr->hash.buckets = NULL; > + addr->hash.size = 0; > + addr->wc_head = NULL; > + addr->wc_tail = NULL; > +#if (NGX_PCRE) > + addr->nregex = 0; > + addr->regex = NULL; > +#endif > + addr->default_server = cscf; > + addr->servers.elts = NULL; > + > + return ngx_stream_add_server(cf, cscf, addr); > +} > + > + > +/* add the server core module configuration to the address:port */ > + > +static ngx_int_t > +ngx_stream_add_server(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, > + ngx_stream_conf_addr_t *addr) > +{ > + ngx_uint_t i; > + ngx_stream_core_srv_conf_t **server; > + > + if (addr->servers.elts == NULL) { > + if (ngx_array_init(&addr->servers, cf->temp_pool, 4, > + sizeof(ngx_stream_core_srv_conf_t *)) > + != NGX_OK) > + { > + return NGX_ERROR; > + } > + > + } else { > + server = addr->servers.elts; > + for (i = 0; i < addr->servers.nelts; i++) { > + if (server[i] == cscf) { > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "a duplicate listen %V", > + &addr->opt.addr_text); > + return NGX_ERROR; > + } > + } > + } > + > + server = ngx_array_push(&addr->servers); > + if (server == NULL) { > + return NGX_ERROR; > + } > + > + *server = cscf; > > return NGX_OK; > } > > > -static char * > -ngx_stream_optimize_servers(ngx_conf_t *cf, ngx_array_t *ports) > +static ngx_int_t > +ngx_stream_optimize_servers(ngx_conf_t *cf, ngx_stream_core_main_conf_t *cmcf, > + ngx_array_t *ports) > { > - ngx_uint_t i, p, last, bind_wildcard; > - ngx_listening_t *ls; > - ngx_stream_port_t *stport; > - ngx_stream_conf_port_t *port; > - ngx_stream_conf_addr_t *addr; > - ngx_stream_core_srv_conf_t *cscf; > + ngx_uint_t p, a; > + ngx_stream_conf_port_t *port; > + ngx_stream_conf_addr_t *addr; > + > + if (ports == NULL) { > + return NGX_OK; > + } > > port = ports->elts; > for (p = 0; p < ports->nelts; p++) { > @@ -451,175 +681,191 @@ ngx_stream_optimize_servers(ngx_conf_t * > ngx_sort(port[p].addrs.elts, (size_t) port[p].addrs.nelts, > sizeof(ngx_stream_conf_addr_t), ngx_stream_cmp_conf_addrs); > > - addr = port[p].addrs.elts; > - last = port[p].addrs.nelts; > - > /* > - * if there is the binding to the "*:port" then we need to bind() > - * to the "*:port" only and ignore the other bindings > + * check whether all name-based servers have the same > + * configuration as a default server for given address:port > */ > > - if (addr[last - 1].opt.wildcard) { > - addr[last - 1].opt.bind = 1; > - bind_wildcard = 1; > + addr = port[p].addrs.elts; > + for (a = 0; a < port[p].addrs.nelts; a++) { > > - } else { > - bind_wildcard = 0; > + if (addr[a].servers.nelts > 1 > +#if (NGX_PCRE) > + || addr[a].default_server->captures > +#endif > + ) > + { > + if (ngx_stream_server_names(cf, cmcf, &addr[a]) != NGX_OK) { > + return NGX_ERROR; > + } > + } > } > > - i = 0; > - > - while (i < last) { > - > - if (bind_wildcard && !addr[i].opt.bind) { > - i++; > - continue; > - } > - > - ls = ngx_create_listening(cf, addr[i].opt.sockaddr, > - addr[i].opt.socklen); > - if (ls == NULL) { > - return NGX_CONF_ERROR; > - } > - > - ls->addr_ntop = 1; > - ls->handler = ngx_stream_init_connection; > - ls->pool_size = 256; > - ls->type = addr[i].opt.type; > - > - cscf = addr->opt.ctx->srv_conf[ngx_stream_core_module.ctx_index]; > - > - ls->logp = cscf->error_log; > - ls->log.data = &ls->addr_text; > - ls->log.handler = ngx_accept_log_error; > - > - ls->backlog = addr[i].opt.backlog; > - ls->rcvbuf = addr[i].opt.rcvbuf; > - ls->sndbuf = addr[i].opt.sndbuf; > - > - ls->wildcard = addr[i].opt.wildcard; > - > - ls->keepalive 
= addr[i].opt.so_keepalive; > -#if (NGX_HAVE_KEEPALIVE_TUNABLE) > - ls->keepidle = addr[i].opt.tcp_keepidle; > - ls->keepintvl = addr[i].opt.tcp_keepintvl; > - ls->keepcnt = addr[i].opt.tcp_keepcnt; > -#endif > - > -#if (NGX_HAVE_INET6) > - ls->ipv6only = addr[i].opt.ipv6only; > -#endif > - > -#if (NGX_HAVE_TCP_FASTOPEN) > - ls->fastopen = addr[i].opt.fastopen; > -#endif > - > -#if (NGX_HAVE_REUSEPORT) > - ls->reuseport = addr[i].opt.reuseport; > -#endif > - > - stport = ngx_palloc(cf->pool, sizeof(ngx_stream_port_t)); > - if (stport == NULL) { > - return NGX_CONF_ERROR; > - } > - > - ls->servers = stport; > - > - stport->naddrs = i + 1; > - > - switch (ls->sockaddr->sa_family) { > -#if (NGX_HAVE_INET6) > - case AF_INET6: > - if (ngx_stream_add_addrs6(cf, stport, addr) != NGX_OK) { > - return NGX_CONF_ERROR; > - } > - break; > -#endif > - default: /* AF_INET */ > - if (ngx_stream_add_addrs(cf, stport, addr) != NGX_OK) { > - return NGX_CONF_ERROR; > - } > - break; > - } > - > - addr++; > - last--; > + if (ngx_stream_init_listening(cf, &port[p]) != NGX_OK) { > + return NGX_ERROR; > } > } > > - return NGX_CONF_OK; > -} > - > - > -static ngx_int_t > -ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *stport, > - ngx_stream_conf_addr_t *addr) > -{ > - ngx_uint_t i; > - struct sockaddr_in *sin; > - ngx_stream_in_addr_t *addrs; > - > - stport->addrs = ngx_pcalloc(cf->pool, > - stport->naddrs * sizeof(ngx_stream_in_addr_t)); > - if (stport->addrs == NULL) { > - return NGX_ERROR; > - } > - > - addrs = stport->addrs; > - > - for (i = 0; i < stport->naddrs; i++) { > - > - sin = (struct sockaddr_in *) addr[i].opt.sockaddr; > - addrs[i].addr = sin->sin_addr.s_addr; > - > - addrs[i].conf.ctx = addr[i].opt.ctx; > -#if (NGX_STREAM_SSL) > - addrs[i].conf.ssl = addr[i].opt.ssl; > -#endif > - addrs[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; > - addrs[i].conf.addr_text = addr[i].opt.addr_text; > - } > - > return NGX_OK; > } > > > -#if (NGX_HAVE_INET6) > - > static ngx_int_t > -ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *stport, > +ngx_stream_server_names(ngx_conf_t *cf, ngx_stream_core_main_conf_t *cmcf, > ngx_stream_conf_addr_t *addr) > { > - ngx_uint_t i; > - struct sockaddr_in6 *sin6; > - ngx_stream_in6_addr_t *addrs6; > + ngx_int_t rc; > + ngx_uint_t n, s; > + ngx_hash_init_t hash; > + ngx_hash_keys_arrays_t ha; > + ngx_stream_server_name_t *name; > + ngx_stream_core_srv_conf_t **cscfp; > +#if (NGX_PCRE) > + ngx_uint_t regex, i; > > - stport->addrs = ngx_pcalloc(cf->pool, > - stport->naddrs * sizeof(ngx_stream_in6_addr_t)); > - if (stport->addrs == NULL) { > + regex = 0; > +#endif > + > + ngx_memzero(&ha, sizeof(ngx_hash_keys_arrays_t)); > + > + ha.temp_pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, cf->log); > + if (ha.temp_pool == NULL) { > return NGX_ERROR; > } > > - addrs6 = stport->addrs; > + ha.pool = cf->pool; > + > + if (ngx_hash_keys_array_init(&ha, NGX_HASH_LARGE) != NGX_OK) { > + goto failed; > + } > + > + cscfp = addr->servers.elts; > + > + for (s = 0; s < addr->servers.nelts; s++) { > + > + name = cscfp[s]->server_names.elts; > + > + for (n = 0; n < cscfp[s]->server_names.nelts; n++) { > > - for (i = 0; i < stport->naddrs; i++) { > +#if (NGX_PCRE) > + if (name[n].regex) { > + regex++; > + continue; > + } > +#endif > > - sin6 = (struct sockaddr_in6 *) addr[i].opt.sockaddr; > - addrs6[i].addr6 = sin6->sin6_addr; > + rc = ngx_hash_add_key(&ha, &name[n].name, name[n].server, > + NGX_HASH_WILDCARD_KEY); > + > + if (rc == NGX_ERROR) { > + goto failed; > + } > > - 
addrs6[i].conf.ctx = addr[i].opt.ctx; > -#if (NGX_STREAM_SSL) > - addrs6[i].conf.ssl = addr[i].opt.ssl; > -#endif > - addrs6[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; > - addrs6[i].conf.addr_text = addr[i].opt.addr_text; > + if (rc == NGX_DECLINED) { > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > + "invalid server name or wildcard \"%V\" on %V", > + &name[n].name, &addr->opt.addr_text); > + goto failed; > + } > + > + if (rc == NGX_BUSY) { > + ngx_log_error(NGX_LOG_WARN, cf->log, 0, > + "conflicting server name \"%V\" on %V, ignored", > + &name[n].name, &addr->opt.addr_text); > + } > + } > + } > + > + hash.key = ngx_hash_key_lc; > + hash.max_size = cmcf->server_names_hash_max_size; > + hash.bucket_size = cmcf->server_names_hash_bucket_size; > + hash.name = "server_names_hash"; > + hash.pool = cf->pool; > + > + if (ha.keys.nelts) { > + hash.hash = &addr->hash; > + hash.temp_pool = NULL; > + > + if (ngx_hash_init(&hash, ha.keys.elts, ha.keys.nelts) != NGX_OK) { > + goto failed; > + } > } > > - return NGX_OK; > -} > + if (ha.dns_wc_head.nelts) { > + > + ngx_qsort(ha.dns_wc_head.elts, (size_t) ha.dns_wc_head.nelts, > + sizeof(ngx_hash_key_t), ngx_stream_cmp_dns_wildcards); > + > + hash.hash = NULL; > + hash.temp_pool = ha.temp_pool; > + > + if (ngx_hash_wildcard_init(&hash, ha.dns_wc_head.elts, > + ha.dns_wc_head.nelts) > + != NGX_OK) > + { > + goto failed; > + } > + > + addr->wc_head = (ngx_hash_wildcard_t *) hash.hash; > + } > + > + if (ha.dns_wc_tail.nelts) { > + > + ngx_qsort(ha.dns_wc_tail.elts, (size_t) ha.dns_wc_tail.nelts, > + sizeof(ngx_hash_key_t), ngx_stream_cmp_dns_wildcards); > + > + hash.hash = NULL; > + hash.temp_pool = ha.temp_pool; > + > + if (ngx_hash_wildcard_init(&hash, ha.dns_wc_tail.elts, > + ha.dns_wc_tail.nelts) > + != NGX_OK) > + { > + goto failed; > + } > + > + addr->wc_tail = (ngx_hash_wildcard_t *) hash.hash; > + } > + > + ngx_destroy_pool(ha.temp_pool); > + > +#if (NGX_PCRE) > + > + if (regex == 0) { > + return NGX_OK; > + } > + > + addr->nregex = regex; > + addr->regex = ngx_palloc(cf->pool, > + regex * sizeof(ngx_stream_server_name_t)); > + if (addr->regex == NULL) { > + return NGX_ERROR; > + } > + > + i = 0; > + > + for (s = 0; s < addr->servers.nelts; s++) { > + > + name = cscfp[s]->server_names.elts; > + > + for (n = 0; n < cscfp[s]->server_names.nelts; n++) { > + if (name[n].regex) { > + addr->regex[i++] = name[n]; > + } > + } > + } > > #endif > > + return NGX_OK; > + > +failed: > + > + ngx_destroy_pool(ha.temp_pool); > + > + return NGX_ERROR; > +} > + > > static ngx_int_t > ngx_stream_cmp_conf_addrs(const void *one, const void *two) > @@ -630,12 +876,12 @@ ngx_stream_cmp_conf_addrs(const void *on > second = (ngx_stream_conf_addr_t *) two; > > if (first->opt.wildcard) { > - /* a wildcard must be the last resort, shift it to the end */ > + /* a wildcard address must be the last resort, shift it to the end */ > return 1; > } > > if (second->opt.wildcard) { > - /* a wildcard must be the last resort, shift it to the end */ > + /* a wildcard address must be the last resort, shift it to the end */ > return -1; > } > > @@ -653,3 +899,289 @@ ngx_stream_cmp_conf_addrs(const void *on > > return 0; > } > + > + > +static int ngx_libc_cdecl > +ngx_stream_cmp_dns_wildcards(const void *one, const void *two) > +{ > + ngx_hash_key_t *first, *second; > + > + first = (ngx_hash_key_t *) one; > + second = (ngx_hash_key_t *) two; > + > + return ngx_dns_strcmp(first->key.data, second->key.data); > +} > + > + > +static ngx_int_t > +ngx_stream_init_listening(ngx_conf_t *cf, 
ngx_stream_conf_port_t *port) > +{ > + ngx_uint_t i, last, bind_wildcard; > + ngx_listening_t *ls; > + ngx_stream_port_t *hport; Here and below you renamed "stport" (as in "stream port") back to "hport" (as in "http port"), which apparently doesn't belong here. > + ngx_stream_conf_addr_t *addr; > + > + addr = port->addrs.elts; > + last = port->addrs.nelts; > + > + /* > + * If there is a binding to an "*:port" then we need to bind() to > + * the "*:port" only and ignore other implicit bindings. The bindings > + * have been already sorted: explicit bindings are on the start, then > + * implicit bindings go, and wildcard binding is in the end. > + */ > + > + if (addr[last - 1].opt.wildcard) { > + addr[last - 1].opt.bind = 1; > + bind_wildcard = 1; > + > + } else { > + bind_wildcard = 0; > + } > + > + i = 0; > + > + while (i < last) { > + > + if (bind_wildcard && !addr[i].opt.bind) { > + i++; > + continue; > + } > + > + ls = ngx_stream_add_listening(cf, &addr[i]); > + if (ls == NULL) { > + return NGX_ERROR; > + } > + > + hport = ngx_pcalloc(cf->pool, sizeof(ngx_stream_port_t)); > + if (hport == NULL) { > + return NGX_ERROR; > + } > + > + ls->servers = hport; > + > + hport->naddrs = i + 1; > + > + switch (ls->sockaddr->sa_family) { > + > +#if (NGX_HAVE_INET6) > + case AF_INET6: > + if (ngx_stream_add_addrs6(cf, hport, addr) != NGX_OK) { > + return NGX_ERROR; > + } > + break; > +#endif > + default: /* AF_INET */ > + if (ngx_stream_add_addrs(cf, hport, addr) != NGX_OK) { > + return NGX_ERROR; > + } > + break; > + } > + > + addr++; > + last--; > + } > + > + return NGX_OK; > +} > + > + > +static ngx_listening_t * > +ngx_stream_add_listening(ngx_conf_t *cf, ngx_stream_conf_addr_t *addr) > +{ > + ngx_listening_t *ls; > + ngx_stream_core_srv_conf_t *cscf; > + > + ls = ngx_create_listening(cf, addr->opt.sockaddr, addr->opt.socklen); > + if (ls == NULL) { > + return NULL; > + } > + > + ls->addr_ntop = 1; > + > + ls->handler = ngx_stream_init_connection; > + > + cscf = addr->default_server; > + ls->pool_size = 256; Nitpicking. Current code has the following initialization order: ls->addr_ntop = 1; ls->handler = ngx_stream_init_connection; ls->pool_size = 256; ls->type = addr[i].opt.type cscf = addr->opt.ctx->srv_conf[ngx_stream_core_module.ctx_index]; Besides "type", it makes sense to keep this order in the new code, as well: ls->handler = ngx_stream_init_connection; ls->pool_size = 256; cscf = addr->default_server; ls->logp = cscf->error_log; > + > + ls->logp = cscf->error_log; > + ls->log.data = &ls->addr_text; > + ls->log.handler = ngx_accept_log_error; > + > +#if (NGX_WIN32) > + { > + ngx_iocp_conf_t *iocpcf = NULL; > + > + if (ngx_get_conf(cf->cycle->conf_ctx, ngx_events_module)) { > + iocpcf = ngx_event_get_conf(cf->cycle->conf_ctx, ngx_iocp_module); > + } > + if (iocpcf && iocpcf->acceptex_read) { > + ls->post_accept_buffer_size = cscf->client_header_buffer_size; > + } > + } > +#endif This part is out of scope of this change, and apparently it won't compile. 
> + > + ls->type = addr->opt.type; > + ls->backlog = addr->opt.backlog; > + ls->rcvbuf = addr->opt.rcvbuf; > + ls->sndbuf = addr->opt.sndbuf; > + > + ls->keepalive = addr->opt.so_keepalive; > +#if (NGX_HAVE_KEEPALIVE_TUNABLE) > + ls->keepidle = addr->opt.tcp_keepidle; > + ls->keepintvl = addr->opt.tcp_keepintvl; > + ls->keepcnt = addr->opt.tcp_keepcnt; > +#endif > + > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > + ls->accept_filter = addr->opt.accept_filter; > +#endif > + > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > + ls->deferred_accept = addr->opt.deferred_accept; > +#endif > + > +#if (NGX_HAVE_INET6) > + ls->ipv6only = addr->opt.ipv6only; > +#endif > + > +#if (NGX_HAVE_SETFIB) > + ls->setfib = addr->opt.setfib; > +#endif This introduces accept_filter, deferred_accept, and setfib fields, which is out of scope of this change. Anyway, this is useless without corresponding support in ngx_stream_core_listen(). > + > +#if (NGX_HAVE_TCP_FASTOPEN) > + ls->fastopen = addr->opt.fastopen; > +#endif > + > +#if (NGX_HAVE_REUSEPORT) > + ls->reuseport = addr->opt.reuseport; > +#endif > + > + ls->wildcard = addr->opt.wildcard; > + > + return ls; > +} > + > + > +static ngx_int_t > +ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *hport, > + ngx_stream_conf_addr_t *addr) > +{ > + ngx_uint_t i; > + struct sockaddr_in *sin; > + ngx_stream_in_addr_t *addrs; > + ngx_stream_virtual_names_t *vn; > + > + hport->addrs = ngx_pcalloc(cf->pool, > + hport->naddrs * sizeof(ngx_stream_in_addr_t)); > + if (hport->addrs == NULL) { > + return NGX_ERROR; > + } > + > + addrs = hport->addrs; > + > + for (i = 0; i < hport->naddrs; i++) { > + > + sin = (struct sockaddr_in *) addr[i].opt.sockaddr; > + addrs[i].addr = sin->sin_addr.s_addr; > + addrs[i].conf.default_server = addr[i].default_server; > +#if (NGX_STREAM_SSL) > + addrs[i].conf.ssl = addr[i].opt.ssl; > +#endif > + addrs[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; > + > + if (addr[i].hash.buckets == NULL > + && (addr[i].wc_head == NULL > + || addr[i].wc_head->hash.buckets == NULL) > + && (addr[i].wc_tail == NULL > + || addr[i].wc_tail->hash.buckets == NULL) > +#if (NGX_PCRE) > + && addr[i].nregex == 0 > +#endif > + ) > + { > + continue; > + } > + > + vn = ngx_palloc(cf->pool, sizeof(ngx_stream_virtual_names_t)); > + if (vn == NULL) { > + return NGX_ERROR; > + } > + > + addrs[i].conf.virtual_names = vn; > + > + vn->names.hash = addr[i].hash; > + vn->names.wc_head = addr[i].wc_head; > + vn->names.wc_tail = addr[i].wc_tail; > +#if (NGX_PCRE) > + vn->nregex = addr[i].nregex; > + vn->regex = addr[i].regex; > +#endif > + } > + > + return NGX_OK; > +} > + > + > +#if (NGX_HAVE_INET6) > + > +static ngx_int_t > +ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *hport, > + ngx_stream_conf_addr_t *addr) > +{ > + ngx_uint_t i; > + struct sockaddr_in6 *sin6; > + ngx_stream_in6_addr_t *addrs6; > + ngx_stream_virtual_names_t *vn; > + > + hport->addrs = ngx_pcalloc(cf->pool, > + hport->naddrs * sizeof(ngx_stream_in6_addr_t)); > + if (hport->addrs == NULL) { > + return NGX_ERROR; > + } > + > + addrs6 = hport->addrs; > + > + for (i = 0; i < hport->naddrs; i++) { > + > + sin6 = (struct sockaddr_in6 *) addr[i].opt.sockaddr; > + addrs6[i].addr6 = sin6->sin6_addr; > + addrs6[i].conf.default_server = addr[i].default_server; > +#if (NGX_STREAM_SSL) > + addrs6[i].conf.ssl = addr[i].opt.ssl; > +#endif > + addrs6[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; > + > + if (addr[i].hash.buckets == NULL > + && (addr[i].wc_head == NULL > 
+ || addr[i].wc_head->hash.buckets == NULL) > + && (addr[i].wc_tail == NULL > + || addr[i].wc_tail->hash.buckets == NULL) > +#if (NGX_PCRE) > + && addr[i].nregex == 0 > +#endif > + ) > + { > + continue; > + } > + > + vn = ngx_palloc(cf->pool, sizeof(ngx_stream_virtual_names_t)); > + if (vn == NULL) { > + return NGX_ERROR; > + } > + > + addrs6[i].conf.virtual_names = vn; > + > + vn->names.hash = addr[i].hash; > + vn->names.wc_head = addr[i].wc_head; > + vn->names.wc_tail = addr[i].wc_tail; > +#if (NGX_PCRE) > + vn->nregex = addr[i].nregex; > + vn->regex = addr[i].regex; > +#endif > + } > + > + return NGX_OK; > +} > + > +#endif > diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h > --- a/src/stream/ngx_stream.h > +++ b/src/stream/ngx_stream.h > @@ -45,74 +45,39 @@ typedef struct { > socklen_t socklen; > ngx_str_t addr_text; > > - /* server ctx */ > - ngx_stream_conf_ctx_t *ctx; > - > + unsigned set:1; > + unsigned default_server:1; > unsigned bind:1; > unsigned wildcard:1; > unsigned ssl:1; > #if (NGX_HAVE_INET6) > unsigned ipv6only:1; > #endif > + unsigned deferred_accept:1; > unsigned reuseport:1; > unsigned so_keepalive:2; > unsigned proxy_protocol:1; > + > + int backlog; > + int rcvbuf; > + int sndbuf; > + int type; > +#if (NGX_HAVE_SETFIB) > + int setfib; > +#endif > +#if (NGX_HAVE_TCP_FASTOPEN) > + int fastopen; > +#endif > #if (NGX_HAVE_KEEPALIVE_TUNABLE) > int tcp_keepidle; > int tcp_keepintvl; > int tcp_keepcnt; > #endif > - int backlog; > - int rcvbuf; > - int sndbuf; > -#if (NGX_HAVE_TCP_FASTOPEN) > - int fastopen; > + > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > + char *accept_filter; > #endif Besides introducing unused fields, this part reshuffles backlog .. type fields without a reason. It can be as minimal as: diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h --- a/src/stream/ngx_stream.h +++ b/src/stream/ngx_stream.h @@ -45,9 +45,8 @@ typedef struct { socklen_t socklen; ngx_str_t addr_text; - /* server ctx */ - ngx_stream_conf_ctx_t *ctx; - + unsigned set:1; + unsigned default_server:1; unsigned bind:1; unsigned wildcard:1; unsigned ssl:1; > - int type; > -} ngx_stream_listen_t; > - > - > -typedef struct { > - ngx_stream_conf_ctx_t *ctx; > - ngx_str_t addr_text; > - unsigned ssl:1; > - unsigned proxy_protocol:1; > -} ngx_stream_addr_conf_t; > - > -typedef struct { > - in_addr_t addr; > - ngx_stream_addr_conf_t conf; > -} ngx_stream_in_addr_t; > - > - > -#if (NGX_HAVE_INET6) > - > -typedef struct { > - struct in6_addr addr6; > - ngx_stream_addr_conf_t conf; > -} ngx_stream_in6_addr_t; > - > -#endif > - > - > -typedef struct { > - /* ngx_stream_in_addr_t or ngx_stream_in6_addr_t */ > - void *addrs; > - ngx_uint_t naddrs; > -} ngx_stream_port_t; > - > - > -typedef struct { > - int family; > - int type; > - in_port_t port; > - ngx_array_t addrs; /* array of ngx_stream_conf_addr_t */ > -} ngx_stream_conf_port_t; > - > - > -typedef struct { > - ngx_stream_listen_t opt; > -} ngx_stream_conf_addr_t; > +} ngx_stream_listen_opt_t; > > > typedef enum { > @@ -153,7 +118,6 @@ typedef struct { > > typedef struct { > ngx_array_t servers; /* ngx_stream_core_srv_conf_t */ > - ngx_array_t listen; /* ngx_stream_listen_t */ > > ngx_stream_phase_engine_t phase_engine; > > @@ -163,16 +127,24 @@ typedef struct { > ngx_array_t prefix_variables; /* ngx_stream_variable_t */ > ngx_uint_t ncaptures; > > + ngx_uint_t server_names_hash_max_size; > + ngx_uint_t server_names_hash_bucket_size; > + > ngx_uint_t variables_hash_max_size; > ngx_uint_t 
variables_hash_bucket_size; > > ngx_hash_keys_arrays_t *variables_keys; > > + ngx_array_t *ports; > + > ngx_stream_phase_t phases[NGX_STREAM_LOG_PHASE + 1]; > } ngx_stream_core_main_conf_t; > > > typedef struct { > + /* array of the ngx_stream_server_name_t, "server_name" directive */ > + ngx_array_t server_names; > + > ngx_stream_content_handler_pt handler; > > ngx_stream_conf_ctx_t *ctx; > @@ -180,6 +152,8 @@ typedef struct { > u_char *file_name; > ngx_uint_t line; > > + ngx_str_t server_name; > + > ngx_flag_t tcp_nodelay; > size_t preread_buffer_size; > ngx_msec_t preread_timeout; > @@ -191,10 +165,99 @@ typedef struct { > > ngx_msec_t proxy_protocol_timeout; > > - ngx_uint_t listen; /* unsigned listen:1; */ > + unsigned listen:1; > +#if (NGX_PCRE) > + unsigned captures:1; > +#endif > } ngx_stream_core_srv_conf_t; > > > +/* list of structures to find core_srv_conf quickly at run time */ > + > + > +typedef struct { > +#if (NGX_PCRE) > + ngx_stream_regex_t *regex; > +#endif > + ngx_stream_core_srv_conf_t *server; /* virtual name server conf */ > + ngx_str_t name; > +} ngx_stream_server_name_t; > + > + > +typedef struct { > + ngx_hash_combined_t names; > + > + ngx_uint_t nregex; > + ngx_stream_server_name_t *regex; > +} ngx_stream_virtual_names_t; > + > + > +typedef struct { > + /* the default server configuration for this address:port */ > + ngx_stream_core_srv_conf_t *default_server; > + > + ngx_stream_virtual_names_t *virtual_names; > + > + ngx_str_t addr_text; This field is now unused. > + unsigned ssl:1; > + unsigned proxy_protocol:1; > +} ngx_stream_addr_conf_t; > + > + > +typedef struct { > + in_addr_t addr; > + ngx_stream_addr_conf_t conf; > +} ngx_stream_in_addr_t; > + > + > +#if (NGX_HAVE_INET6) > + > +typedef struct { > + struct in6_addr addr6; > + ngx_stream_addr_conf_t conf; > +} ngx_stream_in6_addr_t; > + > +#endif > + > + > +typedef struct { > + /* ngx_stream_in_addr_t or ngx_stream_in6_addr_t */ > + void *addrs; > + ngx_uint_t naddrs; > +} ngx_stream_port_t; > + > + > +typedef struct { > + int family; > + int type; > + in_port_t port; > + ngx_array_t addrs; /* array of ngx_stream_conf_addr_t */ > +} ngx_stream_conf_port_t; > + > + > +typedef struct { > + ngx_stream_listen_opt_t opt; > + > + unsigned protocols:3; > + unsigned protocols_set:1; > + unsigned protocols_changed:1; > + > + ngx_hash_t hash; > + ngx_hash_wildcard_t *wc_head; > + ngx_hash_wildcard_t *wc_tail; > + > +#if (NGX_PCRE) > + ngx_uint_t nregex; > + ngx_stream_server_name_t *regex; > +#endif > + > + /* the default server configuration for this address:port */ > + ngx_stream_core_srv_conf_t *default_server; > + ngx_array_t servers; > + /* array of ngx_stream_core_srv_conf_t */ misaligned to the right side (off by one) > +} ngx_stream_conf_addr_t; > + > + > struct ngx_stream_session_s { > uint32_t signature; /* "STRM" */ > > @@ -210,6 +273,8 @@ struct ngx_stream_session_s { > void **main_conf; > void **srv_conf; > > + ngx_stream_virtual_names_t *virtual_names; > + > ngx_stream_upstream_t *upstream; > ngx_array_t *upstream_states; > /* of ngx_stream_upstream_state_t */ > @@ -283,6 +348,8 @@ typedef struct { > #define NGX_STREAM_WRITE_BUFFERED 0x10 > > > +ngx_int_t ngx_stream_add_listen(ngx_conf_t *cf, > + ngx_stream_core_srv_conf_t *cscf, ngx_stream_listen_opt_t *lsopt); This deserves two blank lines to divide configuration and runtime functions, and somewhat similar to http. 
> void ngx_stream_core_run_phases(ngx_stream_session_t *s); > ngx_int_t ngx_stream_core_generic_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph); > @@ -290,6 +357,10 @@ ngx_int_t ngx_stream_core_preread_phase( > ngx_stream_phase_handler_t *ph); > ngx_int_t ngx_stream_core_content_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph); This deserves a blank line. > +ngx_int_t ngx_stream_find_virtual_server(ngx_stream_session_t *s, > + ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp); > +ngx_int_t ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, > + ngx_uint_t alloc); Apparently, it makes sense to reverse-order these functions to make them appear in the order they are called. > > > void ngx_stream_init_connection(ngx_connection_t *c); > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -26,6 +26,8 @@ static char *ngx_stream_core_server(ngx_ > void *conf); > static char *ngx_stream_core_listen(ngx_conf_t *cf, ngx_command_t *cmd, > void *conf); > +static char *ngx_stream_core_server_name(ngx_conf_t *cf, ngx_command_t *cmd, > + void *conf); > static char *ngx_stream_core_resolver(ngx_conf_t *cf, ngx_command_t *cmd, > void *conf); > > @@ -46,6 +48,20 @@ static ngx_command_t ngx_stream_core_co > offsetof(ngx_stream_core_main_conf_t, variables_hash_bucket_size), > NULL }, > > + { ngx_string("server_names_hash_max_size"), > + NGX_STREAM_MAIN_CONF|NGX_CONF_TAKE1, > + ngx_conf_set_num_slot, > + NGX_STREAM_MAIN_CONF_OFFSET, > + offsetof(ngx_stream_core_main_conf_t, server_names_hash_max_size), > + NULL }, > + > + { ngx_string("server_names_hash_bucket_size"), > + NGX_STREAM_MAIN_CONF|NGX_CONF_TAKE1, > + ngx_conf_set_num_slot, > + NGX_STREAM_MAIN_CONF_OFFSET, > + offsetof(ngx_stream_core_main_conf_t, server_names_hash_bucket_size), > + NULL }, > + > { ngx_string("server"), > NGX_STREAM_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_NOARGS, > ngx_stream_core_server, > @@ -60,6 +76,13 @@ static ngx_command_t ngx_stream_core_co > 0, > NULL }, > > + { ngx_string("server_name"), > + NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, > + ngx_stream_core_server_name, > + NGX_STREAM_SRV_CONF_OFFSET, > + 0, > + NULL }, > + > { ngx_string("error_log"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, > ngx_stream_core_error_log, > @@ -413,6 +436,149 @@ ngx_stream_core_content_phase(ngx_stream > } > > > +ngx_int_t > +ngx_stream_find_virtual_server(ngx_stream_session_t *s, > + ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp) > +{ > + ngx_stream_core_srv_conf_t *cscf; > + > + if (s->virtual_names == NULL) { > + return NGX_DECLINED; > + } > + > + cscf = ngx_hash_find_combined(&s->virtual_names->names, > + ngx_hash_key(host->data, host->len), > + host->data, host->len); > + > + if (cscf) { > + *cscfp = cscf; > + return NGX_OK; > + } > + > +#if (NGX_PCRE) > + > + if (host->len && s->virtual_names->nregex) { > + ngx_int_t n; > + ngx_uint_t i; > + ngx_stream_server_name_t *sn; > + > + sn = s->virtual_names->regex; > + > + for (i = 0; i < s->virtual_names->nregex; i++) { > + > + n = ngx_stream_regex_exec(s, sn[i].regex, host); > + > + if (n == NGX_DECLINED) { > + continue; > + } > + > + if (n == NGX_OK) { > + *cscfp = sn[i].server; > + return NGX_OK; > + } > + > + return NGX_ERROR; > + } > + } > + > +#endif /* NGX_PCRE */ > + > + return NGX_DECLINED; > +} > + > + > +ngx_int_t > +ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, ngx_uint_t alloc) > +{ > + 
u_char *h, ch; > + size_t i, dot_pos, host_len; > + > + enum { > + sw_usual = 0, > + sw_literal, > + sw_rest > + } state; > + > + dot_pos = host->len; > + host_len = host->len; > + > + h = host->data; > + > + state = sw_usual; > + > + for (i = 0; i < host->len; i++) { > + ch = h[i]; > + > + switch (ch) { > + > + case '.': > + if (dot_pos == i - 1) { > + return NGX_DECLINED; > + } > + dot_pos = i; > + break; > + > + case ':': > + if (state == sw_usual) { > + host_len = i; > + state = sw_rest; > + } > + break; > + > + case '[': > + if (i == 0) { > + state = sw_literal; > + } > + break; > + > + case ']': > + if (state == sw_literal) { > + host_len = i + 1; > + state = sw_rest; > + } > + break; > + > + default: > + > + if (ngx_path_separator(ch)) { > + return NGX_DECLINED; > + } > + > + if (ch <= 0x20 || ch == 0x7f) { > + return NGX_DECLINED; > + } > + > + if (ch >= 'A' && ch <= 'Z') { > + alloc = 1; > + } > + > + break; > + } > + } > + > + if (dot_pos == host_len - 1) { > + host_len--; > + } > + > + if (host_len == 0) { > + return NGX_DECLINED; > + } > + > + if (alloc) { > + host->data = ngx_pnalloc(pool, host_len); > + if (host->data == NULL) { > + return NGX_ERROR; > + } > + > + ngx_strlow(host->data, h, host_len); > + } > + > + host->len = host_len; > + > + return NGX_OK; > +} Same here. > + > + > static ngx_int_t > ngx_stream_core_preconfiguration(ngx_conf_t *cf) > { > @@ -437,11 +603,8 @@ ngx_stream_core_create_main_conf(ngx_con > return NULL; > } > > - if (ngx_array_init(&cmcf->listen, cf->pool, 4, sizeof(ngx_stream_listen_t)) > - != NGX_OK) > - { > - return NULL; > - } > + cmcf->server_names_hash_max_size = NGX_CONF_UNSET_UINT; > + cmcf->server_names_hash_bucket_size = NGX_CONF_UNSET_UINT; > > cmcf->variables_hash_max_size = NGX_CONF_UNSET_UINT; > cmcf->variables_hash_bucket_size = NGX_CONF_UNSET_UINT; > @@ -455,6 +618,14 @@ ngx_stream_core_init_main_conf(ngx_conf_ > { > ngx_stream_core_main_conf_t *cmcf = conf; > > + ngx_conf_init_uint_value(cmcf->server_names_hash_max_size, 512); > + ngx_conf_init_uint_value(cmcf->server_names_hash_bucket_size, > + ngx_cacheline_size); > + > + cmcf->server_names_hash_bucket_size = > + ngx_align(cmcf->server_names_hash_bucket_size, ngx_cacheline_size); > + > + extra blank line > ngx_conf_init_uint_value(cmcf->variables_hash_max_size, 1024); > ngx_conf_init_uint_value(cmcf->variables_hash_bucket_size, 64); > > @@ -486,6 +657,13 @@ ngx_stream_core_create_srv_conf(ngx_conf > * cscf->error_log = NULL; > */ > > + if (ngx_array_init(&cscf->server_names, cf->temp_pool, 4, > + sizeof(ngx_stream_server_name_t)) > + != NGX_OK) > + { > + return NULL; > + } > + > cscf->file_name = cf->conf_file->file.name.data; > cscf->line = cf->conf_file->line; > cscf->resolver_timeout = NGX_CONF_UNSET_MSEC; > @@ -504,6 +682,9 @@ ngx_stream_core_merge_srv_conf(ngx_conf_ > ngx_stream_core_srv_conf_t *prev = parent; > ngx_stream_core_srv_conf_t *conf = child; > > + ngx_str_t name; > + ngx_stream_server_name_t *sn; > + > ngx_conf_merge_msec_value(conf->resolver_timeout, > prev->resolver_timeout, 30000); > > @@ -551,6 +732,37 @@ ngx_stream_core_merge_srv_conf(ngx_conf_ > ngx_conf_merge_msec_value(conf->preread_timeout, > prev->preread_timeout, 30000); > > + if (conf->server_names.nelts == 0) { > + /* the array has 4 empty preallocated elements, so push cannot fail */ > + sn = ngx_array_push(&conf->server_names); > +#if (NGX_PCRE) > + sn->regex = NULL; > +#endif > + sn->server = conf; > + ngx_str_set(&sn->name, ""); > + } > + > + sn = conf->server_names.elts; > + name = sn[0].name; 
> + > +#if (NGX_PCRE) > + if (sn->regex) { > + name.len++; > + name.data--; > + } else > +#endif > + > + if (name.data[0] == '.') { > + name.len--; > + name.data++; > + } > + > + conf->server_name.len = name.len; > + conf->server_name.data = ngx_pstrdup(cf->pool, &name); > + if (conf->server_name.data == NULL) { > + return NGX_CONF_ERROR; > + } > + > return NGX_CONF_OK; > } > > @@ -650,11 +862,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > { > ngx_stream_core_srv_conf_t *cscf = conf; > > - ngx_str_t *value, size; > - ngx_url_t u; > - ngx_uint_t i, n, backlog; > - ngx_stream_listen_t *ls, *als, *nls; > - ngx_stream_core_main_conf_t *cmcf; > + ngx_str_t *value, size; > + ngx_url_t u; > + ngx_uint_t i, n, backlog; > + ngx_stream_listen_opt_t lsopt; > > cscf->listen = 1; > > @@ -675,51 +886,48 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > return NGX_CONF_ERROR; > } > > - cmcf = ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); > - > - ls = ngx_array_push(&cmcf->listen); > - if (ls == NULL) { > - return NGX_CONF_ERROR; > - } > - > - ngx_memzero(ls, sizeof(ngx_stream_listen_t)); > + ngx_memzero(&lsopt, sizeof(ngx_stream_listen_opt_t)); > > - ls->backlog = NGX_LISTEN_BACKLOG; > - ls->rcvbuf = -1; > - ls->sndbuf = -1; > - ls->type = SOCK_STREAM; > - ls->ctx = cf->ctx; > - > + lsopt.backlog = NGX_LISTEN_BACKLOG; > + lsopt.type = SOCK_STREAM; > + lsopt.rcvbuf = -1; > + lsopt.sndbuf = -1; > #if (NGX_HAVE_TCP_FASTOPEN) > - ls->fastopen = -1; > + lsopt.fastopen = -1; > #endif > - > #if (NGX_HAVE_INET6) > - ls->ipv6only = 1; > + lsopt.ipv6only = 1; > #endif > > backlog = 0; > > for (i = 2; i < cf->args->nelts; i++) { > > + if (ngx_strcmp(value[i].data, "default_server") == 0 > + || ngx_strcmp(value[i].data, "default") == 0) I don't think we should reintroduce "default" legacy in stream. > + { > + lsopt.default_server = 1; > + continue; > + } > + > #if !(NGX_WIN32) > if (ngx_strcmp(value[i].data, "udp") == 0) { > - ls->type = SOCK_DGRAM; > + lsopt.type = SOCK_DGRAM; > continue; > } > #endif > > if (ngx_strcmp(value[i].data, "bind") == 0) { > - ls->bind = 1; > + lsopt.bind = 1; Note that here and below, setting lsopt.set is missing. This renders unusable duplicate socket-level listen parameter checks in ngx_stream_add_addresses(). Aside from that, there are several unimproved error messages in ngx_stream_core_listen() such as "bind ipv6only is not supported". They were fixed once in http in 1b05b9bbcebf, but similar fixes were missed in mail at the time. Then stream was based on mail, they reappeared there. It makes sense to fix them separately. 
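For illustration only, the convention referred to above (as done in ngx_http_core_listen(); shown here merely as a sketch, not as part of the patch) is to mark each socket-level parameter with lsopt.set as well:

        if (ngx_strcmp(value[i].data, "bind") == 0) {
            lsopt.set = 1;
            lsopt.bind = 1;
            continue;
        }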
> continue; > } > > #if (NGX_HAVE_TCP_FASTOPEN) > if (ngx_strncmp(value[i].data, "fastopen=", 9) == 0) { > - ls->fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9); > - ls->bind = 1; > + lsopt.fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9); > + lsopt.bind = 1; > > - if (ls->fastopen == NGX_ERROR) { > + if (lsopt.fastopen == NGX_ERROR) { > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > "invalid fastopen \"%V\"", &value[i]); > return NGX_CONF_ERROR; > @@ -730,10 +938,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > #endif > > if (ngx_strncmp(value[i].data, "backlog=", 8) == 0) { > - ls->backlog = ngx_atoi(value[i].data + 8, value[i].len - 8); > - ls->bind = 1; > + lsopt.backlog = ngx_atoi(value[i].data + 8, value[i].len - 8); > + lsopt.bind = 1; > > - if (ls->backlog == NGX_ERROR || ls->backlog == 0) { > + if (lsopt.backlog == NGX_ERROR || lsopt.backlog == 0) { > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > "invalid backlog \"%V\"", &value[i]); > return NGX_CONF_ERROR; > @@ -748,10 +956,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > size.len = value[i].len - 7; > size.data = value[i].data + 7; > > - ls->rcvbuf = ngx_parse_size(&size); > - ls->bind = 1; > + lsopt.rcvbuf = ngx_parse_size(&size); > + lsopt.bind = 1; > > - if (ls->rcvbuf == NGX_ERROR) { > + if (lsopt.rcvbuf == NGX_ERROR) { > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > "invalid rcvbuf \"%V\"", &value[i]); > return NGX_CONF_ERROR; > @@ -764,10 +972,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > size.len = value[i].len - 7; > size.data = value[i].data + 7; > > - ls->sndbuf = ngx_parse_size(&size); > - ls->bind = 1; > + lsopt.sndbuf = ngx_parse_size(&size); > + lsopt.bind = 1; > > - if (ls->sndbuf == NGX_ERROR) { > + if (lsopt.sndbuf == NGX_ERROR) { > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > "invalid sndbuf \"%V\"", &value[i]); > return NGX_CONF_ERROR; > @@ -779,10 +987,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) { > #if (NGX_HAVE_INET6 && defined IPV6_V6ONLY) > if (ngx_strcmp(&value[i].data[10], "n") == 0) { > - ls->ipv6only = 1; > + lsopt.ipv6only = 1; > > } else if (ngx_strcmp(&value[i].data[10], "ff") == 0) { > - ls->ipv6only = 0; > + lsopt.ipv6only = 0; > > } else { > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > @@ -791,7 +999,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > return NGX_CONF_ERROR; > } > > - ls->bind = 1; > + lsopt.bind = 1; > continue; > #else > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > @@ -803,8 +1011,8 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > if (ngx_strcmp(value[i].data, "reuseport") == 0) { > #if (NGX_HAVE_REUSEPORT) > - ls->reuseport = 1; > - ls->bind = 1; > + lsopt.reuseport = 1; > + lsopt.bind = 1; > #else > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > "reuseport is not supported " > @@ -824,7 +1032,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > sslcf->file = cf->conf_file->file.name.data; > sslcf->line = cf->conf_file->line; > > - ls->ssl = 1; > + lsopt.ssl = 1; Note that your change keeps sslcf->listen, checked in ngx_stream_core_merge_srv_conf(). Currently, without virtual servers support, this is perfectly fine because if you didn't specify the listen ssl parameter, then no need to create ssl context and check/load certificates. 
With virtual servers support though, sslcf->listen makes harm, because you cannot specify non-default servers with ssl parameter, but without certificates, which is pretty valid: server { listen 127.0.0.1:8091 ssl; server_name foo; return FOO; ssl_certificate_key localhost.key; ssl_certificate localhost.crt; } server { listen 127.0.0.1:8091 ssl; server_name bar; return BAR; } nginx: [emerg] no "ssl_certificate" is defined for the "listen ... ssl" directive So it should be removed and replaced with appropriate certificate checks in ngx_stream_core_merge_srv_conf(). I propose to take the checks from ngx_http_core_merge_srv_conf(). Additionally, this will buy us the missing "ssl_reject_handshake" functionality, to selectively disable SSL handshakes in virtual servers based on SNI. > > continue; > #else > @@ -838,10 +1046,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > if (ngx_strncmp(value[i].data, "so_keepalive=", 13) == 0) { > > if (ngx_strcmp(&value[i].data[13], "on") == 0) { > - ls->so_keepalive = 1; > + lsopt.so_keepalive = 1; > > } else if (ngx_strcmp(&value[i].data[13], "off") == 0) { > - ls->so_keepalive = 2; > + lsopt.so_keepalive = 2; > > } else { > > @@ -860,8 +1068,8 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > if (p > s.data) { > s.len = p - s.data; > > - ls->tcp_keepidle = ngx_parse_time(&s, 1); > - if (ls->tcp_keepidle == (time_t) NGX_ERROR) { > + lsopt.tcp_keepidle = ngx_parse_time(&s, 1); > + if (lsopt.tcp_keepidle == (time_t) NGX_ERROR) { > goto invalid_so_keepalive; > } > } > @@ -876,8 +1084,8 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > if (p > s.data) { > s.len = p - s.data; > > - ls->tcp_keepintvl = ngx_parse_time(&s, 1); > - if (ls->tcp_keepintvl == (time_t) NGX_ERROR) { > + lsopt.tcp_keepintvl = ngx_parse_time(&s, 1); > + if (lsopt.tcp_keepintvl == (time_t) NGX_ERROR) { > goto invalid_so_keepalive; > } > } > @@ -887,19 +1095,19 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > if (s.data < end) { > s.len = end - s.data; > > - ls->tcp_keepcnt = ngx_atoi(s.data, s.len); > - if (ls->tcp_keepcnt == NGX_ERROR) { > + lsopt.tcp_keepcnt = ngx_atoi(s.data, s.len); > + if (lsopt.tcp_keepcnt == NGX_ERROR) { > goto invalid_so_keepalive; > } > } > > - if (ls->tcp_keepidle == 0 && ls->tcp_keepintvl == 0 > - && ls->tcp_keepcnt == 0) > + if (lsopt.tcp_keepidle == 0 && lsopt.tcp_keepintvl == 0 > + && lsopt.tcp_keepcnt == 0) > { > goto invalid_so_keepalive; > } > > - ls->so_keepalive = 1; > + lsopt.so_keepalive = 1; > > #else > > @@ -911,7 +1119,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > #endif > } > > - ls->bind = 1; > + lsopt.bind = 1; > > continue; > > @@ -926,7 +1134,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > } > > if (ngx_strcmp(value[i].data, "proxy_protocol") == 0) { > - ls->proxy_protocol = 1; > + lsopt.proxy_protocol = 1; > continue; > } > > @@ -935,27 +1143,27 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > return NGX_CONF_ERROR; > } > > - if (ls->type == SOCK_DGRAM) { > + if (lsopt.type == SOCK_DGRAM) { > if (backlog) { > return "\"backlog\" parameter is incompatible with \"udp\""; > } > > #if (NGX_STREAM_SSL) > - if (ls->ssl) { > + if (lsopt.ssl) { > return "\"ssl\" parameter is incompatible with \"udp\""; > } > #endif > > - if (ls->so_keepalive) { > + if (lsopt.so_keepalive) { > return "\"so_keepalive\" parameter is incompatible with \"udp\""; > } > > - if (ls->proxy_protocol) { > + if (lsopt.proxy_protocol) { > return "\"proxy_protocol\" parameter is incompatible with \"udp\""; > } > > #if (NGX_HAVE_TCP_FASTOPEN) > - if (ls->fastopen != -1) { > + if (lsopt.fastopen 
!= -1) { > return "\"fastopen\" parameter is incompatible with \"udp\""; > } > #endif > @@ -972,40 +1180,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > } > } > > - if (n != 0) { > - nls = ngx_array_push(&cmcf->listen); > - if (nls == NULL) { > - return NGX_CONF_ERROR; > - } > - > - *nls = *ls; > - > - } else { > - nls = ls; > - } > - > - nls->sockaddr = u.addrs[n].sockaddr; > - nls->socklen = u.addrs[n].socklen; > - nls->addr_text = u.addrs[n].name; > - nls->wildcard = ngx_inet_wildcard(nls->sockaddr); > + lsopt.sockaddr = u.addrs[n].sockaddr; > + lsopt.socklen = u.addrs[n].socklen; > + lsopt.addr_text = u.addrs[n].name; > + lsopt.wildcard = ngx_inet_wildcard(lsopt.sockaddr); > > - als = cmcf->listen.elts; > - > - for (i = 0; i < cmcf->listen.nelts - 1; i++) { > - if (nls->type != als[i].type) { > - continue; > - } > - > - if (ngx_cmp_sockaddr(als[i].sockaddr, als[i].socklen, > - nls->sockaddr, nls->socklen, 1) > - != NGX_OK) > - { > - continue; > - } > - > - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > - "duplicate \"%V\" address and port pair", > - &nls->addr_text); > + if (ngx_stream_add_listen(cf, cscf, &lsopt) != NGX_OK) { > return NGX_CONF_ERROR; > } > > @@ -1018,6 +1198,107 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > > static char * > +ngx_stream_core_server_name(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > +{ > + ngx_stream_core_srv_conf_t *cscf = conf; > + > + u_char ch; > + ngx_str_t *value; > + ngx_uint_t i; > + ngx_stream_server_name_t *sn; wrong indentation > + > + value = cf->args->elts; > + > + for (i = 1; i < cf->args->nelts; i++) { > + > + ch = value[i].data[0]; > + > + if ((ch == '*' && (value[i].len < 3 || value[i].data[1] != '.')) > + || (ch == '.' && value[i].len < 2)) > + { > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "server name \"%V\" is invalid", &value[i]); > + return NGX_CONF_ERROR; > + } > + > + if (ngx_strchr(value[i].data, '/')) { > + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, > + "server name \"%V\" has suspicious symbols", > + &value[i]); > + } > + > + sn = ngx_array_push(&cscf->server_names); > + if (sn == NULL) { > + return NGX_CONF_ERROR; > + } > + > +#if (NGX_PCRE) > + sn->regex = NULL; > +#endif > + sn->server = cscf; > + > + if (ngx_strcasecmp(value[i].data, (u_char *) "$hostname") == 0) { > + sn->name = cf->cycle->hostname; > + > + } else { > + sn->name = value[i]; > + } > + > + if (value[i].data[0] != '~') { > + ngx_strlow(sn->name.data, sn->name.data, sn->name.len); > + continue; > + } > + > +#if (NGX_PCRE) > + { > + u_char *p; > + ngx_regex_compile_t rc; > + u_char errstr[NGX_MAX_CONF_ERRSTR]; > + > + if (value[i].len == 1) { > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "empty regex in server name \"%V\"", &value[i]); > + return NGX_CONF_ERROR; > + } > + > + value[i].len--; > + value[i].data++; > + > + ngx_memzero(&rc, sizeof(ngx_regex_compile_t)); > + > + rc.pattern = value[i]; > + rc.err.len = NGX_MAX_CONF_ERRSTR; > + rc.err.data = errstr; > + > + for (p = value[i].data; p < value[i].data + value[i].len; p++) { > + if (*p >= 'A' && *p <= 'Z') { > + rc.options = NGX_REGEX_CASELESS; > + break; > + } > + } > + > + sn->regex = ngx_stream_regex_compile(cf, &rc); > + if (sn->regex == NULL) { > + return NGX_CONF_ERROR; > + } > + > + sn->name = value[i]; > + cscf->captures = (rc.captures > 0); > + } > +#else > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "using regex \"%V\" " > + "requires PCRE library", &value[i]); > + > + return NGX_CONF_ERROR; > +#endif > + } > + > + return NGX_CONF_OK; > +} > + > + > +static char * > 
ngx_stream_core_resolver(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > { > ngx_stream_core_srv_conf_t *cscf = conf; > diff --git a/src/stream/ngx_stream_handler.c b/src/stream/ngx_stream_handler.c > --- a/src/stream/ngx_stream_handler.c > +++ b/src/stream/ngx_stream_handler.c > @@ -30,6 +30,7 @@ ngx_stream_init_connection(ngx_connectio > struct sockaddr_in *sin; > ngx_stream_in_addr_t *addr; > ngx_stream_session_t *s; > + ngx_stream_conf_ctx_t *ctx; > ngx_stream_addr_conf_t *addr_conf; > #if (NGX_HAVE_INET6) > struct sockaddr_in6 *sin6; > @@ -121,9 +122,12 @@ ngx_stream_init_connection(ngx_connectio > return; > } > > + ctx = addr_conf->default_server->ctx; > + > s->signature = NGX_STREAM_MODULE; > - s->main_conf = addr_conf->ctx->main_conf; > - s->srv_conf = addr_conf->ctx->srv_conf; > + s->main_conf = ctx->main_conf; > + s->srv_conf = ctx->srv_conf; > + s->virtual_names = addr_conf->virtual_names; > > #if (NGX_STREAM_SSL) > s->ssl = addr_conf->ssl; > @@ -144,7 +148,7 @@ ngx_stream_init_connection(ngx_connectio > > ngx_log_error(NGX_LOG_INFO, c->log, 0, "*%uA %sclient %*s connected to %V", > c->number, c->type == SOCK_DGRAM ? "udp " : "", > - len, text, &addr_conf->addr_text); > + len, text, &c->listening->addr_text); > > c->log->connection = c->number; > c->log->handler = ngx_stream_log_error; > diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c > --- a/src/stream/ngx_stream_ssl_module.c > +++ b/src/stream/ngx_stream_ssl_module.c > @@ -458,7 +458,104 @@ ngx_stream_ssl_handshake_handler(ngx_con > static int > ngx_stream_ssl_servername(ngx_ssl_conn_t *ssl_conn, int *ad, void *arg) > { > + ngx_int_t rc; > + ngx_str_t host; > + const char *servername; > + ngx_connection_t *c; > + ngx_stream_session_t *s; > + ngx_stream_ssl_conf_t *sscf; Note that stream (as well as mail) consistently uses sslcf naming for keeping ssl configuration, unlike in http. Probably it makes sense for a separate sweeping change with renaming sslcf to sscf. > + ngx_stream_core_srv_conf_t *cscf; > + > + c = ngx_ssl_get_connection(ssl_conn); > + > + if (c->ssl->handshaked) { > + *ad = SSL_AD_NO_RENEGOTIATION; > + return SSL_TLSEXT_ERR_ALERT_FATAL; > + } > + > + s = c->data; > + > + servername = SSL_get_servername(ssl_conn, TLSEXT_NAMETYPE_host_name); > + > + if (servername == NULL) { > + ngx_log_debug0(NGX_LOG_DEBUG_STREAM, c->log, 0, > + "SSL server name: null"); > + goto done; > + } > + > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, > + "SSL server name: \"%s\"", servername); > + > + host.len = ngx_strlen(servername); > + > + if (host.len == 0) { > + goto done; > + } > + > + host.data = (u_char *) servername; > + > + rc = ngx_stream_validate_host(&host, c->pool, 1); > + > + if (rc == NGX_ERROR) { > + goto error; > + } > + > + if (rc == NGX_DECLINED) { > + goto done; > + } > + > + rc = ngx_stream_find_virtual_server(s, &host, &cscf); > + > + if (rc == NGX_ERROR) { > + goto error; > + } > + > + if (rc == NGX_DECLINED) { > + goto done; > + } > + > + s->srv_conf = cscf->ctx->srv_conf; > + > + sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); Looks like a copy-paste error from http, where connection log is set based on the location configuration. Here it just makes sense to move setting sscf closer to its use. 
> + > + ngx_set_connection_log(c, cscf->error_log); > + > + if (sscf->ssl.ctx) { > + if (SSL_set_SSL_CTX(ssl_conn, sscf->ssl.ctx) == NULL) { > + goto error; > + } > + > + /* > + * SSL_set_SSL_CTX() only changes certs as of 1.0.0d > + * adjust other things we care about > + */ > + > + SSL_set_verify(ssl_conn, SSL_CTX_get_verify_mode(sscf->ssl.ctx), > + SSL_CTX_get_verify_callback(sscf->ssl.ctx)); > + > + SSL_set_verify_depth(ssl_conn, SSL_CTX_get_verify_depth(sscf->ssl.ctx)); > + > +#if OPENSSL_VERSION_NUMBER >= 0x009080dfL > + /* only in 0.9.8m+ */ > + SSL_clear_options(ssl_conn, SSL_get_options(ssl_conn) & > + ~SSL_CTX_get_options(sscf->ssl.ctx)); > +#endif > + > + SSL_set_options(ssl_conn, SSL_CTX_get_options(sscf->ssl.ctx)); > + > +#ifdef SSL_OP_NO_RENEGOTIATION > + SSL_set_options(ssl_conn, SSL_OP_NO_RENEGOTIATION); > +#endif > + } > + > +done: > + The reject_handshake functionality is missing there, it could be added in this change or separately (see below for a proposed addendum). > return SSL_TLSEXT_ERR_OK; > + > +error: > + > + *ad = SSL_AD_INTERNAL_ERROR; > + return SSL_TLSEXT_ERR_ALERT_FATAL; > } > > #endif > diff --git a/src/stream/ngx_stream_ssl_preread_module.c b/src/stream/ngx_stream_ssl_preread_module.c > --- a/src/stream/ngx_stream_ssl_preread_module.c > +++ b/src/stream/ngx_stream_ssl_preread_module.c > @@ -33,6 +33,8 @@ typedef struct { > static ngx_int_t ngx_stream_ssl_preread_handler(ngx_stream_session_t *s); > static ngx_int_t ngx_stream_ssl_preread_parse_record( > ngx_stream_ssl_preread_ctx_t *ctx, u_char *pos, u_char *last); > +static ngx_int_t ngx_stream_ssl_preread_servername(ngx_stream_session_t *s, > + ngx_str_t *servername); > static ngx_int_t ngx_stream_ssl_preread_protocol_variable( > ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_stream_ssl_preread_server_name_variable( > @@ -187,6 +189,10 @@ ngx_stream_ssl_preread_handler(ngx_strea > return NGX_DECLINED; > } > > + if (rc == NGX_OK) { > + return ngx_stream_ssl_preread_servername(s, &ctx->host); > + } > + > if (rc != NGX_AGAIN) { > return rc; > } > @@ -404,9 +410,6 @@ ngx_stream_ssl_preread_parse_record(ngx_ > case sw_sni_host: > ctx->host.len = (p[1] << 8) + p[2]; > > - ngx_log_debug1(NGX_LOG_DEBUG_STREAM, ctx->log, 0, > - "ssl preread: SNI hostname \"%V\"", &ctx->host); > - > state = sw_ext; > dst = NULL; > size = ext; > @@ -497,6 +500,56 @@ ngx_stream_ssl_preread_parse_record(ngx_ > > > static ngx_int_t > +ngx_stream_ssl_preread_servername(ngx_stream_session_t *s, > + ngx_str_t *servername) > +{ > + ngx_int_t rc; > + ngx_str_t host; > + ngx_connection_t *c; > + ngx_stream_core_srv_conf_t *cscf; > + > + c = s->connection; > + > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, > + "SSL preread server name: \"%V\"", servername); > + > + if (servername->len == 0) { > + return NGX_OK; > + } > + > + host = *servername; > + > + rc = ngx_stream_validate_host(&host, c->pool, 1); > + > + if (rc == NGX_ERROR) { > + return NGX_ERROR; > + } > + > + if (rc == NGX_DECLINED) { > + return NGX_OK; > + } > + > + rc = ngx_stream_find_virtual_server(s, &host, &cscf); > + > + if (rc == NGX_ERROR) { > + return NGX_ERROR; > + } > + > + if (rc == NGX_DECLINED) { > + return NGX_OK; > + } > + > + s->srv_conf = cscf->ctx->srv_conf; > + > + cscf = ngx_stream_get_module_srv_conf(s, ngx_stream_core_module); The server configuration is already obtained in ngx_stream_find_virtual_server(), no need to do this again. 
> + > + ngx_set_connection_log(c, cscf->error_log); > + > + return NGX_OK; > +} > + > + > +static ngx_int_t > ngx_stream_ssl_preread_protocol_variable(ngx_stream_session_t *s, > ngx_variable_value_t *v, uintptr_t data) > { Together, this makes the following update on top of your change: diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c --- a/src/stream/ngx_stream.c +++ b/src/stream/ngx_stream.c @@ -92,7 +92,7 @@ static char * ngx_stream_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { char *rv; - ngx_uint_t m, mi, s; + ngx_uint_t mi, m, s; ngx_conf_t pcf; ngx_stream_module_t *module; ngx_stream_conf_ctx_t *ctx; @@ -918,7 +918,7 @@ ngx_stream_init_listening(ngx_conf_t *cf { ngx_uint_t i, last, bind_wildcard; ngx_listening_t *ls; - ngx_stream_port_t *hport; + ngx_stream_port_t *stport; ngx_stream_conf_addr_t *addr; addr = port->addrs.elts; @@ -953,26 +953,26 @@ ngx_stream_init_listening(ngx_conf_t *cf return NGX_ERROR; } - hport = ngx_pcalloc(cf->pool, sizeof(ngx_stream_port_t)); - if (hport == NULL) { + stport = ngx_palloc(cf->pool, sizeof(ngx_stream_port_t)); + if (stport == NULL) { return NGX_ERROR; } - ls->servers = hport; + ls->servers = stport; - hport->naddrs = i + 1; + stport->naddrs = i + 1; switch (ls->sockaddr->sa_family) { #if (NGX_HAVE_INET6) case AF_INET6: - if (ngx_stream_add_addrs6(cf, hport, addr) != NGX_OK) { + if (ngx_stream_add_addrs6(cf, stport, addr) != NGX_OK) { return NGX_ERROR; } break; #endif default: /* AF_INET */ - if (ngx_stream_add_addrs(cf, hport, addr) != NGX_OK) { + if (ngx_stream_add_addrs(cf, stport, addr) != NGX_OK) { return NGX_ERROR; } break; @@ -1001,26 +1001,14 @@ ngx_stream_add_listening(ngx_conf_t *cf, ls->handler = ngx_stream_init_connection; + ls->pool_size = 256; + cscf = addr->default_server; - ls->pool_size = 256; ls->logp = cscf->error_log; ls->log.data = &ls->addr_text; ls->log.handler = ngx_accept_log_error; -#if (NGX_WIN32) - { - ngx_iocp_conf_t *iocpcf = NULL; - - if (ngx_get_conf(cf->cycle->conf_ctx, ngx_events_module)) { - iocpcf = ngx_event_get_conf(cf->cycle->conf_ctx, ngx_iocp_module); - } - if (iocpcf && iocpcf->acceptex_read) { - ls->post_accept_buffer_size = cscf->client_header_buffer_size; - } - } -#endif - ls->type = addr->opt.type; ls->backlog = addr->opt.backlog; ls->rcvbuf = addr->opt.rcvbuf; @@ -1033,22 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, ls->keepcnt = addr->opt.tcp_keepcnt; #endif -#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) - ls->accept_filter = addr->opt.accept_filter; -#endif - -#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) - ls->deferred_accept = addr->opt.deferred_accept; -#endif - #if (NGX_HAVE_INET6) ls->ipv6only = addr->opt.ipv6only; #endif -#if (NGX_HAVE_SETFIB) - ls->setfib = addr->opt.setfib; -#endif - #if (NGX_HAVE_TCP_FASTOPEN) ls->fastopen = addr->opt.fastopen; #endif @@ -1064,7 +1040,7 @@ ngx_stream_add_listening(ngx_conf_t *cf, static ngx_int_t -ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *hport, +ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *stport, ngx_stream_conf_addr_t *addr) { ngx_uint_t i; @@ -1072,15 +1048,15 @@ ngx_stream_add_addrs(ngx_conf_t *cf, ngx ngx_stream_in_addr_t *addrs; ngx_stream_virtual_names_t *vn; - hport->addrs = ngx_pcalloc(cf->pool, - hport->naddrs * sizeof(ngx_stream_in_addr_t)); - if (hport->addrs == NULL) { + stport->addrs = ngx_pcalloc(cf->pool, + stport->naddrs * sizeof(ngx_stream_in_addr_t)); + if (stport->addrs == NULL) { return NGX_ERROR; } - addrs = hport->addrs; + addrs = stport->addrs; - for 
(i = 0; i < hport->naddrs; i++) { + for (i = 0; i < stport->naddrs; i++) { sin = (struct sockaddr_in *) addr[i].opt.sockaddr; addrs[i].addr = sin->sin_addr.s_addr; @@ -1126,7 +1102,7 @@ ngx_stream_add_addrs(ngx_conf_t *cf, ngx #if (NGX_HAVE_INET6) static ngx_int_t -ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *hport, +ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *stport, ngx_stream_conf_addr_t *addr) { ngx_uint_t i; @@ -1134,15 +1110,15 @@ ngx_stream_add_addrs6(ngx_conf_t *cf, ng ngx_stream_in6_addr_t *addrs6; ngx_stream_virtual_names_t *vn; - hport->addrs = ngx_pcalloc(cf->pool, - hport->naddrs * sizeof(ngx_stream_in6_addr_t)); - if (hport->addrs == NULL) { + stport->addrs = ngx_pcalloc(cf->pool, + stport->naddrs * sizeof(ngx_stream_in6_addr_t)); + if (stport->addrs == NULL) { return NGX_ERROR; } - addrs6 = hport->addrs; + addrs6 = stport->addrs; - for (i = 0; i < hport->naddrs; i++) { + for (i = 0; i < stport->naddrs; i++) { sin6 = (struct sockaddr_in6 *) addr[i].opt.sockaddr; addrs6[i].addr6 = sin6->sin6_addr; diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h --- a/src/stream/ngx_stream.h +++ b/src/stream/ngx_stream.h @@ -53,30 +53,21 @@ typedef struct { #if (NGX_HAVE_INET6) unsigned ipv6only:1; #endif - unsigned deferred_accept:1; unsigned reuseport:1; unsigned so_keepalive:2; unsigned proxy_protocol:1; - - int backlog; - int rcvbuf; - int sndbuf; - int type; -#if (NGX_HAVE_SETFIB) - int setfib; -#endif -#if (NGX_HAVE_TCP_FASTOPEN) - int fastopen; -#endif #if (NGX_HAVE_KEEPALIVE_TUNABLE) int tcp_keepidle; int tcp_keepintvl; int tcp_keepcnt; #endif - -#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) - char *accept_filter; + int backlog; + int rcvbuf; + int sndbuf; +#if (NGX_HAVE_TCP_FASTOPEN) + int fastopen; #endif + int type; } ngx_stream_listen_opt_t; @@ -198,7 +189,6 @@ typedef struct { ngx_stream_virtual_names_t *virtual_names; - ngx_str_t addr_text; unsigned ssl:1; unsigned proxy_protocol:1; } ngx_stream_addr_conf_t; @@ -254,7 +244,7 @@ typedef struct { /* the default server configuration for this address:port */ ngx_stream_core_srv_conf_t *default_server; ngx_array_t servers; - /* array of ngx_stream_core_srv_conf_t */ + /* array of ngx_stream_core_srv_conf_t */ } ngx_stream_conf_addr_t; @@ -350,6 +340,8 @@ typedef struct { ngx_int_t ngx_stream_add_listen(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, ngx_stream_listen_opt_t *lsopt); + + void ngx_stream_core_run_phases(ngx_stream_session_t *s); ngx_int_t ngx_stream_core_generic_phase(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph); @@ -357,11 +349,11 @@ ngx_int_t ngx_stream_core_preread_phase( ngx_stream_phase_handler_t *ph); ngx_int_t ngx_stream_core_content_phase(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph); + +ngx_int_t ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, + ngx_uint_t alloc); ngx_int_t ngx_stream_find_virtual_server(ngx_stream_session_t *s, ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp); -ngx_int_t ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, - ngx_uint_t alloc); - void ngx_stream_init_connection(ngx_connection_t *c); void ngx_stream_session_handler(ngx_event_t *rev); diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -437,57 +437,6 @@ ngx_stream_core_content_phase(ngx_stream ngx_int_t -ngx_stream_find_virtual_server(ngx_stream_session_t *s, - ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp) 
-{ - ngx_stream_core_srv_conf_t *cscf; - - if (s->virtual_names == NULL) { - return NGX_DECLINED; - } - - cscf = ngx_hash_find_combined(&s->virtual_names->names, - ngx_hash_key(host->data, host->len), - host->data, host->len); - - if (cscf) { - *cscfp = cscf; - return NGX_OK; - } - -#if (NGX_PCRE) - - if (host->len && s->virtual_names->nregex) { - ngx_int_t n; - ngx_uint_t i; - ngx_stream_server_name_t *sn; - - sn = s->virtual_names->regex; - - for (i = 0; i < s->virtual_names->nregex; i++) { - - n = ngx_stream_regex_exec(s, sn[i].regex, host); - - if (n == NGX_DECLINED) { - continue; - } - - if (n == NGX_OK) { - *cscfp = sn[i].server; - return NGX_OK; - } - - return NGX_ERROR; - } - } - -#endif /* NGX_PCRE */ - - return NGX_DECLINED; -} - - -ngx_int_t ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, ngx_uint_t alloc) { u_char *h, ch; @@ -579,6 +528,57 @@ ngx_stream_validate_host(ngx_str_t *host } +ngx_int_t +ngx_stream_find_virtual_server(ngx_stream_session_t *s, + ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp) +{ + ngx_stream_core_srv_conf_t *cscf; + + if (s->virtual_names == NULL) { + return NGX_DECLINED; + } + + cscf = ngx_hash_find_combined(&s->virtual_names->names, + ngx_hash_key(host->data, host->len), + host->data, host->len); + + if (cscf) { + *cscfp = cscf; + return NGX_OK; + } + +#if (NGX_PCRE) + + if (host->len && s->virtual_names->nregex) { + ngx_int_t n; + ngx_uint_t i; + ngx_stream_server_name_t *sn; + + sn = s->virtual_names->regex; + + for (i = 0; i < s->virtual_names->nregex; i++) { + + n = ngx_stream_regex_exec(s, sn[i].regex, host); + + if (n == NGX_DECLINED) { + continue; + } + + if (n == NGX_OK) { + *cscfp = sn[i].server; + return NGX_OK; + } + + return NGX_ERROR; + } + } + +#endif /* NGX_PCRE */ + + return NGX_DECLINED; +} + + static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf) { @@ -625,7 +625,6 @@ ngx_stream_core_init_main_conf(ngx_conf_ cmcf->server_names_hash_bucket_size = ngx_align(cmcf->server_names_hash_bucket_size, ngx_cacheline_size); - ngx_conf_init_uint_value(cmcf->variables_hash_max_size, 1024); ngx_conf_init_uint_value(cmcf->variables_hash_bucket_size, 64); @@ -864,7 +863,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n ngx_str_t *value, size; ngx_url_t u; - ngx_uint_t i, n, backlog; + ngx_uint_t n, i, backlog; ngx_stream_listen_opt_t lsopt; cscf->listen = 1; @@ -903,9 +902,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n for (i = 2; i < cf->args->nelts; i++) { - if (ngx_strcmp(value[i].data, "default_server") == 0 - || ngx_strcmp(value[i].data, "default") == 0) - { + if (ngx_strcmp(value[i].data, "default_server") == 0) { lsopt.default_server = 1; continue; } @@ -918,6 +915,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n #endif if (ngx_strcmp(value[i].data, "bind") == 0) { + lsopt.set = 1; lsopt.bind = 1; continue; } @@ -925,6 +923,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n #if (NGX_HAVE_TCP_FASTOPEN) if (ngx_strncmp(value[i].data, "fastopen=", 9) == 0) { lsopt.fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9); + lsopt.set = 1; lsopt.bind = 1; if (lsopt.fastopen == NGX_ERROR) { @@ -939,6 +938,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (ngx_strncmp(value[i].data, "backlog=", 8) == 0) { lsopt.backlog = ngx_atoi(value[i].data + 8, value[i].len - 8); + lsopt.set = 1; lsopt.bind = 1; if (lsopt.backlog == NGX_ERROR || lsopt.backlog == 0) { @@ -957,6 +957,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n size.data = value[i].data + 7; lsopt.rcvbuf = ngx_parse_size(&size); + lsopt.set = 1; lsopt.bind = 1; if (lsopt.rcvbuf == 
NGX_ERROR) { @@ -973,6 +974,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n size.data = value[i].data + 7; lsopt.sndbuf = ngx_parse_size(&size); + lsopt.set = 1; lsopt.bind = 1; if (lsopt.sndbuf == NGX_ERROR) { @@ -999,11 +1001,13 @@ ngx_stream_core_listen(ngx_conf_t *cf, n return NGX_CONF_ERROR; } + lsopt.set = 1; lsopt.bind = 1; + continue; #else ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "bind ipv6only is not supported " + "ipv6only is not supported " "on this platform"); return NGX_CONF_ERROR; #endif @@ -1012,6 +1016,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (ngx_strcmp(value[i].data, "reuseport") == 0) { #if (NGX_HAVE_REUSEPORT) lsopt.reuseport = 1; + lsopt.set = 1; lsopt.bind = 1; #else ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, @@ -1023,17 +1028,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (ngx_strcmp(value[i].data, "ssl") == 0) { #if (NGX_STREAM_SSL) - ngx_stream_ssl_conf_t *sslcf; - - sslcf = ngx_stream_conf_get_module_srv_conf(cf, - ngx_stream_ssl_module); - - sslcf->listen = 1; - sslcf->file = cf->conf_file->file.name.data; - sslcf->line = cf->conf_file->line; - lsopt.ssl = 1; - continue; #else ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, @@ -1119,6 +1114,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n #endif } + lsopt.set = 1; lsopt.bind = 1; continue; @@ -1139,7 +1135,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n } ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "the invalid \"%V\" parameter", &value[i]); + "invalid parameter \"%V\"", &value[i]); return NGX_CONF_ERROR; } @@ -1202,9 +1198,9 @@ ngx_stream_core_server_name(ngx_conf_t * { ngx_stream_core_srv_conf_t *cscf = conf; - u_char ch; - ngx_str_t *value; - ngx_uint_t i; + u_char ch; + ngx_str_t *value; + ngx_uint_t i; ngx_stream_server_name_t *sn; value = cf->args->elts; diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c +++ b/src/stream/ngx_stream_ssl_module.c @@ -219,6 +219,13 @@ static ngx_command_t ngx_stream_ssl_com offsetof(ngx_stream_ssl_conf_t, conf_commands), &ngx_stream_ssl_conf_command_post }, + { ngx_string("ssl_reject_handshake"), + NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_STREAM_SRV_CONF_OFFSET, + offsetof(ngx_stream_ssl_conf_t, reject_handshake), + NULL }, + { ngx_string("ssl_alpn"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, ngx_stream_ssl_alpn, @@ -463,7 +470,7 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t const char *servername; ngx_connection_t *c; ngx_stream_session_t *s; - ngx_stream_ssl_conf_t *sscf; + ngx_stream_ssl_conf_t *sslcf; ngx_stream_core_srv_conf_t *cscf; c = ngx_ssl_get_connection(ssl_conn); @@ -516,12 +523,12 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t s->srv_conf = cscf->ctx->srv_conf; - sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); - ngx_set_connection_log(c, cscf->error_log); - if (sscf->ssl.ctx) { - if (SSL_set_SSL_CTX(ssl_conn, sscf->ssl.ctx) == NULL) { + sslcf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); + + if (sslcf->ssl.ctx) { + if (SSL_set_SSL_CTX(ssl_conn, sslcf->ssl.ctx) == NULL) { goto error; } @@ -530,18 +537,19 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t * adjust other things we care about */ - SSL_set_verify(ssl_conn, SSL_CTX_get_verify_mode(sscf->ssl.ctx), - SSL_CTX_get_verify_callback(sscf->ssl.ctx)); + SSL_set_verify(ssl_conn, SSL_CTX_get_verify_mode(sslcf->ssl.ctx), + SSL_CTX_get_verify_callback(sslcf->ssl.ctx)); - SSL_set_verify_depth(ssl_conn, SSL_CTX_get_verify_depth(sscf->ssl.ctx)); + 
SSL_set_verify_depth(ssl_conn, + SSL_CTX_get_verify_depth(sslcf->ssl.ctx)); #if OPENSSL_VERSION_NUMBER >= 0x009080dfL /* only in 0.9.8m+ */ SSL_clear_options(ssl_conn, SSL_get_options(ssl_conn) & - ~SSL_CTX_get_options(sscf->ssl.ctx)); + ~SSL_CTX_get_options(sslcf->ssl.ctx)); #endif - SSL_set_options(ssl_conn, SSL_CTX_get_options(sscf->ssl.ctx)); + SSL_set_options(ssl_conn, SSL_CTX_get_options(sslcf->ssl.ctx)); #ifdef SSL_OP_NO_RENEGOTIATION SSL_set_options(ssl_conn, SSL_OP_NO_RENEGOTIATION); @@ -550,6 +558,14 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t done: + sslcf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); + + if (sslcf->reject_handshake) { + c->ssl->handshake_rejected = 1; + *ad = SSL_AD_UNRECOGNIZED_NAME; + return SSL_TLSEXT_ERR_ALERT_FATAL; + } + return SSL_TLSEXT_ERR_OK; error: @@ -752,7 +768,6 @@ ngx_stream_ssl_create_conf(ngx_conf_t *c /* * set by ngx_pcalloc(): * - * scf->listen = 0; * scf->protocols = 0; * scf->certificate_values = NULL; * scf->dhparam = { 0, NULL }; @@ -771,6 +786,7 @@ ngx_stream_ssl_create_conf(ngx_conf_t *c scf->passwords = NGX_CONF_UNSET_PTR; scf->conf_commands = NGX_CONF_UNSET_PTR; scf->prefer_server_ciphers = NGX_CONF_UNSET; + scf->reject_handshake = NGX_CONF_UNSET; scf->verify = NGX_CONF_UNSET_UINT; scf->verify_depth = NGX_CONF_UNSET_UINT; scf->builtin_session_cache = NGX_CONF_UNSET; @@ -799,6 +815,8 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf ngx_conf_merge_value(conf->prefer_server_ciphers, prev->prefer_server_ciphers, 0); + ngx_conf_merge_value(conf->reject_handshake, prev->reject_handshake, 0); + ngx_conf_merge_bitmask_value(conf->protocols, prev->protocols, (NGX_CONF_BITMASK_SET |NGX_SSL_TLSv1|NGX_SSL_TLSv1_1 @@ -832,35 +850,21 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf conf->ssl.log = cf->log; - if (!conf->listen) { - return NGX_CONF_OK; - } - - if (conf->certificates == NULL) { - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, - "no \"ssl_certificate\" is defined for " - "the \"listen ... ssl\" directive in %s:%ui", - conf->file, conf->line); - return NGX_CONF_ERROR; - } + if (conf->certificates) { - if (conf->certificate_keys == NULL) { - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, - "no \"ssl_certificate_key\" is defined for " - "the \"listen ... ssl\" directive in %s:%ui", - conf->file, conf->line); - return NGX_CONF_ERROR; - } + if (conf->certificate_keys == NULL + || conf->certificate_keys->nelts < conf->certificates->nelts) + { + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "no \"ssl_certificate_key\" is defined " + "for certificate \"%V\"", + ((ngx_str_t *) conf->certificates->elts) + + conf->certificates->nelts - 1); + return NGX_CONF_ERROR; + } - if (conf->certificate_keys->nelts < conf->certificates->nelts) { - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, - "no \"ssl_certificate_key\" is defined " - "for certificate \"%V\" and " - "the \"listen ... 
ssl\" directive in %s:%ui", - ((ngx_str_t *) conf->certificates->elts) - + conf->certificates->nelts - 1, - conf->file, conf->line); - return NGX_CONF_ERROR; + } else if (!conf->reject_handshake) { + return NGX_CONF_OK; } if (ngx_ssl_create(&conf->ssl, conf->protocols, NULL) != NGX_OK) { @@ -915,7 +919,7 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf return NGX_CONF_ERROR; #endif - } else { + } else if (conf->certificates) { /* configure certificates */ @@ -1014,6 +1018,10 @@ ngx_stream_ssl_compile_certificates(ngx_ ngx_stream_complex_value_t *cv; ngx_stream_compile_complex_value_t ccv; + if (conf->certificates == NULL) { + return NGX_OK; + } + cert = conf->certificates->elts; key = conf->certificate_keys->elts; nelts = conf->certificates->nelts; @@ -1292,8 +1300,13 @@ ngx_stream_ssl_conf_command_check(ngx_co static ngx_int_t ngx_stream_ssl_init(ngx_conf_t *cf) { - ngx_stream_handler_pt *h; - ngx_stream_core_main_conf_t *cmcf; + ngx_uint_t a, p, s; + ngx_stream_handler_pt *h; + ngx_stream_ssl_conf_t *sslcf; + ngx_stream_conf_addr_t *addr; + ngx_stream_conf_port_t *port; + ngx_stream_core_srv_conf_t **cscfp, *cscf; + ngx_stream_core_main_conf_t *cmcf; cmcf = ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); @@ -1304,5 +1317,58 @@ ngx_stream_ssl_init(ngx_conf_t *cf) *h = ngx_stream_ssl_handler; + if (cmcf->ports == NULL) { + return NGX_OK; + } + + port = cmcf->ports->elts; + for (p = 0; p < cmcf->ports->nelts; p++) { + + addr = port[p].addrs.elts; + for (a = 0; a < port[p].addrs.nelts; a++) { + + if (!addr[a].opt.ssl) { + continue; + } + + cscf = addr[a].default_server; + sslcf = cscf->ctx->srv_conf[ngx_stream_ssl_module.ctx_index]; + + if (sslcf->certificates) { + continue; + } + + if (!sslcf->reject_handshake) { + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "no \"ssl_certificate\" is defined for " + "the \"listen ... ssl\" directive in %s:%ui", + cscf->file_name, cscf->line); + return NGX_ERROR; + } + + /* + * if no certificates are defined in the default server, + * check all non-default server blocks + */ + + cscfp = addr[a].servers.elts; + for (s = 0; s < addr[a].servers.nelts; s++) { + + cscf = cscfp[s]; + sslcf = cscf->ctx->srv_conf[ngx_stream_ssl_module.ctx_index]; + + if (sslcf->certificates || sslcf->reject_handshake) { + continue; + } + + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "no \"ssl_certificate\" is defined for " + "the \"listen ... 
ssl\" directive in %s:%ui", + cscf->file_name, cscf->line); + return NGX_ERROR; + } + } + } + return NGX_OK; } diff --git a/src/stream/ngx_stream_ssl_module.h b/src/stream/ngx_stream_ssl_module.h --- a/src/stream/ngx_stream_ssl_module.h +++ b/src/stream/ngx_stream_ssl_module.h @@ -18,10 +18,10 @@ typedef struct { ngx_msec_t handshake_timeout; ngx_flag_t prefer_server_ciphers; + ngx_flag_t reject_handshake; ngx_ssl_t ssl; - ngx_uint_t listen; ngx_uint_t protocols; ngx_uint_t verify; @@ -53,9 +53,6 @@ typedef struct { ngx_flag_t session_tickets; ngx_array_t *session_ticket_keys; - - u_char *file; - ngx_uint_t line; } ngx_stream_ssl_conf_t; diff --git a/src/stream/ngx_stream_ssl_preread_module.c b/src/stream/ngx_stream_ssl_preread_module.c --- a/src/stream/ngx_stream_ssl_preread_module.c +++ b/src/stream/ngx_stream_ssl_preread_module.c @@ -541,8 +541,6 @@ ngx_stream_ssl_preread_servername(ngx_st s->srv_conf = cscf->ctx->srv_conf; - cscf = ngx_stream_get_module_srv_conf(s, ngx_stream_core_module); - ngx_set_connection_log(c, cscf->error_log); return NGX_OK; -- Sergey Kandaurov From arut at nginx.com Wed Dec 13 14:06:59 2023 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 13 Dec 2023 18:06:59 +0400 Subject: [PATCH 1 of 3] Stream: socket peek in preread phase In-Reply-To: References: <966331bb4936888ef2f0.1699610839@arut-laptop> Message-ID: <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> Hi, On Tue, Dec 12, 2023 at 05:17:31PM +0400, Sergey Kandaurov wrote: > > > On 10 Nov 2023, at 14:07, Roman Arutyunyan wrote: > > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1699456644 -14400 > > # Wed Nov 08 19:17:24 2023 +0400 > > # Node ID 966331bb4936888ef2f034aa2700c130514d0b57 > > # Parent 7ec761f0365f418511e30b82e9adf80bc56681df > > Stream: socket peek in preread phase. > > > > Previously, preread buffer was always read out from socket, which made it > > impossible to terminate SSL on the connection without introducing additional > > SSL BIOs. The following patches will rely on this. > > > > Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. > > It's called if SSL is not already terminated and if an egde-triggered event > > method is used. For epoll, EPOLLRDHUP support is also required. > > Not sure if it is a good way to introduce new functionality > that depends on connection processing methods. I agree. On the other hand, systems lacking edge-triggered event methods are rare these days (let's leave Windows out of this discussion). In most cases however a level-triggered event method can be enough since a typical preread only analyzes the first packet (SSL ClientHello is usually small and fits in the first TCP packet). However there's no 100% guarantee the packet will not be fragmented. We can further discuss the possibility to reimplement preread in a way that for level-triggered event methods we only analyze the first packet. This however will restrict more complex preread cases. 
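As background for the recv(MSG_PEEK) approach itself, the following minimal
sketch (a hypothetical standalone test program, not nginx code) shows the
property the change relies on: a peek returns the buffered bytes but leaves
them in the socket, so a later consumer of the same descriptor (for example,
an SSL handshake) still reads them.

```C
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>

int
main(void)
{
    int      sv[2];
    char     buf[16];
    ssize_t  n;

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        return 1;
    }

    (void) write(sv[1], "hello", 5);

    /* MSG_PEEK copies the data out but does not consume it */
    n = recv(sv[0], buf, sizeof(buf), MSG_PEEK);
    printf("peek: %zd bytes\n", n);

    /* a regular read still sees the same 5 bytes */
    n = recv(sv[0], buf, sizeof(buf), 0);
    printf("read: %zd bytes\n", n);

    close(sv[0]);
    close(sv[1]);
    return 0;
}
```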
> > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > > --- a/src/stream/ngx_stream_core_module.c > > +++ b/src/stream/ngx_stream_core_module.c > > @@ -10,6 +10,10 @@ > > #include > > > > > > +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > > + ngx_stream_phase_handler_t *ph); > > +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > > + ngx_stream_phase_handler_t *ph); > > static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); > > static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); > > static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); > > @@ -203,8 +207,6 @@ ngx_int_t > > ngx_stream_core_preread_phase(ngx_stream_session_t *s, > > ngx_stream_phase_handler_t *ph) > > { > > - size_t size; > > - ssize_t n; > > ngx_int_t rc; > > ngx_connection_t *c; > > ngx_stream_core_srv_conf_t *cscf; > > @@ -217,56 +219,40 @@ ngx_stream_core_preread_phase(ngx_stream > > > > if (c->read->timedout) { > > rc = NGX_STREAM_OK; > > + goto done; > > + } > > > > - } else if (c->read->timer_set) { > > - rc = NGX_AGAIN; > > + if (!c->read->timer_set) { > > + rc = ph->handler(s); > > > > - } else { > > - rc = ph->handler(s); > > + if (rc != NGX_AGAIN) { > > + goto done; > > + } > > } > > > > - while (rc == NGX_AGAIN) { > > - > > + if (c->buffer == NULL) { > > + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > > if (c->buffer == NULL) { > > - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > > - if (c->buffer == NULL) { > > - rc = NGX_ERROR; > > - break; > > - } > > + rc = NGX_ERROR; > > + goto done; > > } > > - > > - size = c->buffer->end - c->buffer->last; > > - > > - if (size == 0) { > > - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > > - rc = NGX_STREAM_BAD_REQUEST; > > - break; > > - } > > + } > > > > - if (c->read->eof) { > > - rc = NGX_STREAM_OK; > > - break; > > - } > > - > > - if (!c->read->ready) { > > - break; > > - } > > - > > - n = c->recv(c, c->buffer->last, size); > > + if (c->ssl == NULL > > + && (ngx_event_flags & NGX_USE_CLEAR_EVENT) > > + && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 > > +#if (NGX_HAVE_EPOLLRDHUP) > > + || ngx_use_epoll_rdhup > > +#endif > > + )) > > + { > > + rc = ngx_stream_preread_peek(s, ph); > > > > - if (n == NGX_ERROR || n == 0) { > > - rc = NGX_STREAM_OK; > > - break; > > - } > > + } else { > > + rc = ngx_stream_preread(s, ph); > > + } > > > > - if (n == NGX_AGAIN) { > > - break; > > - } > > - > > - c->buffer->last += n; > > - > > - rc = ph->handler(s); > > - } > > +done: > > > > if (rc == NGX_AGAIN) { > > if (ngx_handle_read_event(c->read, 0) != NGX_OK) { > > @@ -311,6 +297,95 @@ ngx_stream_core_preread_phase(ngx_stream > > } > > > > > > +static ngx_int_t > > +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > > +{ > > + ssize_t n; > > + ngx_int_t rc; > > + ngx_err_t err; > > + ngx_connection_t *c; > > + > > + c = s->connection; > > + > > + n = recv(c->fd, (char *) c->buffer->last, > > + c->buffer->end - c->buffer->last, MSG_PEEK); > > + > > + err = ngx_socket_errno; > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > > typo: NGX_LOG_DEBUG_STREAM Thanks. 
> > + "stream recv(MSG_PEEK): %z", n); > > Nitpicking: I couldn't find precedence to log "MSG_PEEK", e.g.: > > src/mail/ngx_mail_handler.c: n = recv(c->fd, (char *) buf, sizeof(buf), MSG_PEEK); > src/mail/ngx_mail_handler.c- > src/mail/ngx_mail_handler.c- err = ngx_socket_errno; > src/mail/ngx_mail_handler.c- > src/mail/ngx_mail_handler.c- ngx_log_debug1(NGX_LOG_DEBUG_MAIL, c->log, 0, "recv(): %z", n); > src/mail/ngx_mail_handler.c- > -- > src/stream/ngx_stream_handler.c: n = recv(c->fd, (char *) buf, sizeof(buf), MSG_PEEK); > src/stream/ngx_stream_handler.c- > src/stream/ngx_stream_handler.c- err = ngx_socket_errno; > src/stream/ngx_stream_handler.c- > src/stream/ngx_stream_handler.c- ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "recv(): %z", n); > src/stream/ngx_stream_handler.c- > > Might be "stream recv(): %z" or just "recv(): %z" is enough. OK, this is also similar to logging in ngx_http_ssl_handshake(). > > + if (n == -1) { > > + if (err == NGX_EAGAIN) { > > You don't reset c->read->ready, which introduces a bad pattern. OK, let's add it. The reason for not adding the reset was that for edge-triggered we don't really need it. > > + return NGX_AGAIN; > > + } > > + > > + ngx_connection_error(c, err, "recv() failed"); > > + return NGX_STREAM_OK; > > + } > > + > > + if (n == 0) { > > + return NGX_STREAM_OK; > > + } > > + > > + c->buffer->last += n; > > + > > + rc = ph->handler(s); > > + > > + if (rc == NGX_AGAIN && c->buffer->last == c->buffer->end) { > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > > + return NGX_STREAM_BAD_REQUEST; > > + } > > + > > + if (rc == NGX_AGAIN && c->read->pending_eof) { > > + return NGX_STREAM_OK; > > + } > > + > > + c->buffer->last = c->buffer->pos; > > + > > + return rc; > > +} > > Don't you want to make ngx_stream_preread_peek() more similar to > ngx_stream_preread() ? Something like this: > > rc = ph->handler(s); > > if (rc != NGX_AGAIN) { > c->buffer->last = c->buffer->pos; > return rc; > } > > if (c->buffer->last == c->buffer->end) { > ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > return NGX_STREAM_BAD_REQUEST; > } > > if (c->read->pending_eof) { > return NGX_STREAM_OK; > } > > c->buffer->last = c->buffer->pos; > > return NGX_AGAIN; Still not fully similar, but ok. 
> > +static ngx_int_t > > +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > > +{ > > + ssize_t n; > > + ngx_int_t rc; > > + ngx_connection_t *c; > > + > > + c = s->connection; > > + > > + while (c->read->ready) { > > + > > + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); > > + > > + if (n == NGX_AGAIN) { > > + return NGX_AGAIN; > > + } > > + > > + if (n == NGX_ERROR || n == 0) { > > + return NGX_STREAM_OK; > > + } > > + > > + c->buffer->last += n; > > + > > + rc = ph->handler(s); > > + > > + if (rc != NGX_AGAIN) { > > + return rc; > > + } > > + > > + if (c->buffer->last == c->buffer->end) { > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > > + return NGX_STREAM_BAD_REQUEST; > > + } > > + } > > + > > + return NGX_AGAIN; > > +} > > + > > + > > ngx_int_t > > ngx_stream_core_content_phase(ngx_stream_session_t *s, > > ngx_stream_phase_handler_t *ph) > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > -- > Sergey Kandaurov > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1702476295 -14400 # Wed Dec 13 18:04:55 2023 +0400 # Node ID 844486cdd43a32d10b78493d7e7b80e9e2239d7e # Parent 6c8595b77e667bd58fd28186939ed820f2e55e0e Stream: socket peek in preread phase. Previously, preread buffer was always read out from socket, which made it impossible to terminate SSL on the connection without introducing additional SSL BIOs. The following patches will rely on this. Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. It's called if SSL is not already terminated and if an egde-triggered event method is used. For epoll, EPOLLRDHUP support is also required. 
diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -10,6 +10,10 @@ #include +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, + ngx_stream_phase_handler_t *ph); +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, + ngx_stream_phase_handler_t *ph); static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); @@ -203,8 +207,6 @@ ngx_int_t ngx_stream_core_preread_phase(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) { - size_t size; - ssize_t n; ngx_int_t rc; ngx_connection_t *c; ngx_stream_core_srv_conf_t *cscf; @@ -217,56 +219,40 @@ ngx_stream_core_preread_phase(ngx_stream if (c->read->timedout) { rc = NGX_STREAM_OK; + goto done; + } - } else if (c->read->timer_set) { - rc = NGX_AGAIN; + if (!c->read->timer_set) { + rc = ph->handler(s); - } else { - rc = ph->handler(s); + if (rc != NGX_AGAIN) { + goto done; + } } - while (rc == NGX_AGAIN) { - + if (c->buffer == NULL) { + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); if (c->buffer == NULL) { - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); - if (c->buffer == NULL) { - rc = NGX_ERROR; - break; - } + rc = NGX_ERROR; + goto done; } - - size = c->buffer->end - c->buffer->last; - - if (size == 0) { - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); - rc = NGX_STREAM_BAD_REQUEST; - break; - } + } - if (c->read->eof) { - rc = NGX_STREAM_OK; - break; - } - - if (!c->read->ready) { - break; - } - - n = c->recv(c, c->buffer->last, size); + if (c->ssl == NULL + && (ngx_event_flags & NGX_USE_CLEAR_EVENT) + && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 +#if (NGX_HAVE_EPOLLRDHUP) + || ngx_use_epoll_rdhup +#endif + )) + { + rc = ngx_stream_preread_peek(s, ph); - if (n == NGX_ERROR || n == 0) { - rc = NGX_STREAM_OK; - break; - } + } else { + rc = ngx_stream_preread(s, ph); + } - if (n == NGX_AGAIN) { - break; - } - - c->buffer->last += n; - - rc = ph->handler(s); - } +done: if (rc == NGX_AGAIN) { if (ngx_handle_read_event(c->read, 0) != NGX_OK) { @@ -311,6 +297,100 @@ ngx_stream_core_preread_phase(ngx_stream } +static ngx_int_t +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) +{ + ssize_t n; + ngx_int_t rc; + ngx_err_t err; + ngx_connection_t *c; + + c = s->connection; + + n = recv(c->fd, (char *) c->buffer->last, + c->buffer->end - c->buffer->last, MSG_PEEK); + + err = ngx_socket_errno; + + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream recv(): %z", n); + + if (n == -1) { + if (err == NGX_EAGAIN) { + c->read->ready = 0; + return NGX_AGAIN; + } + + ngx_connection_error(c, err, "recv() failed"); + return NGX_STREAM_OK; + } + + if (n == 0) { + return NGX_STREAM_OK; + } + + c->buffer->last += n; + + rc = ph->handler(s); + + if (rc != NGX_AGAIN) { + c->buffer->last = c->buffer->pos; + return rc; + } + + if (c->buffer->last == c->buffer->end) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); + return NGX_STREAM_BAD_REQUEST; + } + + if (c->read->pending_eof) { + return NGX_STREAM_OK; + } + + c->buffer->last = c->buffer->pos; + + return NGX_AGAIN; +} + + +static ngx_int_t +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) +{ + ssize_t n; + ngx_int_t rc; + ngx_connection_t *c; + + c = s->connection; + + while (c->read->ready) { 
+ + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); + + if (n == NGX_AGAIN) { + return NGX_AGAIN; + } + + if (n == NGX_ERROR || n == 0) { + return NGX_STREAM_OK; + } + + c->buffer->last += n; + + rc = ph->handler(s); + + if (rc != NGX_AGAIN) { + return rc; + } + + if (c->buffer->last == c->buffer->end) { + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); + return NGX_STREAM_BAD_REQUEST; + } + } + + return NGX_AGAIN; +} + + ngx_int_t ngx_stream_core_content_phase(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) From pluknet at nginx.com Wed Dec 13 15:45:10 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 13 Dec 2023 19:45:10 +0400 Subject: [PATCH 3 of 3] Stream: ngx_stream_pass_module In-Reply-To: <3cab85fe55272835674b.1699610841@arut-laptop> References: <3cab85fe55272835674b.1699610841@arut-laptop> Message-ID: <2551500F-2FB0-4763-AC5D-5838275B1D29@nginx.com> > On 10 Nov 2023, at 14:07, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1699543504 -14400 > # Thu Nov 09 19:25:04 2023 +0400 > # Node ID 3cab85fe55272835674b7f1c296796955256d019 > # Parent 1d3464283405a4d8ac54caae9bf1815c723f04c5 > Stream: ngx_stream_pass_module. > > The module allows to pass connections from Stream to other modules such as HTTP > or Mail, as well as back to Stream. Previously, this was only possible with > proxying. Connections with preread buffer read out from socket cannot be > passed. > > The module allows to terminate SSL selectively based on SNI. > > stream { > server { > listen 8000 default_server; > ssl_preread on; > ... > } > > server { > listen 8000; > server_name foo.example.com; > pass 8001; # to HTTP > } > > server { > listen 8000; > server_name bar.example.com; > ... > } > } > > http { > server { > listen 8001 ssl; > ... > > location / { > root html; > } > } > } > > diff --git a/auto/modules b/auto/modules > --- a/auto/modules > +++ b/auto/modules > @@ -1166,6 +1166,16 @@ if [ $STREAM != NO ]; then > . auto/module > fi > > + if [ $STREAM_PASS = YES ]; then > + ngx_module_name=ngx_stream_pass_module > + ngx_module_deps= > + ngx_module_srcs=src/stream/ngx_stream_pass_module.c > + ngx_module_libs= > + ngx_module_link=$STREAM_PASS > + > + . 
auto/module > + fi > + > if [ $STREAM_SET = YES ]; then > ngx_module_name=ngx_stream_set_module > ngx_module_deps= > diff --git a/auto/options b/auto/options > --- a/auto/options > +++ b/auto/options > @@ -127,6 +127,7 @@ STREAM_GEOIP=NO > STREAM_MAP=YES > STREAM_SPLIT_CLIENTS=YES > STREAM_RETURN=YES > +STREAM_PASS=YES > STREAM_SET=YES > STREAM_UPSTREAM_HASH=YES > STREAM_UPSTREAM_LEAST_CONN=YES > @@ -337,6 +338,7 @@ use the \"--with-mail_ssl_module\" optio > --without-stream_split_clients_module) > STREAM_SPLIT_CLIENTS=NO ;; > --without-stream_return_module) STREAM_RETURN=NO ;; > + --without-stream_pass_module) STREAM_PASS=NO ;; > --without-stream_set_module) STREAM_SET=NO ;; > --without-stream_upstream_hash_module) > STREAM_UPSTREAM_HASH=NO ;; > @@ -556,6 +558,7 @@ cat << END > --without-stream_split_clients_module > disable ngx_stream_split_clients_module > --without-stream_return_module disable ngx_stream_return_module > + --without-stream_pass_module disable ngx_stream_pass_module > --without-stream_set_module disable ngx_stream_set_module > --without-stream_upstream_hash_module > disable ngx_stream_upstream_hash_module > diff --git a/src/stream/ngx_stream_pass_module.c b/src/stream/ngx_stream_pass_module.c > new file mode 100644 > --- /dev/null > +++ b/src/stream/ngx_stream_pass_module.c > @@ -0,0 +1,245 @@ > + > +/* > + * Copyright (C) Roman Arutyunyan > + * Copyright (C) Nginx, Inc. > + */ > + > + > +#include > +#include > +#include > + > + > +typedef struct { > + ngx_addr_t *addr; > + ngx_stream_complex_value_t *addr_value; > +} ngx_stream_pass_srv_conf_t; > + > + > +static void ngx_stream_pass_handler(ngx_stream_session_t *s); > +static void *ngx_stream_pass_create_srv_conf(ngx_conf_t *cf); > +static char *ngx_stream_pass(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); > + > + > +static ngx_command_t ngx_stream_pass_commands[] = { > + > + { ngx_string("pass"), > + NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > + ngx_stream_pass, > + NGX_STREAM_SRV_CONF_OFFSET, > + 0, > + NULL }, > + > + ngx_null_command > +}; > + > + > +static ngx_stream_module_t ngx_stream_pass_module_ctx = { > + NULL, /* preconfiguration */ > + NULL, /* postconfiguration */ > + > + NULL, /* create main configuration */ > + NULL, /* init main configuration */ > + > + ngx_stream_pass_create_srv_conf, /* create server configuration */ > + NULL /* merge server configuration */ > +}; > + > + > +ngx_module_t ngx_stream_pass_module = { > + NGX_MODULE_V1, > + &ngx_stream_pass_module_ctx, /* module conaddr */ typo: s/conaddr/context/ Otherwise, looks good. 
> + ngx_stream_pass_commands, /* module directives */ > + NGX_STREAM_MODULE, /* module type */ > + NULL, /* init master */ > + NULL, /* init module */ > + NULL, /* init process */ > + NULL, /* init thread */ > + NULL, /* exit thread */ > + NULL, /* exit process */ > + NULL, /* exit master */ > + NGX_MODULE_V1_PADDING > +}; > + > + > +static void > +ngx_stream_pass_handler(ngx_stream_session_t *s) > +{ > + ngx_url_t u; > + ngx_str_t url; > + ngx_addr_t *addr; > + ngx_uint_t i; > + ngx_listening_t *ls; > + ngx_connection_t *c; > + ngx_stream_pass_srv_conf_t *pscf; > + > + c = s->connection; > + > + c->log->action = "passing connection to another module"; > + > + if (c->buffer && c->buffer->pos != c->buffer->last) { > + ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, > + "cannot pass connection with preread data"); > + goto failed; > + } > + > + pscf = ngx_stream_get_module_srv_conf(s, ngx_stream_pass_module); > + > + addr = pscf->addr; > + > + if (addr == NULL) { > + if (ngx_stream_complex_value(s, pscf->addr_value, &url) != NGX_OK) { > + goto failed; > + } > + > + ngx_memzero(&u, sizeof(ngx_url_t)); > + > + u.url = url; > + u.listen = 1; > + u.no_resolve = 1; > + > + if (ngx_parse_url(s->connection->pool, &u) != NGX_OK) { > + if (u.err) { > + ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, > + "%s in pass \"%V\"", u.err, &u.url); > + } > + > + goto failed; > + } > + > + if (u.naddrs == 0) { > + ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, > + "no addresses in pass \"%V\"", &u.url); > + goto failed; > + } > + > + addr = &u.addrs[0]; > + } > + > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, > + "stream pass addr: \"%V\"", &addr->name); > + > + ls = ngx_cycle->listening.elts; > + > + for (i = 0; i < ngx_cycle->listening.nelts; i++) { > + if (ngx_cmp_sockaddr(ls[i].sockaddr, ls[i].socklen, > + addr->sockaddr, addr->socklen, 1) > + == NGX_OK) > + { > + c->listening = &ls[i]; > + > + c->data = NULL; > + c->buffer = NULL; > + > + *c->log = c->listening->log; > + c->log->handler = NULL; > + c->log->data = NULL; > + > + c->listening->handler(c); > + > + return; > + } > + } > + > + ngx_log_error(NGX_LOG_ERR, c->log, 0, > + "listen not found for \"%V\"", &addr->name); > + > + ngx_stream_finalize_session(s, NGX_STREAM_OK); > + > + return; > + > +failed: > + > + ngx_stream_finalize_session(s, NGX_STREAM_INTERNAL_SERVER_ERROR); > +} > + > + > +static void * > +ngx_stream_pass_create_srv_conf(ngx_conf_t *cf) > +{ > + ngx_stream_pass_srv_conf_t *conf; > + > + conf = ngx_pcalloc(cf->pool, sizeof(ngx_stream_pass_srv_conf_t)); > + if (conf == NULL) { > + return NULL; > + } > + > + /* > + * set by ngx_pcalloc(): > + * > + * conf->addr = NULL; > + * conf->addr_value = NULL; > + */ > + > + return conf; > +} > + > + > +static char * > +ngx_stream_pass(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > +{ > + ngx_stream_pass_srv_conf_t *pscf = conf; > + > + ngx_url_t u; > + ngx_str_t *value, *url; > + ngx_stream_complex_value_t cv; > + ngx_stream_core_srv_conf_t *cscf; > + ngx_stream_compile_complex_value_t ccv; > + > + if (pscf->addr || pscf->addr_value) { > + return "is duplicate"; > + } > + > + cscf = ngx_stream_conf_get_module_srv_conf(cf, ngx_stream_core_module); > + > + cscf->handler = ngx_stream_pass_handler; > + > + value = cf->args->elts; > + > + url = &value[1]; > + > + ngx_memzero(&ccv, sizeof(ngx_stream_compile_complex_value_t)); > + > + ccv.cf = cf; > + ccv.value = url; > + ccv.complex_value = &cv; > + > + if (ngx_stream_compile_complex_value(&ccv) != NGX_OK) { > + return NGX_CONF_ERROR; > 
+ } > + > + if (cv.lengths) { > + pscf->addr_value = ngx_palloc(cf->pool, > + sizeof(ngx_stream_complex_value_t)); > + if (pscf->addr_value == NULL) { > + return NGX_CONF_ERROR; > + } > + > + *pscf->addr_value = cv; > + > + return NGX_CONF_OK; > + } > + > + ngx_memzero(&u, sizeof(ngx_url_t)); > + > + u.url = *url; > + u.listen = 1; > + > + if (ngx_parse_url(cf->pool, &u) != NGX_OK) { > + if (u.err) { > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "%s in \"%V\" of the \"pass\" directive", > + u.err, &u.url); > + } > + > + return NGX_CONF_ERROR; > + } > + > + if (u.naddrs == 0) { > + return "has no addresses"; > + } > + > + pscf->addr = &u.addrs[0]; > + > + return NGX_CONF_OK; > +} -- Sergey Kandaurov From pluknet at nginx.com Wed Dec 13 15:50:09 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 13 Dec 2023 19:50:09 +0400 Subject: [PATCH 1 of 3] Stream: socket peek in preread phase In-Reply-To: <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> References: <966331bb4936888ef2f0.1699610839@arut-laptop> <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> Message-ID: <20231213155009.rlgrcfxem3d4umo5@Y9MQ9X2QVV> On Wed, Dec 13, 2023 at 06:06:59PM +0400, Roman Arutyunyan wrote: > Hi, > # HG changeset patch > # User Roman Arutyunyan > # Date 1702476295 -14400 > # Wed Dec 13 18:04:55 2023 +0400 > # Node ID 844486cdd43a32d10b78493d7e7b80e9e2239d7e > # Parent 6c8595b77e667bd58fd28186939ed820f2e55e0e > Stream: socket peek in preread phase. > > Previously, preread buffer was always read out from socket, which made it > impossible to terminate SSL on the connection without introducing additional > SSL BIOs. The following patches will rely on this. > > Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. > It's called if SSL is not already terminated and if an egde-triggered event > method is used. For epoll, EPOLLRDHUP support is also required. 
> > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -10,6 +10,10 @@ > #include > > > +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > + ngx_stream_phase_handler_t *ph); > +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > + ngx_stream_phase_handler_t *ph); > static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); > static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); > static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); > @@ -203,8 +207,6 @@ ngx_int_t > ngx_stream_core_preread_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph) > { > - size_t size; > - ssize_t n; > ngx_int_t rc; > ngx_connection_t *c; > ngx_stream_core_srv_conf_t *cscf; > @@ -217,56 +219,40 @@ ngx_stream_core_preread_phase(ngx_stream > > if (c->read->timedout) { > rc = NGX_STREAM_OK; > + goto done; > + } > > - } else if (c->read->timer_set) { > - rc = NGX_AGAIN; > + if (!c->read->timer_set) { > + rc = ph->handler(s); > > - } else { > - rc = ph->handler(s); > + if (rc != NGX_AGAIN) { > + goto done; > + } > } > > - while (rc == NGX_AGAIN) { > - > + if (c->buffer == NULL) { > + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > if (c->buffer == NULL) { > - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > - if (c->buffer == NULL) { > - rc = NGX_ERROR; > - break; > - } > + rc = NGX_ERROR; > + goto done; > } > - > - size = c->buffer->end - c->buffer->last; > - > - if (size == 0) { > - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > - rc = NGX_STREAM_BAD_REQUEST; > - break; > - } > + } > > - if (c->read->eof) { > - rc = NGX_STREAM_OK; > - break; > - } > - > - if (!c->read->ready) { > - break; > - } > - > - n = c->recv(c, c->buffer->last, size); > + if (c->ssl == NULL > + && (ngx_event_flags & NGX_USE_CLEAR_EVENT) > + && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 > +#if (NGX_HAVE_EPOLLRDHUP) > + || ngx_use_epoll_rdhup > +#endif > + )) > + { > + rc = ngx_stream_preread_peek(s, ph); > > - if (n == NGX_ERROR || n == 0) { > - rc = NGX_STREAM_OK; > - break; > - } > + } else { > + rc = ngx_stream_preread(s, ph); > + } > > - if (n == NGX_AGAIN) { > - break; > - } > - > - c->buffer->last += n; > - > - rc = ph->handler(s); > - } > +done: > > if (rc == NGX_AGAIN) { > if (ngx_handle_read_event(c->read, 0) != NGX_OK) { > @@ -311,6 +297,100 @@ ngx_stream_core_preread_phase(ngx_stream > } > > > +static ngx_int_t > +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > +{ > + ssize_t n; > + ngx_int_t rc; > + ngx_err_t err; > + ngx_connection_t *c; > + > + c = s->connection; > + > + n = recv(c->fd, (char *) c->buffer->last, > + c->buffer->end - c->buffer->last, MSG_PEEK); > + > + err = ngx_socket_errno; > + > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream recv(): %z", n); > + > + if (n == -1) { > + if (err == NGX_EAGAIN) { > + c->read->ready = 0; > + return NGX_AGAIN; > + } > + > + ngx_connection_error(c, err, "recv() failed"); > + return NGX_STREAM_OK; > + } > + > + if (n == 0) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last += n; > + > + rc = ph->handler(s); > + > + if (rc != NGX_AGAIN) { > + c->buffer->last = c->buffer->pos; > + return rc; > + } > + > + if (c->buffer->last == c->buffer->end) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > + return NGX_STREAM_BAD_REQUEST; > + } > + > + if 
(c->read->pending_eof) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last = c->buffer->pos; > + > + return NGX_AGAIN; > +} > + > + > +static ngx_int_t > +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > +{ > + ssize_t n; > + ngx_int_t rc; > + ngx_connection_t *c; > + > + c = s->connection; > + > + while (c->read->ready) { > + > + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); > + > + if (n == NGX_AGAIN) { > + return NGX_AGAIN; > + } > + > + if (n == NGX_ERROR || n == 0) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last += n; > + > + rc = ph->handler(s); > + > + if (rc != NGX_AGAIN) { > + return rc; > + } > + > + if (c->buffer->last == c->buffer->end) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > + return NGX_STREAM_BAD_REQUEST; > + } > + } > + > + return NGX_AGAIN; > +} > + > + > ngx_int_t > ngx_stream_core_content_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph) Looks good. From benjamin.p.kallus.gr at dartmouth.edu Wed Dec 13 16:09:28 2023 From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus) Date: Wed, 13 Dec 2023 11:09:28 -0500 Subject: Core: Avoid memcpy from NULL Message-ID: Nginx executes numerous `memcpy`s from NULL during normal execution. `memcpy`ing to or from NULL is undefined behavior. Accordingly, some compilers (gcc -O2) make optimizations that assume `memcpy` arguments are not NULL. Nginx with UBSan crashes during startup due to this issue. Consider the following function: ```C #include int f(int i) { char a[] = {'a'}; void *src = i ? a : NULL; char dst[1]; memcpy(dst, src, 0); return src == NULL; } ``` Here's what gcc13.2 -O2 -fno-builtin will do to it: ```asm f: sub rsp, 24 xor eax, eax test edi, edi lea rsi, [rsp+14] lea rdi, [rsp+15] mov BYTE PTR [rsp+14], 97 cmove rsi, rax xor edx, edx call memcpy xor eax, eax add rsp, 24 ret ``` Note that `f` always returns 0, regardless of the value of `i`. Feel free to try for yourself at https://gcc.godbolt.org/z/zfvnMMsds The reasoning here is that since memcpy from NULL is UB, the optimizer is free to assume that `src` is non-null. You might consider this to be a problem with the compiler, or the C standard, and I might agree. Regardless, relying on UB is inherently un-portable, and requires maintenance to ensure that new compiler releases don't break existing assumptions about the behavior of undefined operations. The following patch adds a check to `ngx_memcpy` and `ngx_cpymem` that makes 0-length memcpy explicitly a noop. Since all memcpying from NULL in Nginx uses n==0, this should be sufficient to avoid UB. It would be more efficient to instead add a check to every call to ngx_memcpy and ngx_cpymem that might be used with src==NULL, but in the discussion of a previous patch that proposed such a change, a more straightforward and tidy solution was desired. It may also be worth considering adding checks for NULL memset, memmove, etc. I think this is not necessary unless it is demonstrated that Nginx actually executes such undefined calls. # HG changeset patch # User Ben Kallus # Date 1702406466 18000 # Tue Dec 12 13:41:06 2023 -0500 # Node ID d270203d4ecf77cc14a2652c727e236afc659f4a # Parent a6f79f044de58b594563ac03139cd5e2e6a81bdb Add NULL check to ngx_memcpy and ngx_cpymem to satisfy UBSan. 
diff -r a6f79f044de5 -r d270203d4ecf src/core/ngx_string.c --- a/src/core/ngx_string.c Wed Nov 29 10:58:21 2023 +0400 +++ b/src/core/ngx_string.c Tue Dec 12 13:41:06 2023 -0500 @@ -2098,6 +2098,10 @@ ngx_debug_point(); } + if (n == 0) { + return dst; + } + return memcpy(dst, src, n); } diff -r a6f79f044de5 -r d270203d4ecf src/core/ngx_string.h --- a/src/core/ngx_string.h Wed Nov 29 10:58:21 2023 +0400 +++ b/src/core/ngx_string.h Tue Dec 12 13:41:06 2023 -0500 @@ -103,8 +103,9 @@ * gcc3 compiles memcpy(d, s, 4) to the inline "mov"es. * icc8 compile memcpy(d, s, 4) to the inline "mov"es or XMM moves. */ -#define ngx_memcpy(dst, src, n) (void) memcpy(dst, src, n) -#define ngx_cpymem(dst, src, n) (((u_char *) memcpy(dst, src, n)) + (n)) +#define ngx_memcpy(dst, src, n) (void) ((n) == 0 ? (dst) : memcpy(dst, src, n)) +#define ngx_cpymem(dst, src, n) \ + ((u_char *) ((n) == 0 ? (dst) : memcpy(dst, src, n)) + (n)) #endif diff -r a6f79f044de5 -r d270203d4ecf src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Wed Nov 29 10:58:21 2023 +0400 +++ b/src/http/v2/ngx_http_v2.c Tue Dec 12 13:41:06 2023 -0500 @@ -3998,9 +3998,7 @@ n = size; } - if (n > 0) { - rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); - } + rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, "http2 request body recv %uz", n); From mdounin at mdounin.ru Wed Dec 13 19:16:33 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 13 Dec 2023 22:16:33 +0300 Subject: processing a request without body In-Reply-To: References: Message-ID: Hello! On Wed, Dec 13, 2023 at 03:55:56PM +0800, Muhammad Nuzaihan wrote: > I need to process requests with only URI path (without body) for a module. > > It seems ngx_http_request_body_filter_pt is *not* executed whenever > there is a request without a body (it looked like it bypassed without > request body) and only ngx_http_output_body_filter_pt part of the > code is executed. > > For example i do a request curl curl like this: > > curl -vvvv -X POST http://localhost:8080/proxy/profile/alice/comment > > and i need to validate /proxy/profile/alice/comment in my module and > there is no http headers and no body. Only URI path. When reading an HTTP request, nginx reads the request line and request headers, and then starts request processing by passing it through as sequence of request processing phases - in each phase, corresponding handlers are called. The request body filters are only called if there is a request body, and some handler actually requested reading of the request body - so it's expected these are not called if there is no request body. If you want to handle request based on the request line and the request headers, consider using appropriate phase handler, see https://nginx.org/en/docs/dev/development_guide.html#http_phases and the source code for details. -- Maxim Dounin http://mdounin.ru/ From vasiliy.soshnikov at gmail.com Wed Dec 13 19:57:09 2023 From: vasiliy.soshnikov at gmail.com (Vasiliy Soshnikov) Date: Wed, 13 Dec 2023 22:57:09 +0300 Subject: processing a request without body In-Reply-To: References: Message-ID: Hello, > Is there something similar done before? I'm thinking that you would like to test the incoming path and execute some logic. You could use a header filter for that and also you could keep a body filter for handling the request body. Also pls take a look into PHASEs, I'm thinking you could try to add your own ACCESS PHASE. 
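A minimal sketch of what registering such an access-phase handler could look
like (hypothetical module and function names, untested; the init function is
referenced from the module's postconfiguration callback). At this phase only
the request line and headers are available, which matches the case of a
request without a body:

```C
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>


static ngx_int_t
ngx_http_example_access_handler(ngx_http_request_t *r)
{
    /* only the request line and headers have been read at this point */

    if (r->uri.len < sizeof("/proxy/") - 1
        || ngx_strncmp(r->uri.data, "/proxy/", sizeof("/proxy/") - 1) != 0)
    {
        return NGX_DECLINED;    /* not ours, let other handlers run */
    }

    /* validate r->uri here; return NGX_HTTP_FORBIDDEN to reject */

    return NGX_DECLINED;
}


static ngx_int_t
ngx_http_example_init(ngx_conf_t *cf)
{
    ngx_http_handler_pt        *h;
    ngx_http_core_main_conf_t  *cmcf;

    cmcf = ngx_http_conf_get_module_main_conf(cf, ngx_http_core_module);

    h = ngx_array_push(&cmcf->phases[NGX_HTTP_ACCESS_PHASE].handlers);
    if (h == NULL) {
        return NGX_ERROR;
    }

    *h = ngx_http_example_access_handler;

    return NGX_OK;
}
```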
And the last one: for keeping your context (some variables or data) for this request between filters, phases you could use request's context. Probably, examples would help you: https://github.com/dedok/nginx-tutorials On Wed, Dec 13, 2023 at 10:56 AM Muhammad Nuzaihan wrote: > Hi, > > I need to process requests with only URI path (without body) for a module. > > It seems ngx_http_request_body_filter_pt is *not* executed whenever > there is a request without a body (it looked like it bypassed without > request body) and only ngx_http_output_body_filter_pt part of the > code is executed. > > For example i do a request curl curl like this: > > curl -vvvv -X POST http://localhost:8080/proxy/profile/alice/comment > > and i need to validate /proxy/profile/alice/comment in my module and > there is no http headers and no body. Only URI path. > > Is there something similar done before? > > Thank you, > Muhammad Nuzaihan > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From vasiliy.soshnikov at gmail.com Wed Dec 13 20:00:46 2023 From: vasiliy.soshnikov at gmail.com (Vasiliy Soshnikov) Date: Wed, 13 Dec 2023 23:00:46 +0300 Subject: processing a request without body In-Reply-To: References: Message-ID: Sorry I'm bad. I understood that header filters won't help you. Use the ACCESS PHASE handler, it should work fine for you. On Wed, Dec 13, 2023 at 10:57 PM Vasiliy Soshnikov < vasiliy.soshnikov at gmail.com> wrote: > Hello, > > > Is there something similar done before? > I'm thinking that you would like to test the incoming path and execute > some logic. > > You could use a header filter for that and also you could keep a body > filter for handling the request body. > Also pls take a look into PHASEs, I'm thinking you could try to add your > own ACCESS PHASE. > > And the last one: for keeping your context (some variables or data) for > this request between filters, phases you could use request's context. > > Probably, examples would help you: > https://github.com/dedok/nginx-tutorials > > > On Wed, Dec 13, 2023 at 10:56 AM Muhammad Nuzaihan > wrote: > >> Hi, >> >> I need to process requests with only URI path (without body) for a module. >> >> It seems ngx_http_request_body_filter_pt is *not* executed whenever >> there is a request without a body (it looked like it bypassed without >> request body) and only ngx_http_output_body_filter_pt part of the >> code is executed. >> >> For example i do a request curl curl like this: >> >> curl -vvvv -X POST http://localhost:8080/proxy/profile/alice/comment >> >> and i need to validate /proxy/profile/alice/comment in my module and >> there is no http headers and no body. Only URI path. >> >> Is there something similar done before? >> >> Thank you, >> Muhammad Nuzaihan >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> https://mailman.nginx.org/mailman/listinfo/nginx-devel >> > -------------- next part -------------- An HTML attachment was scrubbed... URL: From julio.suarez at foss.arm.com Wed Dec 13 22:16:15 2023 From: julio.suarez at foss.arm.com (Julio Suarez) Date: Wed, 13 Dec 2023 16:16:15 -0600 Subject: [PATCH] Added asm ISB as asm pause for ngx_cpu_pause() for aarch64 In-Reply-To: References: Message-ID: Hi, This is very discussion helpful. 1. 
Yes, double checked configuration (what I'm running isn't exactly what's in that link). No shared memory zones or thread pools enabled. Sounds like a change in configuration is needed to test this. Would enabling proxy_cache_path be sufficient for this, or should this be done another way? When proxy_cache_path is enabled, I see calls to ngx_shmtx_lock & ngx_shmtx_unlock in the profile. The assembly annotations are also showing isb being executed (when I put in the ISB). I could try testing like this with both ISB & YIELD. Looking for guidance if you think it's worth a try. Overall, I'd like to sort out if the fact that there is no ngx_cpu_pause on aarch64 is sub optimal. The missing ngx_cpu_pause means there is no wait and subsequently, there is also no back off mechanism because the empty for loop is optimized away. 2. For code alignment question, I tried -falign-{functions,jumps}=64. ministat say's no diff. x Baseline + BaselinewAlign +----------------------------------------------------------------------+ | xx* | |+ x + + x+ *x* ++ x+ ++*+ x x + x x| | |_______M______A_______________| | | |_____________AM____________| | +----------------------------------------------------------------------+ N Min Max Median Avg Stddev x 15 129548 131751 130154 130442 622.46629 + 15 129000 131376 130306 130273 551.93064 No difference proven at 95.0% confidence 3. ministat for comparing blank ngx_cpu_pause() to ISB & YIELD (no memory clobber). Ministat say's significant difference. I have see it where ISB returns like ~10% +/- ~2%, however, I'm going to discount that as cloud variation/noise. A "lucky run". That said, it sounds like this is some kind of side effect of adding this into the binary as you mentioned previously. This diff oddly consistent though, or at least oddly consistent dumb luck. x Baseline + ISB * YIELD +--------------------------------------------------------------------------------+ | xxx * + + + | |x + x xxx x ** *xx *** * x **** *+ + * + * + +| | |______M____A___________| | | |______________MA_______________| | | |_________A__M_______| | +--------------------------------------------------------------------------------+ N Min Max Median Avg Stddev x 15 129548 131751 130154 130442 622.46629 + 15 129778.64 133639.52 132108.5 132135.41 844.66065 Difference at 95.0% confidence 1693.41 +/- 554.832 1.29821% +/- 0.425348% (Student's t, pooled s = 741.929) * 15 130679 132621 131596 131486.47 540.21198 Difference at 95.0% confidence 1044.47 +/- 435.826 0.800713% +/- 0.334115% (Student's t, pooled s = 582.792) 4. ACK on ISB memory clobber points. Makes sense. On 12/12/23 18:16, Maxim Dounin wrote: > Hello! > > On Mon, Dec 11, 2023 at 05:09:17PM -0600, Julio Suarez wrote: > >> Hi Maxim, >> >>> Nitpicking: Added ISB as ngx_cpu_pause() for aarch64. >> Yes, we can make that change. >> >>> Could you please clarify what do you mean by "a bug"? An empty >>> ngx_cpu_pause() is certainly not a bug, it's just a lack of a more >>> optimal solution for the particular architecture. >> Agree, not a bug. I'm in a team that focuses on performance, so >> sub-optimal performance is a "bug" to us. This is not a functional bug. >> Replacing the word bug with "sub-optimal" is more appropriate. >> >> When a delay like the PAUSE that is there for x86 is added, there is a >> 2-5% increase in the number of requests/sec Arm CPUs can achieve under >> high load. 
>> >> Yes, the test setup is very similar to what's described here (note, >> those particular instances in the blog isn't what I tested): >> >> https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/nginx-performance-on-graviton-3 >> >> Also, we tested on Nginx Open Source (without JWT), not Nginx-Plus like >> in the blog. > The only nginx configs I see there (or, rather, in linked > articles) do not use neither shared memory zones nor thread pools. > That is, ngx_cpu_pause() is not used in these configurations at > all. > > If you think it was actually used in your tests, could you please > provide nginx configuration you've used for testing? > > If nginx configs you've used do not actually contain shared memory > zones or thread pools, the performance changes you are seeing, > even if these are statistically significant (see below about > ministat(1)), might be the result of code alignment changes. > MySQL bug mentioned below uses -falign-{functions,jumps}=64 > to minimize effects of the code alignment changes > (https://bugs.mysql.com/bug.php?id=100664), it might worth to do > the same. > >> We tested for the max RPS of a 512B file that can be pulled through a >> reverse proxy. We select the number of upstreams to be large (8 to be >> exact), they are also high in core count (16+ CPU). The load generator >> node is also large (64 CPUs). This ensures the bottleneck is at the >> reverse proxy. We test small files because large files make the test >> network bounded, while smaller files make the test CPU bounded. >> >> I tested both ISB and YIELD (will talk about YIELD further below). >> >> Results of these tests are something like this: >> >> ISB uplift from no delay across 3 runs: >> >> - 2 CPU: 1.03 - 1.22% >> >> - 4 CPU: 2.70 - 10.75% (I'm treating the 10.75% here as an outlier, >> dropping that 10.75% gets ~5% on the high end of the range, hence why >> I'm just saying ~2-5% in change log, I don't want to overstate the perf >> improvement) >> >> - 8 CPU: 1.1 -2.33% >> >> >> YIELD uplift from no delay across 3 runs: >> >> - 2 CPU: 0 - 0.51% >> >> - 4 CPU: 0 - 1.41% >> >> - 8 CPU: 1.05 - 2.31% >> >> ISB produced the highest uplift, particularly for a 4 CPU reverse proxy. >> Hence why I submitted with ISB. Still, we see benefit with YIELD too. >> >> Variation comes from tearing down cloud infrastructure and redeploying. >> Results can vary depending on where you land in the data center. I'm >> intentionally leaving out exactly which HW/cloud I used in this data, >> but I can say we see similar uplift across a variety of Arm systems. > Could you please share some raw numbers from different tests? It > would be interesting to see ministat(1) results. > >> With respect to using YIELD and other projects that use alternatively >> use ISB: >> >> >> With respect to ISB Vs YIELD. Yes, as documented, YIELD is the >> conceptually right thing to use. However, in practice, it's a NOP which >> produces a shorter delay than ISB. Hence why ISB appears to work better. >> Also, YIELD is intended for SMT systems (uncommon on Arm), and hence, >> it's going to be a NOP for any current Arm system you'll find in the >> cloud. That said, YIELD produces uplift in RPS as well because even a >> small delay is better than no delay. I'm 100% good with using YIELD if >> you want to stay true to what is currently documented. I was going for >> max perf uplift which is also why some other projects are also using >> ISB. 
Whether it's YIELD or ISB, a revisit with WFET would be in order in >> the more distant future. For today, YIELD or ISB would work better than >> nothing (as it currently is). If YIELD is more acceptable, then I can do >> YIELD. >> >> Projects that previously used YIELD and switched to ISB after noting >> performance improvement (I don't think these projects shared data >> anywhere, we just have to take their word): >> >> MongoDB: >> https://github.com/mongodb/mongo/blob/b7a92e4194cca52665e01d81dd7f9b037b59b362/src/mongo/platform/pause.h#L61 >> >> MySQL: >> https://github.com/mysql/mysql-server/blob/87307d4ddd88405117e3f1e51323836d57ab1f57/storage/innobase/include/ut0ut.h#L108 >> >> Jemalloc: >> https://github.com/jemalloc/jemalloc/blob/e4817c8d89a2a413e835c4adeab5c5c4412f9235/configure.ac#L436 > Thanks for the links. > For the record, here are relevant commits / pull requests: > > https://github.com/wiredtiger/wiredtiger/pull/6080 > https://github.com/mongodb/mongo/commit/6979525674af67405984c58585766dd4d0c3f2a8 > > https://bugs.mysql.com/bug.php?id=100664 > https://github.com/mysql/mysql-server/commit/f2a4ed5b65a6c03ee1bea60b8c3bb4db64dbed10 > > https://github.com/jemalloc/jemalloc/pull/2205 > https://github.com/jemalloc/jemalloc/commit/89fe8ee6bf7a23556350d883a310c0224a171879 > > At least MySQL bug seems to have some interesting details. > >>> Could you please clarify reasons for the "memory" clobber here? >> Putting in the memory clobber for ISB is redundant because ISB is a >> barrier itself, but it's probably the GCC appropriate thing to do. I >> also like it as a hint for someone not familiar with ISB. ISB will pause >> the frontend (fetch-decode) to allow the CPU backend (execute-retire) to >> finish whatever operations are in flight. It's possible that some of >> those operations are writes to memory. Hence why we should tell the >> compiler "this instruction may update memory". > I don't think this interpretation is correct - memory is updated > by other instructions, not ISB. And it's the compiler who emitted > these instructions, so it perfectly knows that these will > eventually update memory. > > Note that the ngx_cpu_pause() call does not need to be a barrier, > neither a hardware barrier nor a compiler barrier. Instead, nginx > relies on using volatile variables for locks, so these are always > loaded from memory by the compiler on pre-lock checks (and will > test the updated value if it's changed by other CPUs), and proper > barrier semantics of ngx_atomic_cmp_set(). > > The "memory" clobber essentially tells compiler that it cannot > optimize stores and loads across the call, and must reload > anything from memory after the call. While this might be expected > in other uses (for example, cpu_relax() in Linux is expected to be > a compiler barrier), this is not something needed in > ngx_cpu_pause(). > > [...] > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Dec 14 04:53:49 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 14 Dec 2023 07:53:49 +0300 Subject: [PATCH] Added asm ISB as asm pause for ngx_cpu_pause() for aarch64 In-Reply-To: References: Message-ID: Hello! On Wed, Dec 13, 2023 at 04:16:15PM -0600, Julio Suarez wrote: > 1. > > Yes, double checked configuration (what I'm running isn't exactly what's > in that link). No shared memory zones or thread pools enabled. Sounds > like a change in configuration is needed to test this. 
> > Would enabling proxy_cache_path be sufficient for this, or should this > be done another way? > > When proxy_cache_path is enabled, I see calls to ngx_shmtx_lock & > ngx_shmtx_unlock in the profile. The assembly annotations are also > showing isb being executed (when I put in the ISB). I could try testing > like this with both ISB & YIELD. Looking for guidance if you think it's > worth a try. Overall, I'd like to sort out if the fact that there is no > ngx_cpu_pause on aarch64 is sub optimal. The missing ngx_cpu_pause means > there is no wait and subsequently, there is also no back off mechanism > because the empty for loop is optimized away. In general I think it would be non-trivial to construct a workload which will be able to demonstrate a difference, if at all, especially on platforms with posix semaphores available. And that's the reason why of my initial question on how did you get the numbers. The proxy_cache_path alone is certainly not enough. At least you have to actually enable caching with the proxy_cache directive. And most likely you'll have to play with the number of nginx worker processes and the workload to achieve at least some level of lock contention. Further, some effects are simply cannot be seen from just performance tests. For example, consider two different instructions which introduce exactly the same delay, but one of them due to explicitly requested processor pause, and another one due to a calculation which requires the same time. There will be no performance difference between the two - still, there will be a difference in power consumed by the CPU. > 2. > > For code alignment question, I tried -falign-{functions,jumps}=64. > ministat say's no diff. > > x Baseline > + BaselinewAlign > +----------------------------------------------------------------------+ > | xx* | > |+ x + + x+ *x* ++ x+ ++*+ x x + x x| > | |_______M______A_______________| | > | |_____________AM____________| | > +----------------------------------------------------------------------+ > N Min Max Median Avg Stddev > x 15 129548 131751 130154 130442 622.46629 > + 15 129000 131376 130306 130273 551.93064 > No difference proven at 95.0% confidence This might indicate you've measured some other effect, and not the alignment. Also, it might worth checking in the compiled result that the alignment is actually applied. (Note that text/plain part of your message contains garbled text, I've restored the above quote manually from the text/html part. It might worth switching to plain text in your mail client for further messages here.) > 3. > > ministat for comparing blank ngx_cpu_pause() to ISB & YIELD (no memory > clobber). > > Ministat say's significant difference. I have see it where ISB returns > like ~10% +/- ~2%, however, I'm going to discount that as cloud > variation/noise. A "lucky run". > > That said, it sounds like this is some kind of side effect of adding > this into the binary as you mentioned previously. This diff oddly > consistent though, or at least oddly consistent dumb luck. 
> > x Baseline > + ISB > * YIELD > +--------------------------------------------------------------------------------+ > | xxx * + + + | > |x + x xxx x ** *xx *** * x **** *+ + * + * + +| > | |______M____A___________| | > | |______________MA_______________| | > | |_________A__M_______| | > +--------------------------------------------------------------------------------+ > N Min Max Median Avg Stddev > x 15 129548 131751 130154 130442 622.46629 > + 15 129778.64 133639.52 132108.5 132135.41 844.66065 > Difference at 95.0% confidence > 1693.41 +/- 554.832 > 1.29821% +/- 0.425348% > (Student's t, pooled s = 741.929) > * 15 130679 132621 131596 131486.47 540.21198 > Difference at 95.0% confidence > 1044.47 +/- 435.826 > 0.800713% +/- 0.334115% > (Student's t, pooled s = 582.792) That's without any caching being used, that is, basically just a result of slightly different compilation, correct? This might be seen as a reference point of how slightly different compilation can affect performance. We've previously seen cases of 2-3% performance improvement observed as a result of a nop change, and these results seem to be in line. Tuning compilation to ensure there is no difference here might be the way to go. -- Maxim Dounin http://mdounin.ru/ From arut at nginx.com Thu Dec 14 06:22:24 2023 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 14 Dec 2023 10:22:24 +0400 Subject: [PATCH 2 of 3] Stream: virtual servers In-Reply-To: <979C0B23-C12F-4BDE-83B1-963126729EF9@nginx.com> References: <1d3464283405a4d8ac54.1699610840@arut-laptop> <979C0B23-C12F-4BDE-83B1-963126729EF9@nginx.com> Message-ID: <20231214062224.w273onqz74suk5f2@N00W24XTQX> Hi, On Wed, Dec 13, 2023 at 05:40:09PM +0400, Sergey Kandaurov wrote: > > > On 10 Nov 2023, at 14:07, Roman Arutyunyan wrote: > > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1699035295 -14400 > > # Fri Nov 03 22:14:55 2023 +0400 > > # Node ID 1d3464283405a4d8ac54caae9bf1815c723f04c5 > > # Parent 966331bb4936888ef2f034aa2700c130514d0b57 > > Stream: virtual servers. > > > > Server name is taken either from ngx_stream_ssl_module or > > ngx_stream_ssl_preread_module. > > > > You may want to consider mentioning here about various > directives introduced in this change, for the reference. 
ok > > diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c > > --- a/src/stream/ngx_stream.c > > +++ b/src/stream/ngx_stream.c > > @@ -16,16 +16,34 @@ static ngx_int_t ngx_stream_init_phases( > > ngx_stream_core_main_conf_t *cmcf); > > static ngx_int_t ngx_stream_init_phase_handlers(ngx_conf_t *cf, > > ngx_stream_core_main_conf_t *cmcf); > > -static ngx_int_t ngx_stream_add_ports(ngx_conf_t *cf, ngx_array_t *ports, > > - ngx_stream_listen_t *listen); > > -static char *ngx_stream_optimize_servers(ngx_conf_t *cf, ngx_array_t *ports); > > + > > +static ngx_int_t ngx_stream_add_addresses(ngx_conf_t *cf, > > + ngx_stream_core_srv_conf_t *cscf, ngx_stream_conf_port_t *port, > > + ngx_stream_listen_opt_t *lsopt); > > +static ngx_int_t ngx_stream_add_address(ngx_conf_t *cf, > > + ngx_stream_core_srv_conf_t *cscf, ngx_stream_conf_port_t *port, > > + ngx_stream_listen_opt_t *lsopt); > > +static ngx_int_t ngx_stream_add_server(ngx_conf_t *cf, > > + ngx_stream_core_srv_conf_t *cscf, ngx_stream_conf_addr_t *addr); > > + > > +static ngx_int_t ngx_stream_optimize_servers(ngx_conf_t *cf, > > + ngx_stream_core_main_conf_t *cmcf, ngx_array_t *ports); > > +static ngx_int_t ngx_stream_server_names(ngx_conf_t *cf, > > + ngx_stream_core_main_conf_t *cmcf, ngx_stream_conf_addr_t *addr); > > +static ngx_int_t ngx_stream_cmp_conf_addrs(const void *one, const void *two); > > +static int ngx_libc_cdecl ngx_stream_cmp_dns_wildcards(const void *one, > > + const void *two); > > + > > +static ngx_int_t ngx_stream_init_listening(ngx_conf_t *cf, > > + ngx_stream_conf_port_t *port); > > +static ngx_listening_t *ngx_stream_add_listening(ngx_conf_t *cf, > > + ngx_stream_conf_addr_t *addr); > > static ngx_int_t ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *stport, > > ngx_stream_conf_addr_t *addr); > > #if (NGX_HAVE_INET6) > > static ngx_int_t ngx_stream_add_addrs6(ngx_conf_t *cf, > > ngx_stream_port_t *stport, ngx_stream_conf_addr_t *addr); > > #endif > > -static ngx_int_t ngx_stream_cmp_conf_addrs(const void *one, const void *two); > > > > > > ngx_uint_t ngx_stream_max_module; > > @@ -74,10 +92,8 @@ static char * > > ngx_stream_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > > { > > char *rv; > > - ngx_uint_t i, m, mi, s; > > + ngx_uint_t m, mi, s; > > Nitpicking: > virtual servers support is largely based on the existing code > in http module, and we'd like to keep it as similar as possible > to reduce maintenance costs. Still, it has subtle differences. > For example, http has a different declaration order in a similar > code for ngx_http_block(). As part of this change, you may want > to re-align this in stream to how it's done in http, up to you. 
> > > ngx_conf_t pcf; > > - ngx_array_t ports; > > - ngx_stream_listen_t *listen; > > ngx_stream_module_t *module; > > ngx_stream_conf_ctx_t *ctx; > > ngx_stream_core_srv_conf_t **cscfp; > > @@ -251,21 +267,13 @@ ngx_stream_block(ngx_conf_t *cf, ngx_com > > return NGX_CONF_ERROR; > > } > > > > - if (ngx_array_init(&ports, cf->temp_pool, 4, sizeof(ngx_stream_conf_port_t)) > > - != NGX_OK) > > - { > > + /* optimize the lists of ports, addresses and server names */ > > + > > + if (ngx_stream_optimize_servers(cf, cmcf, cmcf->ports) != NGX_OK) { > > return NGX_CONF_ERROR; > > } > > > > - listen = cmcf->listen.elts; > > - > > - for (i = 0; i < cmcf->listen.nelts; i++) { > > - if (ngx_stream_add_ports(cf, &ports, &listen[i]) != NGX_OK) { > > - return NGX_CONF_ERROR; > > - } > > - } > > - > > - return ngx_stream_optimize_servers(cf, &ports); > > + return NGX_CONF_OK; > > } > > > > > > @@ -377,73 +385,295 @@ ngx_stream_init_phase_handlers(ngx_conf_ > > } > > > > > > -static ngx_int_t > > -ngx_stream_add_ports(ngx_conf_t *cf, ngx_array_t *ports, > > - ngx_stream_listen_t *listen) > > +ngx_int_t > > +ngx_stream_add_listen(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, > > + ngx_stream_listen_opt_t *lsopt) > > { > > - in_port_t p; > > - ngx_uint_t i; > > - struct sockaddr *sa; > > - ngx_stream_conf_port_t *port; > > - ngx_stream_conf_addr_t *addr; > > + in_port_t p; > > + ngx_uint_t i; > > + struct sockaddr *sa; > > + ngx_stream_conf_port_t *port; > > + ngx_stream_core_main_conf_t *cmcf; > > + > > + cmcf = ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); > > > > - sa = listen->sockaddr; > > + if (cmcf->ports == NULL) { > > + cmcf->ports = ngx_array_create(cf->temp_pool, 2, > > + sizeof(ngx_stream_conf_port_t)); > > + if (cmcf->ports == NULL) { > > + return NGX_ERROR; > > + } > > + } > > + > > + sa = lsopt->sockaddr; > > p = ngx_inet_get_port(sa); > > > > - port = ports->elts; > > - for (i = 0; i < ports->nelts; i++) { > > + port = cmcf->ports->elts; > > + for (i = 0; i < cmcf->ports->nelts; i++) { > > > > - if (p == port[i].port > > - && listen->type == port[i].type > > - && sa->sa_family == port[i].family) > > + if (p != port[i].port > > + || lsopt->type != port[i].type > > + || sa->sa_family != port[i].family) > > { > > - /* a port is already in the port list */ > > + continue; > > + } > > > > - port = &port[i]; > > - goto found; > > - } > > + /* a port is already in the port list */ > > + > > + return ngx_stream_add_addresses(cf, cscf, &port[i], lsopt); > > } > > > > /* add a port to the port list */ > > > > - port = ngx_array_push(ports); > > + port = ngx_array_push(cmcf->ports); > > if (port == NULL) { > > return NGX_ERROR; > > } > > > > port->family = sa->sa_family; > > - port->type = listen->type; > > + port->type = lsopt->type; > > port->port = p; > > + port->addrs.elts = NULL; > > + > > + return ngx_stream_add_address(cf, cscf, port, lsopt); > > +} > > + > > + > > +static ngx_int_t > > +ngx_stream_add_addresses(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, > > + ngx_stream_conf_port_t *port, ngx_stream_listen_opt_t *lsopt) > > +{ > > + ngx_uint_t i, default_server, proxy_protocol, > > + protocols, protocols_prev; > > + ngx_stream_conf_addr_t *addr; > > +#if (NGX_STREAM_SSL) > > + ngx_uint_t ssl; > > +#endif > > + > > + /* > > + * we cannot compare whole sockaddr struct's as kernel > > + * may fill some fields in inherited sockaddr struct's > > + */ > > + > > + addr = port->addrs.elts; > > + > > + for (i = 0; i < port->addrs.nelts; i++) { > > + > > + if 
(ngx_cmp_sockaddr(lsopt->sockaddr, lsopt->socklen, > > + addr[i].opt.sockaddr, > > + addr[i].opt.socklen, 0) > > + != NGX_OK) > > + { > > + continue; > > + } > > + > > + /* the address is already in the address list */ > > + > > + if (ngx_stream_add_server(cf, cscf, &addr[i]) != NGX_OK) { > > + return NGX_ERROR; > > + } > > + > > + /* preserve default_server bit during listen options overwriting */ > > + default_server = addr[i].opt.default_server; > > + > > + proxy_protocol = lsopt->proxy_protocol || addr[i].opt.proxy_protocol; > > + protocols = lsopt->proxy_protocol; > > + protocols_prev = addr[i].opt.proxy_protocol; > > + > > +#if (NGX_STREAM_SSL) > > + ssl = lsopt->ssl || addr[i].opt.ssl; > > + protocols |= lsopt->ssl << 1; > > + protocols_prev |= addr[i].opt.ssl << 1; > > +#endif > > + > > + if (lsopt->set) { > > + > > + if (addr[i].opt.set) { > > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > + "duplicate listen options for %V", > > + &addr[i].opt.addr_text); > > + return NGX_ERROR; > > + } > > + > > + addr[i].opt = *lsopt; > > + } > > + > > + /* check the duplicate "default" server for this address:port */ > > > > - if (ngx_array_init(&port->addrs, cf->temp_pool, 2, > > - sizeof(ngx_stream_conf_addr_t)) > > - != NGX_OK) > > - { > > - return NGX_ERROR; > > + if (lsopt->default_server) { > > + > > + if (default_server) { > > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > + "a duplicate default server for %V", > > + &addr[i].opt.addr_text); > > + return NGX_ERROR; > > + } > > + > > + default_server = 1; > > + addr[i].default_server = cscf; > > + } > > + > > + /* check for conflicting protocol options */ > > + > > + if ((protocols | protocols_prev) != protocols_prev) { > > + > > + /* options added */ > > + > > + if ((addr[i].opt.set && !lsopt->set) > > + || addr[i].protocols_changed > > + || (protocols | protocols_prev) != protocols) > > + { > > + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, > > + "protocol options redefined for %V", > > + &addr[i].opt.addr_text); > > + } > > + > > + addr[i].protocols = protocols_prev; > > + addr[i].protocols_set = 1; > > + addr[i].protocols_changed = 1; > > + > > + } else if ((protocols_prev | protocols) != protocols) { > > + > > + /* options removed */ > > + > > + if (lsopt->set > > + || (addr[i].protocols_set && protocols != addr[i].protocols)) > > + { > > + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, > > + "protocol options redefined for %V", > > + &addr[i].opt.addr_text); > > + } > > + > > + addr[i].protocols = protocols; > > + addr[i].protocols_set = 1; > > + addr[i].protocols_changed = 1; > > + > > + } else { > > + > > + /* the same options */ > > + > > + if ((lsopt->set && addr[i].protocols_changed) > > + || (addr[i].protocols_set && protocols != addr[i].protocols)) > > + { > > + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, > > + "protocol options redefined for %V", > > + &addr[i].opt.addr_text); > > + } > > + > > + addr[i].protocols = protocols; > > + addr[i].protocols_set = 1; > > + } > > + > > + addr[i].opt.default_server = default_server; > > + addr[i].opt.proxy_protocol = proxy_protocol; > > +#if (NGX_STREAM_SSL) > > + addr[i].opt.ssl = ssl; > > +#endif > > + > > + return NGX_OK; > > } > > > > -found: > > + /* add the address to the addresses list that bound to this port */ > > + > > + return ngx_stream_add_address(cf, cscf, port, lsopt); > > +} > > + > > + > > +/* > > + * add the server address, the server names and the server core module > > + * configurations to the port list > > + */ > > + > > +static ngx_int_t > > +ngx_stream_add_address(ngx_conf_t 
*cf, ngx_stream_core_srv_conf_t *cscf, > > + ngx_stream_conf_port_t *port, ngx_stream_listen_opt_t *lsopt) > > +{ > > + ngx_stream_conf_addr_t *addr; > > + > > + if (port->addrs.elts == NULL) { > > + if (ngx_array_init(&port->addrs, cf->temp_pool, 4, > > + sizeof(ngx_stream_conf_addr_t)) > > + != NGX_OK) > > + { > > + return NGX_ERROR; > > + } > > + } > > > > addr = ngx_array_push(&port->addrs); > > if (addr == NULL) { > > return NGX_ERROR; > > } > > > > - addr->opt = *listen; > > + addr->opt = *lsopt; > > + addr->protocols = 0; > > + addr->protocols_set = 0; > > + addr->protocols_changed = 0; > > + addr->hash.buckets = NULL; > > + addr->hash.size = 0; > > + addr->wc_head = NULL; > > + addr->wc_tail = NULL; > > +#if (NGX_PCRE) > > + addr->nregex = 0; > > + addr->regex = NULL; > > +#endif > > + addr->default_server = cscf; > > + addr->servers.elts = NULL; > > + > > + return ngx_stream_add_server(cf, cscf, addr); > > +} > > + > > + > > +/* add the server core module configuration to the address:port */ > > + > > +static ngx_int_t > > +ngx_stream_add_server(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, > > + ngx_stream_conf_addr_t *addr) > > +{ > > + ngx_uint_t i; > > + ngx_stream_core_srv_conf_t **server; > > + > > + if (addr->servers.elts == NULL) { > > + if (ngx_array_init(&addr->servers, cf->temp_pool, 4, > > + sizeof(ngx_stream_core_srv_conf_t *)) > > + != NGX_OK) > > + { > > + return NGX_ERROR; > > + } > > + > > + } else { > > + server = addr->servers.elts; > > + for (i = 0; i < addr->servers.nelts; i++) { > > + if (server[i] == cscf) { > > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > + "a duplicate listen %V", > > + &addr->opt.addr_text); > > + return NGX_ERROR; > > + } > > + } > > + } > > + > > + server = ngx_array_push(&addr->servers); > > + if (server == NULL) { > > + return NGX_ERROR; > > + } > > + > > + *server = cscf; > > > > return NGX_OK; > > } > > > > > > -static char * > > -ngx_stream_optimize_servers(ngx_conf_t *cf, ngx_array_t *ports) > > +static ngx_int_t > > +ngx_stream_optimize_servers(ngx_conf_t *cf, ngx_stream_core_main_conf_t *cmcf, > > + ngx_array_t *ports) > > { > > - ngx_uint_t i, p, last, bind_wildcard; > > - ngx_listening_t *ls; > > - ngx_stream_port_t *stport; > > - ngx_stream_conf_port_t *port; > > - ngx_stream_conf_addr_t *addr; > > - ngx_stream_core_srv_conf_t *cscf; > > + ngx_uint_t p, a; > > + ngx_stream_conf_port_t *port; > > + ngx_stream_conf_addr_t *addr; > > + > > + if (ports == NULL) { > > + return NGX_OK; > > + } > > > > port = ports->elts; > > for (p = 0; p < ports->nelts; p++) { > > @@ -451,175 +681,191 @@ ngx_stream_optimize_servers(ngx_conf_t * > > ngx_sort(port[p].addrs.elts, (size_t) port[p].addrs.nelts, > > sizeof(ngx_stream_conf_addr_t), ngx_stream_cmp_conf_addrs); > > > > - addr = port[p].addrs.elts; > > - last = port[p].addrs.nelts; > > - > > /* > > - * if there is the binding to the "*:port" then we need to bind() > > - * to the "*:port" only and ignore the other bindings > > + * check whether all name-based servers have the same > > + * configuration as a default server for given address:port > > */ > > > > - if (addr[last - 1].opt.wildcard) { > > - addr[last - 1].opt.bind = 1; > > - bind_wildcard = 1; > > + addr = port[p].addrs.elts; > > + for (a = 0; a < port[p].addrs.nelts; a++) { > > > > - } else { > > - bind_wildcard = 0; > > + if (addr[a].servers.nelts > 1 > > +#if (NGX_PCRE) > > + || addr[a].default_server->captures > > +#endif > > + ) > > + { > > + if (ngx_stream_server_names(cf, cmcf, &addr[a]) != NGX_OK) { > > + return 
NGX_ERROR; > > + } > > + } > > } > > > > - i = 0; > > - > > - while (i < last) { > > - > > - if (bind_wildcard && !addr[i].opt.bind) { > > - i++; > > - continue; > > - } > > - > > - ls = ngx_create_listening(cf, addr[i].opt.sockaddr, > > - addr[i].opt.socklen); > > - if (ls == NULL) { > > - return NGX_CONF_ERROR; > > - } > > - > > - ls->addr_ntop = 1; > > - ls->handler = ngx_stream_init_connection; > > - ls->pool_size = 256; > > - ls->type = addr[i].opt.type; > > - > > - cscf = addr->opt.ctx->srv_conf[ngx_stream_core_module.ctx_index]; > > - > > - ls->logp = cscf->error_log; > > - ls->log.data = &ls->addr_text; > > - ls->log.handler = ngx_accept_log_error; > > - > > - ls->backlog = addr[i].opt.backlog; > > - ls->rcvbuf = addr[i].opt.rcvbuf; > > - ls->sndbuf = addr[i].opt.sndbuf; > > - > > - ls->wildcard = addr[i].opt.wildcard; > > - > > - ls->keepalive = addr[i].opt.so_keepalive; > > -#if (NGX_HAVE_KEEPALIVE_TUNABLE) > > - ls->keepidle = addr[i].opt.tcp_keepidle; > > - ls->keepintvl = addr[i].opt.tcp_keepintvl; > > - ls->keepcnt = addr[i].opt.tcp_keepcnt; > > -#endif > > - > > -#if (NGX_HAVE_INET6) > > - ls->ipv6only = addr[i].opt.ipv6only; > > -#endif > > - > > -#if (NGX_HAVE_TCP_FASTOPEN) > > - ls->fastopen = addr[i].opt.fastopen; > > -#endif > > - > > -#if (NGX_HAVE_REUSEPORT) > > - ls->reuseport = addr[i].opt.reuseport; > > -#endif > > - > > - stport = ngx_palloc(cf->pool, sizeof(ngx_stream_port_t)); > > - if (stport == NULL) { > > - return NGX_CONF_ERROR; > > - } > > - > > - ls->servers = stport; > > - > > - stport->naddrs = i + 1; > > - > > - switch (ls->sockaddr->sa_family) { > > -#if (NGX_HAVE_INET6) > > - case AF_INET6: > > - if (ngx_stream_add_addrs6(cf, stport, addr) != NGX_OK) { > > - return NGX_CONF_ERROR; > > - } > > - break; > > -#endif > > - default: /* AF_INET */ > > - if (ngx_stream_add_addrs(cf, stport, addr) != NGX_OK) { > > - return NGX_CONF_ERROR; > > - } > > - break; > > - } > > - > > - addr++; > > - last--; > > + if (ngx_stream_init_listening(cf, &port[p]) != NGX_OK) { > > + return NGX_ERROR; > > } > > } > > > > - return NGX_CONF_OK; > > -} > > - > > - > > -static ngx_int_t > > -ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *stport, > > - ngx_stream_conf_addr_t *addr) > > -{ > > - ngx_uint_t i; > > - struct sockaddr_in *sin; > > - ngx_stream_in_addr_t *addrs; > > - > > - stport->addrs = ngx_pcalloc(cf->pool, > > - stport->naddrs * sizeof(ngx_stream_in_addr_t)); > > - if (stport->addrs == NULL) { > > - return NGX_ERROR; > > - } > > - > > - addrs = stport->addrs; > > - > > - for (i = 0; i < stport->naddrs; i++) { > > - > > - sin = (struct sockaddr_in *) addr[i].opt.sockaddr; > > - addrs[i].addr = sin->sin_addr.s_addr; > > - > > - addrs[i].conf.ctx = addr[i].opt.ctx; > > -#if (NGX_STREAM_SSL) > > - addrs[i].conf.ssl = addr[i].opt.ssl; > > -#endif > > - addrs[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; > > - addrs[i].conf.addr_text = addr[i].opt.addr_text; > > - } > > - > > return NGX_OK; > > } > > > > > > -#if (NGX_HAVE_INET6) > > - > > static ngx_int_t > > -ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *stport, > > +ngx_stream_server_names(ngx_conf_t *cf, ngx_stream_core_main_conf_t *cmcf, > > ngx_stream_conf_addr_t *addr) > > { > > - ngx_uint_t i; > > - struct sockaddr_in6 *sin6; > > - ngx_stream_in6_addr_t *addrs6; > > + ngx_int_t rc; > > + ngx_uint_t n, s; > > + ngx_hash_init_t hash; > > + ngx_hash_keys_arrays_t ha; > > + ngx_stream_server_name_t *name; > > + ngx_stream_core_srv_conf_t **cscfp; > > +#if (NGX_PCRE) > > + ngx_uint_t regex, 
i; > > > > - stport->addrs = ngx_pcalloc(cf->pool, > > - stport->naddrs * sizeof(ngx_stream_in6_addr_t)); > > - if (stport->addrs == NULL) { > > + regex = 0; > > +#endif > > + > > + ngx_memzero(&ha, sizeof(ngx_hash_keys_arrays_t)); > > + > > + ha.temp_pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, cf->log); > > + if (ha.temp_pool == NULL) { > > return NGX_ERROR; > > } > > > > - addrs6 = stport->addrs; > > + ha.pool = cf->pool; > > + > > + if (ngx_hash_keys_array_init(&ha, NGX_HASH_LARGE) != NGX_OK) { > > + goto failed; > > + } > > + > > + cscfp = addr->servers.elts; > > + > > + for (s = 0; s < addr->servers.nelts; s++) { > > + > > + name = cscfp[s]->server_names.elts; > > + > > + for (n = 0; n < cscfp[s]->server_names.nelts; n++) { > > > > - for (i = 0; i < stport->naddrs; i++) { > > +#if (NGX_PCRE) > > + if (name[n].regex) { > > + regex++; > > + continue; > > + } > > +#endif > > > > - sin6 = (struct sockaddr_in6 *) addr[i].opt.sockaddr; > > - addrs6[i].addr6 = sin6->sin6_addr; > > + rc = ngx_hash_add_key(&ha, &name[n].name, name[n].server, > > + NGX_HASH_WILDCARD_KEY); > > + > > + if (rc == NGX_ERROR) { > > + goto failed; > > + } > > > > - addrs6[i].conf.ctx = addr[i].opt.ctx; > > -#if (NGX_STREAM_SSL) > > - addrs6[i].conf.ssl = addr[i].opt.ssl; > > -#endif > > - addrs6[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; > > - addrs6[i].conf.addr_text = addr[i].opt.addr_text; > > + if (rc == NGX_DECLINED) { > > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > > + "invalid server name or wildcard \"%V\" on %V", > > + &name[n].name, &addr->opt.addr_text); > > + goto failed; > > + } > > + > > + if (rc == NGX_BUSY) { > > + ngx_log_error(NGX_LOG_WARN, cf->log, 0, > > + "conflicting server name \"%V\" on %V, ignored", > > + &name[n].name, &addr->opt.addr_text); > > + } > > + } > > + } > > + > > + hash.key = ngx_hash_key_lc; > > + hash.max_size = cmcf->server_names_hash_max_size; > > + hash.bucket_size = cmcf->server_names_hash_bucket_size; > > + hash.name = "server_names_hash"; > > + hash.pool = cf->pool; > > + > > + if (ha.keys.nelts) { > > + hash.hash = &addr->hash; > > + hash.temp_pool = NULL; > > + > > + if (ngx_hash_init(&hash, ha.keys.elts, ha.keys.nelts) != NGX_OK) { > > + goto failed; > > + } > > } > > > > - return NGX_OK; > > -} > > + if (ha.dns_wc_head.nelts) { > > + > > + ngx_qsort(ha.dns_wc_head.elts, (size_t) ha.dns_wc_head.nelts, > > + sizeof(ngx_hash_key_t), ngx_stream_cmp_dns_wildcards); > > + > > + hash.hash = NULL; > > + hash.temp_pool = ha.temp_pool; > > + > > + if (ngx_hash_wildcard_init(&hash, ha.dns_wc_head.elts, > > + ha.dns_wc_head.nelts) > > + != NGX_OK) > > + { > > + goto failed; > > + } > > + > > + addr->wc_head = (ngx_hash_wildcard_t *) hash.hash; > > + } > > + > > + if (ha.dns_wc_tail.nelts) { > > + > > + ngx_qsort(ha.dns_wc_tail.elts, (size_t) ha.dns_wc_tail.nelts, > > + sizeof(ngx_hash_key_t), ngx_stream_cmp_dns_wildcards); > > + > > + hash.hash = NULL; > > + hash.temp_pool = ha.temp_pool; > > + > > + if (ngx_hash_wildcard_init(&hash, ha.dns_wc_tail.elts, > > + ha.dns_wc_tail.nelts) > > + != NGX_OK) > > + { > > + goto failed; > > + } > > + > > + addr->wc_tail = (ngx_hash_wildcard_t *) hash.hash; > > + } > > + > > + ngx_destroy_pool(ha.temp_pool); > > + > > +#if (NGX_PCRE) > > + > > + if (regex == 0) { > > + return NGX_OK; > > + } > > + > > + addr->nregex = regex; > > + addr->regex = ngx_palloc(cf->pool, > > + regex * sizeof(ngx_stream_server_name_t)); > > + if (addr->regex == NULL) { > > + return NGX_ERROR; > > + } > > + > > + i = 0; > > + > > + for (s = 0; s < 
addr->servers.nelts; s++) { > > + > > + name = cscfp[s]->server_names.elts; > > + > > + for (n = 0; n < cscfp[s]->server_names.nelts; n++) { > > + if (name[n].regex) { > > + addr->regex[i++] = name[n]; > > + } > > + } > > + } > > > > #endif > > > > + return NGX_OK; > > + > > +failed: > > + > > + ngx_destroy_pool(ha.temp_pool); > > + > > + return NGX_ERROR; > > +} > > + > > > > static ngx_int_t > > ngx_stream_cmp_conf_addrs(const void *one, const void *two) > > @@ -630,12 +876,12 @@ ngx_stream_cmp_conf_addrs(const void *on > > second = (ngx_stream_conf_addr_t *) two; > > > > if (first->opt.wildcard) { > > - /* a wildcard must be the last resort, shift it to the end */ > > + /* a wildcard address must be the last resort, shift it to the end */ > > return 1; > > } > > > > if (second->opt.wildcard) { > > - /* a wildcard must be the last resort, shift it to the end */ > > + /* a wildcard address must be the last resort, shift it to the end */ > > return -1; > > } > > > > @@ -653,3 +899,289 @@ ngx_stream_cmp_conf_addrs(const void *on > > > > return 0; > > } > > + > > + > > +static int ngx_libc_cdecl > > +ngx_stream_cmp_dns_wildcards(const void *one, const void *two) > > +{ > > + ngx_hash_key_t *first, *second; > > + > > + first = (ngx_hash_key_t *) one; > > + second = (ngx_hash_key_t *) two; > > + > > + return ngx_dns_strcmp(first->key.data, second->key.data); > > +} > > + > > + > > +static ngx_int_t > > +ngx_stream_init_listening(ngx_conf_t *cf, ngx_stream_conf_port_t *port) > > +{ > > + ngx_uint_t i, last, bind_wildcard; > > + ngx_listening_t *ls; > > + ngx_stream_port_t *hport; > > Here and below you renamed "stport" (as in "stream port") back to > "hport" (as in "http port"), which apparently doesn't belong here. > > > + ngx_stream_conf_addr_t *addr; > > + > > + addr = port->addrs.elts; > > + last = port->addrs.nelts; > > + > > + /* > > + * If there is a binding to an "*:port" then we need to bind() to > > + * the "*:port" only and ignore other implicit bindings. The bindings > > + * have been already sorted: explicit bindings are on the start, then > > + * implicit bindings go, and wildcard binding is in the end. 
> > + */ > > + > > + if (addr[last - 1].opt.wildcard) { > > + addr[last - 1].opt.bind = 1; > > + bind_wildcard = 1; > > + > > + } else { > > + bind_wildcard = 0; > > + } > > + > > + i = 0; > > + > > + while (i < last) { > > + > > + if (bind_wildcard && !addr[i].opt.bind) { > > + i++; > > + continue; > > + } > > + > > + ls = ngx_stream_add_listening(cf, &addr[i]); > > + if (ls == NULL) { > > + return NGX_ERROR; > > + } > > + > > + hport = ngx_pcalloc(cf->pool, sizeof(ngx_stream_port_t)); > > + if (hport == NULL) { > > + return NGX_ERROR; > > + } > > + > > + ls->servers = hport; > > + > > + hport->naddrs = i + 1; > > + > > + switch (ls->sockaddr->sa_family) { > > + > > +#if (NGX_HAVE_INET6) > > + case AF_INET6: > > + if (ngx_stream_add_addrs6(cf, hport, addr) != NGX_OK) { > > + return NGX_ERROR; > > + } > > + break; > > +#endif > > + default: /* AF_INET */ > > + if (ngx_stream_add_addrs(cf, hport, addr) != NGX_OK) { > > + return NGX_ERROR; > > + } > > + break; > > + } > > + > > + addr++; > > + last--; > > + } > > + > > + return NGX_OK; > > +} > > + > > + > > +static ngx_listening_t * > > +ngx_stream_add_listening(ngx_conf_t *cf, ngx_stream_conf_addr_t *addr) > > +{ > > + ngx_listening_t *ls; > > + ngx_stream_core_srv_conf_t *cscf; > > + > > + ls = ngx_create_listening(cf, addr->opt.sockaddr, addr->opt.socklen); > > + if (ls == NULL) { > > + return NULL; > > + } > > + > > + ls->addr_ntop = 1; > > + > > + ls->handler = ngx_stream_init_connection; > > + > > + cscf = addr->default_server; > > + ls->pool_size = 256; > > Nitpicking. > Current code has the following initialization order: > > ls->addr_ntop = 1; > ls->handler = ngx_stream_init_connection; > ls->pool_size = 256; > ls->type = addr[i].opt.type > > cscf = addr->opt.ctx->srv_conf[ngx_stream_core_module.ctx_index]; > > Besides "type", it makes sense to keep this order in the new code, as well: > > ls->handler = ngx_stream_init_connection; > > ls->pool_size = 256; > > cscf = addr->default_server; > > ls->logp = cscf->error_log; > > > + > > + ls->logp = cscf->error_log; > > + ls->log.data = &ls->addr_text; > > + ls->log.handler = ngx_accept_log_error; > > + > > > +#if (NGX_WIN32) > > + { > > + ngx_iocp_conf_t *iocpcf = NULL; > > + > > + if (ngx_get_conf(cf->cycle->conf_ctx, ngx_events_module)) { > > + iocpcf = ngx_event_get_conf(cf->cycle->conf_ctx, ngx_iocp_module); > > + } > > + if (iocpcf && iocpcf->acceptex_read) { > > + ls->post_accept_buffer_size = cscf->client_header_buffer_size; > > + } > > + } > > +#endif > > This part is out of scope of this change, and apparently it won't compile. > > > + > > + ls->type = addr->opt.type; > > + ls->backlog = addr->opt.backlog; > > + ls->rcvbuf = addr->opt.rcvbuf; > > + ls->sndbuf = addr->opt.sndbuf; > > + > > + ls->keepalive = addr->opt.so_keepalive; > > +#if (NGX_HAVE_KEEPALIVE_TUNABLE) > > + ls->keepidle = addr->opt.tcp_keepidle; > > + ls->keepintvl = addr->opt.tcp_keepintvl; > > + ls->keepcnt = addr->opt.tcp_keepcnt; > > +#endif > > + > > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > > + ls->accept_filter = addr->opt.accept_filter; > > +#endif > > + > > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > > + ls->deferred_accept = addr->opt.deferred_accept; > > +#endif > > + > > +#if (NGX_HAVE_INET6) > > + ls->ipv6only = addr->opt.ipv6only; > > +#endif > > + > > +#if (NGX_HAVE_SETFIB) > > + ls->setfib = addr->opt.setfib; > > +#endif > > This introduces accept_filter, deferred_accept, and setfib fields, > which is out of scope of this change. 
Anyway, this is useless > without corresponding support in ngx_stream_core_listen(). We need to catch up with all such missing functionality in Stream in future. > > +#if (NGX_HAVE_TCP_FASTOPEN) > > + ls->fastopen = addr->opt.fastopen; > > +#endif > > + > > +#if (NGX_HAVE_REUSEPORT) > > + ls->reuseport = addr->opt.reuseport; > > +#endif > > + > > + ls->wildcard = addr->opt.wildcard; > > + > > + return ls; > > +} > > + > > + > > +static ngx_int_t > > +ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *hport, > > + ngx_stream_conf_addr_t *addr) > > +{ > > + ngx_uint_t i; > > + struct sockaddr_in *sin; > > + ngx_stream_in_addr_t *addrs; > > + ngx_stream_virtual_names_t *vn; > > + > > + hport->addrs = ngx_pcalloc(cf->pool, > > + hport->naddrs * sizeof(ngx_stream_in_addr_t)); > > + if (hport->addrs == NULL) { > > + return NGX_ERROR; > > + } > > + > > + addrs = hport->addrs; > > + > > + for (i = 0; i < hport->naddrs; i++) { > > + > > + sin = (struct sockaddr_in *) addr[i].opt.sockaddr; > > + addrs[i].addr = sin->sin_addr.s_addr; > > + addrs[i].conf.default_server = addr[i].default_server; > > +#if (NGX_STREAM_SSL) > > + addrs[i].conf.ssl = addr[i].opt.ssl; > > +#endif > > + addrs[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; > > + > > + if (addr[i].hash.buckets == NULL > > + && (addr[i].wc_head == NULL > > + || addr[i].wc_head->hash.buckets == NULL) > > + && (addr[i].wc_tail == NULL > > + || addr[i].wc_tail->hash.buckets == NULL) > > +#if (NGX_PCRE) > > + && addr[i].nregex == 0 > > +#endif > > + ) > > + { > > + continue; > > + } > > + > > + vn = ngx_palloc(cf->pool, sizeof(ngx_stream_virtual_names_t)); > > + if (vn == NULL) { > > + return NGX_ERROR; > > + } > > + > > + addrs[i].conf.virtual_names = vn; > > + > > + vn->names.hash = addr[i].hash; > > + vn->names.wc_head = addr[i].wc_head; > > + vn->names.wc_tail = addr[i].wc_tail; > > +#if (NGX_PCRE) > > + vn->nregex = addr[i].nregex; > > + vn->regex = addr[i].regex; > > +#endif > > + } > > + > > + return NGX_OK; > > +} > > + > > + > > +#if (NGX_HAVE_INET6) > > + > > +static ngx_int_t > > +ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *hport, > > + ngx_stream_conf_addr_t *addr) > > +{ > > + ngx_uint_t i; > > + struct sockaddr_in6 *sin6; > > + ngx_stream_in6_addr_t *addrs6; > > + ngx_stream_virtual_names_t *vn; > > + > > + hport->addrs = ngx_pcalloc(cf->pool, > > + hport->naddrs * sizeof(ngx_stream_in6_addr_t)); > > + if (hport->addrs == NULL) { > > + return NGX_ERROR; > > + } > > + > > + addrs6 = hport->addrs; > > + > > + for (i = 0; i < hport->naddrs; i++) { > > + > > + sin6 = (struct sockaddr_in6 *) addr[i].opt.sockaddr; > > + addrs6[i].addr6 = sin6->sin6_addr; > > + addrs6[i].conf.default_server = addr[i].default_server; > > +#if (NGX_STREAM_SSL) > > + addrs6[i].conf.ssl = addr[i].opt.ssl; > > +#endif > > + addrs6[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; > > + > > + if (addr[i].hash.buckets == NULL > > + && (addr[i].wc_head == NULL > > + || addr[i].wc_head->hash.buckets == NULL) > > + && (addr[i].wc_tail == NULL > > + || addr[i].wc_tail->hash.buckets == NULL) > > +#if (NGX_PCRE) > > + && addr[i].nregex == 0 > > +#endif > > + ) > > + { > > + continue; > > + } > > + > > + vn = ngx_palloc(cf->pool, sizeof(ngx_stream_virtual_names_t)); > > + if (vn == NULL) { > > + return NGX_ERROR; > > + } > > + > > + addrs6[i].conf.virtual_names = vn; > > + > > + vn->names.hash = addr[i].hash; > > + vn->names.wc_head = addr[i].wc_head; > > + vn->names.wc_tail = addr[i].wc_tail; > > +#if (NGX_PCRE) > > + vn->nregex = 
addr[i].nregex; > > + vn->regex = addr[i].regex; > > +#endif > > + } > > + > > + return NGX_OK; > > +} > > + > > +#endif > > diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h > > --- a/src/stream/ngx_stream.h > > +++ b/src/stream/ngx_stream.h > > @@ -45,74 +45,39 @@ typedef struct { > > socklen_t socklen; > > ngx_str_t addr_text; > > > > - /* server ctx */ > > - ngx_stream_conf_ctx_t *ctx; > > - > > + unsigned set:1; > > + unsigned default_server:1; > > unsigned bind:1; > > unsigned wildcard:1; > > unsigned ssl:1; > > #if (NGX_HAVE_INET6) > > unsigned ipv6only:1; > > #endif > > + unsigned deferred_accept:1; > > unsigned reuseport:1; > > unsigned so_keepalive:2; > > unsigned proxy_protocol:1; > > + > > + int backlog; > > + int rcvbuf; > > + int sndbuf; > > + int type; > > +#if (NGX_HAVE_SETFIB) > > + int setfib; > > +#endif > > +#if (NGX_HAVE_TCP_FASTOPEN) > > + int fastopen; > > +#endif > > #if (NGX_HAVE_KEEPALIVE_TUNABLE) > > int tcp_keepidle; > > int tcp_keepintvl; > > int tcp_keepcnt; > > #endif > > - int backlog; > > - int rcvbuf; > > - int sndbuf; > > -#if (NGX_HAVE_TCP_FASTOPEN) > > - int fastopen; > > + > > +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > > + char *accept_filter; > > #endif > > Besides introducing unused fields, this part reshuffles > backlog .. type fields without a reason. It can be as minimal as: > > diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h > --- a/src/stream/ngx_stream.h > +++ b/src/stream/ngx_stream.h > @@ -45,9 +45,8 @@ typedef struct { > socklen_t socklen; > ngx_str_t addr_text; > > - /* server ctx */ > - ngx_stream_conf_ctx_t *ctx; > - > + unsigned set:1; > + unsigned default_server:1; > unsigned bind:1; > unsigned wildcard:1; > unsigned ssl:1; > > > - int type; > > -} ngx_stream_listen_t; > > - > > - > > -typedef struct { > > - ngx_stream_conf_ctx_t *ctx; > > - ngx_str_t addr_text; > > - unsigned ssl:1; > > - unsigned proxy_protocol:1; > > -} ngx_stream_addr_conf_t; > > - > > -typedef struct { > > - in_addr_t addr; > > - ngx_stream_addr_conf_t conf; > > -} ngx_stream_in_addr_t; > > - > > - > > -#if (NGX_HAVE_INET6) > > - > > -typedef struct { > > - struct in6_addr addr6; > > - ngx_stream_addr_conf_t conf; > > -} ngx_stream_in6_addr_t; > > - > > -#endif > > - > > - > > -typedef struct { > > - /* ngx_stream_in_addr_t or ngx_stream_in6_addr_t */ > > - void *addrs; > > - ngx_uint_t naddrs; > > -} ngx_stream_port_t; > > - > > - > > -typedef struct { > > - int family; > > - int type; > > - in_port_t port; > > - ngx_array_t addrs; /* array of ngx_stream_conf_addr_t */ > > -} ngx_stream_conf_port_t; > > - > > - > > -typedef struct { > > - ngx_stream_listen_t opt; > > -} ngx_stream_conf_addr_t; > > +} ngx_stream_listen_opt_t; > > > > > > typedef enum { > > @@ -153,7 +118,6 @@ typedef struct { > > > > typedef struct { > > ngx_array_t servers; /* ngx_stream_core_srv_conf_t */ > > - ngx_array_t listen; /* ngx_stream_listen_t */ > > > > ngx_stream_phase_engine_t phase_engine; > > > > @@ -163,16 +127,24 @@ typedef struct { > > ngx_array_t prefix_variables; /* ngx_stream_variable_t */ > > ngx_uint_t ncaptures; > > > > + ngx_uint_t server_names_hash_max_size; > > + ngx_uint_t server_names_hash_bucket_size; > > + > > ngx_uint_t variables_hash_max_size; > > ngx_uint_t variables_hash_bucket_size; > > > > ngx_hash_keys_arrays_t *variables_keys; > > > > + ngx_array_t *ports; > > + > > ngx_stream_phase_t phases[NGX_STREAM_LOG_PHASE + 1]; > > } ngx_stream_core_main_conf_t; > > > > > > typedef struct { > > + /* array of the 
ngx_stream_server_name_t, "server_name" directive */ > > + ngx_array_t server_names; > > + > > ngx_stream_content_handler_pt handler; > > > > ngx_stream_conf_ctx_t *ctx; > > @@ -180,6 +152,8 @@ typedef struct { > > u_char *file_name; > > ngx_uint_t line; > > > > + ngx_str_t server_name; > > + > > ngx_flag_t tcp_nodelay; > > size_t preread_buffer_size; > > ngx_msec_t preread_timeout; > > @@ -191,10 +165,99 @@ typedef struct { > > > > ngx_msec_t proxy_protocol_timeout; > > > > - ngx_uint_t listen; /* unsigned listen:1; */ > > + unsigned listen:1; > > +#if (NGX_PCRE) > > + unsigned captures:1; > > +#endif > > } ngx_stream_core_srv_conf_t; > > > > > > +/* list of structures to find core_srv_conf quickly at run time */ > > + > > + > > +typedef struct { > > +#if (NGX_PCRE) > > + ngx_stream_regex_t *regex; > > +#endif > > + ngx_stream_core_srv_conf_t *server; /* virtual name server conf */ > > + ngx_str_t name; > > +} ngx_stream_server_name_t; > > + > > + > > +typedef struct { > > + ngx_hash_combined_t names; > > + > > + ngx_uint_t nregex; > > + ngx_stream_server_name_t *regex; > > +} ngx_stream_virtual_names_t; > > + > > + > > +typedef struct { > > + /* the default server configuration for this address:port */ > > + ngx_stream_core_srv_conf_t *default_server; > > + > > + ngx_stream_virtual_names_t *virtual_names; > > + > > + ngx_str_t addr_text; > > This field is now unused. > > > + unsigned ssl:1; > > + unsigned proxy_protocol:1; > > +} ngx_stream_addr_conf_t; > > + > > + > > +typedef struct { > > + in_addr_t addr; > > + ngx_stream_addr_conf_t conf; > > +} ngx_stream_in_addr_t; > > + > > + > > +#if (NGX_HAVE_INET6) > > + > > +typedef struct { > > + struct in6_addr addr6; > > + ngx_stream_addr_conf_t conf; > > +} ngx_stream_in6_addr_t; > > + > > +#endif > > + > > + > > +typedef struct { > > + /* ngx_stream_in_addr_t or ngx_stream_in6_addr_t */ > > + void *addrs; > > + ngx_uint_t naddrs; > > +} ngx_stream_port_t; > > + > > + > > +typedef struct { > > + int family; > > + int type; > > + in_port_t port; > > + ngx_array_t addrs; /* array of ngx_stream_conf_addr_t */ > > +} ngx_stream_conf_port_t; > > + > > + > > +typedef struct { > > + ngx_stream_listen_opt_t opt; > > + > > + unsigned protocols:3; > > + unsigned protocols_set:1; > > + unsigned protocols_changed:1; > > + > > + ngx_hash_t hash; > > + ngx_hash_wildcard_t *wc_head; > > + ngx_hash_wildcard_t *wc_tail; > > + > > +#if (NGX_PCRE) > > + ngx_uint_t nregex; > > + ngx_stream_server_name_t *regex; > > +#endif > > + > > + /* the default server configuration for this address:port */ > > + ngx_stream_core_srv_conf_t *default_server; > > + ngx_array_t servers; > > + /* array of ngx_stream_core_srv_conf_t */ > > misaligned to the right side (off by one) > > > +} ngx_stream_conf_addr_t; > > + > > + > > struct ngx_stream_session_s { > > uint32_t signature; /* "STRM" */ > > > > @@ -210,6 +273,8 @@ struct ngx_stream_session_s { > > void **main_conf; > > void **srv_conf; > > > > + ngx_stream_virtual_names_t *virtual_names; > > + > > ngx_stream_upstream_t *upstream; > > ngx_array_t *upstream_states; > > /* of ngx_stream_upstream_state_t */ > > @@ -283,6 +348,8 @@ typedef struct { > > #define NGX_STREAM_WRITE_BUFFERED 0x10 > > > > > > +ngx_int_t ngx_stream_add_listen(ngx_conf_t *cf, > > + ngx_stream_core_srv_conf_t *cscf, ngx_stream_listen_opt_t *lsopt); > > This deserves two blank lines to divide configuration and > runtime functions, and somewhat similar to http. 
> > > void ngx_stream_core_run_phases(ngx_stream_session_t *s); > > ngx_int_t ngx_stream_core_generic_phase(ngx_stream_session_t *s, > > ngx_stream_phase_handler_t *ph); > > @@ -290,6 +357,10 @@ ngx_int_t ngx_stream_core_preread_phase( > > ngx_stream_phase_handler_t *ph); > > ngx_int_t ngx_stream_core_content_phase(ngx_stream_session_t *s, > > ngx_stream_phase_handler_t *ph); > > This deserves a blank line. > > > +ngx_int_t ngx_stream_find_virtual_server(ngx_stream_session_t *s, > > + ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp); > > +ngx_int_t ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, > > + ngx_uint_t alloc); > > Apparently, it makes sense to reverse-order these functions > to make them appear in the order they are called. > > > > > > > void ngx_stream_init_connection(ngx_connection_t *c); > > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > > --- a/src/stream/ngx_stream_core_module.c > > +++ b/src/stream/ngx_stream_core_module.c > > @@ -26,6 +26,8 @@ static char *ngx_stream_core_server(ngx_ > > void *conf); > > static char *ngx_stream_core_listen(ngx_conf_t *cf, ngx_command_t *cmd, > > void *conf); > > +static char *ngx_stream_core_server_name(ngx_conf_t *cf, ngx_command_t *cmd, > > + void *conf); > > static char *ngx_stream_core_resolver(ngx_conf_t *cf, ngx_command_t *cmd, > > void *conf); > > > > @@ -46,6 +48,20 @@ static ngx_command_t ngx_stream_core_co > > offsetof(ngx_stream_core_main_conf_t, variables_hash_bucket_size), > > NULL }, > > > > + { ngx_string("server_names_hash_max_size"), > > + NGX_STREAM_MAIN_CONF|NGX_CONF_TAKE1, > > + ngx_conf_set_num_slot, > > + NGX_STREAM_MAIN_CONF_OFFSET, > > + offsetof(ngx_stream_core_main_conf_t, server_names_hash_max_size), > > + NULL }, > > + > > + { ngx_string("server_names_hash_bucket_size"), > > + NGX_STREAM_MAIN_CONF|NGX_CONF_TAKE1, > > + ngx_conf_set_num_slot, > > + NGX_STREAM_MAIN_CONF_OFFSET, > > + offsetof(ngx_stream_core_main_conf_t, server_names_hash_bucket_size), > > + NULL }, > > + > > { ngx_string("server"), > > NGX_STREAM_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_NOARGS, > > ngx_stream_core_server, > > @@ -60,6 +76,13 @@ static ngx_command_t ngx_stream_core_co > > 0, > > NULL }, > > > > + { ngx_string("server_name"), > > + NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, > > + ngx_stream_core_server_name, > > + NGX_STREAM_SRV_CONF_OFFSET, > > + 0, > > + NULL }, > > + > > { ngx_string("error_log"), > > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, > > ngx_stream_core_error_log, > > @@ -413,6 +436,149 @@ ngx_stream_core_content_phase(ngx_stream > > } > > > > > > +ngx_int_t > > +ngx_stream_find_virtual_server(ngx_stream_session_t *s, > > + ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp) > > +{ > > + ngx_stream_core_srv_conf_t *cscf; > > + > > + if (s->virtual_names == NULL) { > > + return NGX_DECLINED; > > + } > > + > > + cscf = ngx_hash_find_combined(&s->virtual_names->names, > > + ngx_hash_key(host->data, host->len), > > + host->data, host->len); > > + > > + if (cscf) { > > + *cscfp = cscf; > > + return NGX_OK; > > + } > > + > > +#if (NGX_PCRE) > > + > > + if (host->len && s->virtual_names->nregex) { > > + ngx_int_t n; > > + ngx_uint_t i; > > + ngx_stream_server_name_t *sn; > > + > > + sn = s->virtual_names->regex; > > + > > + for (i = 0; i < s->virtual_names->nregex; i++) { > > + > > + n = ngx_stream_regex_exec(s, sn[i].regex, host); > > + > > + if (n == NGX_DECLINED) { > > + continue; > > + } > > + > > + if (n == NGX_OK) { > > + *cscfp = sn[i].server; > > + return 
NGX_OK; > > + } > > + > > + return NGX_ERROR; > > + } > > + } > > + > > +#endif /* NGX_PCRE */ > > + > > + return NGX_DECLINED; > > +} > > + > > + > > +ngx_int_t > > +ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, ngx_uint_t alloc) > > +{ > > + u_char *h, ch; > > + size_t i, dot_pos, host_len; > > + > > + enum { > > + sw_usual = 0, > > + sw_literal, > > + sw_rest > > + } state; > > + > > + dot_pos = host->len; > > + host_len = host->len; > > + > > + h = host->data; > > + > > + state = sw_usual; > > + > > + for (i = 0; i < host->len; i++) { > > + ch = h[i]; > > + > > + switch (ch) { > > + > > + case '.': > > + if (dot_pos == i - 1) { > > + return NGX_DECLINED; > > + } > > + dot_pos = i; > > + break; > > + > > + case ':': > > + if (state == sw_usual) { > > + host_len = i; > > + state = sw_rest; > > + } > > + break; > > + > > + case '[': > > + if (i == 0) { > > + state = sw_literal; > > + } > > + break; > > + > > + case ']': > > + if (state == sw_literal) { > > + host_len = i + 1; > > + state = sw_rest; > > + } > > + break; > > + > > + default: > > + > > + if (ngx_path_separator(ch)) { > > + return NGX_DECLINED; > > + } > > + > > + if (ch <= 0x20 || ch == 0x7f) { > > + return NGX_DECLINED; > > + } > > + > > + if (ch >= 'A' && ch <= 'Z') { > > + alloc = 1; > > + } > > + > > + break; > > + } > > + } > > + > > + if (dot_pos == host_len - 1) { > > + host_len--; > > + } > > + > > + if (host_len == 0) { > > + return NGX_DECLINED; > > + } > > + > > + if (alloc) { > > + host->data = ngx_pnalloc(pool, host_len); > > + if (host->data == NULL) { > > + return NGX_ERROR; > > + } > > + > > + ngx_strlow(host->data, h, host_len); > > + } > > + > > + host->len = host_len; > > + > > + return NGX_OK; > > +} > > Same here. > > > + > > + > > static ngx_int_t > > ngx_stream_core_preconfiguration(ngx_conf_t *cf) > > { > > @@ -437,11 +603,8 @@ ngx_stream_core_create_main_conf(ngx_con > > return NULL; > > } > > > > - if (ngx_array_init(&cmcf->listen, cf->pool, 4, sizeof(ngx_stream_listen_t)) > > - != NGX_OK) > > - { > > - return NULL; > > - } > > + cmcf->server_names_hash_max_size = NGX_CONF_UNSET_UINT; > > + cmcf->server_names_hash_bucket_size = NGX_CONF_UNSET_UINT; > > > > cmcf->variables_hash_max_size = NGX_CONF_UNSET_UINT; > > cmcf->variables_hash_bucket_size = NGX_CONF_UNSET_UINT; > > @@ -455,6 +618,14 @@ ngx_stream_core_init_main_conf(ngx_conf_ > > { > > ngx_stream_core_main_conf_t *cmcf = conf; > > > > + ngx_conf_init_uint_value(cmcf->server_names_hash_max_size, 512); > > + ngx_conf_init_uint_value(cmcf->server_names_hash_bucket_size, > > + ngx_cacheline_size); > > + > > + cmcf->server_names_hash_bucket_size = > > + ngx_align(cmcf->server_names_hash_bucket_size, ngx_cacheline_size); > > + > > + > > extra blank line > > > ngx_conf_init_uint_value(cmcf->variables_hash_max_size, 1024); > > ngx_conf_init_uint_value(cmcf->variables_hash_bucket_size, 64); > > > > @@ -486,6 +657,13 @@ ngx_stream_core_create_srv_conf(ngx_conf > > * cscf->error_log = NULL; > > */ > > > > + if (ngx_array_init(&cscf->server_names, cf->temp_pool, 4, > > + sizeof(ngx_stream_server_name_t)) > > + != NGX_OK) > > + { > > + return NULL; > > + } > > + > > cscf->file_name = cf->conf_file->file.name.data; > > cscf->line = cf->conf_file->line; > > cscf->resolver_timeout = NGX_CONF_UNSET_MSEC; > > @@ -504,6 +682,9 @@ ngx_stream_core_merge_srv_conf(ngx_conf_ > > ngx_stream_core_srv_conf_t *prev = parent; > > ngx_stream_core_srv_conf_t *conf = child; > > > > + ngx_str_t name; > > + ngx_stream_server_name_t *sn; > > + > > 
ngx_conf_merge_msec_value(conf->resolver_timeout, > > prev->resolver_timeout, 30000); > > > > @@ -551,6 +732,37 @@ ngx_stream_core_merge_srv_conf(ngx_conf_ > > ngx_conf_merge_msec_value(conf->preread_timeout, > > prev->preread_timeout, 30000); > > > > + if (conf->server_names.nelts == 0) { > > + /* the array has 4 empty preallocated elements, so push cannot fail */ > > + sn = ngx_array_push(&conf->server_names); > > +#if (NGX_PCRE) > > + sn->regex = NULL; > > +#endif > > + sn->server = conf; > > + ngx_str_set(&sn->name, ""); > > + } > > + > > + sn = conf->server_names.elts; > > + name = sn[0].name; > > + > > +#if (NGX_PCRE) > > + if (sn->regex) { > > + name.len++; > > + name.data--; > > + } else > > +#endif > > + > > + if (name.data[0] == '.') { > > + name.len--; > > + name.data++; > > + } > > + > > + conf->server_name.len = name.len; > > + conf->server_name.data = ngx_pstrdup(cf->pool, &name); > > + if (conf->server_name.data == NULL) { > > + return NGX_CONF_ERROR; > > + } > > + > > return NGX_CONF_OK; > > } > > > > @@ -650,11 +862,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > { > > ngx_stream_core_srv_conf_t *cscf = conf; > > > > - ngx_str_t *value, size; > > - ngx_url_t u; > > - ngx_uint_t i, n, backlog; > > - ngx_stream_listen_t *ls, *als, *nls; > > - ngx_stream_core_main_conf_t *cmcf; > > + ngx_str_t *value, size; > > + ngx_url_t u; > > + ngx_uint_t i, n, backlog; > > + ngx_stream_listen_opt_t lsopt; > > > > cscf->listen = 1; > > > > @@ -675,51 +886,48 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > return NGX_CONF_ERROR; > > } > > > > - cmcf = ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); > > - > > - ls = ngx_array_push(&cmcf->listen); > > - if (ls == NULL) { > > - return NGX_CONF_ERROR; > > - } > > - > > - ngx_memzero(ls, sizeof(ngx_stream_listen_t)); > > + ngx_memzero(&lsopt, sizeof(ngx_stream_listen_opt_t)); > > > > - ls->backlog = NGX_LISTEN_BACKLOG; > > - ls->rcvbuf = -1; > > - ls->sndbuf = -1; > > - ls->type = SOCK_STREAM; > > - ls->ctx = cf->ctx; > > - > > + lsopt.backlog = NGX_LISTEN_BACKLOG; > > + lsopt.type = SOCK_STREAM; > > + lsopt.rcvbuf = -1; > > + lsopt.sndbuf = -1; > > #if (NGX_HAVE_TCP_FASTOPEN) > > - ls->fastopen = -1; > > + lsopt.fastopen = -1; > > #endif > > - > > #if (NGX_HAVE_INET6) > > - ls->ipv6only = 1; > > + lsopt.ipv6only = 1; > > #endif > > > > backlog = 0; > > > > for (i = 2; i < cf->args->nelts; i++) { > > > > + if (ngx_strcmp(value[i].data, "default_server") == 0 > > + || ngx_strcmp(value[i].data, "default") == 0) > > I don't think we should reintroduce "default" legacy in stream. > > > + { > > + lsopt.default_server = 1; > > + continue; > > + } > > + > > #if !(NGX_WIN32) > > if (ngx_strcmp(value[i].data, "udp") == 0) { > > - ls->type = SOCK_DGRAM; > > + lsopt.type = SOCK_DGRAM; > > continue; > > } > > #endif > > > > if (ngx_strcmp(value[i].data, "bind") == 0) { > > - ls->bind = 1; > > + lsopt.bind = 1; > > Note that here and below, setting lsopt.set is missing. > This renders unusable duplicate socket-level listen parameter > checks in ngx_stream_add_addresses(). > > Aside from that, there are several unimproved error messages in > ngx_stream_core_listen() such as "bind ipv6only is not supported". > They were fixed once in http in 1b05b9bbcebf, but similar fixes > were missed in mail at the time. Then stream was based on mail, > they reappeared there. It makes sense to fix them separately. Yes, this will be a part of the cathup process later. 
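To illustrate the lsopt.set point: with the flag set as in http, a configuration along these lines (addresses, names and backlog values are made up) would be rejected with the "duplicate listen options" error from ngx_stream_add_addresses(), while without the flag it is silently accepted:

    server {
        listen 127.0.0.1:12345 backlog=511;
        server_name foo;
        proxy_pass 192.0.2.10:12345;
    }

    server {
        listen 127.0.0.1:12345 backlog=1024;
        server_name bar;
        proxy_pass 192.0.2.11:12345;
    }
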
> > continue; > > } > > > > #if (NGX_HAVE_TCP_FASTOPEN) > > if (ngx_strncmp(value[i].data, "fastopen=", 9) == 0) { > > - ls->fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9); > > - ls->bind = 1; > > + lsopt.fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9); > > + lsopt.bind = 1; > > > > - if (ls->fastopen == NGX_ERROR) { > > + if (lsopt.fastopen == NGX_ERROR) { > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > "invalid fastopen \"%V\"", &value[i]); > > return NGX_CONF_ERROR; > > @@ -730,10 +938,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > #endif > > > > if (ngx_strncmp(value[i].data, "backlog=", 8) == 0) { > > - ls->backlog = ngx_atoi(value[i].data + 8, value[i].len - 8); > > - ls->bind = 1; > > + lsopt.backlog = ngx_atoi(value[i].data + 8, value[i].len - 8); > > + lsopt.bind = 1; > > > > - if (ls->backlog == NGX_ERROR || ls->backlog == 0) { > > + if (lsopt.backlog == NGX_ERROR || lsopt.backlog == 0) { > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > "invalid backlog \"%V\"", &value[i]); > > return NGX_CONF_ERROR; > > @@ -748,10 +956,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > size.len = value[i].len - 7; > > size.data = value[i].data + 7; > > > > - ls->rcvbuf = ngx_parse_size(&size); > > - ls->bind = 1; > > + lsopt.rcvbuf = ngx_parse_size(&size); > > + lsopt.bind = 1; > > > > - if (ls->rcvbuf == NGX_ERROR) { > > + if (lsopt.rcvbuf == NGX_ERROR) { > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > "invalid rcvbuf \"%V\"", &value[i]); > > return NGX_CONF_ERROR; > > @@ -764,10 +972,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > size.len = value[i].len - 7; > > size.data = value[i].data + 7; > > > > - ls->sndbuf = ngx_parse_size(&size); > > - ls->bind = 1; > > + lsopt.sndbuf = ngx_parse_size(&size); > > + lsopt.bind = 1; > > > > - if (ls->sndbuf == NGX_ERROR) { > > + if (lsopt.sndbuf == NGX_ERROR) { > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > "invalid sndbuf \"%V\"", &value[i]); > > return NGX_CONF_ERROR; > > @@ -779,10 +987,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) { > > #if (NGX_HAVE_INET6 && defined IPV6_V6ONLY) > > if (ngx_strcmp(&value[i].data[10], "n") == 0) { > > - ls->ipv6only = 1; > > + lsopt.ipv6only = 1; > > > > } else if (ngx_strcmp(&value[i].data[10], "ff") == 0) { > > - ls->ipv6only = 0; > > + lsopt.ipv6only = 0; > > > > } else { > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > @@ -791,7 +999,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > return NGX_CONF_ERROR; > > } > > > > - ls->bind = 1; > > + lsopt.bind = 1; > > continue; > > #else > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > @@ -803,8 +1011,8 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > > > if (ngx_strcmp(value[i].data, "reuseport") == 0) { > > #if (NGX_HAVE_REUSEPORT) > > - ls->reuseport = 1; > > - ls->bind = 1; > > + lsopt.reuseport = 1; > > + lsopt.bind = 1; > > #else > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > "reuseport is not supported " > > @@ -824,7 +1032,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > sslcf->file = cf->conf_file->file.name.data; > > sslcf->line = cf->conf_file->line; > > > > - ls->ssl = 1; > > + lsopt.ssl = 1; > > > Note that your change keeps sslcf->listen, > checked in ngx_stream_core_merge_srv_conf(). > > Currently, without virtual servers support, this is perfectly > fine because if you didn't specify the listen ssl parameter, > then no need to create ssl context and check/load certificates. 
> With virtual servers support though, sslcf->listen makes harm, > because you cannot specify non-default servers with ssl > parameter, but without certificates, which is pretty valid: > > server { > listen 127.0.0.1:8091 ssl; > server_name foo; > return FOO; > > ssl_certificate_key localhost.key; > ssl_certificate localhost.crt; > } > > server { > listen 127.0.0.1:8091 ssl; > server_name bar; > return BAR; > } > > nginx: [emerg] no "ssl_certificate" is defined for the "listen ... ssl" directive > > So it should be removed and replaced with appropriate certificate checks > in ngx_stream_core_merge_srv_conf(). I propose to take the checks from > ngx_http_core_merge_srv_conf(). Additionally, this will buy us the missing > "ssl_reject_handshake" functionality, to selectively disable SSL handshakes > in virtual servers based on SNI. > > > > > continue; > > #else > > @@ -838,10 +1046,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > if (ngx_strncmp(value[i].data, "so_keepalive=", 13) == 0) { > > > > if (ngx_strcmp(&value[i].data[13], "on") == 0) { > > - ls->so_keepalive = 1; > > + lsopt.so_keepalive = 1; > > > > } else if (ngx_strcmp(&value[i].data[13], "off") == 0) { > > - ls->so_keepalive = 2; > > + lsopt.so_keepalive = 2; > > > > } else { > > > > @@ -860,8 +1068,8 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > if (p > s.data) { > > s.len = p - s.data; > > > > - ls->tcp_keepidle = ngx_parse_time(&s, 1); > > - if (ls->tcp_keepidle == (time_t) NGX_ERROR) { > > + lsopt.tcp_keepidle = ngx_parse_time(&s, 1); > > + if (lsopt.tcp_keepidle == (time_t) NGX_ERROR) { > > goto invalid_so_keepalive; > > } > > } > > @@ -876,8 +1084,8 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > if (p > s.data) { > > s.len = p - s.data; > > > > - ls->tcp_keepintvl = ngx_parse_time(&s, 1); > > - if (ls->tcp_keepintvl == (time_t) NGX_ERROR) { > > + lsopt.tcp_keepintvl = ngx_parse_time(&s, 1); > > + if (lsopt.tcp_keepintvl == (time_t) NGX_ERROR) { > > goto invalid_so_keepalive; > > } > > } > > @@ -887,19 +1095,19 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > if (s.data < end) { > > s.len = end - s.data; > > > > - ls->tcp_keepcnt = ngx_atoi(s.data, s.len); > > - if (ls->tcp_keepcnt == NGX_ERROR) { > > + lsopt.tcp_keepcnt = ngx_atoi(s.data, s.len); > > + if (lsopt.tcp_keepcnt == NGX_ERROR) { > > goto invalid_so_keepalive; > > } > > } > > > > - if (ls->tcp_keepidle == 0 && ls->tcp_keepintvl == 0 > > - && ls->tcp_keepcnt == 0) > > + if (lsopt.tcp_keepidle == 0 && lsopt.tcp_keepintvl == 0 > > + && lsopt.tcp_keepcnt == 0) > > { > > goto invalid_so_keepalive; > > } > > > > - ls->so_keepalive = 1; > > + lsopt.so_keepalive = 1; > > > > #else > > > > @@ -911,7 +1119,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > #endif > > } > > > > - ls->bind = 1; > > + lsopt.bind = 1; > > > > continue; > > > > @@ -926,7 +1134,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > } > > > > if (ngx_strcmp(value[i].data, "proxy_protocol") == 0) { > > - ls->proxy_protocol = 1; > > + lsopt.proxy_protocol = 1; > > continue; > > } > > > > @@ -935,27 +1143,27 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > return NGX_CONF_ERROR; > > } > > > > - if (ls->type == SOCK_DGRAM) { > > + if (lsopt.type == SOCK_DGRAM) { > > if (backlog) { > > return "\"backlog\" parameter is incompatible with \"udp\""; > > } > > > > #if (NGX_STREAM_SSL) > > - if (ls->ssl) { > > + if (lsopt.ssl) { > > return "\"ssl\" parameter is incompatible with \"udp\""; > > } > > #endif > > > > - if (ls->so_keepalive) { > > + if (lsopt.so_keepalive) { > > return "\"so_keepalive\" parameter is 
incompatible with \"udp\""; > > } > > > > - if (ls->proxy_protocol) { > > + if (lsopt.proxy_protocol) { > > return "\"proxy_protocol\" parameter is incompatible with \"udp\""; > > } > > > > #if (NGX_HAVE_TCP_FASTOPEN) > > - if (ls->fastopen != -1) { > > + if (lsopt.fastopen != -1) { > > return "\"fastopen\" parameter is incompatible with \"udp\""; > > } > > #endif > > @@ -972,40 +1180,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > } > > } > > > > - if (n != 0) { > > - nls = ngx_array_push(&cmcf->listen); > > - if (nls == NULL) { > > - return NGX_CONF_ERROR; > > - } > > - > > - *nls = *ls; > > - > > - } else { > > - nls = ls; > > - } > > - > > - nls->sockaddr = u.addrs[n].sockaddr; > > - nls->socklen = u.addrs[n].socklen; > > - nls->addr_text = u.addrs[n].name; > > - nls->wildcard = ngx_inet_wildcard(nls->sockaddr); > > + lsopt.sockaddr = u.addrs[n].sockaddr; > > + lsopt.socklen = u.addrs[n].socklen; > > + lsopt.addr_text = u.addrs[n].name; > > + lsopt.wildcard = ngx_inet_wildcard(lsopt.sockaddr); > > > > - als = cmcf->listen.elts; > > - > > - for (i = 0; i < cmcf->listen.nelts - 1; i++) { > > - if (nls->type != als[i].type) { > > - continue; > > - } > > - > > - if (ngx_cmp_sockaddr(als[i].sockaddr, als[i].socklen, > > - nls->sockaddr, nls->socklen, 1) > > - != NGX_OK) > > - { > > - continue; > > - } > > - > > - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > - "duplicate \"%V\" address and port pair", > > - &nls->addr_text); > > + if (ngx_stream_add_listen(cf, cscf, &lsopt) != NGX_OK) { > > return NGX_CONF_ERROR; > > } > > > > @@ -1018,6 +1198,107 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > > > > > static char * > > +ngx_stream_core_server_name(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > > +{ > > + ngx_stream_core_srv_conf_t *cscf = conf; > > + > > + u_char ch; > > + ngx_str_t *value; > > + ngx_uint_t i; > > + ngx_stream_server_name_t *sn; > > wrong indentation > > > + > > + value = cf->args->elts; > > + > > + for (i = 1; i < cf->args->nelts; i++) { > > + > > + ch = value[i].data[0]; > > + > > + if ((ch == '*' && (value[i].len < 3 || value[i].data[1] != '.')) > > + || (ch == '.' 
&& value[i].len < 2)) > > + { > > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > + "server name \"%V\" is invalid", &value[i]); > > + return NGX_CONF_ERROR; > > + } > > + > > + if (ngx_strchr(value[i].data, '/')) { > > + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, > > + "server name \"%V\" has suspicious symbols", > > + &value[i]); > > + } > > + > > + sn = ngx_array_push(&cscf->server_names); > > + if (sn == NULL) { > > + return NGX_CONF_ERROR; > > + } > > + > > +#if (NGX_PCRE) > > + sn->regex = NULL; > > +#endif > > + sn->server = cscf; > > + > > + if (ngx_strcasecmp(value[i].data, (u_char *) "$hostname") == 0) { > > + sn->name = cf->cycle->hostname; > > + > > + } else { > > + sn->name = value[i]; > > + } > > + > > + if (value[i].data[0] != '~') { > > + ngx_strlow(sn->name.data, sn->name.data, sn->name.len); > > + continue; > > + } > > + > > +#if (NGX_PCRE) > > + { > > + u_char *p; > > + ngx_regex_compile_t rc; > > + u_char errstr[NGX_MAX_CONF_ERRSTR]; > > + > > + if (value[i].len == 1) { > > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > + "empty regex in server name \"%V\"", &value[i]); > > + return NGX_CONF_ERROR; > > + } > > + > > + value[i].len--; > > + value[i].data++; > > + > > + ngx_memzero(&rc, sizeof(ngx_regex_compile_t)); > > + > > + rc.pattern = value[i]; > > + rc.err.len = NGX_MAX_CONF_ERRSTR; > > + rc.err.data = errstr; > > + > > + for (p = value[i].data; p < value[i].data + value[i].len; p++) { > > + if (*p >= 'A' && *p <= 'Z') { > > + rc.options = NGX_REGEX_CASELESS; > > + break; > > + } > > + } > > + > > + sn->regex = ngx_stream_regex_compile(cf, &rc); > > + if (sn->regex == NULL) { > > + return NGX_CONF_ERROR; > > + } > > + > > + sn->name = value[i]; > > + cscf->captures = (rc.captures > 0); > > + } > > +#else > > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > + "using regex \"%V\" " > > + "requires PCRE library", &value[i]); > > + > > + return NGX_CONF_ERROR; > > +#endif > > + } > > + > > + return NGX_CONF_OK; > > +} > > + > > + > > +static char * > > ngx_stream_core_resolver(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > > { > > ngx_stream_core_srv_conf_t *cscf = conf; > > diff --git a/src/stream/ngx_stream_handler.c b/src/stream/ngx_stream_handler.c > > --- a/src/stream/ngx_stream_handler.c > > +++ b/src/stream/ngx_stream_handler.c > > @@ -30,6 +30,7 @@ ngx_stream_init_connection(ngx_connectio > > struct sockaddr_in *sin; > > ngx_stream_in_addr_t *addr; > > ngx_stream_session_t *s; > > + ngx_stream_conf_ctx_t *ctx; > > ngx_stream_addr_conf_t *addr_conf; > > #if (NGX_HAVE_INET6) > > struct sockaddr_in6 *sin6; > > @@ -121,9 +122,12 @@ ngx_stream_init_connection(ngx_connectio > > return; > > } > > > > + ctx = addr_conf->default_server->ctx; > > + > > s->signature = NGX_STREAM_MODULE; > > - s->main_conf = addr_conf->ctx->main_conf; > > - s->srv_conf = addr_conf->ctx->srv_conf; > > + s->main_conf = ctx->main_conf; > > + s->srv_conf = ctx->srv_conf; > > + s->virtual_names = addr_conf->virtual_names; > > > > #if (NGX_STREAM_SSL) > > s->ssl = addr_conf->ssl; > > @@ -144,7 +148,7 @@ ngx_stream_init_connection(ngx_connectio > > > > ngx_log_error(NGX_LOG_INFO, c->log, 0, "*%uA %sclient %*s connected to %V", > > c->number, c->type == SOCK_DGRAM ? 
"udp " : "", > > - len, text, &addr_conf->addr_text); > > + len, text, &c->listening->addr_text); > > > > c->log->connection = c->number; > > c->log->handler = ngx_stream_log_error; > > diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c > > --- a/src/stream/ngx_stream_ssl_module.c > > +++ b/src/stream/ngx_stream_ssl_module.c > > @@ -458,7 +458,104 @@ ngx_stream_ssl_handshake_handler(ngx_con > > static int > > ngx_stream_ssl_servername(ngx_ssl_conn_t *ssl_conn, int *ad, void *arg) > > { > > + ngx_int_t rc; > > + ngx_str_t host; > > + const char *servername; > > + ngx_connection_t *c; > > + ngx_stream_session_t *s; > > + ngx_stream_ssl_conf_t *sscf; > > Note that stream (as well as mail) consistently uses sslcf naming > for keeping ssl configuration, unlike in http. Probably it makes > sense for a separate sweeping change with renaming sslcf to sscf. Agree. Also, it makes sense to rename ngx_stream_ssl_conf_t to ngx_stream_ssl_srv_conf_t. > > + ngx_stream_core_srv_conf_t *cscf; > > + > > + c = ngx_ssl_get_connection(ssl_conn); > > + > > + if (c->ssl->handshaked) { > > + *ad = SSL_AD_NO_RENEGOTIATION; > > + return SSL_TLSEXT_ERR_ALERT_FATAL; > > + } > > + > > + s = c->data; > > + > > + servername = SSL_get_servername(ssl_conn, TLSEXT_NAMETYPE_host_name); > > + > > + if (servername == NULL) { > > + ngx_log_debug0(NGX_LOG_DEBUG_STREAM, c->log, 0, > > + "SSL server name: null"); > > + goto done; > > + } > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, > > + "SSL server name: \"%s\"", servername); > > + > > + host.len = ngx_strlen(servername); > > + > > + if (host.len == 0) { > > + goto done; > > + } > > + > > + host.data = (u_char *) servername; > > + > > + rc = ngx_stream_validate_host(&host, c->pool, 1); > > + > > + if (rc == NGX_ERROR) { > > + goto error; > > + } > > + > > + if (rc == NGX_DECLINED) { > > + goto done; > > + } > > + > > + rc = ngx_stream_find_virtual_server(s, &host, &cscf); > > + > > + if (rc == NGX_ERROR) { > > + goto error; > > + } > > + > > + if (rc == NGX_DECLINED) { > > + goto done; > > + } > > + > > + s->srv_conf = cscf->ctx->srv_conf; > > + > > + sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); > > Looks like a copy-paste error from http, where connection log > is set based on the location configuration. > Here it just makes sense to move setting sscf closer to its use. > > > + > > + ngx_set_connection_log(c, cscf->error_log); > > + > > + if (sscf->ssl.ctx) { > > + if (SSL_set_SSL_CTX(ssl_conn, sscf->ssl.ctx) == NULL) { > > + goto error; > > + } > > + > > + /* > > + * SSL_set_SSL_CTX() only changes certs as of 1.0.0d > > + * adjust other things we care about > > + */ > > + > > + SSL_set_verify(ssl_conn, SSL_CTX_get_verify_mode(sscf->ssl.ctx), > > + SSL_CTX_get_verify_callback(sscf->ssl.ctx)); > > + > > + SSL_set_verify_depth(ssl_conn, SSL_CTX_get_verify_depth(sscf->ssl.ctx)); > > + > > +#if OPENSSL_VERSION_NUMBER >= 0x009080dfL > > + /* only in 0.9.8m+ */ > > + SSL_clear_options(ssl_conn, SSL_get_options(ssl_conn) & > > + ~SSL_CTX_get_options(sscf->ssl.ctx)); > > +#endif > > + > > + SSL_set_options(ssl_conn, SSL_CTX_get_options(sscf->ssl.ctx)); > > + > > +#ifdef SSL_OP_NO_RENEGOTIATION > > + SSL_set_options(ssl_conn, SSL_OP_NO_RENEGOTIATION); > > +#endif > > + } > > + > > +done: > > + > > The reject_handshake functionality is missing there, > it could be added in this change or separately > (see below for a proposed addendum). 
> > > return SSL_TLSEXT_ERR_OK; > > + > > +error: > > + > > + *ad = SSL_AD_INTERNAL_ERROR; > > + return SSL_TLSEXT_ERR_ALERT_FATAL; > > } > > > > #endif > > diff --git a/src/stream/ngx_stream_ssl_preread_module.c b/src/stream/ngx_stream_ssl_preread_module.c > > --- a/src/stream/ngx_stream_ssl_preread_module.c > > +++ b/src/stream/ngx_stream_ssl_preread_module.c > > @@ -33,6 +33,8 @@ typedef struct { > > static ngx_int_t ngx_stream_ssl_preread_handler(ngx_stream_session_t *s); > > static ngx_int_t ngx_stream_ssl_preread_parse_record( > > ngx_stream_ssl_preread_ctx_t *ctx, u_char *pos, u_char *last); > > +static ngx_int_t ngx_stream_ssl_preread_servername(ngx_stream_session_t *s, > > + ngx_str_t *servername); > > static ngx_int_t ngx_stream_ssl_preread_protocol_variable( > > ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); > > static ngx_int_t ngx_stream_ssl_preread_server_name_variable( > > @@ -187,6 +189,10 @@ ngx_stream_ssl_preread_handler(ngx_strea > > return NGX_DECLINED; > > } > > > > + if (rc == NGX_OK) { > > + return ngx_stream_ssl_preread_servername(s, &ctx->host); > > + } > > + > > if (rc != NGX_AGAIN) { > > return rc; > > } > > @@ -404,9 +410,6 @@ ngx_stream_ssl_preread_parse_record(ngx_ > > case sw_sni_host: > > ctx->host.len = (p[1] << 8) + p[2]; > > > > - ngx_log_debug1(NGX_LOG_DEBUG_STREAM, ctx->log, 0, > > - "ssl preread: SNI hostname \"%V\"", &ctx->host); > > - > > state = sw_ext; > > dst = NULL; > > size = ext; > > @@ -497,6 +500,56 @@ ngx_stream_ssl_preread_parse_record(ngx_ > > > > > > static ngx_int_t > > +ngx_stream_ssl_preread_servername(ngx_stream_session_t *s, > > + ngx_str_t *servername) > > +{ > > + ngx_int_t rc; > > + ngx_str_t host; > > + ngx_connection_t *c; > > + ngx_stream_core_srv_conf_t *cscf; > > + > > + c = s->connection; > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, > > + "SSL preread server name: \"%V\"", servername); > > + > > + if (servername->len == 0) { > > + return NGX_OK; > > + } > > + > > + host = *servername; > > + > > + rc = ngx_stream_validate_host(&host, c->pool, 1); > > + > > + if (rc == NGX_ERROR) { > > + return NGX_ERROR; > > + } > > + > > + if (rc == NGX_DECLINED) { > > + return NGX_OK; > > + } > > + > > + rc = ngx_stream_find_virtual_server(s, &host, &cscf); > > + > > + if (rc == NGX_ERROR) { > > + return NGX_ERROR; > > + } > > + > > + if (rc == NGX_DECLINED) { > > + return NGX_OK; > > + } > > + > > + s->srv_conf = cscf->ctx->srv_conf; > > + > > + cscf = ngx_stream_get_module_srv_conf(s, ngx_stream_core_module); > > The server configuration is already obtained in > ngx_stream_find_virtual_server(), no need to do this again. 
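For completeness, the preread-based selection is expected to be used roughly like this (addresses and names are illustrative; ssl_preread presumably has to be enabled in the first server, which is the default one for the address, since the preread phase runs before the virtual server is chosen):

    server {
        listen 127.0.0.1:12345;
        ssl_preread on;
        server_name foo.example.com;
        proxy_pass 192.0.2.1:443;
    }

    server {
        listen 127.0.0.1:12345;
        server_name bar.example.com;
        proxy_pass 192.0.2.2:443;
    }
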
> > > + > > + ngx_set_connection_log(c, cscf->error_log); > > + > > + return NGX_OK; > > +} > > + > > + > > +static ngx_int_t > > ngx_stream_ssl_preread_protocol_variable(ngx_stream_session_t *s, > > ngx_variable_value_t *v, uintptr_t data) > > { > > Together, this makes the following update on top of your change: > > diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c > --- a/src/stream/ngx_stream.c > +++ b/src/stream/ngx_stream.c > @@ -92,7 +92,7 @@ static char * > ngx_stream_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > { > char *rv; > - ngx_uint_t m, mi, s; > + ngx_uint_t mi, m, s; > ngx_conf_t pcf; > ngx_stream_module_t *module; > ngx_stream_conf_ctx_t *ctx; > @@ -918,7 +918,7 @@ ngx_stream_init_listening(ngx_conf_t *cf > { > ngx_uint_t i, last, bind_wildcard; > ngx_listening_t *ls; > - ngx_stream_port_t *hport; > + ngx_stream_port_t *stport; > ngx_stream_conf_addr_t *addr; > > addr = port->addrs.elts; > @@ -953,26 +953,26 @@ ngx_stream_init_listening(ngx_conf_t *cf > return NGX_ERROR; > } > > - hport = ngx_pcalloc(cf->pool, sizeof(ngx_stream_port_t)); > - if (hport == NULL) { > + stport = ngx_palloc(cf->pool, sizeof(ngx_stream_port_t)); You lost "c" in ngx_pcalloc() for no apparent reason. > + if (stport == NULL) { > return NGX_ERROR; > } > > - ls->servers = hport; > + ls->servers = stport; > > - hport->naddrs = i + 1; > + stport->naddrs = i + 1; > > switch (ls->sockaddr->sa_family) { > > #if (NGX_HAVE_INET6) > case AF_INET6: > - if (ngx_stream_add_addrs6(cf, hport, addr) != NGX_OK) { > + if (ngx_stream_add_addrs6(cf, stport, addr) != NGX_OK) { > return NGX_ERROR; > } > break; > #endif > default: /* AF_INET */ > - if (ngx_stream_add_addrs(cf, hport, addr) != NGX_OK) { > + if (ngx_stream_add_addrs(cf, stport, addr) != NGX_OK) { > return NGX_ERROR; > } > break; > @@ -1001,26 +1001,14 @@ ngx_stream_add_listening(ngx_conf_t *cf, > > ls->handler = ngx_stream_init_connection; > > + ls->pool_size = 256; > + > cscf = addr->default_server; > - ls->pool_size = 256; > > ls->logp = cscf->error_log; > ls->log.data = &ls->addr_text; > ls->log.handler = ngx_accept_log_error; > > -#if (NGX_WIN32) > - { > - ngx_iocp_conf_t *iocpcf = NULL; > - > - if (ngx_get_conf(cf->cycle->conf_ctx, ngx_events_module)) { > - iocpcf = ngx_event_get_conf(cf->cycle->conf_ctx, ngx_iocp_module); > - } > - if (iocpcf && iocpcf->acceptex_read) { > - ls->post_accept_buffer_size = cscf->client_header_buffer_size; > - } > - } > -#endif > - > ls->type = addr->opt.type; > ls->backlog = addr->opt.backlog; > ls->rcvbuf = addr->opt.rcvbuf; > @@ -1033,22 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, > ls->keepcnt = addr->opt.tcp_keepcnt; > #endif > > -#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > - ls->accept_filter = addr->opt.accept_filter; > -#endif > - > -#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) > - ls->deferred_accept = addr->opt.deferred_accept; > -#endif > - > #if (NGX_HAVE_INET6) > ls->ipv6only = addr->opt.ipv6only; > #endif > > -#if (NGX_HAVE_SETFIB) > - ls->setfib = addr->opt.setfib; > -#endif > - > #if (NGX_HAVE_TCP_FASTOPEN) > ls->fastopen = addr->opt.fastopen; > #endif > @@ -1064,7 +1040,7 @@ ngx_stream_add_listening(ngx_conf_t *cf, > > > static ngx_int_t > -ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *hport, > +ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *stport, > ngx_stream_conf_addr_t *addr) > { > ngx_uint_t i; > @@ -1072,15 +1048,15 @@ ngx_stream_add_addrs(ngx_conf_t *cf, ngx > ngx_stream_in_addr_t *addrs; > 
ngx_stream_virtual_names_t *vn; > > - hport->addrs = ngx_pcalloc(cf->pool, > - hport->naddrs * sizeof(ngx_stream_in_addr_t)); > - if (hport->addrs == NULL) { > + stport->addrs = ngx_pcalloc(cf->pool, > + stport->naddrs * sizeof(ngx_stream_in_addr_t)); > + if (stport->addrs == NULL) { > return NGX_ERROR; > } > > - addrs = hport->addrs; > + addrs = stport->addrs; > > - for (i = 0; i < hport->naddrs; i++) { > + for (i = 0; i < stport->naddrs; i++) { > > sin = (struct sockaddr_in *) addr[i].opt.sockaddr; > addrs[i].addr = sin->sin_addr.s_addr; > @@ -1126,7 +1102,7 @@ ngx_stream_add_addrs(ngx_conf_t *cf, ngx > #if (NGX_HAVE_INET6) > > static ngx_int_t > -ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *hport, > +ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *stport, > ngx_stream_conf_addr_t *addr) > { > ngx_uint_t i; > @@ -1134,15 +1110,15 @@ ngx_stream_add_addrs6(ngx_conf_t *cf, ng > ngx_stream_in6_addr_t *addrs6; > ngx_stream_virtual_names_t *vn; > > - hport->addrs = ngx_pcalloc(cf->pool, > - hport->naddrs * sizeof(ngx_stream_in6_addr_t)); > - if (hport->addrs == NULL) { > + stport->addrs = ngx_pcalloc(cf->pool, > + stport->naddrs * sizeof(ngx_stream_in6_addr_t)); > + if (stport->addrs == NULL) { > return NGX_ERROR; > } > > - addrs6 = hport->addrs; > + addrs6 = stport->addrs; > > - for (i = 0; i < hport->naddrs; i++) { > + for (i = 0; i < stport->naddrs; i++) { > > sin6 = (struct sockaddr_in6 *) addr[i].opt.sockaddr; > addrs6[i].addr6 = sin6->sin6_addr; > diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h > --- a/src/stream/ngx_stream.h > +++ b/src/stream/ngx_stream.h > @@ -53,30 +53,21 @@ typedef struct { > #if (NGX_HAVE_INET6) > unsigned ipv6only:1; > #endif > - unsigned deferred_accept:1; > unsigned reuseport:1; > unsigned so_keepalive:2; > unsigned proxy_protocol:1; > - > - int backlog; > - int rcvbuf; > - int sndbuf; > - int type; > -#if (NGX_HAVE_SETFIB) > - int setfib; > -#endif > -#if (NGX_HAVE_TCP_FASTOPEN) > - int fastopen; > -#endif > #if (NGX_HAVE_KEEPALIVE_TUNABLE) > int tcp_keepidle; > int tcp_keepintvl; > int tcp_keepcnt; > #endif > - > -#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > - char *accept_filter; > + int backlog; > + int rcvbuf; > + int sndbuf; > +#if (NGX_HAVE_TCP_FASTOPEN) > + int fastopen; > #endif > + int type; > } ngx_stream_listen_opt_t; > > > @@ -198,7 +189,6 @@ typedef struct { > > ngx_stream_virtual_names_t *virtual_names; > > - ngx_str_t addr_text; > unsigned ssl:1; > unsigned proxy_protocol:1; > } ngx_stream_addr_conf_t; > @@ -254,7 +244,7 @@ typedef struct { > /* the default server configuration for this address:port */ > ngx_stream_core_srv_conf_t *default_server; > ngx_array_t servers; > - /* array of ngx_stream_core_srv_conf_t */ > + /* array of ngx_stream_core_srv_conf_t */ > } ngx_stream_conf_addr_t; > > > @@ -350,6 +340,8 @@ typedef struct { > > ngx_int_t ngx_stream_add_listen(ngx_conf_t *cf, > ngx_stream_core_srv_conf_t *cscf, ngx_stream_listen_opt_t *lsopt); > + > + Not sure if two lines look good here. I'd leave just one like below. 
> void ngx_stream_core_run_phases(ngx_stream_session_t *s); > ngx_int_t ngx_stream_core_generic_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph); > @@ -357,11 +349,11 @@ ngx_int_t ngx_stream_core_preread_phase( > ngx_stream_phase_handler_t *ph); > ngx_int_t ngx_stream_core_content_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph); > + > +ngx_int_t ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, > + ngx_uint_t alloc); > ngx_int_t ngx_stream_find_virtual_server(ngx_stream_session_t *s, > ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp); > -ngx_int_t ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, > - ngx_uint_t alloc); > - > > void ngx_stream_init_connection(ngx_connection_t *c); > void ngx_stream_session_handler(ngx_event_t *rev); > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -437,57 +437,6 @@ ngx_stream_core_content_phase(ngx_stream > > > ngx_int_t > -ngx_stream_find_virtual_server(ngx_stream_session_t *s, > - ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp) > -{ > - ngx_stream_core_srv_conf_t *cscf; > - > - if (s->virtual_names == NULL) { > - return NGX_DECLINED; > - } > - > - cscf = ngx_hash_find_combined(&s->virtual_names->names, > - ngx_hash_key(host->data, host->len), > - host->data, host->len); > - > - if (cscf) { > - *cscfp = cscf; > - return NGX_OK; > - } > - > -#if (NGX_PCRE) > - > - if (host->len && s->virtual_names->nregex) { > - ngx_int_t n; > - ngx_uint_t i; > - ngx_stream_server_name_t *sn; > - > - sn = s->virtual_names->regex; > - > - for (i = 0; i < s->virtual_names->nregex; i++) { > - > - n = ngx_stream_regex_exec(s, sn[i].regex, host); > - > - if (n == NGX_DECLINED) { > - continue; > - } > - > - if (n == NGX_OK) { > - *cscfp = sn[i].server; > - return NGX_OK; > - } > - > - return NGX_ERROR; > - } > - } > - > -#endif /* NGX_PCRE */ > - > - return NGX_DECLINED; > -} > - > - > -ngx_int_t > ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, ngx_uint_t alloc) > { > u_char *h, ch; > @@ -579,6 +528,57 @@ ngx_stream_validate_host(ngx_str_t *host > } > > > +ngx_int_t > +ngx_stream_find_virtual_server(ngx_stream_session_t *s, > + ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp) > +{ > + ngx_stream_core_srv_conf_t *cscf; > + > + if (s->virtual_names == NULL) { > + return NGX_DECLINED; > + } > + > + cscf = ngx_hash_find_combined(&s->virtual_names->names, > + ngx_hash_key(host->data, host->len), > + host->data, host->len); > + > + if (cscf) { > + *cscfp = cscf; > + return NGX_OK; > + } > + > +#if (NGX_PCRE) > + > + if (host->len && s->virtual_names->nregex) { > + ngx_int_t n; > + ngx_uint_t i; > + ngx_stream_server_name_t *sn; > + > + sn = s->virtual_names->regex; > + > + for (i = 0; i < s->virtual_names->nregex; i++) { > + > + n = ngx_stream_regex_exec(s, sn[i].regex, host); > + > + if (n == NGX_DECLINED) { > + continue; > + } > + > + if (n == NGX_OK) { > + *cscfp = sn[i].server; > + return NGX_OK; > + } > + > + return NGX_ERROR; > + } > + } > + > +#endif /* NGX_PCRE */ > + > + return NGX_DECLINED; > +} > + > + > static ngx_int_t > ngx_stream_core_preconfiguration(ngx_conf_t *cf) > { > @@ -625,7 +625,6 @@ ngx_stream_core_init_main_conf(ngx_conf_ > cmcf->server_names_hash_bucket_size = > ngx_align(cmcf->server_names_hash_bucket_size, ngx_cacheline_size); > > - In HTTP we do have this empty line. 
> ngx_conf_init_uint_value(cmcf->variables_hash_max_size, 1024); > ngx_conf_init_uint_value(cmcf->variables_hash_bucket_size, 64); > > @@ -864,7 +863,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > ngx_str_t *value, size; > ngx_url_t u; > - ngx_uint_t i, n, backlog; > + ngx_uint_t n, i, backlog; > ngx_stream_listen_opt_t lsopt; > > cscf->listen = 1; > @@ -903,9 +902,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > for (i = 2; i < cf->args->nelts; i++) { > > - if (ngx_strcmp(value[i].data, "default_server") == 0 > - || ngx_strcmp(value[i].data, "default") == 0) > - { > + if (ngx_strcmp(value[i].data, "default_server") == 0) { > lsopt.default_server = 1; > continue; > } > @@ -918,6 +915,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > #endif > > if (ngx_strcmp(value[i].data, "bind") == 0) { > + lsopt.set = 1; > lsopt.bind = 1; > continue; > } > @@ -925,6 +923,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > #if (NGX_HAVE_TCP_FASTOPEN) > if (ngx_strncmp(value[i].data, "fastopen=", 9) == 0) { > lsopt.fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9); > + lsopt.set = 1; > lsopt.bind = 1; > > if (lsopt.fastopen == NGX_ERROR) { > @@ -939,6 +938,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > if (ngx_strncmp(value[i].data, "backlog=", 8) == 0) { > lsopt.backlog = ngx_atoi(value[i].data + 8, value[i].len - 8); > + lsopt.set = 1; > lsopt.bind = 1; > > if (lsopt.backlog == NGX_ERROR || lsopt.backlog == 0) { > @@ -957,6 +957,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > size.data = value[i].data + 7; > > lsopt.rcvbuf = ngx_parse_size(&size); > + lsopt.set = 1; > lsopt.bind = 1; > > if (lsopt.rcvbuf == NGX_ERROR) { > @@ -973,6 +974,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > size.data = value[i].data + 7; > > lsopt.sndbuf = ngx_parse_size(&size); > + lsopt.set = 1; > lsopt.bind = 1; > > if (lsopt.sndbuf == NGX_ERROR) { > @@ -999,11 +1001,13 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > return NGX_CONF_ERROR; > } > > + lsopt.set = 1; > lsopt.bind = 1; > + > continue; > #else > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > - "bind ipv6only is not supported " > + "ipv6only is not supported " Let's update text in a separate patch. > "on this platform"); > return NGX_CONF_ERROR; > #endif > @@ -1012,6 +1016,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > if (ngx_strcmp(value[i].data, "reuseport") == 0) { > #if (NGX_HAVE_REUSEPORT) > lsopt.reuseport = 1; > + lsopt.set = 1; > lsopt.bind = 1; > #else > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > @@ -1023,17 +1028,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > > if (ngx_strcmp(value[i].data, "ssl") == 0) { > #if (NGX_STREAM_SSL) > - ngx_stream_ssl_conf_t *sslcf; > - > - sslcf = ngx_stream_conf_get_module_srv_conf(cf, > - ngx_stream_ssl_module); > - > - sslcf->listen = 1; > - sslcf->file = cf->conf_file->file.name.data; > - sslcf->line = cf->conf_file->line; > - > lsopt.ssl = 1; > - > continue; > #else > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > @@ -1119,6 +1114,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > #endif > } > > + lsopt.set = 1; > lsopt.bind = 1; > > continue; > @@ -1139,7 +1135,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n > } > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > - "the invalid \"%V\" parameter", &value[i]); > + "invalid parameter \"%V\"", &value[i]); Again, text change. 
> return NGX_CONF_ERROR; > } > > @@ -1202,9 +1198,9 @@ ngx_stream_core_server_name(ngx_conf_t * > { > ngx_stream_core_srv_conf_t *cscf = conf; > > - u_char ch; > - ngx_str_t *value; > - ngx_uint_t i; > + u_char ch; > + ngx_str_t *value; > + ngx_uint_t i; > ngx_stream_server_name_t *sn; > > value = cf->args->elts; > diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c > --- a/src/stream/ngx_stream_ssl_module.c > +++ b/src/stream/ngx_stream_ssl_module.c > @@ -219,6 +219,13 @@ static ngx_command_t ngx_stream_ssl_com > offsetof(ngx_stream_ssl_conf_t, conf_commands), > &ngx_stream_ssl_conf_command_post }, > > + { ngx_string("ssl_reject_handshake"), > + NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, > + ngx_conf_set_flag_slot, > + NGX_STREAM_SRV_CONF_OFFSET, > + offsetof(ngx_stream_ssl_conf_t, reject_handshake), > + NULL }, > + > { ngx_string("ssl_alpn"), > NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, > ngx_stream_ssl_alpn, > @@ -463,7 +470,7 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t > const char *servername; > ngx_connection_t *c; > ngx_stream_session_t *s; > - ngx_stream_ssl_conf_t *sscf; > + ngx_stream_ssl_conf_t *sslcf; Why? I though, you like "sscf" more :) I suggest that we use the better name "sscf" here. > ngx_stream_core_srv_conf_t *cscf; > > c = ngx_ssl_get_connection(ssl_conn); > @@ -516,12 +523,12 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t > > s->srv_conf = cscf->ctx->srv_conf; > > - sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); > - > ngx_set_connection_log(c, cscf->error_log); > > - if (sscf->ssl.ctx) { > - if (SSL_set_SSL_CTX(ssl_conn, sscf->ssl.ctx) == NULL) { > + sslcf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); > + > + if (sslcf->ssl.ctx) { > + if (SSL_set_SSL_CTX(ssl_conn, sslcf->ssl.ctx) == NULL) { > goto error; > } > > @@ -530,18 +537,19 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t > * adjust other things we care about > */ > > - SSL_set_verify(ssl_conn, SSL_CTX_get_verify_mode(sscf->ssl.ctx), > - SSL_CTX_get_verify_callback(sscf->ssl.ctx)); > + SSL_set_verify(ssl_conn, SSL_CTX_get_verify_mode(sslcf->ssl.ctx), > + SSL_CTX_get_verify_callback(sslcf->ssl.ctx)); > > - SSL_set_verify_depth(ssl_conn, SSL_CTX_get_verify_depth(sscf->ssl.ctx)); > + SSL_set_verify_depth(ssl_conn, > + SSL_CTX_get_verify_depth(sslcf->ssl.ctx)); > > #if OPENSSL_VERSION_NUMBER >= 0x009080dfL > /* only in 0.9.8m+ */ > SSL_clear_options(ssl_conn, SSL_get_options(ssl_conn) & > - ~SSL_CTX_get_options(sscf->ssl.ctx)); > + ~SSL_CTX_get_options(sslcf->ssl.ctx)); > #endif > > - SSL_set_options(ssl_conn, SSL_CTX_get_options(sscf->ssl.ctx)); > + SSL_set_options(ssl_conn, SSL_CTX_get_options(sslcf->ssl.ctx)); > > #ifdef SSL_OP_NO_RENEGOTIATION > SSL_set_options(ssl_conn, SSL_OP_NO_RENEGOTIATION); > @@ -550,6 +558,14 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t > > done: > > + sslcf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); > + > + if (sslcf->reject_handshake) { > + c->ssl->handshake_rejected = 1; > + *ad = SSL_AD_UNRECOGNIZED_NAME; > + return SSL_TLSEXT_ERR_ALERT_FATAL; > + } > + > return SSL_TLSEXT_ERR_OK; > > error: > @@ -752,7 +768,6 @@ ngx_stream_ssl_create_conf(ngx_conf_t *c > /* > * set by ngx_pcalloc(): > * > - * scf->listen = 0; > * scf->protocols = 0; > * scf->certificate_values = NULL; > * scf->dhparam = { 0, NULL }; > @@ -771,6 +786,7 @@ ngx_stream_ssl_create_conf(ngx_conf_t *c > scf->passwords = NGX_CONF_UNSET_PTR; > scf->conf_commands = NGX_CONF_UNSET_PTR; > scf->prefer_server_ciphers = 
NGX_CONF_UNSET; > + scf->reject_handshake = NGX_CONF_UNSET; > scf->verify = NGX_CONF_UNSET_UINT; > scf->verify_depth = NGX_CONF_UNSET_UINT; > scf->builtin_session_cache = NGX_CONF_UNSET; > @@ -799,6 +815,8 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf > ngx_conf_merge_value(conf->prefer_server_ciphers, > prev->prefer_server_ciphers, 0); > > + ngx_conf_merge_value(conf->reject_handshake, prev->reject_handshake, 0); > + > ngx_conf_merge_bitmask_value(conf->protocols, prev->protocols, > (NGX_CONF_BITMASK_SET > |NGX_SSL_TLSv1|NGX_SSL_TLSv1_1 > @@ -832,35 +850,21 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf > > conf->ssl.log = cf->log; > > - if (!conf->listen) { > - return NGX_CONF_OK; > - } > - > - if (conf->certificates == NULL) { > - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > - "no \"ssl_certificate\" is defined for " > - "the \"listen ... ssl\" directive in %s:%ui", > - conf->file, conf->line); > - return NGX_CONF_ERROR; > - } > + if (conf->certificates) { > > - if (conf->certificate_keys == NULL) { > - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > - "no \"ssl_certificate_key\" is defined for " > - "the \"listen ... ssl\" directive in %s:%ui", > - conf->file, conf->line); > - return NGX_CONF_ERROR; > - } > + if (conf->certificate_keys == NULL > + || conf->certificate_keys->nelts < conf->certificates->nelts) > + { > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > + "no \"ssl_certificate_key\" is defined " > + "for certificate \"%V\"", > + ((ngx_str_t *) conf->certificates->elts) > + + conf->certificates->nelts - 1); > + return NGX_CONF_ERROR; > + } > > - if (conf->certificate_keys->nelts < conf->certificates->nelts) { > - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > - "no \"ssl_certificate_key\" is defined " > - "for certificate \"%V\" and " > - "the \"listen ... ssl\" directive in %s:%ui", > - ((ngx_str_t *) conf->certificates->elts) > - + conf->certificates->nelts - 1, > - conf->file, conf->line); > - return NGX_CONF_ERROR; > + } else if (!conf->reject_handshake) { > + return NGX_CONF_OK; > } > > if (ngx_ssl_create(&conf->ssl, conf->protocols, NULL) != NGX_OK) { > @@ -915,7 +919,7 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf > return NGX_CONF_ERROR; > #endif > > - } else { > + } else if (conf->certificates) { > > /* configure certificates */ > > @@ -1014,6 +1018,10 @@ ngx_stream_ssl_compile_certificates(ngx_ > ngx_stream_complex_value_t *cv; > ngx_stream_compile_complex_value_t ccv; > > + if (conf->certificates == NULL) { > + return NGX_OK; > + } > + > cert = conf->certificates->elts; > key = conf->certificate_keys->elts; > nelts = conf->certificates->nelts; > @@ -1292,8 +1300,13 @@ ngx_stream_ssl_conf_command_check(ngx_co > static ngx_int_t > ngx_stream_ssl_init(ngx_conf_t *cf) > { > - ngx_stream_handler_pt *h; > - ngx_stream_core_main_conf_t *cmcf; > + ngx_uint_t a, p, s; > + ngx_stream_handler_pt *h; > + ngx_stream_ssl_conf_t *sslcf; Again, let's use sscf here. This way the diff to http is smaller. 
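To illustrate the intended ssl_reject_handshake behaviour: with the checks above, the default server for an address:port may carry no certificates at all and simply reject handshakes with an unrecognized_name alert, while name-based servers provide their own certificates. A sketch, with made-up names and certificate paths:

    # the first server for this address:port acts as the default one
    server {
        listen 127.0.0.1:8091 ssl;
        ssl_reject_handshake on;
        return unavailable;          # never reached, handshake is rejected
    }

    server {
        listen 127.0.0.1:8091 ssl;
        server_name foo.example.com;
        ssl_certificate     foo.example.com.crt;
        ssl_certificate_key foo.example.com.key;
        return FOO;
    }
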
> + ngx_stream_conf_addr_t *addr; > + ngx_stream_conf_port_t *port; > + ngx_stream_core_srv_conf_t **cscfp, *cscf; > + ngx_stream_core_main_conf_t *cmcf; > > cmcf = ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); > > @@ -1304,5 +1317,58 @@ ngx_stream_ssl_init(ngx_conf_t *cf) > > *h = ngx_stream_ssl_handler; > > + if (cmcf->ports == NULL) { > + return NGX_OK; > + } > + > + port = cmcf->ports->elts; > + for (p = 0; p < cmcf->ports->nelts; p++) { > + > + addr = port[p].addrs.elts; > + for (a = 0; a < port[p].addrs.nelts; a++) { > + > + if (!addr[a].opt.ssl) { > + continue; > + } > + > + cscf = addr[a].default_server; > + sslcf = cscf->ctx->srv_conf[ngx_stream_ssl_module.ctx_index]; > + > + if (sslcf->certificates) { > + continue; > + } > + > + if (!sslcf->reject_handshake) { > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > + "no \"ssl_certificate\" is defined for " > + "the \"listen ... ssl\" directive in %s:%ui", > + cscf->file_name, cscf->line); > + return NGX_ERROR; > + } > + > + /* > + * if no certificates are defined in the default server, > + * check all non-default server blocks > + */ > + > + cscfp = addr[a].servers.elts; > + for (s = 0; s < addr[a].servers.nelts; s++) { > + > + cscf = cscfp[s]; > + sslcf = cscf->ctx->srv_conf[ngx_stream_ssl_module.ctx_index]; > + > + if (sslcf->certificates || sslcf->reject_handshake) { > + continue; > + } > + > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > + "no \"ssl_certificate\" is defined for " > + "the \"listen ... ssl\" directive in %s:%ui", > + cscf->file_name, cscf->line); > + return NGX_ERROR; > + } > + } > + } > + > return NGX_OK; > } > diff --git a/src/stream/ngx_stream_ssl_module.h b/src/stream/ngx_stream_ssl_module.h > --- a/src/stream/ngx_stream_ssl_module.h > +++ b/src/stream/ngx_stream_ssl_module.h > @@ -18,10 +18,10 @@ typedef struct { > ngx_msec_t handshake_timeout; > > ngx_flag_t prefer_server_ciphers; > + ngx_flag_t reject_handshake; > > ngx_ssl_t ssl; > > - ngx_uint_t listen; > ngx_uint_t protocols; > > ngx_uint_t verify; > @@ -53,9 +53,6 @@ typedef struct { > > ngx_flag_t session_tickets; > ngx_array_t *session_ticket_keys; > - > - u_char *file; > - ngx_uint_t line; > } ngx_stream_ssl_conf_t; > > > diff --git a/src/stream/ngx_stream_ssl_preread_module.c b/src/stream/ngx_stream_ssl_preread_module.c > --- a/src/stream/ngx_stream_ssl_preread_module.c > +++ b/src/stream/ngx_stream_ssl_preread_module.c > @@ -541,8 +541,6 @@ ngx_stream_ssl_preread_servername(ngx_st > > s->srv_conf = cscf->ctx->srv_conf; > > - cscf = ngx_stream_get_module_srv_conf(s, ngx_stream_core_module); > - > ngx_set_connection_log(c, cscf->error_log); > > return NGX_OK; > > -- > Sergey Kandaurov > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel New patch attached. -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1702532489 -14400 # Thu Dec 14 09:41:29 2023 +0400 # Node ID a390e18b664e7ba678417ef6e40d94c37e89c2f7 # Parent 844486cdd43a32d10b78493d7e7b80e9e2239d7e Stream: virtual servers. Server name is taken either from ngx_stream_ssl_module or ngx_stream_ssl_preread_module. The change adds "default" parameter to the "listen" directive, as well as the following directives: "server_names_hash_max_size", "server_names_hash_bucket_size", "server_name" and "ssl_reject_handshake". 
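For example, a configuration using the new directives could look like this (all names, addresses and sizes below are illustrative):

    stream {
        server_names_hash_max_size     512;
        server_names_hash_bucket_size  128;

        server {
            listen 127.0.0.1:12345;
            ssl_preread on;
            server_name example.com *.example.com;
            proxy_pass 192.0.2.1:12345;
        }

        server {
            listen 127.0.0.1:12345;
            server_name ~^backend\d+\.example\.com$;
            proxy_pass 192.0.2.2:12345;
        }
    }
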
diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c --- a/src/stream/ngx_stream.c +++ b/src/stream/ngx_stream.c @@ -16,16 +16,34 @@ static ngx_int_t ngx_stream_init_phases( ngx_stream_core_main_conf_t *cmcf); static ngx_int_t ngx_stream_init_phase_handlers(ngx_conf_t *cf, ngx_stream_core_main_conf_t *cmcf); -static ngx_int_t ngx_stream_add_ports(ngx_conf_t *cf, ngx_array_t *ports, - ngx_stream_listen_t *listen); -static char *ngx_stream_optimize_servers(ngx_conf_t *cf, ngx_array_t *ports); + +static ngx_int_t ngx_stream_add_addresses(ngx_conf_t *cf, + ngx_stream_core_srv_conf_t *cscf, ngx_stream_conf_port_t *port, + ngx_stream_listen_opt_t *lsopt); +static ngx_int_t ngx_stream_add_address(ngx_conf_t *cf, + ngx_stream_core_srv_conf_t *cscf, ngx_stream_conf_port_t *port, + ngx_stream_listen_opt_t *lsopt); +static ngx_int_t ngx_stream_add_server(ngx_conf_t *cf, + ngx_stream_core_srv_conf_t *cscf, ngx_stream_conf_addr_t *addr); + +static ngx_int_t ngx_stream_optimize_servers(ngx_conf_t *cf, + ngx_stream_core_main_conf_t *cmcf, ngx_array_t *ports); +static ngx_int_t ngx_stream_server_names(ngx_conf_t *cf, + ngx_stream_core_main_conf_t *cmcf, ngx_stream_conf_addr_t *addr); +static ngx_int_t ngx_stream_cmp_conf_addrs(const void *one, const void *two); +static int ngx_libc_cdecl ngx_stream_cmp_dns_wildcards(const void *one, + const void *two); + +static ngx_int_t ngx_stream_init_listening(ngx_conf_t *cf, + ngx_stream_conf_port_t *port); +static ngx_listening_t *ngx_stream_add_listening(ngx_conf_t *cf, + ngx_stream_conf_addr_t *addr); static ngx_int_t ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *stport, ngx_stream_conf_addr_t *addr); #if (NGX_HAVE_INET6) static ngx_int_t ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *stport, ngx_stream_conf_addr_t *addr); #endif -static ngx_int_t ngx_stream_cmp_conf_addrs(const void *one, const void *two); ngx_uint_t ngx_stream_max_module; @@ -74,10 +92,8 @@ static char * ngx_stream_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { char *rv; - ngx_uint_t i, m, mi, s; + ngx_uint_t mi, m, s; ngx_conf_t pcf; - ngx_array_t ports; - ngx_stream_listen_t *listen; ngx_stream_module_t *module; ngx_stream_conf_ctx_t *ctx; ngx_stream_core_srv_conf_t **cscfp; @@ -251,21 +267,13 @@ ngx_stream_block(ngx_conf_t *cf, ngx_com return NGX_CONF_ERROR; } - if (ngx_array_init(&ports, cf->temp_pool, 4, sizeof(ngx_stream_conf_port_t)) - != NGX_OK) - { + /* optimize the lists of ports, addresses and server names */ + + if (ngx_stream_optimize_servers(cf, cmcf, cmcf->ports) != NGX_OK) { return NGX_CONF_ERROR; } - listen = cmcf->listen.elts; - - for (i = 0; i < cmcf->listen.nelts; i++) { - if (ngx_stream_add_ports(cf, &ports, &listen[i]) != NGX_OK) { - return NGX_CONF_ERROR; - } - } - - return ngx_stream_optimize_servers(cf, &ports); + return NGX_CONF_OK; } @@ -377,73 +385,295 @@ ngx_stream_init_phase_handlers(ngx_conf_ } -static ngx_int_t -ngx_stream_add_ports(ngx_conf_t *cf, ngx_array_t *ports, - ngx_stream_listen_t *listen) +ngx_int_t +ngx_stream_add_listen(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, + ngx_stream_listen_opt_t *lsopt) { - in_port_t p; - ngx_uint_t i; - struct sockaddr *sa; - ngx_stream_conf_port_t *port; - ngx_stream_conf_addr_t *addr; + in_port_t p; + ngx_uint_t i; + struct sockaddr *sa; + ngx_stream_conf_port_t *port; + ngx_stream_core_main_conf_t *cmcf; + + cmcf = ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); - sa = listen->sockaddr; + if (cmcf->ports == NULL) { + cmcf->ports = 
ngx_array_create(cf->temp_pool, 2, + sizeof(ngx_stream_conf_port_t)); + if (cmcf->ports == NULL) { + return NGX_ERROR; + } + } + + sa = lsopt->sockaddr; p = ngx_inet_get_port(sa); - port = ports->elts; - for (i = 0; i < ports->nelts; i++) { + port = cmcf->ports->elts; + for (i = 0; i < cmcf->ports->nelts; i++) { - if (p == port[i].port - && listen->type == port[i].type - && sa->sa_family == port[i].family) + if (p != port[i].port + || lsopt->type != port[i].type + || sa->sa_family != port[i].family) { - /* a port is already in the port list */ + continue; + } - port = &port[i]; - goto found; - } + /* a port is already in the port list */ + + return ngx_stream_add_addresses(cf, cscf, &port[i], lsopt); } /* add a port to the port list */ - port = ngx_array_push(ports); + port = ngx_array_push(cmcf->ports); if (port == NULL) { return NGX_ERROR; } port->family = sa->sa_family; - port->type = listen->type; + port->type = lsopt->type; port->port = p; + port->addrs.elts = NULL; + + return ngx_stream_add_address(cf, cscf, port, lsopt); +} + + +static ngx_int_t +ngx_stream_add_addresses(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, + ngx_stream_conf_port_t *port, ngx_stream_listen_opt_t *lsopt) +{ + ngx_uint_t i, default_server, proxy_protocol, + protocols, protocols_prev; + ngx_stream_conf_addr_t *addr; +#if (NGX_STREAM_SSL) + ngx_uint_t ssl; +#endif + + /* + * we cannot compare whole sockaddr struct's as kernel + * may fill some fields in inherited sockaddr struct's + */ + + addr = port->addrs.elts; + + for (i = 0; i < port->addrs.nelts; i++) { + + if (ngx_cmp_sockaddr(lsopt->sockaddr, lsopt->socklen, + addr[i].opt.sockaddr, + addr[i].opt.socklen, 0) + != NGX_OK) + { + continue; + } + + /* the address is already in the address list */ + + if (ngx_stream_add_server(cf, cscf, &addr[i]) != NGX_OK) { + return NGX_ERROR; + } + + /* preserve default_server bit during listen options overwriting */ + default_server = addr[i].opt.default_server; + + proxy_protocol = lsopt->proxy_protocol || addr[i].opt.proxy_protocol; + protocols = lsopt->proxy_protocol; + protocols_prev = addr[i].opt.proxy_protocol; + +#if (NGX_STREAM_SSL) + ssl = lsopt->ssl || addr[i].opt.ssl; + protocols |= lsopt->ssl << 1; + protocols_prev |= addr[i].opt.ssl << 1; +#endif + + if (lsopt->set) { + + if (addr[i].opt.set) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "duplicate listen options for %V", + &addr[i].opt.addr_text); + return NGX_ERROR; + } + + addr[i].opt = *lsopt; + } + + /* check the duplicate "default" server for this address:port */ - if (ngx_array_init(&port->addrs, cf->temp_pool, 2, - sizeof(ngx_stream_conf_addr_t)) - != NGX_OK) - { - return NGX_ERROR; + if (lsopt->default_server) { + + if (default_server) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "a duplicate default server for %V", + &addr[i].opt.addr_text); + return NGX_ERROR; + } + + default_server = 1; + addr[i].default_server = cscf; + } + + /* check for conflicting protocol options */ + + if ((protocols | protocols_prev) != protocols_prev) { + + /* options added */ + + if ((addr[i].opt.set && !lsopt->set) + || addr[i].protocols_changed + || (protocols | protocols_prev) != protocols) + { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "protocol options redefined for %V", + &addr[i].opt.addr_text); + } + + addr[i].protocols = protocols_prev; + addr[i].protocols_set = 1; + addr[i].protocols_changed = 1; + + } else if ((protocols_prev | protocols) != protocols) { + + /* options removed */ + + if (lsopt->set + || (addr[i].protocols_set && protocols != 
addr[i].protocols)) + { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "protocol options redefined for %V", + &addr[i].opt.addr_text); + } + + addr[i].protocols = protocols; + addr[i].protocols_set = 1; + addr[i].protocols_changed = 1; + + } else { + + /* the same options */ + + if ((lsopt->set && addr[i].protocols_changed) + || (addr[i].protocols_set && protocols != addr[i].protocols)) + { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "protocol options redefined for %V", + &addr[i].opt.addr_text); + } + + addr[i].protocols = protocols; + addr[i].protocols_set = 1; + } + + addr[i].opt.default_server = default_server; + addr[i].opt.proxy_protocol = proxy_protocol; +#if (NGX_STREAM_SSL) + addr[i].opt.ssl = ssl; +#endif + + return NGX_OK; } -found: + /* add the address to the addresses list that bound to this port */ + + return ngx_stream_add_address(cf, cscf, port, lsopt); +} + + +/* + * add the server address, the server names and the server core module + * configurations to the port list + */ + +static ngx_int_t +ngx_stream_add_address(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, + ngx_stream_conf_port_t *port, ngx_stream_listen_opt_t *lsopt) +{ + ngx_stream_conf_addr_t *addr; + + if (port->addrs.elts == NULL) { + if (ngx_array_init(&port->addrs, cf->temp_pool, 4, + sizeof(ngx_stream_conf_addr_t)) + != NGX_OK) + { + return NGX_ERROR; + } + } addr = ngx_array_push(&port->addrs); if (addr == NULL) { return NGX_ERROR; } - addr->opt = *listen; + addr->opt = *lsopt; + addr->protocols = 0; + addr->protocols_set = 0; + addr->protocols_changed = 0; + addr->hash.buckets = NULL; + addr->hash.size = 0; + addr->wc_head = NULL; + addr->wc_tail = NULL; +#if (NGX_PCRE) + addr->nregex = 0; + addr->regex = NULL; +#endif + addr->default_server = cscf; + addr->servers.elts = NULL; + + return ngx_stream_add_server(cf, cscf, addr); +} + + +/* add the server core module configuration to the address:port */ + +static ngx_int_t +ngx_stream_add_server(ngx_conf_t *cf, ngx_stream_core_srv_conf_t *cscf, + ngx_stream_conf_addr_t *addr) +{ + ngx_uint_t i; + ngx_stream_core_srv_conf_t **server; + + if (addr->servers.elts == NULL) { + if (ngx_array_init(&addr->servers, cf->temp_pool, 4, + sizeof(ngx_stream_core_srv_conf_t *)) + != NGX_OK) + { + return NGX_ERROR; + } + + } else { + server = addr->servers.elts; + for (i = 0; i < addr->servers.nelts; i++) { + if (server[i] == cscf) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "a duplicate listen %V", + &addr->opt.addr_text); + return NGX_ERROR; + } + } + } + + server = ngx_array_push(&addr->servers); + if (server == NULL) { + return NGX_ERROR; + } + + *server = cscf; return NGX_OK; } -static char * -ngx_stream_optimize_servers(ngx_conf_t *cf, ngx_array_t *ports) +static ngx_int_t +ngx_stream_optimize_servers(ngx_conf_t *cf, ngx_stream_core_main_conf_t *cmcf, + ngx_array_t *ports) { - ngx_uint_t i, p, last, bind_wildcard; - ngx_listening_t *ls; - ngx_stream_port_t *stport; - ngx_stream_conf_port_t *port; - ngx_stream_conf_addr_t *addr; - ngx_stream_core_srv_conf_t *cscf; + ngx_uint_t p, a; + ngx_stream_conf_port_t *port; + ngx_stream_conf_addr_t *addr; + + if (ports == NULL) { + return NGX_OK; + } port = ports->elts; for (p = 0; p < ports->nelts; p++) { @@ -451,175 +681,191 @@ ngx_stream_optimize_servers(ngx_conf_t * ngx_sort(port[p].addrs.elts, (size_t) port[p].addrs.nelts, sizeof(ngx_stream_conf_addr_t), ngx_stream_cmp_conf_addrs); - addr = port[p].addrs.elts; - last = port[p].addrs.nelts; - /* - * if there is the binding to the "*:port" then we need to bind() - * to the 
"*:port" only and ignore the other bindings + * check whether all name-based servers have the same + * configuration as a default server for given address:port */ - if (addr[last - 1].opt.wildcard) { - addr[last - 1].opt.bind = 1; - bind_wildcard = 1; + addr = port[p].addrs.elts; + for (a = 0; a < port[p].addrs.nelts; a++) { - } else { - bind_wildcard = 0; + if (addr[a].servers.nelts > 1 +#if (NGX_PCRE) + || addr[a].default_server->captures +#endif + ) + { + if (ngx_stream_server_names(cf, cmcf, &addr[a]) != NGX_OK) { + return NGX_ERROR; + } + } } - i = 0; - - while (i < last) { - - if (bind_wildcard && !addr[i].opt.bind) { - i++; - continue; - } - - ls = ngx_create_listening(cf, addr[i].opt.sockaddr, - addr[i].opt.socklen); - if (ls == NULL) { - return NGX_CONF_ERROR; - } - - ls->addr_ntop = 1; - ls->handler = ngx_stream_init_connection; - ls->pool_size = 256; - ls->type = addr[i].opt.type; - - cscf = addr->opt.ctx->srv_conf[ngx_stream_core_module.ctx_index]; - - ls->logp = cscf->error_log; - ls->log.data = &ls->addr_text; - ls->log.handler = ngx_accept_log_error; - - ls->backlog = addr[i].opt.backlog; - ls->rcvbuf = addr[i].opt.rcvbuf; - ls->sndbuf = addr[i].opt.sndbuf; - - ls->wildcard = addr[i].opt.wildcard; - - ls->keepalive = addr[i].opt.so_keepalive; -#if (NGX_HAVE_KEEPALIVE_TUNABLE) - ls->keepidle = addr[i].opt.tcp_keepidle; - ls->keepintvl = addr[i].opt.tcp_keepintvl; - ls->keepcnt = addr[i].opt.tcp_keepcnt; -#endif - -#if (NGX_HAVE_INET6) - ls->ipv6only = addr[i].opt.ipv6only; -#endif - -#if (NGX_HAVE_TCP_FASTOPEN) - ls->fastopen = addr[i].opt.fastopen; -#endif - -#if (NGX_HAVE_REUSEPORT) - ls->reuseport = addr[i].opt.reuseport; -#endif - - stport = ngx_palloc(cf->pool, sizeof(ngx_stream_port_t)); - if (stport == NULL) { - return NGX_CONF_ERROR; - } - - ls->servers = stport; - - stport->naddrs = i + 1; - - switch (ls->sockaddr->sa_family) { -#if (NGX_HAVE_INET6) - case AF_INET6: - if (ngx_stream_add_addrs6(cf, stport, addr) != NGX_OK) { - return NGX_CONF_ERROR; - } - break; -#endif - default: /* AF_INET */ - if (ngx_stream_add_addrs(cf, stport, addr) != NGX_OK) { - return NGX_CONF_ERROR; - } - break; - } - - addr++; - last--; + if (ngx_stream_init_listening(cf, &port[p]) != NGX_OK) { + return NGX_ERROR; } } - return NGX_CONF_OK; -} - - -static ngx_int_t -ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *stport, - ngx_stream_conf_addr_t *addr) -{ - ngx_uint_t i; - struct sockaddr_in *sin; - ngx_stream_in_addr_t *addrs; - - stport->addrs = ngx_pcalloc(cf->pool, - stport->naddrs * sizeof(ngx_stream_in_addr_t)); - if (stport->addrs == NULL) { - return NGX_ERROR; - } - - addrs = stport->addrs; - - for (i = 0; i < stport->naddrs; i++) { - - sin = (struct sockaddr_in *) addr[i].opt.sockaddr; - addrs[i].addr = sin->sin_addr.s_addr; - - addrs[i].conf.ctx = addr[i].opt.ctx; -#if (NGX_STREAM_SSL) - addrs[i].conf.ssl = addr[i].opt.ssl; -#endif - addrs[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; - addrs[i].conf.addr_text = addr[i].opt.addr_text; - } - return NGX_OK; } -#if (NGX_HAVE_INET6) - static ngx_int_t -ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *stport, +ngx_stream_server_names(ngx_conf_t *cf, ngx_stream_core_main_conf_t *cmcf, ngx_stream_conf_addr_t *addr) { - ngx_uint_t i; - struct sockaddr_in6 *sin6; - ngx_stream_in6_addr_t *addrs6; + ngx_int_t rc; + ngx_uint_t n, s; + ngx_hash_init_t hash; + ngx_hash_keys_arrays_t ha; + ngx_stream_server_name_t *name; + ngx_stream_core_srv_conf_t **cscfp; +#if (NGX_PCRE) + ngx_uint_t regex, i; - stport->addrs = 
ngx_pcalloc(cf->pool, - stport->naddrs * sizeof(ngx_stream_in6_addr_t)); - if (stport->addrs == NULL) { + regex = 0; +#endif + + ngx_memzero(&ha, sizeof(ngx_hash_keys_arrays_t)); + + ha.temp_pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, cf->log); + if (ha.temp_pool == NULL) { return NGX_ERROR; } - addrs6 = stport->addrs; + ha.pool = cf->pool; + + if (ngx_hash_keys_array_init(&ha, NGX_HASH_LARGE) != NGX_OK) { + goto failed; + } + + cscfp = addr->servers.elts; + + for (s = 0; s < addr->servers.nelts; s++) { + + name = cscfp[s]->server_names.elts; + + for (n = 0; n < cscfp[s]->server_names.nelts; n++) { - for (i = 0; i < stport->naddrs; i++) { +#if (NGX_PCRE) + if (name[n].regex) { + regex++; + continue; + } +#endif - sin6 = (struct sockaddr_in6 *) addr[i].opt.sockaddr; - addrs6[i].addr6 = sin6->sin6_addr; + rc = ngx_hash_add_key(&ha, &name[n].name, name[n].server, + NGX_HASH_WILDCARD_KEY); + + if (rc == NGX_ERROR) { + goto failed; + } - addrs6[i].conf.ctx = addr[i].opt.ctx; -#if (NGX_STREAM_SSL) - addrs6[i].conf.ssl = addr[i].opt.ssl; -#endif - addrs6[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; - addrs6[i].conf.addr_text = addr[i].opt.addr_text; + if (rc == NGX_DECLINED) { + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "invalid server name or wildcard \"%V\" on %V", + &name[n].name, &addr->opt.addr_text); + goto failed; + } + + if (rc == NGX_BUSY) { + ngx_log_error(NGX_LOG_WARN, cf->log, 0, + "conflicting server name \"%V\" on %V, ignored", + &name[n].name, &addr->opt.addr_text); + } + } + } + + hash.key = ngx_hash_key_lc; + hash.max_size = cmcf->server_names_hash_max_size; + hash.bucket_size = cmcf->server_names_hash_bucket_size; + hash.name = "server_names_hash"; + hash.pool = cf->pool; + + if (ha.keys.nelts) { + hash.hash = &addr->hash; + hash.temp_pool = NULL; + + if (ngx_hash_init(&hash, ha.keys.elts, ha.keys.nelts) != NGX_OK) { + goto failed; + } } - return NGX_OK; -} + if (ha.dns_wc_head.nelts) { + + ngx_qsort(ha.dns_wc_head.elts, (size_t) ha.dns_wc_head.nelts, + sizeof(ngx_hash_key_t), ngx_stream_cmp_dns_wildcards); + + hash.hash = NULL; + hash.temp_pool = ha.temp_pool; + + if (ngx_hash_wildcard_init(&hash, ha.dns_wc_head.elts, + ha.dns_wc_head.nelts) + != NGX_OK) + { + goto failed; + } + + addr->wc_head = (ngx_hash_wildcard_t *) hash.hash; + } + + if (ha.dns_wc_tail.nelts) { + + ngx_qsort(ha.dns_wc_tail.elts, (size_t) ha.dns_wc_tail.nelts, + sizeof(ngx_hash_key_t), ngx_stream_cmp_dns_wildcards); + + hash.hash = NULL; + hash.temp_pool = ha.temp_pool; + + if (ngx_hash_wildcard_init(&hash, ha.dns_wc_tail.elts, + ha.dns_wc_tail.nelts) + != NGX_OK) + { + goto failed; + } + + addr->wc_tail = (ngx_hash_wildcard_t *) hash.hash; + } + + ngx_destroy_pool(ha.temp_pool); + +#if (NGX_PCRE) + + if (regex == 0) { + return NGX_OK; + } + + addr->nregex = regex; + addr->regex = ngx_palloc(cf->pool, + regex * sizeof(ngx_stream_server_name_t)); + if (addr->regex == NULL) { + return NGX_ERROR; + } + + i = 0; + + for (s = 0; s < addr->servers.nelts; s++) { + + name = cscfp[s]->server_names.elts; + + for (n = 0; n < cscfp[s]->server_names.nelts; n++) { + if (name[n].regex) { + addr->regex[i++] = name[n]; + } + } + } #endif + return NGX_OK; + +failed: + + ngx_destroy_pool(ha.temp_pool); + + return NGX_ERROR; +} + static ngx_int_t ngx_stream_cmp_conf_addrs(const void *one, const void *two) @@ -630,12 +876,12 @@ ngx_stream_cmp_conf_addrs(const void *on second = (ngx_stream_conf_addr_t *) two; if (first->opt.wildcard) { - /* a wildcard must be the last resort, shift it to the end */ + /* a wildcard 
address must be the last resort, shift it to the end */ return 1; } if (second->opt.wildcard) { - /* a wildcard must be the last resort, shift it to the end */ + /* a wildcard address must be the last resort, shift it to the end */ return -1; } @@ -653,3 +899,265 @@ ngx_stream_cmp_conf_addrs(const void *on return 0; } + + +static int ngx_libc_cdecl +ngx_stream_cmp_dns_wildcards(const void *one, const void *two) +{ + ngx_hash_key_t *first, *second; + + first = (ngx_hash_key_t *) one; + second = (ngx_hash_key_t *) two; + + return ngx_dns_strcmp(first->key.data, second->key.data); +} + + +static ngx_int_t +ngx_stream_init_listening(ngx_conf_t *cf, ngx_stream_conf_port_t *port) +{ + ngx_uint_t i, last, bind_wildcard; + ngx_listening_t *ls; + ngx_stream_port_t *stport; + ngx_stream_conf_addr_t *addr; + + addr = port->addrs.elts; + last = port->addrs.nelts; + + /* + * If there is a binding to an "*:port" then we need to bind() to + * the "*:port" only and ignore other implicit bindings. The bindings + * have been already sorted: explicit bindings are on the start, then + * implicit bindings go, and wildcard binding is in the end. + */ + + if (addr[last - 1].opt.wildcard) { + addr[last - 1].opt.bind = 1; + bind_wildcard = 1; + + } else { + bind_wildcard = 0; + } + + i = 0; + + while (i < last) { + + if (bind_wildcard && !addr[i].opt.bind) { + i++; + continue; + } + + ls = ngx_stream_add_listening(cf, &addr[i]); + if (ls == NULL) { + return NGX_ERROR; + } + + stport = ngx_pcalloc(cf->pool, sizeof(ngx_stream_port_t)); + if (stport == NULL) { + return NGX_ERROR; + } + + ls->servers = stport; + + stport->naddrs = i + 1; + + switch (ls->sockaddr->sa_family) { + +#if (NGX_HAVE_INET6) + case AF_INET6: + if (ngx_stream_add_addrs6(cf, stport, addr) != NGX_OK) { + return NGX_ERROR; + } + break; +#endif + default: /* AF_INET */ + if (ngx_stream_add_addrs(cf, stport, addr) != NGX_OK) { + return NGX_ERROR; + } + break; + } + + addr++; + last--; + } + + return NGX_OK; +} + + +static ngx_listening_t * +ngx_stream_add_listening(ngx_conf_t *cf, ngx_stream_conf_addr_t *addr) +{ + ngx_listening_t *ls; + ngx_stream_core_srv_conf_t *cscf; + + ls = ngx_create_listening(cf, addr->opt.sockaddr, addr->opt.socklen); + if (ls == NULL) { + return NULL; + } + + ls->addr_ntop = 1; + + ls->handler = ngx_stream_init_connection; + + ls->pool_size = 256; + + cscf = addr->default_server; + + ls->logp = cscf->error_log; + ls->log.data = &ls->addr_text; + ls->log.handler = ngx_accept_log_error; + + ls->type = addr->opt.type; + ls->backlog = addr->opt.backlog; + ls->rcvbuf = addr->opt.rcvbuf; + ls->sndbuf = addr->opt.sndbuf; + + ls->keepalive = addr->opt.so_keepalive; +#if (NGX_HAVE_KEEPALIVE_TUNABLE) + ls->keepidle = addr->opt.tcp_keepidle; + ls->keepintvl = addr->opt.tcp_keepintvl; + ls->keepcnt = addr->opt.tcp_keepcnt; +#endif + +#if (NGX_HAVE_INET6) + ls->ipv6only = addr->opt.ipv6only; +#endif + +#if (NGX_HAVE_TCP_FASTOPEN) + ls->fastopen = addr->opt.fastopen; +#endif + +#if (NGX_HAVE_REUSEPORT) + ls->reuseport = addr->opt.reuseport; +#endif + + ls->wildcard = addr->opt.wildcard; + + return ls; +} + + +static ngx_int_t +ngx_stream_add_addrs(ngx_conf_t *cf, ngx_stream_port_t *stport, + ngx_stream_conf_addr_t *addr) +{ + ngx_uint_t i; + struct sockaddr_in *sin; + ngx_stream_in_addr_t *addrs; + ngx_stream_virtual_names_t *vn; + + stport->addrs = ngx_pcalloc(cf->pool, + stport->naddrs * sizeof(ngx_stream_in_addr_t)); + if (stport->addrs == NULL) { + return NGX_ERROR; + } + + addrs = stport->addrs; + + for (i = 0; i < stport->naddrs; 
i++) { + + sin = (struct sockaddr_in *) addr[i].opt.sockaddr; + addrs[i].addr = sin->sin_addr.s_addr; + addrs[i].conf.default_server = addr[i].default_server; +#if (NGX_STREAM_SSL) + addrs[i].conf.ssl = addr[i].opt.ssl; +#endif + addrs[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; + + if (addr[i].hash.buckets == NULL + && (addr[i].wc_head == NULL + || addr[i].wc_head->hash.buckets == NULL) + && (addr[i].wc_tail == NULL + || addr[i].wc_tail->hash.buckets == NULL) +#if (NGX_PCRE) + && addr[i].nregex == 0 +#endif + ) + { + continue; + } + + vn = ngx_palloc(cf->pool, sizeof(ngx_stream_virtual_names_t)); + if (vn == NULL) { + return NGX_ERROR; + } + + addrs[i].conf.virtual_names = vn; + + vn->names.hash = addr[i].hash; + vn->names.wc_head = addr[i].wc_head; + vn->names.wc_tail = addr[i].wc_tail; +#if (NGX_PCRE) + vn->nregex = addr[i].nregex; + vn->regex = addr[i].regex; +#endif + } + + return NGX_OK; +} + + +#if (NGX_HAVE_INET6) + +static ngx_int_t +ngx_stream_add_addrs6(ngx_conf_t *cf, ngx_stream_port_t *stport, + ngx_stream_conf_addr_t *addr) +{ + ngx_uint_t i; + struct sockaddr_in6 *sin6; + ngx_stream_in6_addr_t *addrs6; + ngx_stream_virtual_names_t *vn; + + stport->addrs = ngx_pcalloc(cf->pool, + stport->naddrs * sizeof(ngx_stream_in6_addr_t)); + if (stport->addrs == NULL) { + return NGX_ERROR; + } + + addrs6 = stport->addrs; + + for (i = 0; i < stport->naddrs; i++) { + + sin6 = (struct sockaddr_in6 *) addr[i].opt.sockaddr; + addrs6[i].addr6 = sin6->sin6_addr; + addrs6[i].conf.default_server = addr[i].default_server; +#if (NGX_STREAM_SSL) + addrs6[i].conf.ssl = addr[i].opt.ssl; +#endif + addrs6[i].conf.proxy_protocol = addr[i].opt.proxy_protocol; + + if (addr[i].hash.buckets == NULL + && (addr[i].wc_head == NULL + || addr[i].wc_head->hash.buckets == NULL) + && (addr[i].wc_tail == NULL + || addr[i].wc_tail->hash.buckets == NULL) +#if (NGX_PCRE) + && addr[i].nregex == 0 +#endif + ) + { + continue; + } + + vn = ngx_palloc(cf->pool, sizeof(ngx_stream_virtual_names_t)); + if (vn == NULL) { + return NGX_ERROR; + } + + addrs6[i].conf.virtual_names = vn; + + vn->names.hash = addr[i].hash; + vn->names.wc_head = addr[i].wc_head; + vn->names.wc_tail = addr[i].wc_tail; +#if (NGX_PCRE) + vn->nregex = addr[i].nregex; + vn->regex = addr[i].regex; +#endif + } + + return NGX_OK; +} + +#endif diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h --- a/src/stream/ngx_stream.h +++ b/src/stream/ngx_stream.h @@ -45,9 +45,8 @@ typedef struct { socklen_t socklen; ngx_str_t addr_text; - /* server ctx */ - ngx_stream_conf_ctx_t *ctx; - + unsigned set:1; + unsigned default_server:1; unsigned bind:1; unsigned wildcard:1; unsigned ssl:1; @@ -69,50 +68,7 @@ typedef struct { int fastopen; #endif int type; -} ngx_stream_listen_t; - - -typedef struct { - ngx_stream_conf_ctx_t *ctx; - ngx_str_t addr_text; - unsigned ssl:1; - unsigned proxy_protocol:1; -} ngx_stream_addr_conf_t; - -typedef struct { - in_addr_t addr; - ngx_stream_addr_conf_t conf; -} ngx_stream_in_addr_t; - - -#if (NGX_HAVE_INET6) - -typedef struct { - struct in6_addr addr6; - ngx_stream_addr_conf_t conf; -} ngx_stream_in6_addr_t; - -#endif - - -typedef struct { - /* ngx_stream_in_addr_t or ngx_stream_in6_addr_t */ - void *addrs; - ngx_uint_t naddrs; -} ngx_stream_port_t; - - -typedef struct { - int family; - int type; - in_port_t port; - ngx_array_t addrs; /* array of ngx_stream_conf_addr_t */ -} ngx_stream_conf_port_t; - - -typedef struct { - ngx_stream_listen_t opt; -} ngx_stream_conf_addr_t; +} ngx_stream_listen_opt_t; typedef enum { @@ 
-153,7 +109,6 @@ typedef struct { typedef struct { ngx_array_t servers; /* ngx_stream_core_srv_conf_t */ - ngx_array_t listen; /* ngx_stream_listen_t */ ngx_stream_phase_engine_t phase_engine; @@ -163,16 +118,24 @@ typedef struct { ngx_array_t prefix_variables; /* ngx_stream_variable_t */ ngx_uint_t ncaptures; + ngx_uint_t server_names_hash_max_size; + ngx_uint_t server_names_hash_bucket_size; + ngx_uint_t variables_hash_max_size; ngx_uint_t variables_hash_bucket_size; ngx_hash_keys_arrays_t *variables_keys; + ngx_array_t *ports; + ngx_stream_phase_t phases[NGX_STREAM_LOG_PHASE + 1]; } ngx_stream_core_main_conf_t; typedef struct { + /* array of the ngx_stream_server_name_t, "server_name" directive */ + ngx_array_t server_names; + ngx_stream_content_handler_pt handler; ngx_stream_conf_ctx_t *ctx; @@ -180,6 +143,8 @@ typedef struct { u_char *file_name; ngx_uint_t line; + ngx_str_t server_name; + ngx_flag_t tcp_nodelay; size_t preread_buffer_size; ngx_msec_t preread_timeout; @@ -191,10 +156,98 @@ typedef struct { ngx_msec_t proxy_protocol_timeout; - ngx_uint_t listen; /* unsigned listen:1; */ + unsigned listen:1; +#if (NGX_PCRE) + unsigned captures:1; +#endif } ngx_stream_core_srv_conf_t; +/* list of structures to find core_srv_conf quickly at run time */ + + +typedef struct { +#if (NGX_PCRE) + ngx_stream_regex_t *regex; +#endif + ngx_stream_core_srv_conf_t *server; /* virtual name server conf */ + ngx_str_t name; +} ngx_stream_server_name_t; + + +typedef struct { + ngx_hash_combined_t names; + + ngx_uint_t nregex; + ngx_stream_server_name_t *regex; +} ngx_stream_virtual_names_t; + + +typedef struct { + /* the default server configuration for this address:port */ + ngx_stream_core_srv_conf_t *default_server; + + ngx_stream_virtual_names_t *virtual_names; + + unsigned ssl:1; + unsigned proxy_protocol:1; +} ngx_stream_addr_conf_t; + + +typedef struct { + in_addr_t addr; + ngx_stream_addr_conf_t conf; +} ngx_stream_in_addr_t; + + +#if (NGX_HAVE_INET6) + +typedef struct { + struct in6_addr addr6; + ngx_stream_addr_conf_t conf; +} ngx_stream_in6_addr_t; + +#endif + + +typedef struct { + /* ngx_stream_in_addr_t or ngx_stream_in6_addr_t */ + void *addrs; + ngx_uint_t naddrs; +} ngx_stream_port_t; + + +typedef struct { + int family; + int type; + in_port_t port; + ngx_array_t addrs; /* array of ngx_stream_conf_addr_t */ +} ngx_stream_conf_port_t; + + +typedef struct { + ngx_stream_listen_opt_t opt; + + unsigned protocols:3; + unsigned protocols_set:1; + unsigned protocols_changed:1; + + ngx_hash_t hash; + ngx_hash_wildcard_t *wc_head; + ngx_hash_wildcard_t *wc_tail; + +#if (NGX_PCRE) + ngx_uint_t nregex; + ngx_stream_server_name_t *regex; +#endif + + /* the default server configuration for this address:port */ + ngx_stream_core_srv_conf_t *default_server; + ngx_array_t servers; + /* array of ngx_stream_core_srv_conf_t */ +} ngx_stream_conf_addr_t; + + struct ngx_stream_session_s { uint32_t signature; /* "STRM" */ @@ -210,6 +263,8 @@ struct ngx_stream_session_s { void **main_conf; void **srv_conf; + ngx_stream_virtual_names_t *virtual_names; + ngx_stream_upstream_t *upstream; ngx_array_t *upstream_states; /* of ngx_stream_upstream_state_t */ @@ -283,6 +338,9 @@ typedef struct { #define NGX_STREAM_WRITE_BUFFERED 0x10 +ngx_int_t ngx_stream_add_listen(ngx_conf_t *cf, + ngx_stream_core_srv_conf_t *cscf, ngx_stream_listen_opt_t *lsopt); + void ngx_stream_core_run_phases(ngx_stream_session_t *s); ngx_int_t ngx_stream_core_generic_phase(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph); @@ -291,6 
+349,10 @@ ngx_int_t ngx_stream_core_preread_phase( ngx_int_t ngx_stream_core_content_phase(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph); +ngx_int_t ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, + ngx_uint_t alloc); +ngx_int_t ngx_stream_find_virtual_server(ngx_stream_session_t *s, + ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp); void ngx_stream_init_connection(ngx_connection_t *c); void ngx_stream_session_handler(ngx_event_t *rev); diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -26,6 +26,8 @@ static char *ngx_stream_core_server(ngx_ void *conf); static char *ngx_stream_core_listen(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +static char *ngx_stream_core_server_name(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); static char *ngx_stream_core_resolver(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); @@ -46,6 +48,20 @@ static ngx_command_t ngx_stream_core_co offsetof(ngx_stream_core_main_conf_t, variables_hash_bucket_size), NULL }, + { ngx_string("server_names_hash_max_size"), + NGX_STREAM_MAIN_CONF|NGX_CONF_TAKE1, + ngx_conf_set_num_slot, + NGX_STREAM_MAIN_CONF_OFFSET, + offsetof(ngx_stream_core_main_conf_t, server_names_hash_max_size), + NULL }, + + { ngx_string("server_names_hash_bucket_size"), + NGX_STREAM_MAIN_CONF|NGX_CONF_TAKE1, + ngx_conf_set_num_slot, + NGX_STREAM_MAIN_CONF_OFFSET, + offsetof(ngx_stream_core_main_conf_t, server_names_hash_bucket_size), + NULL }, + { ngx_string("server"), NGX_STREAM_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_NOARGS, ngx_stream_core_server, @@ -60,6 +76,13 @@ static ngx_command_t ngx_stream_core_co 0, NULL }, + { ngx_string("server_name"), + NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, + ngx_stream_core_server_name, + NGX_STREAM_SRV_CONF_OFFSET, + 0, + NULL }, + { ngx_string("error_log"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, ngx_stream_core_error_log, @@ -418,6 +441,149 @@ ngx_stream_core_content_phase(ngx_stream } +ngx_int_t +ngx_stream_validate_host(ngx_str_t *host, ngx_pool_t *pool, ngx_uint_t alloc) +{ + u_char *h, ch; + size_t i, dot_pos, host_len; + + enum { + sw_usual = 0, + sw_literal, + sw_rest + } state; + + dot_pos = host->len; + host_len = host->len; + + h = host->data; + + state = sw_usual; + + for (i = 0; i < host->len; i++) { + ch = h[i]; + + switch (ch) { + + case '.': + if (dot_pos == i - 1) { + return NGX_DECLINED; + } + dot_pos = i; + break; + + case ':': + if (state == sw_usual) { + host_len = i; + state = sw_rest; + } + break; + + case '[': + if (i == 0) { + state = sw_literal; + } + break; + + case ']': + if (state == sw_literal) { + host_len = i + 1; + state = sw_rest; + } + break; + + default: + + if (ngx_path_separator(ch)) { + return NGX_DECLINED; + } + + if (ch <= 0x20 || ch == 0x7f) { + return NGX_DECLINED; + } + + if (ch >= 'A' && ch <= 'Z') { + alloc = 1; + } + + break; + } + } + + if (dot_pos == host_len - 1) { + host_len--; + } + + if (host_len == 0) { + return NGX_DECLINED; + } + + if (alloc) { + host->data = ngx_pnalloc(pool, host_len); + if (host->data == NULL) { + return NGX_ERROR; + } + + ngx_strlow(host->data, h, host_len); + } + + host->len = host_len; + + return NGX_OK; +} + + +ngx_int_t +ngx_stream_find_virtual_server(ngx_stream_session_t *s, + ngx_str_t *host, ngx_stream_core_srv_conf_t **cscfp) +{ + ngx_stream_core_srv_conf_t *cscf; + + if (s->virtual_names == NULL) { + return NGX_DECLINED; + } + + cscf = 
ngx_hash_find_combined(&s->virtual_names->names, + ngx_hash_key(host->data, host->len), + host->data, host->len); + + if (cscf) { + *cscfp = cscf; + return NGX_OK; + } + +#if (NGX_PCRE) + + if (host->len && s->virtual_names->nregex) { + ngx_int_t n; + ngx_uint_t i; + ngx_stream_server_name_t *sn; + + sn = s->virtual_names->regex; + + for (i = 0; i < s->virtual_names->nregex; i++) { + + n = ngx_stream_regex_exec(s, sn[i].regex, host); + + if (n == NGX_DECLINED) { + continue; + } + + if (n == NGX_OK) { + *cscfp = sn[i].server; + return NGX_OK; + } + + return NGX_ERROR; + } + } + +#endif /* NGX_PCRE */ + + return NGX_DECLINED; +} + + static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf) { @@ -442,11 +608,8 @@ ngx_stream_core_create_main_conf(ngx_con return NULL; } - if (ngx_array_init(&cmcf->listen, cf->pool, 4, sizeof(ngx_stream_listen_t)) - != NGX_OK) - { - return NULL; - } + cmcf->server_names_hash_max_size = NGX_CONF_UNSET_UINT; + cmcf->server_names_hash_bucket_size = NGX_CONF_UNSET_UINT; cmcf->variables_hash_max_size = NGX_CONF_UNSET_UINT; cmcf->variables_hash_bucket_size = NGX_CONF_UNSET_UINT; @@ -460,6 +623,14 @@ ngx_stream_core_init_main_conf(ngx_conf_ { ngx_stream_core_main_conf_t *cmcf = conf; + ngx_conf_init_uint_value(cmcf->server_names_hash_max_size, 512); + ngx_conf_init_uint_value(cmcf->server_names_hash_bucket_size, + ngx_cacheline_size); + + cmcf->server_names_hash_bucket_size = + ngx_align(cmcf->server_names_hash_bucket_size, ngx_cacheline_size); + + ngx_conf_init_uint_value(cmcf->variables_hash_max_size, 1024); ngx_conf_init_uint_value(cmcf->variables_hash_bucket_size, 64); @@ -491,6 +662,13 @@ ngx_stream_core_create_srv_conf(ngx_conf * cscf->error_log = NULL; */ + if (ngx_array_init(&cscf->server_names, cf->temp_pool, 4, + sizeof(ngx_stream_server_name_t)) + != NGX_OK) + { + return NULL; + } + cscf->file_name = cf->conf_file->file.name.data; cscf->line = cf->conf_file->line; cscf->resolver_timeout = NGX_CONF_UNSET_MSEC; @@ -509,6 +687,9 @@ ngx_stream_core_merge_srv_conf(ngx_conf_ ngx_stream_core_srv_conf_t *prev = parent; ngx_stream_core_srv_conf_t *conf = child; + ngx_str_t name; + ngx_stream_server_name_t *sn; + ngx_conf_merge_msec_value(conf->resolver_timeout, prev->resolver_timeout, 30000); @@ -556,6 +737,37 @@ ngx_stream_core_merge_srv_conf(ngx_conf_ ngx_conf_merge_msec_value(conf->preread_timeout, prev->preread_timeout, 30000); + if (conf->server_names.nelts == 0) { + /* the array has 4 empty preallocated elements, so push cannot fail */ + sn = ngx_array_push(&conf->server_names); +#if (NGX_PCRE) + sn->regex = NULL; +#endif + sn->server = conf; + ngx_str_set(&sn->name, ""); + } + + sn = conf->server_names.elts; + name = sn[0].name; + +#if (NGX_PCRE) + if (sn->regex) { + name.len++; + name.data--; + } else +#endif + + if (name.data[0] == '.') { + name.len--; + name.data++; + } + + conf->server_name.len = name.len; + conf->server_name.data = ngx_pstrdup(cf->pool, &name); + if (conf->server_name.data == NULL) { + return NGX_CONF_ERROR; + } + return NGX_CONF_OK; } @@ -655,11 +867,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n { ngx_stream_core_srv_conf_t *cscf = conf; - ngx_str_t *value, size; - ngx_url_t u; - ngx_uint_t i, n, backlog; - ngx_stream_listen_t *ls, *als, *nls; - ngx_stream_core_main_conf_t *cmcf; + ngx_str_t *value, size; + ngx_url_t u; + ngx_uint_t n, i, backlog; + ngx_stream_listen_opt_t lsopt; cscf->listen = 1; @@ -680,51 +891,48 @@ ngx_stream_core_listen(ngx_conf_t *cf, n return NGX_CONF_ERROR; } - cmcf = 
ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); - - ls = ngx_array_push(&cmcf->listen); - if (ls == NULL) { - return NGX_CONF_ERROR; - } - - ngx_memzero(ls, sizeof(ngx_stream_listen_t)); + ngx_memzero(&lsopt, sizeof(ngx_stream_listen_opt_t)); - ls->backlog = NGX_LISTEN_BACKLOG; - ls->rcvbuf = -1; - ls->sndbuf = -1; - ls->type = SOCK_STREAM; - ls->ctx = cf->ctx; - + lsopt.backlog = NGX_LISTEN_BACKLOG; + lsopt.type = SOCK_STREAM; + lsopt.rcvbuf = -1; + lsopt.sndbuf = -1; #if (NGX_HAVE_TCP_FASTOPEN) - ls->fastopen = -1; + lsopt.fastopen = -1; #endif - #if (NGX_HAVE_INET6) - ls->ipv6only = 1; + lsopt.ipv6only = 1; #endif backlog = 0; for (i = 2; i < cf->args->nelts; i++) { + if (ngx_strcmp(value[i].data, "default_server") == 0) { + lsopt.default_server = 1; + continue; + } + #if !(NGX_WIN32) if (ngx_strcmp(value[i].data, "udp") == 0) { - ls->type = SOCK_DGRAM; + lsopt.type = SOCK_DGRAM; continue; } #endif if (ngx_strcmp(value[i].data, "bind") == 0) { - ls->bind = 1; + lsopt.set = 1; + lsopt.bind = 1; continue; } #if (NGX_HAVE_TCP_FASTOPEN) if (ngx_strncmp(value[i].data, "fastopen=", 9) == 0) { - ls->fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9); - ls->bind = 1; + lsopt.fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9); + lsopt.set = 1; + lsopt.bind = 1; - if (ls->fastopen == NGX_ERROR) { + if (lsopt.fastopen == NGX_ERROR) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid fastopen \"%V\"", &value[i]); return NGX_CONF_ERROR; @@ -735,10 +943,11 @@ ngx_stream_core_listen(ngx_conf_t *cf, n #endif if (ngx_strncmp(value[i].data, "backlog=", 8) == 0) { - ls->backlog = ngx_atoi(value[i].data + 8, value[i].len - 8); - ls->bind = 1; + lsopt.backlog = ngx_atoi(value[i].data + 8, value[i].len - 8); + lsopt.set = 1; + lsopt.bind = 1; - if (ls->backlog == NGX_ERROR || ls->backlog == 0) { + if (lsopt.backlog == NGX_ERROR || lsopt.backlog == 0) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid backlog \"%V\"", &value[i]); return NGX_CONF_ERROR; @@ -753,10 +962,11 @@ ngx_stream_core_listen(ngx_conf_t *cf, n size.len = value[i].len - 7; size.data = value[i].data + 7; - ls->rcvbuf = ngx_parse_size(&size); - ls->bind = 1; + lsopt.rcvbuf = ngx_parse_size(&size); + lsopt.set = 1; + lsopt.bind = 1; - if (ls->rcvbuf == NGX_ERROR) { + if (lsopt.rcvbuf == NGX_ERROR) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid rcvbuf \"%V\"", &value[i]); return NGX_CONF_ERROR; @@ -769,10 +979,11 @@ ngx_stream_core_listen(ngx_conf_t *cf, n size.len = value[i].len - 7; size.data = value[i].data + 7; - ls->sndbuf = ngx_parse_size(&size); - ls->bind = 1; + lsopt.sndbuf = ngx_parse_size(&size); + lsopt.set = 1; + lsopt.bind = 1; - if (ls->sndbuf == NGX_ERROR) { + if (lsopt.sndbuf == NGX_ERROR) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid sndbuf \"%V\"", &value[i]); return NGX_CONF_ERROR; @@ -784,10 +995,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) { #if (NGX_HAVE_INET6 && defined IPV6_V6ONLY) if (ngx_strcmp(&value[i].data[10], "n") == 0) { - ls->ipv6only = 1; + lsopt.ipv6only = 1; } else if (ngx_strcmp(&value[i].data[10], "ff") == 0) { - ls->ipv6only = 0; + lsopt.ipv6only = 0; } else { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, @@ -796,7 +1007,9 @@ ngx_stream_core_listen(ngx_conf_t *cf, n return NGX_CONF_ERROR; } - ls->bind = 1; + lsopt.set = 1; + lsopt.bind = 1; + continue; #else ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, @@ -808,8 +1021,9 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (ngx_strcmp(value[i].data, "reuseport") == 0) { 
#if (NGX_HAVE_REUSEPORT) - ls->reuseport = 1; - ls->bind = 1; + lsopt.reuseport = 1; + lsopt.set = 1; + lsopt.bind = 1; #else ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "reuseport is not supported " @@ -820,17 +1034,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (ngx_strcmp(value[i].data, "ssl") == 0) { #if (NGX_STREAM_SSL) - ngx_stream_ssl_conf_t *sslcf; - - sslcf = ngx_stream_conf_get_module_srv_conf(cf, - ngx_stream_ssl_module); - - sslcf->listen = 1; - sslcf->file = cf->conf_file->file.name.data; - sslcf->line = cf->conf_file->line; - - ls->ssl = 1; - + lsopt.ssl = 1; continue; #else ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, @@ -843,10 +1047,10 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (ngx_strncmp(value[i].data, "so_keepalive=", 13) == 0) { if (ngx_strcmp(&value[i].data[13], "on") == 0) { - ls->so_keepalive = 1; + lsopt.so_keepalive = 1; } else if (ngx_strcmp(&value[i].data[13], "off") == 0) { - ls->so_keepalive = 2; + lsopt.so_keepalive = 2; } else { @@ -865,8 +1069,8 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (p > s.data) { s.len = p - s.data; - ls->tcp_keepidle = ngx_parse_time(&s, 1); - if (ls->tcp_keepidle == (time_t) NGX_ERROR) { + lsopt.tcp_keepidle = ngx_parse_time(&s, 1); + if (lsopt.tcp_keepidle == (time_t) NGX_ERROR) { goto invalid_so_keepalive; } } @@ -881,8 +1085,8 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (p > s.data) { s.len = p - s.data; - ls->tcp_keepintvl = ngx_parse_time(&s, 1); - if (ls->tcp_keepintvl == (time_t) NGX_ERROR) { + lsopt.tcp_keepintvl = ngx_parse_time(&s, 1); + if (lsopt.tcp_keepintvl == (time_t) NGX_ERROR) { goto invalid_so_keepalive; } } @@ -892,19 +1096,19 @@ ngx_stream_core_listen(ngx_conf_t *cf, n if (s.data < end) { s.len = end - s.data; - ls->tcp_keepcnt = ngx_atoi(s.data, s.len); - if (ls->tcp_keepcnt == NGX_ERROR) { + lsopt.tcp_keepcnt = ngx_atoi(s.data, s.len); + if (lsopt.tcp_keepcnt == NGX_ERROR) { goto invalid_so_keepalive; } } - if (ls->tcp_keepidle == 0 && ls->tcp_keepintvl == 0 - && ls->tcp_keepcnt == 0) + if (lsopt.tcp_keepidle == 0 && lsopt.tcp_keepintvl == 0 + && lsopt.tcp_keepcnt == 0) { goto invalid_so_keepalive; } - ls->so_keepalive = 1; + lsopt.so_keepalive = 1; #else @@ -916,7 +1120,8 @@ ngx_stream_core_listen(ngx_conf_t *cf, n #endif } - ls->bind = 1; + lsopt.set = 1; + lsopt.bind = 1; continue; @@ -931,7 +1136,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n } if (ngx_strcmp(value[i].data, "proxy_protocol") == 0) { - ls->proxy_protocol = 1; + lsopt.proxy_protocol = 1; continue; } @@ -940,27 +1145,27 @@ ngx_stream_core_listen(ngx_conf_t *cf, n return NGX_CONF_ERROR; } - if (ls->type == SOCK_DGRAM) { + if (lsopt.type == SOCK_DGRAM) { if (backlog) { return "\"backlog\" parameter is incompatible with \"udp\""; } #if (NGX_STREAM_SSL) - if (ls->ssl) { + if (lsopt.ssl) { return "\"ssl\" parameter is incompatible with \"udp\""; } #endif - if (ls->so_keepalive) { + if (lsopt.so_keepalive) { return "\"so_keepalive\" parameter is incompatible with \"udp\""; } - if (ls->proxy_protocol) { + if (lsopt.proxy_protocol) { return "\"proxy_protocol\" parameter is incompatible with \"udp\""; } #if (NGX_HAVE_TCP_FASTOPEN) - if (ls->fastopen != -1) { + if (lsopt.fastopen != -1) { return "\"fastopen\" parameter is incompatible with \"udp\""; } #endif @@ -977,40 +1182,12 @@ ngx_stream_core_listen(ngx_conf_t *cf, n } } - if (n != 0) { - nls = ngx_array_push(&cmcf->listen); - if (nls == NULL) { - return NGX_CONF_ERROR; - } - - *nls = *ls; - - } else { - nls = ls; - } - - nls->sockaddr = u.addrs[n].sockaddr; - nls->socklen = 
u.addrs[n].socklen; - nls->addr_text = u.addrs[n].name; - nls->wildcard = ngx_inet_wildcard(nls->sockaddr); + lsopt.sockaddr = u.addrs[n].sockaddr; + lsopt.socklen = u.addrs[n].socklen; + lsopt.addr_text = u.addrs[n].name; + lsopt.wildcard = ngx_inet_wildcard(lsopt.sockaddr); - als = cmcf->listen.elts; - - for (i = 0; i < cmcf->listen.nelts - 1; i++) { - if (nls->type != als[i].type) { - continue; - } - - if (ngx_cmp_sockaddr(als[i].sockaddr, als[i].socklen, - nls->sockaddr, nls->socklen, 1) - != NGX_OK) - { - continue; - } - - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "duplicate \"%V\" address and port pair", - &nls->addr_text); + if (ngx_stream_add_listen(cf, cscf, &lsopt) != NGX_OK) { return NGX_CONF_ERROR; } @@ -1023,6 +1200,107 @@ ngx_stream_core_listen(ngx_conf_t *cf, n static char * +ngx_stream_core_server_name(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_stream_core_srv_conf_t *cscf = conf; + + u_char ch; + ngx_str_t *value; + ngx_uint_t i; + ngx_stream_server_name_t *sn; + + value = cf->args->elts; + + for (i = 1; i < cf->args->nelts; i++) { + + ch = value[i].data[0]; + + if ((ch == '*' && (value[i].len < 3 || value[i].data[1] != '.')) + || (ch == '.' && value[i].len < 2)) + { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "server name \"%V\" is invalid", &value[i]); + return NGX_CONF_ERROR; + } + + if (ngx_strchr(value[i].data, '/')) { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "server name \"%V\" has suspicious symbols", + &value[i]); + } + + sn = ngx_array_push(&cscf->server_names); + if (sn == NULL) { + return NGX_CONF_ERROR; + } + +#if (NGX_PCRE) + sn->regex = NULL; +#endif + sn->server = cscf; + + if (ngx_strcasecmp(value[i].data, (u_char *) "$hostname") == 0) { + sn->name = cf->cycle->hostname; + + } else { + sn->name = value[i]; + } + + if (value[i].data[0] != '~') { + ngx_strlow(sn->name.data, sn->name.data, sn->name.len); + continue; + } + +#if (NGX_PCRE) + { + u_char *p; + ngx_regex_compile_t rc; + u_char errstr[NGX_MAX_CONF_ERRSTR]; + + if (value[i].len == 1) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "empty regex in server name \"%V\"", &value[i]); + return NGX_CONF_ERROR; + } + + value[i].len--; + value[i].data++; + + ngx_memzero(&rc, sizeof(ngx_regex_compile_t)); + + rc.pattern = value[i]; + rc.err.len = NGX_MAX_CONF_ERRSTR; + rc.err.data = errstr; + + for (p = value[i].data; p < value[i].data + value[i].len; p++) { + if (*p >= 'A' && *p <= 'Z') { + rc.options = NGX_REGEX_CASELESS; + break; + } + } + + sn->regex = ngx_stream_regex_compile(cf, &rc); + if (sn->regex == NULL) { + return NGX_CONF_ERROR; + } + + sn->name = value[i]; + cscf->captures = (rc.captures > 0); + } +#else + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "using regex \"%V\" " + "requires PCRE library", &value[i]); + + return NGX_CONF_ERROR; +#endif + } + + return NGX_CONF_OK; +} + + +static char * ngx_stream_core_resolver(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { ngx_stream_core_srv_conf_t *cscf = conf; diff --git a/src/stream/ngx_stream_handler.c b/src/stream/ngx_stream_handler.c --- a/src/stream/ngx_stream_handler.c +++ b/src/stream/ngx_stream_handler.c @@ -30,6 +30,7 @@ ngx_stream_init_connection(ngx_connectio struct sockaddr_in *sin; ngx_stream_in_addr_t *addr; ngx_stream_session_t *s; + ngx_stream_conf_ctx_t *ctx; ngx_stream_addr_conf_t *addr_conf; #if (NGX_HAVE_INET6) struct sockaddr_in6 *sin6; @@ -121,9 +122,12 @@ ngx_stream_init_connection(ngx_connectio return; } + ctx = addr_conf->default_server->ctx; + s->signature = NGX_STREAM_MODULE; - s->main_conf = 
addr_conf->ctx->main_conf; - s->srv_conf = addr_conf->ctx->srv_conf; + s->main_conf = ctx->main_conf; + s->srv_conf = ctx->srv_conf; + s->virtual_names = addr_conf->virtual_names; #if (NGX_STREAM_SSL) s->ssl = addr_conf->ssl; @@ -144,7 +148,7 @@ ngx_stream_init_connection(ngx_connectio ngx_log_error(NGX_LOG_INFO, c->log, 0, "*%uA %sclient %*s connected to %V", c->number, c->type == SOCK_DGRAM ? "udp " : "", - len, text, &addr_conf->addr_text); + len, text, &c->listening->addr_text); c->log->connection = c->number; c->log->handler = ngx_stream_log_error; diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c +++ b/src/stream/ngx_stream_ssl_module.c @@ -219,6 +219,13 @@ static ngx_command_t ngx_stream_ssl_com offsetof(ngx_stream_ssl_conf_t, conf_commands), &ngx_stream_ssl_conf_command_post }, + { ngx_string("ssl_reject_handshake"), + NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_STREAM_SRV_CONF_OFFSET, + offsetof(ngx_stream_ssl_conf_t, reject_handshake), + NULL }, + { ngx_string("ssl_alpn"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, ngx_stream_ssl_alpn, @@ -458,7 +465,112 @@ ngx_stream_ssl_handshake_handler(ngx_con static int ngx_stream_ssl_servername(ngx_ssl_conn_t *ssl_conn, int *ad, void *arg) { + ngx_int_t rc; + ngx_str_t host; + const char *servername; + ngx_connection_t *c; + ngx_stream_session_t *s; + ngx_stream_ssl_conf_t *sscf; + ngx_stream_core_srv_conf_t *cscf; + + c = ngx_ssl_get_connection(ssl_conn); + + if (c->ssl->handshaked) { + *ad = SSL_AD_NO_RENEGOTIATION; + return SSL_TLSEXT_ERR_ALERT_FATAL; + } + + s = c->data; + + servername = SSL_get_servername(ssl_conn, TLSEXT_NAMETYPE_host_name); + + if (servername == NULL) { + ngx_log_debug0(NGX_LOG_DEBUG_STREAM, c->log, 0, + "SSL server name: null"); + goto done; + } + + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, + "SSL server name: \"%s\"", servername); + + host.len = ngx_strlen(servername); + + if (host.len == 0) { + goto done; + } + + host.data = (u_char *) servername; + + rc = ngx_stream_validate_host(&host, c->pool, 1); + + if (rc == NGX_ERROR) { + goto error; + } + + if (rc == NGX_DECLINED) { + goto done; + } + + rc = ngx_stream_find_virtual_server(s, &host, &cscf); + + if (rc == NGX_ERROR) { + goto error; + } + + if (rc == NGX_DECLINED) { + goto done; + } + + s->srv_conf = cscf->ctx->srv_conf; + + ngx_set_connection_log(c, cscf->error_log); + + sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); + + if (sscf->ssl.ctx) { + if (SSL_set_SSL_CTX(ssl_conn, sscf->ssl.ctx) == NULL) { + goto error; + } + + /* + * SSL_set_SSL_CTX() only changes certs as of 1.0.0d + * adjust other things we care about + */ + + SSL_set_verify(ssl_conn, SSL_CTX_get_verify_mode(sscf->ssl.ctx), + SSL_CTX_get_verify_callback(sscf->ssl.ctx)); + + SSL_set_verify_depth(ssl_conn, SSL_CTX_get_verify_depth(sscf->ssl.ctx)); + +#if OPENSSL_VERSION_NUMBER >= 0x009080dfL + /* only in 0.9.8m+ */ + SSL_clear_options(ssl_conn, SSL_get_options(ssl_conn) & + ~SSL_CTX_get_options(sscf->ssl.ctx)); +#endif + + SSL_set_options(ssl_conn, SSL_CTX_get_options(sscf->ssl.ctx)); + +#ifdef SSL_OP_NO_RENEGOTIATION + SSL_set_options(ssl_conn, SSL_OP_NO_RENEGOTIATION); +#endif + } + +done: + + sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); + + if (sscf->reject_handshake) { + c->ssl->handshake_rejected = 1; + *ad = SSL_AD_UNRECOGNIZED_NAME; + return SSL_TLSEXT_ERR_ALERT_FATAL; + } + return SSL_TLSEXT_ERR_OK; + +error: + + 
*ad = SSL_AD_INTERNAL_ERROR; + return SSL_TLSEXT_ERR_ALERT_FATAL; } #endif @@ -655,7 +767,6 @@ ngx_stream_ssl_create_conf(ngx_conf_t *c /* * set by ngx_pcalloc(): * - * scf->listen = 0; * scf->protocols = 0; * scf->certificate_values = NULL; * scf->dhparam = { 0, NULL }; @@ -674,6 +785,7 @@ ngx_stream_ssl_create_conf(ngx_conf_t *c scf->passwords = NGX_CONF_UNSET_PTR; scf->conf_commands = NGX_CONF_UNSET_PTR; scf->prefer_server_ciphers = NGX_CONF_UNSET; + scf->reject_handshake = NGX_CONF_UNSET; scf->verify = NGX_CONF_UNSET_UINT; scf->verify_depth = NGX_CONF_UNSET_UINT; scf->builtin_session_cache = NGX_CONF_UNSET; @@ -702,6 +814,8 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf ngx_conf_merge_value(conf->prefer_server_ciphers, prev->prefer_server_ciphers, 0); + ngx_conf_merge_value(conf->reject_handshake, prev->reject_handshake, 0); + ngx_conf_merge_bitmask_value(conf->protocols, prev->protocols, (NGX_CONF_BITMASK_SET |NGX_SSL_TLSv1|NGX_SSL_TLSv1_1 @@ -735,35 +849,21 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf conf->ssl.log = cf->log; - if (!conf->listen) { - return NGX_CONF_OK; - } - - if (conf->certificates == NULL) { - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, - "no \"ssl_certificate\" is defined for " - "the \"listen ... ssl\" directive in %s:%ui", - conf->file, conf->line); - return NGX_CONF_ERROR; - } + if (conf->certificates) { - if (conf->certificate_keys == NULL) { - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, - "no \"ssl_certificate_key\" is defined for " - "the \"listen ... ssl\" directive in %s:%ui", - conf->file, conf->line); - return NGX_CONF_ERROR; - } + if (conf->certificate_keys == NULL + || conf->certificate_keys->nelts < conf->certificates->nelts) + { + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "no \"ssl_certificate_key\" is defined " + "for certificate \"%V\"", + ((ngx_str_t *) conf->certificates->elts) + + conf->certificates->nelts - 1); + return NGX_CONF_ERROR; + } - if (conf->certificate_keys->nelts < conf->certificates->nelts) { - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, - "no \"ssl_certificate_key\" is defined " - "for certificate \"%V\" and " - "the \"listen ... 
ssl\" directive in %s:%ui", - ((ngx_str_t *) conf->certificates->elts) - + conf->certificates->nelts - 1, - conf->file, conf->line); - return NGX_CONF_ERROR; + } else if (!conf->reject_handshake) { + return NGX_CONF_OK; } if (ngx_ssl_create(&conf->ssl, conf->protocols, NULL) != NGX_OK) { @@ -818,7 +918,7 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf return NGX_CONF_ERROR; #endif - } else { + } else if (conf->certificates) { /* configure certificates */ @@ -917,6 +1017,10 @@ ngx_stream_ssl_compile_certificates(ngx_ ngx_stream_complex_value_t *cv; ngx_stream_compile_complex_value_t ccv; + if (conf->certificates == NULL) { + return NGX_OK; + } + cert = conf->certificates->elts; key = conf->certificate_keys->elts; nelts = conf->certificates->nelts; @@ -1195,8 +1299,13 @@ ngx_stream_ssl_conf_command_check(ngx_co static ngx_int_t ngx_stream_ssl_init(ngx_conf_t *cf) { - ngx_stream_handler_pt *h; - ngx_stream_core_main_conf_t *cmcf; + ngx_uint_t a, p, s; + ngx_stream_handler_pt *h; + ngx_stream_ssl_conf_t *sscf; + ngx_stream_conf_addr_t *addr; + ngx_stream_conf_port_t *port; + ngx_stream_core_srv_conf_t **cscfp, *cscf; + ngx_stream_core_main_conf_t *cmcf; cmcf = ngx_stream_conf_get_module_main_conf(cf, ngx_stream_core_module); @@ -1207,5 +1316,58 @@ ngx_stream_ssl_init(ngx_conf_t *cf) *h = ngx_stream_ssl_handler; + if (cmcf->ports == NULL) { + return NGX_OK; + } + + port = cmcf->ports->elts; + for (p = 0; p < cmcf->ports->nelts; p++) { + + addr = port[p].addrs.elts; + for (a = 0; a < port[p].addrs.nelts; a++) { + + if (!addr[a].opt.ssl) { + continue; + } + + cscf = addr[a].default_server; + sscf = cscf->ctx->srv_conf[ngx_stream_ssl_module.ctx_index]; + + if (sscf->certificates) { + continue; + } + + if (!sscf->reject_handshake) { + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "no \"ssl_certificate\" is defined for " + "the \"listen ... ssl\" directive in %s:%ui", + cscf->file_name, cscf->line); + return NGX_ERROR; + } + + /* + * if no certificates are defined in the default server, + * check all non-default server blocks + */ + + cscfp = addr[a].servers.elts; + for (s = 0; s < addr[a].servers.nelts; s++) { + + cscf = cscfp[s]; + sscf = cscf->ctx->srv_conf[ngx_stream_ssl_module.ctx_index]; + + if (sscf->certificates || sscf->reject_handshake) { + continue; + } + + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "no \"ssl_certificate\" is defined for " + "the \"listen ... 
ssl\" directive in %s:%ui", + cscf->file_name, cscf->line); + return NGX_ERROR; + } + } + } + return NGX_OK; } diff --git a/src/stream/ngx_stream_ssl_module.h b/src/stream/ngx_stream_ssl_module.h --- a/src/stream/ngx_stream_ssl_module.h +++ b/src/stream/ngx_stream_ssl_module.h @@ -18,10 +18,10 @@ typedef struct { ngx_msec_t handshake_timeout; ngx_flag_t prefer_server_ciphers; + ngx_flag_t reject_handshake; ngx_ssl_t ssl; - ngx_uint_t listen; ngx_uint_t protocols; ngx_uint_t verify; @@ -53,9 +53,6 @@ typedef struct { ngx_flag_t session_tickets; ngx_array_t *session_ticket_keys; - - u_char *file; - ngx_uint_t line; } ngx_stream_ssl_conf_t; diff --git a/src/stream/ngx_stream_ssl_preread_module.c b/src/stream/ngx_stream_ssl_preread_module.c --- a/src/stream/ngx_stream_ssl_preread_module.c +++ b/src/stream/ngx_stream_ssl_preread_module.c @@ -33,6 +33,8 @@ typedef struct { static ngx_int_t ngx_stream_ssl_preread_handler(ngx_stream_session_t *s); static ngx_int_t ngx_stream_ssl_preread_parse_record( ngx_stream_ssl_preread_ctx_t *ctx, u_char *pos, u_char *last); +static ngx_int_t ngx_stream_ssl_preread_servername(ngx_stream_session_t *s, + ngx_str_t *servername); static ngx_int_t ngx_stream_ssl_preread_protocol_variable( ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_ssl_preread_server_name_variable( @@ -187,6 +189,10 @@ ngx_stream_ssl_preread_handler(ngx_strea return NGX_DECLINED; } + if (rc == NGX_OK) { + return ngx_stream_ssl_preread_servername(s, &ctx->host); + } + if (rc != NGX_AGAIN) { return rc; } @@ -404,9 +410,6 @@ ngx_stream_ssl_preread_parse_record(ngx_ case sw_sni_host: ctx->host.len = (p[1] << 8) + p[2]; - ngx_log_debug1(NGX_LOG_DEBUG_STREAM, ctx->log, 0, - "ssl preread: SNI hostname \"%V\"", &ctx->host); - state = sw_ext; dst = NULL; size = ext; @@ -497,6 +500,54 @@ ngx_stream_ssl_preread_parse_record(ngx_ static ngx_int_t +ngx_stream_ssl_preread_servername(ngx_stream_session_t *s, + ngx_str_t *servername) +{ + ngx_int_t rc; + ngx_str_t host; + ngx_connection_t *c; + ngx_stream_core_srv_conf_t *cscf; + + c = s->connection; + + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, + "SSL preread server name: \"%V\"", servername); + + if (servername->len == 0) { + return NGX_OK; + } + + host = *servername; + + rc = ngx_stream_validate_host(&host, c->pool, 1); + + if (rc == NGX_ERROR) { + return NGX_ERROR; + } + + if (rc == NGX_DECLINED) { + return NGX_OK; + } + + rc = ngx_stream_find_virtual_server(s, &host, &cscf); + + if (rc == NGX_ERROR) { + return NGX_ERROR; + } + + if (rc == NGX_DECLINED) { + return NGX_OK; + } + + s->srv_conf = cscf->ctx->srv_conf; + + ngx_set_connection_log(c, cscf->error_log); + + return NGX_OK; +} + + +static ngx_int_t ngx_stream_ssl_preread_protocol_variable(ngx_stream_session_t *s, ngx_variable_value_t *v, uintptr_t data) { From pluknet at nginx.com Thu Dec 14 17:35:41 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 14 Dec 2023 21:35:41 +0400 Subject: [PATCH 2 of 3] Stream: virtual servers In-Reply-To: <20231214062224.w273onqz74suk5f2@N00W24XTQX> References: <1d3464283405a4d8ac54.1699610840@arut-laptop> <979C0B23-C12F-4BDE-83B1-963126729EF9@nginx.com> <20231214062224.w273onqz74suk5f2@N00W24XTQX> Message-ID: <20231214173541.mgqomiwedg52zeu3@Y9MQ9X2QVV> On Thu, Dec 14, 2023 at 10:22:24AM +0400, Roman Arutyunyan wrote: > Hi, > > On Wed, Dec 13, 2023 at 05:40:09PM +0400, Sergey Kandaurov wrote: > > [..] 
> >
> > This introduces accept_filter, deferred_accept, and setfib fields,
> > which is out of scope of this change. Anyway, this is useless
> > without corresponding support in ngx_stream_core_listen().
>
> We need to catch up with all such missing functionality in Stream in future.

Ok, let's postpone such unrelated changes into separate patches.

[..]

> >
> > Note that stream (as well as mail) consistently uses sslcf naming
> > for keeping ssl configuration, unlike in http. Probably it makes
> > sense for a separate sweeping change with renaming sslcf to sscf.
>
> Agree. Also, it makes sense to rename ngx_stream_ssl_conf_t to
> ngx_stream_ssl_srv_conf_t.

Ok.

>
> You lost "c" in ngx_pcalloc() for no apparent reason.

No idea how that happened, thanks for catching.

[..]

> > @@ -463,7 +470,7 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t
> >      const char                  *servername;
> >      ngx_connection_t            *c;
> >      ngx_stream_session_t        *s;
> > -    ngx_stream_ssl_conf_t       *sscf;
> > +    ngx_stream_ssl_conf_t       *sslcf;
>
> Why? I thought you liked "sscf" more :)
> I suggest that we use the better name "sscf" here.

Just to be clear, I used sslcf for consistency within current stream code,
but personally I agree to change this to sscf everywhere.

>
> New patch attached.
>
> --
> Roman Arutyunyan
> # HG changeset patch
> # User Roman Arutyunyan
> # Date 1702532489 -14400
> #      Thu Dec 14 09:41:29 2023 +0400
> # Node ID a390e18b664e7ba678417ef6e40d94c37e89c2f7
> # Parent 844486cdd43a32d10b78493d7e7b80e9e2239d7e
> Stream: virtual servers.
>
> Server name is taken either from ngx_stream_ssl_module or
> ngx_stream_ssl_preread_module.
>
> The change adds "default" parameter to the "listen" directive, as well as the

the "default_server" parameter ?

> following directives: "server_names_hash_max_size",
> "server_names_hash_bucket_size", "server_name" and "ssl_reject_handshake".

Otherwise, looks good.

From mdounin at mdounin.ru Fri Dec 15 02:41:36 2023
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Fri, 15 Dec 2023 05:41:36 +0300
Subject: Core: Avoid memcpy from NULL
In-Reply-To: 
References: 
Message-ID: 

Hello!

On Wed, Dec 13, 2023 at 11:09:28AM -0500, Ben Kallus wrote:

> Nginx executes numerous `memcpy`s from NULL during normal execution.
> `memcpy`ing to or from NULL is undefined behavior. Accordingly, some
> compilers (gcc -O2) make optimizations that assume `memcpy` arguments
> are not NULL. Nginx with UBSan crashes during startup due to this
> issue.
>
> Consider the following function:
> ```C
> #include <string.h>
>
> int f(int i) {
>     char a[] = {'a'};
>     void *src = i ? a : NULL;
>     char dst[1];
>     memcpy(dst, src, 0);
>     return src == NULL;
> }
> ```
> Here's what gcc13.2 -O2 -fno-builtin will do to it:
> ```asm
> f:
>         sub     rsp, 24
>         xor     eax, eax
>         test    edi, edi
>         lea     rsi, [rsp+14]
>         lea     rdi, [rsp+15]
>         mov     BYTE PTR [rsp+14], 97
>         cmove   rsi, rax
>         xor     edx, edx
>         call    memcpy
>         xor     eax, eax
>         add     rsp, 24
>         ret
> ```
> Note that `f` always returns 0, regardless of the value of `i`.
>
> Feel free to try for yourself at https://gcc.godbolt.org/z/zfvnMMsds
>
> The reasoning here is that since memcpy from NULL is UB, the optimizer
> is free to assume that `src` is non-null. You might consider this to
> be a problem with the compiler, or the C standard, and I might agree.
> Regardless, relying on UB is inherently un-portable, and requires
> maintenance to ensure that new compiler releases don't break existing
> assumptions about the behavior of undefined operations.
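For reference, the effect described above is easy to reproduce outside of
nginx with a small standalone test. The sketch below reuses the f() function
from the quoted message; the build flags mirror the godbolt example, and
whether UBSan reports the call at runtime depends on the compiler and on the
libc headers declaring memcpy() arguments as nonnull, so the exact diagnostic
wording may differ.

```C
/* ub_memcpy_null.c - standalone sketch, not nginx code.
 *
 * Assumed build line (GCC or Clang):
 *
 *     cc -O2 -fno-builtin -fsanitize=undefined ub_memcpy_null.c -o ub_test
 *
 * With UBSan the memcpy() call below is expected to be reported as passing
 * a null pointer for an argument declared nonnull; without UBSan, gcc -O2
 * may drop the "src == NULL" check entirely, as shown in the assembly above,
 * so f(0) can return 0 instead of the naively expected 1.
 */

#include <stdio.h>
#include <string.h>

static int
f(int i)
{
    char  a[] = {'a'};
    void *src = i ? a : NULL;
    char  dst[1];

    memcpy(dst, src, 0);    /* n == 0, but src may still be NULL */

    return src == NULL;
}

int
main(void)
{
    printf("f(0) = %d\n", f(0));
    return 0;
}
```

The patch below sidesteps this by making ngx_memcpy() and ngx_cpymem() skip
the underlying memcpy() call when n == 0, so the arguments are never
evaluated under the "nonnull" assumption in that case.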
> > The following patch adds a check to `ngx_memcpy` and `ngx_cpymem` that > makes 0-length memcpy explicitly a noop. Since all memcpying from NULL > in Nginx uses n==0, this should be sufficient to avoid UB. > > It would be more efficient to instead add a check to every call to > ngx_memcpy and ngx_cpymem that might be used with src==NULL, but in > the discussion of a previous patch that proposed such a change, a more > straightforward and tidy solution was desired. > It may also be worth considering adding checks for NULL memset, > memmove, etc. I think this is not necessary unless it is demonstrated > that Nginx actually executes such undefined calls. > > # HG changeset patch > # User Ben Kallus > # Date 1702406466 18000 > # Tue Dec 12 13:41:06 2023 -0500 > # Node ID d270203d4ecf77cc14a2652c727e236afc659f4a > # Parent a6f79f044de58b594563ac03139cd5e2e6a81bdb > Add NULL check to ngx_memcpy and ngx_cpymem to satisfy UBSan. > > diff -r a6f79f044de5 -r d270203d4ecf src/core/ngx_string.c > --- a/src/core/ngx_string.c Wed Nov 29 10:58:21 2023 +0400 > +++ b/src/core/ngx_string.c Tue Dec 12 13:41:06 2023 -0500 > @@ -2098,6 +2098,10 @@ > ngx_debug_point(); > } > > + if (n == 0) { > + return dst; > + } > + > return memcpy(dst, src, n); > } > > diff -r a6f79f044de5 -r d270203d4ecf src/core/ngx_string.h > --- a/src/core/ngx_string.h Wed Nov 29 10:58:21 2023 +0400 > +++ b/src/core/ngx_string.h Tue Dec 12 13:41:06 2023 -0500 > @@ -103,8 +103,9 @@ > * gcc3 compiles memcpy(d, s, 4) to the inline "mov"es. > * icc8 compile memcpy(d, s, 4) to the inline "mov"es or XMM moves. > */ > -#define ngx_memcpy(dst, src, n) (void) memcpy(dst, src, n) > -#define ngx_cpymem(dst, src, n) (((u_char *) memcpy(dst, src, n)) + (n)) > +#define ngx_memcpy(dst, src, n) (void) ((n) == 0 ? (dst) : memcpy(dst, src, n)) > +#define ngx_cpymem(dst, src, n) \ > + ((u_char *) ((n) == 0 ? (dst) : memcpy(dst, src, n)) + (n)) > > #endif > > diff -r a6f79f044de5 -r d270203d4ecf src/http/v2/ngx_http_v2.c > --- a/src/http/v2/ngx_http_v2.c Wed Nov 29 10:58:21 2023 +0400 > +++ b/src/http/v2/ngx_http_v2.c Tue Dec 12 13:41:06 2023 -0500 > @@ -3998,9 +3998,7 @@ > n = size; > } > > - if (n > 0) { > - rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); > - } > + rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, > "http2 request body recv %uz", n); For the record, I've already provided some feedback to Ben in the ticket here: https://trac.nginx.org/nginx/ticket/2570 And pointed to the existing thread here: https://mailman.nginx.org/pipermail/nginx-devel/2023-October/PX7VH5A273NLUGSYC7DR2AZRU75CIQ3Q.html https://mailman.nginx.org/pipermail/nginx-devel/2023-December/DCGUEGEFS6TSVIWNEWUEZO3FZMR6ESYZ.html Hope this helps. -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Fri Dec 15 06:58:09 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Fri, 15 Dec 2023 06:58:09 +0000 Subject: [njs] Introduced njs_vm_set_module_loader(). Message-ID: details: https://hg.nginx.org/njs/rev/ad1a7ad3c715 branches: changeset: 2248:ad1a7ad3c715 user: Dmitry Volyntsev date: Wed Dec 13 18:38:47 2023 -0800 description: Introduced njs_vm_set_module_loader(). 
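As a usage illustration only (not part of the committed change): assuming the
usual njs headers, an embedder could plug in its own loader roughly as
sketched below. The loader signature is inferred from the
loader(vm, external, name) call site in njs_module.c in the diff that
follows; njs_vm_compile_module() is assumed to take the module source as a
start/end byte range; the registry type and lookup helper are hypothetical.

typedef struct {
    /* hypothetical host-side registry of module sources */
    njs_str_t   *names;
    njs_str_t   *sources;
    njs_uint_t   n;
} my_registry_t;


static njs_int_t
my_registry_find(my_registry_t *r, njs_str_t *name, njs_str_t *src)
{
    njs_uint_t  i;

    for (i = 0; i < r->n; i++) {
        if (njs_strstr_eq(name, &r->names[i])) {
            *src = r->sources[i];
            return NJS_OK;
        }
    }

    return NJS_DECLINED;
}


static njs_mod_t *
host_module_loader(njs_vm_t *vm, njs_external_ptr_t external, njs_str_t *name)
{
    u_char         *start;
    njs_str_t       src;
    my_registry_t  *registry;

    registry = external;                  /* the opaque registration pointer */

    if (my_registry_find(registry, name, &src) != NJS_OK) {
        return NULL;                      /* module is unknown to the host */
    }

    start = src.start;

    return njs_vm_compile_module(vm, name, &start, start + src.length);
}


static void
host_register_loader(njs_vm_t *vm, my_registry_t *registry)
{
    /* replaces the former njs_vm_ops_t module_loader hook */
    njs_vm_set_module_loader(vm, host_module_loader, registry);
}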
diffstat: external/njs_shell.c | 1 - nginx/ngx_http_js_module.c | 1 - nginx/ngx_stream_js_module.c | 1 - src/njs.h | 3 ++- src/njs_module.c | 6 +++--- src/njs_vm.c | 9 +++++++++ src/njs_vm.h | 3 +++ 7 files changed, 17 insertions(+), 7 deletions(-) diffs (101 lines): diff -r 34df3f0796cf -r ad1a7ad3c715 external/njs_shell.c --- a/external/njs_shell.c Mon Dec 11 19:10:38 2023 -0800 +++ b/external/njs_shell.c Wed Dec 13 18:38:47 2023 -0800 @@ -269,7 +269,6 @@ static njs_external_t njs_ext_262[] = { static njs_vm_ops_t njs_console_ops = { - NULL, njs_console_log, }; diff -r 34df3f0796cf -r ad1a7ad3c715 nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Mon Dec 11 19:10:38 2023 -0800 +++ b/nginx/ngx_http_js_module.c Wed Dec 13 18:38:47 2023 -0800 @@ -840,7 +840,6 @@ static njs_external_t ngx_http_js_ext_p static njs_vm_ops_t ngx_http_js_ops = { - NULL, ngx_js_logger, }; diff -r 34df3f0796cf -r ad1a7ad3c715 nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Mon Dec 11 19:10:38 2023 -0800 +++ b/nginx/ngx_stream_js_module.c Wed Dec 13 18:38:47 2023 -0800 @@ -609,7 +609,6 @@ static njs_external_t ngx_stream_js_ext static njs_vm_ops_t ngx_stream_js_ops = { - NULL, ngx_js_logger, }; diff -r 34df3f0796cf -r ad1a7ad3c715 src/njs.h --- a/src/njs.h Mon Dec 11 19:10:38 2023 -0800 +++ b/src/njs.h Wed Dec 13 18:38:47 2023 -0800 @@ -214,7 +214,6 @@ typedef void (*njs_logger_t)(njs_vm_t *v typedef struct { - njs_module_loader_t module_loader; njs_logger_t logger; } njs_vm_ops_t; @@ -310,6 +309,8 @@ NJS_EXPORT njs_vm_t *njs_vm_create(njs_v NJS_EXPORT void njs_vm_destroy(njs_vm_t *vm); NJS_EXPORT njs_int_t njs_vm_compile(njs_vm_t *vm, u_char **start, u_char *end); +NJS_EXPORT void njs_vm_set_module_loader(njs_vm_t *vm, + njs_module_loader_t module_loader, void *opaque); NJS_EXPORT njs_mod_t *njs_vm_add_module(njs_vm_t *vm, njs_str_t *name, njs_value_t *value); NJS_EXPORT njs_mod_t *njs_vm_compile_module(njs_vm_t *vm, njs_str_t *name, diff -r 34df3f0796cf -r ad1a7ad3c715 src/njs_module.c --- a/src/njs_module.c Mon Dec 11 19:10:38 2023 -0800 +++ b/src/njs_module.c Wed Dec 13 18:38:47 2023 -0800 @@ -48,9 +48,9 @@ njs_parser_module(njs_parser_t *parser, external = parser; loader = njs_default_module_loader; - if (vm->options.ops != NULL && vm->options.ops->module_loader != NULL) { - loader = vm->options.ops->module_loader; - external = vm->external; + if (vm->module_loader != NULL) { + loader = vm->module_loader; + external = vm->module_loader_opaque; } module = loader(vm, external, name); diff -r 34df3f0796cf -r ad1a7ad3c715 src/njs_vm.c --- a/src/njs_vm.c Mon Dec 11 19:10:38 2023 -0800 +++ b/src/njs_vm.c Wed Dec 13 18:38:47 2023 -0800 @@ -730,6 +730,15 @@ njs_vm_execute_pending_job(njs_vm_t *vm) } +void +njs_vm_set_module_loader(njs_vm_t *vm, njs_module_loader_t module_loader, + void *opaque) +{ + vm->module_loader = module_loader; + vm->module_loader_opaque = opaque; +} + + njs_int_t njs_vm_add_path(njs_vm_t *vm, const njs_str_t *path) { diff -r 34df3f0796cf -r ad1a7ad3c715 src/njs_vm.h --- a/src/njs_vm.h Mon Dec 11 19:10:38 2023 -0800 +++ b/src/njs_vm.h Wed Dec 13 18:38:47 2023 -0800 @@ -182,6 +182,9 @@ struct njs_vm_s { njs_rbtree_t global_symbols; uint64_t symbol_generator; + + njs_module_loader_t module_loader; + void *module_loader_opaque; }; From xeioex at nginx.com Fri Dec 15 06:58:12 2023 From: xeioex at nginx.com (=?utf-8?q?Dmitry_Volyntsev?=) Date: Fri, 15 Dec 2023 06:58:12 +0000 Subject: [njs] Moving out logger from njs core. 
Message-ID: details: https://hg.nginx.org/njs/rev/fc1001f6801b branches: changeset: 2249:fc1001f6801b user: Dmitry Volyntsev date: Thu Dec 14 22:32:02 2023 -0800 description: Moving out logger from njs core. Logger is not part of the JS runtime according to ECMAScript and should be implemented by host environment. diffstat: external/njs_fs_module.c | 1 - external/njs_shell.c | 70 ++++++++++++++++++++++++++++--------------- nginx/ngx_http_js_module.c | 11 +----- nginx/ngx_js.c | 40 ++++++++++++++++++------ nginx/ngx_js.h | 6 ++- nginx/ngx_stream_js_module.c | 6 --- src/njs.h | 26 ---------------- src/njs_builtin.c | 9 +----- src/njs_vm.c | 25 --------------- 9 files changed, 82 insertions(+), 112 deletions(-) diffs (493 lines): diff -r ad1a7ad3c715 -r fc1001f6801b external/njs_fs_module.c --- a/external/njs_fs_module.c Wed Dec 13 18:38:47 2023 -0800 +++ b/external/njs_fs_module.c Thu Dec 14 22:32:02 2023 -0800 @@ -3668,7 +3668,6 @@ njs_fs_filehandle_cleanup(void *data) njs_filehandle_t *fh = data; if (fh->vm != NULL && fh->fd != -1) { - njs_vm_warn(fh->vm, "closing file description %d on cleanup\n", fh->fd); (void) close(fh->fd); } } diff -r ad1a7ad3c715 -r fc1001f6801b external/njs_shell.c --- a/external/njs_shell.c Wed Dec 13 18:38:47 2023 -0800 +++ b/external/njs_shell.c Thu Dec 14 22:32:02 2023 -0800 @@ -53,6 +53,13 @@ typedef struct { } njs_opts_t; +typedef enum { + NJS_LOG_ERROR = 4, + NJS_LOG_WARN = 5, + NJS_LOG_INFO = 7, +} njs_log_level_t; + + typedef struct { size_t index; size_t length; @@ -137,8 +144,9 @@ static njs_int_t njs_ext_console_time(nj static njs_int_t njs_ext_console_time_end(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); -static void njs_console_log(njs_vm_t *vm, njs_external_ptr_t external, - njs_log_level_t level, const u_char *start, size_t length); +static void njs_console_log(njs_log_level_t level, const char *fmt, ...); +static void njs_console_logger(njs_log_level_t level, const u_char *start, + size_t length); static intptr_t njs_event_rbtree_compare(njs_rbtree_node_t *node1, njs_rbtree_node_t *node2); @@ -159,7 +167,7 @@ static njs_external_t njs_ext_console[] .native = njs_ext_console_log, #define NJS_LOG_DUMP 16 #define NJS_LOG_MASK 15 - .magic8 = NJS_LOG_LEVEL_INFO | NJS_LOG_DUMP, + .magic8 = NJS_LOG_INFO | NJS_LOG_DUMP, } }, @@ -171,7 +179,7 @@ static njs_external_t njs_ext_console[] .enumerable = 1, .u.method = { .native = njs_ext_console_log, - .magic8 = NJS_LOG_LEVEL_ERROR, + .magic8 = NJS_LOG_ERROR, } }, @@ -183,7 +191,7 @@ static njs_external_t njs_ext_console[] .enumerable = 1, .u.method = { .native = njs_ext_console_log, - .magic8 = NJS_LOG_LEVEL_INFO, + .magic8 = NJS_LOG_INFO, } }, @@ -195,7 +203,7 @@ static njs_external_t njs_ext_console[] .enumerable = 1, .u.method = { .native = njs_ext_console_log, - .magic8 = NJS_LOG_LEVEL_INFO, + .magic8 = NJS_LOG_INFO, } }, @@ -237,7 +245,7 @@ static njs_external_t njs_ext_console[] .enumerable = 1, .u.method = { .native = njs_ext_console_log, - .magic8 = NJS_LOG_LEVEL_WARN, + .magic8 = NJS_LOG_WARN, } }, @@ -268,11 +276,6 @@ static njs_external_t njs_ext_262[] = { }; -static njs_vm_ops_t njs_console_ops = { - njs_console_log, -}; - - njs_module_t njs_console_module = { .name = njs_str("console"), .preinit = NULL, @@ -776,7 +779,6 @@ njs_create_vm(njs_opts_t *opts) vm_options.opcode_debug = opts->opcode_debug; #endif - vm_options.ops = &njs_console_ops; vm_options.addons = njs_console_addon_modules; vm_options.external = &njs_console; vm_options.argv = 
opts->argv; @@ -1419,7 +1421,7 @@ njs_ext_console_log(njs_vm_t *vm, njs_va return NJS_ERROR; } - njs_vm_logger(vm, level, "%*s\n", msg.length, msg.start); + njs_console_logger(level, msg.start, msg.length); n++; } @@ -1475,7 +1477,8 @@ njs_ext_console_time(njs_vm_t *vm, njs_v label = njs_queue_link_data(link, njs_timelabel_t, link); if (njs_strstr_eq(&name, &label->name)) { - njs_vm_log(vm, "Timer \"%V\" already exists.\n", &name); + njs_console_log(NJS_LOG_INFO, "Timer \"%V\" already exists.", + &name); njs_value_undefined_set(retval); return NJS_OK; } @@ -1546,7 +1549,8 @@ njs_ext_console_time_end(njs_vm_t *vm, n for ( ;; ) { if (link == njs_queue_tail(labels)) { - njs_vm_log(vm, "Timer \"%V\" doesn’t exist.\n", &name); + njs_console_log(NJS_LOG_INFO, "Timer \"%V\" doesn’t exist.", + &name); njs_value_undefined_set(retval); return NJS_OK; } @@ -1566,7 +1570,7 @@ njs_ext_console_time_end(njs_vm_t *vm, n ms = ns / 1000000; ns = ns % 1000000; - njs_vm_log(vm, "%V: %uL.%06uLms\n", &name, ms, ns); + njs_console_log(NJS_LOG_INFO, "%V: %uL.%06uLms", &name, ms, ns); njs_mp_free(njs_vm_memory_pool(vm), label); @@ -1693,20 +1697,36 @@ njs_clear_timeout(njs_vm_t *vm, njs_valu static void -njs_console_log(njs_vm_t *vm, njs_external_ptr_t external, - njs_log_level_t level, const u_char *start, size_t length) +njs_console_log(njs_log_level_t level, const char *fmt, ...) +{ + u_char *p; + va_list args; + u_char buf[2048]; + + va_start(args, fmt); + p = njs_vsprintf(buf, buf + sizeof(buf), fmt, args); + va_end(args); + + njs_console_logger(level, buf, p - buf); +} + + +static void +njs_console_logger(njs_log_level_t level, const u_char *start, size_t length) { switch (level) { - case NJS_LOG_LEVEL_INFO: - njs_printf("%*s", length, start); + case NJS_LOG_WARN: + njs_printf("W: "); break; - case NJS_LOG_LEVEL_WARN: - njs_printf("W: %*s", length, start); + case NJS_LOG_ERROR: + njs_printf("E: "); break; - case NJS_LOG_LEVEL_ERROR: - njs_printf("E: %*s", length, start); + case NJS_LOG_INFO: break; } + + njs_print(start, length); + njs_print("\n", 1); } diff -r ad1a7ad3c715 -r fc1001f6801b nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Wed Dec 13 18:38:47 2023 -0800 +++ b/nginx/ngx_http_js_module.c Thu Dec 14 22:32:02 2023 -0800 @@ -839,11 +839,6 @@ static njs_external_t ngx_http_js_ext_p }; -static njs_vm_ops_t ngx_http_js_ops = { - ngx_js_logger, -}; - - static uintptr_t ngx_http_js_uptr[] = { offsetof(ngx_http_request_t, connection), (uintptr_t) ngx_http_js_pool, @@ -1660,8 +1655,9 @@ ngx_http_js_ext_header_out(njs_vm_t *vm, } if (r->header_sent && setval != NULL) { - njs_vm_warn(vm, "ignored setting of response header \"%V\" because" - " headers were already sent", &name); + ngx_log_error(NGX_LOG_WARN, r->connection->log, 0, + "ignored setting of response header \"%V\" because" + " headers were already sent", &name); } for (h = headers_out; h->name.length > 0; h++) { @@ -4509,7 +4505,6 @@ ngx_http_js_init_conf_vm(ngx_conf_t *cf, options.backtrace = 1; options.unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; - options.ops = &ngx_http_js_ops; options.metas = &ngx_http_js_metas; options.addons = njs_http_js_addon_modules; options.argv = ngx_argv; diff -r ad1a7ad3c715 -r fc1001f6801b nginx/ngx_js.c --- a/nginx/ngx_js.c Wed Dec 13 18:38:47 2023 -0800 +++ b/nginx/ngx_js.c Thu Dec 14 22:32:02 2023 -0800 @@ -787,11 +787,10 @@ njs_int_t ngx_js_ext_log(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t magic, njs_value_t *retval) { - char *p; - ngx_int_t lvl; - njs_str_t msg; - 
njs_uint_t n; - njs_log_level_t level; + char *p; + ngx_int_t lvl; + njs_str_t msg; + njs_uint_t n, level; p = njs_vm_external(vm, NJS_PROTO_ID_ANY, njs_argument(args, 0)); if (p == NULL) { @@ -799,7 +798,7 @@ ngx_js_ext_log(njs_vm_t *vm, njs_value_t return NJS_ERROR; } - level = (njs_log_level_t) magic & NGX_JS_LOG_MASK; + level = magic & NGX_JS_LOG_MASK; if (level == 0) { if (ngx_js_integer(vm, njs_arg(args, nargs, 1), &lvl) != NGX_OK) { @@ -892,7 +891,8 @@ ngx_js_ext_console_time(njs_vm_t *vm, nj label = ngx_queue_data(q, ngx_js_timelabel_t, queue); if (njs_strstr_eq(&name, &label->name)) { - njs_vm_log(vm, "Timer \"%V\" already exists.\n", &name); + ngx_js_log(vm, njs_vm_external_ptr(vm), NGX_LOG_INFO, + "Timer \"%V\" already exists.", &name); njs_value_undefined_set(retval); return NJS_OK; } @@ -988,7 +988,8 @@ ngx_js_ext_console_time_end(njs_vm_t *vm ms = ns / 1000000; ns = ns % 1000000; - njs_vm_log(vm, "%V: %uL.%06uLms\n", &name, ms, ns); + ngx_js_log(vm, njs_vm_external_ptr(vm), NGX_LOG_INFO, "%V: %uL.%06uLms", + &name, ms, ns); njs_value_undefined_set(retval); @@ -996,7 +997,8 @@ ngx_js_ext_console_time_end(njs_vm_t *vm not_found: - njs_vm_log(vm, "Timer \"%V\" doesn't exist.\n", &name); + ngx_js_log(vm, njs_vm_external_ptr(vm), NGX_LOG_INFO, + "Timer \"%V\" doesn't exist.", &name); njs_value_undefined_set(retval); @@ -1152,7 +1154,23 @@ njs_clear_timeout(njs_vm_t *vm, njs_valu void -ngx_js_logger(njs_vm_t *vm, njs_external_ptr_t external, njs_log_level_t level, +ngx_js_log(njs_vm_t *vm, njs_external_ptr_t external, ngx_uint_t level, + const char *fmt, ...) +{ + u_char *p; + va_list args; + u_char buf[NGX_MAX_ERROR_STR]; + + va_start(args, fmt); + p = njs_vsprintf(buf, buf + sizeof(buf), fmt, args); + va_end(args); + + ngx_js_logger(vm, external, level, buf, p - buf); +} + + +void +ngx_js_logger(njs_vm_t *vm, njs_external_ptr_t external, ngx_uint_t level, const u_char *start, size_t length) { ngx_log_t *log; @@ -1174,7 +1192,7 @@ ngx_js_logger(njs_vm_t *vm, njs_external log = ngx_cycle->log; } - ngx_log_error((ngx_uint_t) level, log, 0, "js: %*s", length, start); + ngx_log_error(level, log, 0, "js: %*s", length, start); if (external != NULL) { log->handler = handler; diff -r ad1a7ad3c715 -r fc1001f6801b nginx/ngx_js.h --- a/nginx/ngx_js.h Wed Dec 13 18:38:47 2023 -0800 +++ b/nginx/ngx_js.h Thu Dec 14 22:32:02 2023 -0800 @@ -192,8 +192,10 @@ ngx_int_t ngx_js_exception(njs_vm_t *vm, njs_int_t ngx_js_ext_log(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t level, njs_value_t *retval); -void ngx_js_logger(njs_vm_t *vm, njs_external_ptr_t external, - njs_log_level_t level, const u_char *start, size_t length); +void ngx_js_log(njs_vm_t *vm, njs_external_ptr_t external, + ngx_uint_t level, const char *fmt, ...); +void ngx_js_logger(njs_vm_t *vm, njs_external_ptr_t external, ngx_uint_t level, + const u_char *start, size_t length); char * ngx_js_import(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); char * ngx_js_preload_object(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); ngx_int_t ngx_js_init_preload_vm(ngx_conf_t *cf, ngx_js_loc_conf_t *conf); diff -r ad1a7ad3c715 -r fc1001f6801b nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Wed Dec 13 18:38:47 2023 -0800 +++ b/nginx/ngx_stream_js_module.c Thu Dec 14 22:32:02 2023 -0800 @@ -608,11 +608,6 @@ static njs_external_t ngx_stream_js_ext }; -static njs_vm_ops_t ngx_stream_js_ops = { - ngx_js_logger, -}; - - static uintptr_t ngx_stream_js_uptr[] = { offsetof(ngx_stream_session_t, connection), (uintptr_t) 
ngx_stream_js_pool, @@ -1784,7 +1779,6 @@ ngx_stream_js_init_conf_vm(ngx_conf_t *c options.backtrace = 1; options.unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; - options.ops = &ngx_stream_js_ops; options.metas = &ngx_stream_js_metas; options.addons = njs_stream_js_addon_modules; options.argv = ngx_argv; diff -r ad1a7ad3c715 -r fc1001f6801b src/njs.h --- a/src/njs.h Wed Dec 13 18:38:47 2023 -0800 +++ b/src/njs.h Thu Dec 14 22:32:02 2023 -0800 @@ -46,12 +46,6 @@ typedef struct { uint64_t filler[2]; } njs_opaque_value_t; -typedef enum { - NJS_LOG_LEVEL_ERROR = 4, - NJS_LOG_LEVEL_WARN = 5, - NJS_LOG_LEVEL_INFO = 7, -} njs_log_level_t; - /* sizeof(njs_value_t) is 16 bytes. */ #define njs_argument(args, n) \ (njs_value_t *) ((u_char *) args + (n) * 16) @@ -72,13 +66,6 @@ extern const njs_value_t njs_ ((n < nargs) ? njs_argument(args, n) \ : (njs_value_assign(lvalue, &njs_value_undefined), lvalue)) -#define njs_vm_log(vm, fmt, ...) \ - njs_vm_logger(vm, NJS_LOG_LEVEL_INFO, fmt, ##__VA_ARGS__) -#define njs_vm_warn(vm, fmt, ...) \ - njs_vm_logger(vm, NJS_LOG_LEVEL_WARN, fmt, ##__VA_ARGS__) -#define njs_vm_err(vm, fmt, ...) \ - njs_vm_logger(vm, NJS_LOG_LEVEL_ERROR, fmt, ##__VA_ARGS__) - #define njs_vm_error(vm, fmt, ...) \ njs_vm_error2(vm, 0, fmt, ##__VA_ARGS__) #define njs_vm_internal_error(vm, fmt, ...) \ @@ -209,13 +196,6 @@ typedef void * njs_ typedef njs_mod_t *(*njs_module_loader_t)(njs_vm_t *vm, njs_external_ptr_t external, njs_str_t *name); -typedef void (*njs_logger_t)(njs_vm_t *vm, njs_external_ptr_t external, - njs_log_level_t level, const u_char *start, size_t length); - - -typedef struct { - njs_logger_t logger; -} njs_vm_ops_t; typedef struct { @@ -236,7 +216,6 @@ typedef struct { typedef struct { njs_external_ptr_t external; njs_vm_shared_t *shared; - njs_vm_ops_t *ops; njs_vm_meta_t *metas; njs_module_t **addons; njs_str_t file; @@ -246,8 +225,6 @@ typedef struct { njs_uint_t max_stack_size; - njs_log_level_t log_level; - #define NJS_VM_OPT_UNHANDLED_REJECTION_IGNORE 0 #define NJS_VM_OPT_UNHANDLED_REJECTION_THROW 1 @@ -476,9 +453,6 @@ NJS_EXPORT njs_int_t njs_vm_value_dump(n NJS_EXPORT void njs_vm_memory_error(njs_vm_t *vm); -NJS_EXPORT void njs_vm_logger(njs_vm_t *vm, njs_log_level_t level, - const char *fmt, ...); - NJS_EXPORT void njs_value_undefined_set(njs_value_t *value); NJS_EXPORT void njs_value_null_set(njs_value_t *value); NJS_EXPORT void njs_value_invalid_set(njs_value_t *value); diff -r ad1a7ad3c715 -r fc1001f6801b src/njs_builtin.c --- a/src/njs_builtin.c Wed Dec 13 18:38:47 2023 -0800 +++ b/src/njs_builtin.c Thu Dec 14 22:32:02 2023 -0800 @@ -1532,7 +1532,7 @@ njs_env_hash_init(njs_vm_t *vm, njs_lvlh uint32_t cp; njs_int_t ret; const u_char *val, *entry, *s, *end; - njs_object_prop_t *prop, *prev; + njs_object_prop_t *prop; njs_string_prop_t string; njs_lvlhsh_query_t lhq; @@ -1599,13 +1599,6 @@ njs_env_hash_init(njs_vm_t *vm, njs_lvlh * Always using the first element among the duplicates * and ignoring the rest. 
*/ - - prev = lhq.value; - - if (!njs_values_same(njs_prop_value(prop), njs_prop_value(prev))) { - njs_vm_warn(vm, "environment variable \"%V\" has more than one" - " value\n", &lhq.key); - } } } diff -r ad1a7ad3c715 -r fc1001f6801b src/njs_vm.c --- a/src/njs_vm.c Wed Dec 13 18:38:47 2023 -0800 +++ b/src/njs_vm.c Thu Dec 14 22:32:02 2023 -0800 @@ -24,7 +24,6 @@ njs_vm_opt_init(njs_vm_opt_t *options) { njs_memzero(options, sizeof(njs_vm_opt_t)); - options->log_level = NJS_LOG_LEVEL_INFO; options->max_stack_size = NJS_MAX_STACK_SIZE; } @@ -1093,30 +1092,6 @@ njs_vm_memory_error(njs_vm_t *vm) } -njs_noinline void -njs_vm_logger(njs_vm_t *vm, njs_log_level_t level, const char *fmt, ...) -{ - u_char *p; - va_list args; - njs_logger_t logger; - u_char buf[32768]; - - if (vm->options.ops == NULL) { - return; - } - - logger = vm->options.ops->logger; - - if (logger != NULL && vm->options.log_level >= level) { - va_start(args, fmt); - p = njs_vsprintf(buf, buf + sizeof(buf), fmt, args); - va_end(args); - - logger(vm, vm->external, level, buf, p - buf); - } -} - - njs_int_t njs_vm_value_string(njs_vm_t *vm, njs_str_t *dst, njs_value_t *src) { From rcasey at gmail.com Fri Dec 15 07:02:45 2023 From: rcasey at gmail.com (Rob Casey) Date: Fri, 15 Dec 2023 18:02:45 +1100 Subject: [PATCH] Add ssl_client_tls_bind variable Message-ID: First time caller, long time listener. This patch introduces the variable $ssl_client_tls_bind which provides the last Finished message returned by the OpenSSL SSL_get_peer_finished() function. The value returned by this function may be used in TLS channel binding operations as described in RFC 5929 (TLSv1.2) and RFC 9266 (TLSv1.3). The bytes returned by this function are base64-encoded for ease-of-use as per suggestion on Nginx forum thread . Rob -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- # HG changeset patch # User Rob Casey # Date 1702623002 0 # Fri Dec 15 06:50:02 2023 +0000 # Node ID b76f61aaf306ad55604dfa47d572a0dbc1dcab50 # Parent 6c8595b77e667bd58fd28186939ed820f2e55e0e Added $ssl_client_tls_bind variable. This variable provides the last Finished message returned by the OpenSSL SSL_get_peer_finished() function for use in TLS channel binding operations as described in RFC 5929 (TLSv1.2) and RFC 9266 (TLSv1.3). The bytes returned by this function are base64-encoded for ease-of-use. 
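For context, a hedged sketch of the client side (not part of this patch): with OpenSSL, a client can derive the same tls-unique value from its own Finished message and carry it in its authentication exchange, to be compared against $ssl_client_tls_bind on the server. This assumes a completed full handshake; session resumption swaps which Finished message RFC 5929 uses.

```c
/*
 * Sketch only, not part of the patch: an OpenSSL-based client computing
 * the base64-encoded tls-unique value to match $ssl_client_tls_bind.
 * Assumes a full handshake; with session resumption RFC 5929 uses the
 * other side's Finished message instead.
 */

#include <openssl/evp.h>
#include <openssl/ssl.h>

static int
client_tls_unique_b64(SSL *ssl, char *out, size_t out_size)
{
    size_t         n;
    unsigned char  finished[EVP_MAX_MD_SIZE];

    /* the client's own Finished message is the first Finished of a full
     * handshake, which is what SSL_get_peer_finished() sees on the server */
    n = SSL_get_finished(ssl, finished, sizeof(finished));

    if (n == 0 || out_size < 4 * ((n + 2) / 3) + 1) {
        return -1;
    }

    /* EVP_EncodeBlock() NUL-terminates and returns the encoded length */
    return EVP_EncodeBlock((unsigned char *) out, finished, (int) n);
}
```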
diff -r 6c8595b77e66 -r b76f61aaf306 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Dec 12 20:21:12 2023 +0400 +++ b/src/event/ngx_event_openssl.c Fri Dec 15 06:50:02 2023 +0000 @@ -5943,6 +5943,34 @@ } +ngx_int_t +ngx_ssl_get_client_tls_bind(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s) +{ + ngx_str_t data; + u_char buf[1024]; + size_t n; + + s->len = 0; + + n = SSL_get_peer_finished(c->ssl->connection, buf, sizeof(buf)); + if (n == 0) { + return NGX_OK; + } + data.len = n; + data.data = buf; + + n = ngx_base64_encoded_length(data.len); + s->data = ngx_palloc(pool, n); + if (s->data == NULL) { + return NGX_ERROR; + } + ngx_encode_base64(s, &data); + s->len = n; + + return NGX_OK; +} + + static time_t ngx_ssl_parse_time( #if OPENSSL_VERSION_NUMBER > 0x10100000L diff -r 6c8595b77e66 -r b76f61aaf306 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Dec 12 20:21:12 2023 +0400 +++ b/src/event/ngx_event_openssl.h Fri Dec 15 06:50:02 2023 +0000 @@ -307,6 +307,8 @@ ngx_str_t *s); ngx_int_t ngx_ssl_get_client_v_remain(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s); +ngx_int_t ngx_ssl_get_client_tls_bind(ngx_connection_t *c, ngx_pool_t *pool, + ngx_str_t *s); ngx_int_t ngx_ssl_handshake(ngx_connection_t *c); diff -r 6c8595b77e66 -r b76f61aaf306 src/http/modules/ngx_http_ssl_module.c --- a/src/http/modules/ngx_http_ssl_module.c Tue Dec 12 20:21:12 2023 +0400 +++ b/src/http/modules/ngx_http_ssl_module.c Fri Dec 15 06:50:02 2023 +0000 @@ -399,6 +399,9 @@ { ngx_string("ssl_client_v_remain"), NULL, ngx_http_ssl_variable, (uintptr_t) ngx_ssl_get_client_v_remain, NGX_HTTP_VAR_CHANGEABLE, 0 }, + { ngx_string("ssl_client_tls_bind"), NULL, ngx_http_ssl_variable, + (uintptr_t) ngx_ssl_get_client_tls_bind, NGX_HTTP_VAR_CHANGEABLE, 0 }, + ngx_http_null_variable }; diff -r 6c8595b77e66 -r b76f61aaf306 src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c Tue Dec 12 20:21:12 2023 +0400 +++ b/src/stream/ngx_stream_ssl_module.c Fri Dec 15 06:50:02 2023 +0000 @@ -322,6 +322,9 @@ { ngx_string("ssl_client_v_remain"), NULL, ngx_stream_ssl_variable, (uintptr_t) ngx_ssl_get_client_v_remain, NGX_STREAM_VAR_CHANGEABLE, 0 }, + { ngx_string("ssl_client_tls_bind"), NULL, ngx_stream_ssl_variable, + (uintptr_t) ngx_ssl_get_client_tls_bind, NGX_STREAM_VAR_CHANGEABLE, 0 }, + ngx_stream_null_variable }; From mdounin at mdounin.ru Fri Dec 15 11:27:35 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 15 Dec 2023 14:27:35 +0300 Subject: [PATCH] Add ssl_client_tls_bind variable In-Reply-To: References: Message-ID: Hello! On Fri, Dec 15, 2023 at 06:02:45PM +1100, Rob Casey wrote: > First time caller, long time listener. > > This patch introduces the variable $ssl_client_tls_bind which provides the > last Finished message returned by the OpenSSL SSL_get_peer_finished() > function. The value returned by this function may be used in TLS channel > binding operations as described in RFC 5929 > (TLSv1.2) and RFC 9266 > (TLSv1.3). The bytes > returned by this function are base64-encoded for ease-of-use as per > suggestion on Nginx forum thread > . 
You might be interested in a previous attempt to introduce similar variables, here: https://mailman.nginx.org/pipermail/nginx-devel/2021-May/014082.html https://mailman.nginx.org/pipermail/nginx-devel/2021-June/014090.html -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Fri Dec 15 12:40:37 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 15 Dec 2023 16:40:37 +0400 Subject: [PATCH 6 of 6] QUIC: path revalidation after expansion failure In-Reply-To: <82fa5941af6fecb4fc7f.1701342332@arut-laptop> References: <82fa5941af6fecb4fc7f.1701342332@arut-laptop> Message-ID: <53998043-7D23-400E-8B67-EAB7051B4BFB@nginx.com> > On 30 Nov 2023, at 15:05, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1701241101 -14400 > # Wed Nov 29 10:58:21 2023 +0400 > # Node ID 82fa5941af6fecb4fc7f0ac6308ae6c266d5e545 > # Parent 4b7663d9146ce9baeb78fb57c3fed7368f25dae9 > QUIC: path revalidation after expansion failure. > > As per RFC 9000, Section 8.2.1: > > When an endpoint is unable to expand the datagram size to 1200 bytes due > to the anti-amplification limit, the path MTU will not be validated. > To ensure that the path MTU is large enough, the endpoint MUST perform a > second path validation by sending a PATH_CHALLENGE frame in a datagram of > at least 1200 bytes. > > diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h > --- a/src/event/quic/ngx_event_quic_connection.h > +++ b/src/event/quic/ngx_event_quic_connection.h > @@ -111,7 +111,8 @@ struct ngx_quic_path_s { > uint64_t mtu_pnum[NGX_QUIC_PATH_RETRIES]; > ngx_str_t addr_text; > u_char text[NGX_SOCKADDR_STRLEN]; > - ngx_uint_t validated; /* unsigned validated:1; */ > + unsigned validated:1; > + unsigned mtu_unvalidated:1; > }; > > > diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c > --- a/src/event/quic/ngx_event_quic_migration.c > +++ b/src/event/quic/ngx_event_quic_migration.c > @@ -169,6 +169,7 @@ valid: > > path->mtu = prev->mtu; > path->max_mtu = prev->max_mtu; > + path->mtu_unvalidated = 0; > } > } > > @@ -182,6 +183,13 @@ valid: > qc->congestion.recovery_start = ngx_current_msec; > } > > + path->validated = 1; > + > + if (path->mtu_unvalidated) { > + path->mtu_unvalidated = 0; > + return ngx_quic_validate_path(c, path); > + } > + > /* > * RFC 9000, 9.3. Responding to Connection Migration > * > @@ -199,8 +207,6 @@ valid: > > ngx_quic_path_dbg(c, "is validated", path); > > - path->validated = 1; > - > ngx_quic_discover_path_mtu(c, path); > > return NGX_OK; > @@ -578,7 +584,15 @@ ngx_quic_send_path_challenge(ngx_connect > * sending a datagram of this size. > */ > > - min = (ngx_quic_path_limit(c, path, 1200) < 1200) ? 0 : 1200; > + if (path->mtu_unvalidated > + || ngx_quic_path_limit(c, path, 1200) < 1200) > + { > + min = 0; > + path->mtu_unvalidated = 1; > + > + } else { > + min = 1200; > + } > > if (ngx_quic_frame_sendto(c, frame, min, path) == NGX_ERROR) { > return NGX_ERROR; This needs the following fixup, path->validated is now a bit-mask. # HG changeset patch # User Sergey Kandaurov # Date 1702643887 -14400 # Fri Dec 15 16:38:07 2023 +0400 # Node ID 763803589a36e3c67cbe39dd324b4e91fe57ecb7 # Parent cbe1a0e8094be744b940fe1b0cc5314f99c94672 QUIC: fixed format specifier after a6f79f044de5. 
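The reason for the change, as a standalone sketch (not part of the fixup): once validated becomes a one-bit bit-field, it is promoted to int when passed through the variadic debug macro, so it has to be consumed with the "%d" conversion; "%ui" would try to read a full ngx_uint_t, which no longer matches the promoted argument.

```c
/*
 * Standalone illustration of default argument promotion: a bit-field
 * narrower than int promotes to int in a variadic call, so the consumer
 * must read it back with va_arg(ap, int), which is what "%d" does in
 * nginx's formatter; reading it as ngx_uint_t ("%ui") does not match
 * the promoted type.
 */

#include <stdarg.h>
#include <stdio.h>

struct qpath {
    unsigned  validated:1;      /* was a plain ngx_uint_t before the change */
};

static void
dbg(const char *fmt, ...)
{
    va_list  ap;

    va_start(ap, fmt);
    printf(fmt, va_arg(ap, int));   /* matches the promoted argument type */
    va_end(ap);
}

int
main(void)
{
    struct qpath  p = { .validated = 1 };

    dbg("valid:%d\n", p.validated);
    return 0;
}
```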
diff --git a/src/event/quic/ngx_event_quic_migration.h b/src/event/quic/ngx_event_quic_migration.h --- a/src/event/quic/ngx_event_quic_migration.h +++ b/src/event/quic/ngx_event_quic_migration.h @@ -19,7 +19,7 @@ #define ngx_quic_path_dbg(c, msg, path) \ ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0, \ - "quic path seq:%uL %s tx:%O rx:%O valid:%ui st:%d mtu:%uz",\ + "quic path seq:%uL %s tx:%O rx:%O valid:%d st:%d mtu:%uz", \ path->seqnum, msg, path->sent, path->received, \ path->validated, path->state, path->mtu); -- Sergey Kandaurov From serg.brester at sebres.de Fri Dec 15 14:46:19 2023 From: serg.brester at sebres.de (Dipl. Ing. Sergey Brester) Date: Fri, 15 Dec 2023 15:46:19 +0100 Subject: Core: Avoid memcpy from NULL In-Reply-To: References: Message-ID: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> Enclosed few thoughts to the subject: - since it is very rare situation that one needs only a memcpy without to know whether previous alloc may fail (e. g. some of pointers were NULL), me too thinks that the caller should be responsible for the check. So I would not extend ngx_memcpy or ngx_cpymem in that way. - rewrite of `ngx_memcpy` define like here: ``` + #define ngx_memcpy(dst, src, n) (void) ((n) == 0 ? (dst) : memcpy(dst, src, n)) ``` may introduce a regression or compat issues, e. g. fully functioning codes like that may become broken hereafter: ``` ngx_memcpy(dst, src, ++len); // because n would be incremented twice in the macro now ``` Sure, `ngx_cpymem` has also the same issue, but it had that already before the "fix"... Anyway, I'm always against of such macros (no matter with or without check it would be better as an inline function instead). My conclusion: a fix of affected places invoking `ngx_memcpy` and `ngx_cpymem`, and possibly an assert in `ngx_memcpy` and `ngx_cpymem` would be fully enough, in my opinion. Regards, Sergey. On 15.12.2023 03:41, Maxim Dounin wrote: > Hello! > > On Wed, Dec 13, 2023 at 11:09:28AM -0500, Ben Kallus wrote: > > Nginx executes numerous `memcpy`s from NULL during normal > execution. > `memcpy`ing to or from NULL is undefined behavior. Accordingly, > some > compilers (gcc -O2) make optimizations that assume `memcpy` > arguments > are not NULL. Nginx with UBSan crashes during startup due to this > issue. > > Consider the following function: > ```C > #include > > int f(int i) { > char a[] = {'a'}; > void *src = i ? a : NULL; > char dst[1]; > memcpy(dst, src, 0); > return src == NULL; > } > ``` > Here's what gcc13.2 -O2 -fno-builtin will do to it: > ```asm > f: > sub rsp, 24 > xor eax, eax > test edi, edi > lea rsi, [rsp+14] > lea rdi, [rsp+15] > mov BYTE PTR [rsp+14], 97 > cmove rsi, rax > xor edx, edx > call memcpy > xor eax, eax > add rsp, 24 > ret > ``` > Note that `f` always returns 0, regardless of the value of `i`. > > Feel free to try for yourself at > https://gcc.godbolt.org/z/zfvnMMsds > > The reasoning here is that since memcpy from NULL is UB, the > optimizer > is free to assume that `src` is non-null. You might consider this > to > be a problem with the compiler, or the C standard, and I might > agree. > Regardless, relying on UB is inherently un-portable, and requires > maintenance to ensure that new compiler releases don't break > existing > assumptions about the behavior of undefined operations. > > The following patch adds a check to `ngx_memcpy` and `ngx_cpymem` > that > makes 0-length memcpy explicitly a noop. Since all memcpying from > NULL > in Nginx uses n==0, this should be sufficient to avoid UB. 
> > It would be more efficient to instead add a check to every call to > ngx_memcpy and ngx_cpymem that might be used with src==NULL, but in > the discussion of a previous patch that proposed such a change, a > more > straightforward and tidy solution was desired. > It may also be worth considering adding checks for NULL memset, > memmove, etc. I think this is not necessary unless it is > demonstrated > that Nginx actually executes such undefined calls. > > # HG changeset patch > # User Ben Kallus > # Date 1702406466 18000 > # Tue Dec 12 13:41:06 2023 -0500 > # Node ID d270203d4ecf77cc14a2652c727e236afc659f4a > # Parent a6f79f044de58b594563ac03139cd5e2e6a81bdb > Add NULL check to ngx_memcpy and ngx_cpymem to satisfy UBSan. > > diff -r a6f79f044de5 -r d270203d4ecf src/core/ngx_string.c > --- a/src/core/ngx_string.c Wed Nov 29 10:58:21 2023 +0400 > +++ b/src/core/ngx_string.c Tue Dec 12 13:41:06 2023 -0500 > @@ -2098,6 +2098,10 @@ > ngx_debug_point(); > } > > + if (n == 0) { > + return dst; > + } > + > return memcpy(dst, src, n); > } > > diff -r a6f79f044de5 -r d270203d4ecf src/core/ngx_string.h > --- a/src/core/ngx_string.h Wed Nov 29 10:58:21 2023 +0400 > +++ b/src/core/ngx_string.h Tue Dec 12 13:41:06 2023 -0500 > @@ -103,8 +103,9 @@ > * gcc3 compiles memcpy(d, s, 4) to the inline "mov"es. > * icc8 compile memcpy(d, s, 4) to the inline "mov"es or XMM > moves. > */ > -#define ngx_memcpy(dst, src, n) (void) memcpy(dst, src, n) > -#define ngx_cpymem(dst, src, n) (((u_char *) memcpy(dst, src, > n)) + (n)) > +#define ngx_memcpy(dst, src, n) (void) ((n) == 0 ? (dst) : > memcpy(dst, src, n)) > +#define ngx_cpymem(dst, src, n) > \ > + ((u_char *) ((n) == 0 ? (dst) : memcpy(dst, src, n)) + (n)) > > #endif > > diff -r a6f79f044de5 -r d270203d4ecf src/http/v2/ngx_http_v2.c > --- a/src/http/v2/ngx_http_v2.c Wed Nov 29 10:58:21 2023 +0400 > +++ b/src/http/v2/ngx_http_v2.c Tue Dec 12 13:41:06 2023 -0500 > @@ -3998,9 +3998,7 @@ > n = size; > } > > - if (n > 0) { > - rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); > - } > + rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, > "http2 request body recv %uz", n); > > > For the record, I've already provided some feedback to Ben in the > ticket here: > > https://trac.nginx.org/nginx/ticket/2570 > > And pointed to the existing thread here: > > https://mailman.nginx.org/pipermail/nginx-devel/2023-October/PX7VH5A273NLUGSYC7DR2AZRU75CIQ3Q.html > https://mailman.nginx.org/pipermail/nginx-devel/2023-December/DCGUEGEFS6TSVIWNEWUEZO3FZMR6ESYZ.html > > Hope this helps. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > From pluknet at nginx.com Fri Dec 15 15:37:43 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 15 Dec 2023 19:37:43 +0400 Subject: [PATCH 0 of 6] Re: [PATCH 2 of 3] Stream: virtual servers In-Reply-To: <20231214173541.mgqomiwedg52zeu3@Y9MQ9X2QVV> References: <20231214173541.mgqomiwedg52zeu3@Y9MQ9X2QVV> Message-ID: On Thu, Dec 14, 2023 at 09:35:41PM +0400, Sergey Kandaurov wrote: > On Thu, Dec 14, 2023 at 10:22:24AM +0400, Roman Arutyunyan wrote: > > Hi, > > > > On Wed, Dec 13, 2023 at 05:40:09PM +0400, Sergey Kandaurov wrote: > > > [..] > > > > > > This introduces accept_filter, deferred_accept, and setfib fields, > > > which is out of scope of this change. 
Anyway, this is useless > > > without corresponding support in ngx_stream_core_listen(). > > > > We need to catch up with all such missing functionality in Stream in future. > > Ok, let's postpone such unrelated changes in to separate patches. Moved this into a separate series, please see adjacent patchbomb emails. From pluknet at nginx.com Fri Dec 15 15:37:44 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 15 Dec 2023 19:37:44 +0400 Subject: [PATCH 1 of 6] Stream: using ngx_stream_ssl_srv_conf_t *sscf naming convention In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1702646778 -14400 # Fri Dec 15 17:26:18 2023 +0400 # Node ID cb377d36446e1ce22b71848a4a138564b2e38719 # Parent 763803589a36e3c67cbe39dd324b4e91fe57ecb7 Stream: using ngx_stream_ssl_srv_conf_t *sscf naming convention. Originally, the stream module was developed based on the mail module, following the existing style. Then it was diverged to closely follow the http module development. This change updates style to use sscf naming convention troughout the stream module, which matches the http module code style. No functional changes. diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c +++ b/src/stream/ngx_stream_ssl_module.c @@ -40,12 +40,12 @@ static ngx_int_t ngx_stream_ssl_variable ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_ssl_add_variables(ngx_conf_t *cf); -static void *ngx_stream_ssl_create_conf(ngx_conf_t *cf); -static char *ngx_stream_ssl_merge_conf(ngx_conf_t *cf, void *parent, +static void *ngx_stream_ssl_create_srv_conf(ngx_conf_t *cf); +static char *ngx_stream_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child); static ngx_int_t ngx_stream_ssl_compile_certificates(ngx_conf_t *cf, - ngx_stream_ssl_conf_t *conf); + ngx_stream_ssl_srv_conf_t *conf); static char *ngx_stream_ssl_password_file(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); @@ -90,21 +90,21 @@ static ngx_command_t ngx_stream_ssl_com NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_msec_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, handshake_timeout), + offsetof(ngx_stream_ssl_srv_conf_t, handshake_timeout), NULL }, { ngx_string("ssl_certificate"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_array_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, certificates), + offsetof(ngx_stream_ssl_srv_conf_t, certificates), NULL }, { ngx_string("ssl_certificate_key"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_array_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, certificate_keys), + offsetof(ngx_stream_ssl_srv_conf_t, certificate_keys), NULL }, { ngx_string("ssl_password_file"), @@ -118,63 +118,63 @@ static ngx_command_t ngx_stream_ssl_com NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, dhparam), + offsetof(ngx_stream_ssl_srv_conf_t, dhparam), NULL }, { ngx_string("ssl_ecdh_curve"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, ecdh_curve), + offsetof(ngx_stream_ssl_srv_conf_t, ecdh_curve), NULL }, { ngx_string("ssl_protocols"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_1MORE, ngx_conf_set_bitmask_slot, NGX_STREAM_SRV_CONF_OFFSET, - 
offsetof(ngx_stream_ssl_conf_t, protocols), + offsetof(ngx_stream_ssl_srv_conf_t, protocols), &ngx_stream_ssl_protocols }, { ngx_string("ssl_ciphers"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, ciphers), + offsetof(ngx_stream_ssl_srv_conf_t, ciphers), NULL }, { ngx_string("ssl_verify_client"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_enum_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, verify), + offsetof(ngx_stream_ssl_srv_conf_t, verify), &ngx_stream_ssl_verify }, { ngx_string("ssl_verify_depth"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_num_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, verify_depth), + offsetof(ngx_stream_ssl_srv_conf_t, verify_depth), NULL }, { ngx_string("ssl_client_certificate"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, client_certificate), + offsetof(ngx_stream_ssl_srv_conf_t, client_certificate), NULL }, { ngx_string("ssl_trusted_certificate"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, trusted_certificate), + offsetof(ngx_stream_ssl_srv_conf_t, trusted_certificate), NULL }, { ngx_string("ssl_prefer_server_ciphers"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, ngx_conf_set_flag_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, prefer_server_ciphers), + offsetof(ngx_stream_ssl_srv_conf_t, prefer_server_ciphers), NULL }, { ngx_string("ssl_session_cache"), @@ -188,42 +188,42 @@ static ngx_command_t ngx_stream_ssl_com NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, ngx_conf_set_flag_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, session_tickets), + offsetof(ngx_stream_ssl_srv_conf_t, session_tickets), NULL }, { ngx_string("ssl_session_ticket_key"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_array_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, session_ticket_keys), + offsetof(ngx_stream_ssl_srv_conf_t, session_ticket_keys), NULL }, { ngx_string("ssl_session_timeout"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_sec_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, session_timeout), + offsetof(ngx_stream_ssl_srv_conf_t, session_timeout), NULL }, { ngx_string("ssl_crl"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, crl), + offsetof(ngx_stream_ssl_srv_conf_t, crl), NULL }, { ngx_string("ssl_conf_command"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE2, ngx_conf_set_keyval_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, conf_commands), + offsetof(ngx_stream_ssl_srv_conf_t, conf_commands), &ngx_stream_ssl_conf_command_post }, { ngx_string("ssl_reject_handshake"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, ngx_conf_set_flag_slot, NGX_STREAM_SRV_CONF_OFFSET, - offsetof(ngx_stream_ssl_conf_t, reject_handshake), + offsetof(ngx_stream_ssl_srv_conf_t, reject_handshake), NULL }, { ngx_string("ssl_alpn"), @@ -244,8 +244,8 @@ static ngx_stream_module_t ngx_stream_s NULL, /* create main configuration */ NULL, /* init main configuration */ - ngx_stream_ssl_create_conf, /* create server configuration */ - 
ngx_stream_ssl_merge_conf /* merge server configuration */ + ngx_stream_ssl_create_srv_conf, /* create server configuration */ + ngx_stream_ssl_merge_srv_conf /* merge server configuration */ }; @@ -339,11 +339,11 @@ static ngx_str_t ngx_stream_ssl_sess_id_ static ngx_int_t ngx_stream_ssl_handler(ngx_stream_session_t *s) { - long rc; - X509 *cert; - ngx_int_t rv; - ngx_connection_t *c; - ngx_stream_ssl_conf_t *sslcf; + long rc; + X509 *cert; + ngx_int_t rv; + ngx_connection_t *c; + ngx_stream_ssl_srv_conf_t *sscf; if (!s->ssl) { return NGX_OK; @@ -351,23 +351,23 @@ ngx_stream_ssl_handler(ngx_stream_sessio c = s->connection; - sslcf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); + sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); if (c->ssl == NULL) { c->log->action = "SSL handshaking"; - rv = ngx_stream_ssl_init_connection(&sslcf->ssl, c); + rv = ngx_stream_ssl_init_connection(&sscf->ssl, c); if (rv != NGX_OK) { return rv; } } - if (sslcf->verify) { + if (sscf->verify) { rc = SSL_get_verify_result(c->ssl->connection); if (rc != X509_V_OK - && (sslcf->verify != 3 || !ngx_ssl_verify_error_optional(rc))) + && (sscf->verify != 3 || !ngx_ssl_verify_error_optional(rc))) { ngx_log_error(NGX_LOG_INFO, c->log, 0, "client SSL certificate verify error: (%l:%s)", @@ -378,7 +378,7 @@ ngx_stream_ssl_handler(ngx_stream_sessio return NGX_ERROR; } - if (sslcf->verify == 1) { + if (sscf->verify == 1) { cert = SSL_get_peer_certificate(c->ssl->connection); if (cert == NULL) { @@ -403,7 +403,7 @@ ngx_stream_ssl_init_connection(ngx_ssl_t { ngx_int_t rc; ngx_stream_session_t *s; - ngx_stream_ssl_conf_t *sslcf; + ngx_stream_ssl_srv_conf_t *sscf; ngx_stream_core_srv_conf_t *cscf; s = c->data; @@ -425,9 +425,9 @@ ngx_stream_ssl_init_connection(ngx_ssl_t } if (rc == NGX_AGAIN) { - sslcf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); + sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); - ngx_add_timer(c->read, sslcf->handshake_timeout); + ngx_add_timer(c->read, sscf->handshake_timeout); c->ssl->handler = ngx_stream_ssl_handshake_handler; @@ -470,7 +470,7 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t const char *servername; ngx_connection_t *c; ngx_stream_session_t *s; - ngx_stream_ssl_conf_t *sscf; + ngx_stream_ssl_srv_conf_t *sscf; ngx_stream_core_srv_conf_t *cscf; c = ngx_ssl_get_connection(ssl_conn); @@ -625,7 +625,7 @@ ngx_stream_ssl_certificate(ngx_ssl_conn_ ngx_uint_t i, nelts; ngx_connection_t *c; ngx_stream_session_t *s; - ngx_stream_ssl_conf_t *sslcf; + ngx_stream_ssl_srv_conf_t *sscf; ngx_stream_complex_value_t *certs, *keys; c = ngx_ssl_get_connection(ssl_conn); @@ -636,11 +636,11 @@ ngx_stream_ssl_certificate(ngx_ssl_conn_ s = c->data; - sslcf = arg; + sscf = arg; - nelts = sslcf->certificate_values->nelts; - certs = sslcf->certificate_values->elts; - keys = sslcf->certificate_key_values->elts; + nelts = sscf->certificate_values->nelts; + certs = sscf->certificate_values->elts; + keys = sscf->certificate_key_values->elts; for (i = 0; i < nelts; i++) { @@ -659,7 +659,7 @@ ngx_stream_ssl_certificate(ngx_ssl_conn_ "ssl key: \"%s\"", key.data); if (ngx_ssl_connection_certificate(c, c->pool, &cert, &key, - sslcf->passwords) + sscf->passwords) != NGX_OK) { return 0; @@ -755,53 +755,53 @@ ngx_stream_ssl_add_variables(ngx_conf_t static void * -ngx_stream_ssl_create_conf(ngx_conf_t *cf) +ngx_stream_ssl_create_srv_conf(ngx_conf_t *cf) { - ngx_stream_ssl_conf_t *scf; + ngx_stream_ssl_srv_conf_t *sscf; - scf = ngx_pcalloc(cf->pool, sizeof(ngx_stream_ssl_conf_t)); - 
if (scf == NULL) { + sscf = ngx_pcalloc(cf->pool, sizeof(ngx_stream_ssl_srv_conf_t)); + if (sscf == NULL) { return NULL; } /* * set by ngx_pcalloc(): * - * scf->protocols = 0; - * scf->certificate_values = NULL; - * scf->dhparam = { 0, NULL }; - * scf->ecdh_curve = { 0, NULL }; - * scf->client_certificate = { 0, NULL }; - * scf->trusted_certificate = { 0, NULL }; - * scf->crl = { 0, NULL }; - * scf->alpn = { 0, NULL }; - * scf->ciphers = { 0, NULL }; - * scf->shm_zone = NULL; + * sscf->protocols = 0; + * sscf->certificate_values = NULL; + * sscf->dhparam = { 0, NULL }; + * sscf->ecdh_curve = { 0, NULL }; + * sscf->client_certificate = { 0, NULL }; + * sscf->trusted_certificate = { 0, NULL }; + * sscf->crl = { 0, NULL }; + * sscf->alpn = { 0, NULL }; + * sscf->ciphers = { 0, NULL }; + * sscf->shm_zone = NULL; */ - scf->handshake_timeout = NGX_CONF_UNSET_MSEC; - scf->certificates = NGX_CONF_UNSET_PTR; - scf->certificate_keys = NGX_CONF_UNSET_PTR; - scf->passwords = NGX_CONF_UNSET_PTR; - scf->conf_commands = NGX_CONF_UNSET_PTR; - scf->prefer_server_ciphers = NGX_CONF_UNSET; - scf->reject_handshake = NGX_CONF_UNSET; - scf->verify = NGX_CONF_UNSET_UINT; - scf->verify_depth = NGX_CONF_UNSET_UINT; - scf->builtin_session_cache = NGX_CONF_UNSET; - scf->session_timeout = NGX_CONF_UNSET; - scf->session_tickets = NGX_CONF_UNSET; - scf->session_ticket_keys = NGX_CONF_UNSET_PTR; + sscf->handshake_timeout = NGX_CONF_UNSET_MSEC; + sscf->certificates = NGX_CONF_UNSET_PTR; + sscf->certificate_keys = NGX_CONF_UNSET_PTR; + sscf->passwords = NGX_CONF_UNSET_PTR; + sscf->conf_commands = NGX_CONF_UNSET_PTR; + sscf->prefer_server_ciphers = NGX_CONF_UNSET; + sscf->reject_handshake = NGX_CONF_UNSET; + sscf->verify = NGX_CONF_UNSET_UINT; + sscf->verify_depth = NGX_CONF_UNSET_UINT; + sscf->builtin_session_cache = NGX_CONF_UNSET; + sscf->session_timeout = NGX_CONF_UNSET; + sscf->session_tickets = NGX_CONF_UNSET; + sscf->session_ticket_keys = NGX_CONF_UNSET_PTR; - return scf; + return sscf; } static char * -ngx_stream_ssl_merge_conf(ngx_conf_t *cf, void *parent, void *child) +ngx_stream_ssl_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) { - ngx_stream_ssl_conf_t *prev = parent; - ngx_stream_ssl_conf_t *conf = child; + ngx_stream_ssl_srv_conf_t *prev = parent; + ngx_stream_ssl_srv_conf_t *conf = child; ngx_pool_cleanup_t *cln; @@ -1010,7 +1010,7 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf static ngx_int_t ngx_stream_ssl_compile_certificates(ngx_conf_t *cf, - ngx_stream_ssl_conf_t *conf) + ngx_stream_ssl_srv_conf_t *conf) { ngx_str_t *cert, *key; ngx_uint_t i, nelts; @@ -1099,19 +1099,19 @@ found: static char * ngx_stream_ssl_password_file(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { - ngx_stream_ssl_conf_t *scf = conf; + ngx_stream_ssl_srv_conf_t *sscf = conf; ngx_str_t *value; - if (scf->passwords != NGX_CONF_UNSET_PTR) { + if (sscf->passwords != NGX_CONF_UNSET_PTR) { return "is duplicate"; } value = cf->args->elts; - scf->passwords = ngx_ssl_read_password_file(cf, &value[1]); + sscf->passwords = ngx_ssl_read_password_file(cf, &value[1]); - if (scf->passwords == NULL) { + if (sscf->passwords == NULL) { return NGX_CONF_ERROR; } @@ -1122,7 +1122,7 @@ ngx_stream_ssl_password_file(ngx_conf_t static char * ngx_stream_ssl_session_cache(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { - ngx_stream_ssl_conf_t *scf = conf; + ngx_stream_ssl_srv_conf_t *sscf = conf; size_t len; ngx_str_t *value, name, size; @@ -1134,17 +1134,17 @@ ngx_stream_ssl_session_cache(ngx_conf_t for (i = 1; i < cf->args->nelts; i++) { if 
(ngx_strcmp(value[i].data, "off") == 0) { - scf->builtin_session_cache = NGX_SSL_NO_SCACHE; + sscf->builtin_session_cache = NGX_SSL_NO_SCACHE; continue; } if (ngx_strcmp(value[i].data, "none") == 0) { - scf->builtin_session_cache = NGX_SSL_NONE_SCACHE; + sscf->builtin_session_cache = NGX_SSL_NONE_SCACHE; continue; } if (ngx_strcmp(value[i].data, "builtin") == 0) { - scf->builtin_session_cache = NGX_SSL_DFLT_BUILTIN_SCACHE; + sscf->builtin_session_cache = NGX_SSL_DFLT_BUILTIN_SCACHE; continue; } @@ -1159,7 +1159,7 @@ ngx_stream_ssl_session_cache(ngx_conf_t goto invalid; } - scf->builtin_session_cache = n; + sscf->builtin_session_cache = n; continue; } @@ -1202,13 +1202,13 @@ ngx_stream_ssl_session_cache(ngx_conf_t return NGX_CONF_ERROR; } - scf->shm_zone = ngx_shared_memory_add(cf, &name, n, + sscf->shm_zone = ngx_shared_memory_add(cf, &name, n, &ngx_stream_ssl_module); - if (scf->shm_zone == NULL) { + if (sscf->shm_zone == NULL) { return NGX_CONF_ERROR; } - scf->shm_zone->init = ngx_ssl_session_cache_init; + sscf->shm_zone->init = ngx_ssl_session_cache_init; continue; } @@ -1216,8 +1216,8 @@ ngx_stream_ssl_session_cache(ngx_conf_t goto invalid; } - if (scf->shm_zone && scf->builtin_session_cache == NGX_CONF_UNSET) { - scf->builtin_session_cache = NGX_SSL_NO_BUILTIN_SCACHE; + if (sscf->shm_zone && sscf->builtin_session_cache == NGX_CONF_UNSET) { + sscf->builtin_session_cache = NGX_SSL_NO_BUILTIN_SCACHE; } return NGX_CONF_OK; @@ -1236,14 +1236,14 @@ ngx_stream_ssl_alpn(ngx_conf_t *cf, ngx_ { #ifdef TLSEXT_TYPE_application_layer_protocol_negotiation - ngx_stream_ssl_conf_t *scf = conf; + ngx_stream_ssl_srv_conf_t *sscf = conf; u_char *p; size_t len; ngx_str_t *value; ngx_uint_t i; - if (scf->alpn.len) { + if (sscf->alpn.len) { return "is duplicate"; } @@ -1260,19 +1260,19 @@ ngx_stream_ssl_alpn(ngx_conf_t *cf, ngx_ len += value[i].len + 1; } - scf->alpn.data = ngx_pnalloc(cf->pool, len); - if (scf->alpn.data == NULL) { + sscf->alpn.data = ngx_pnalloc(cf->pool, len); + if (sscf->alpn.data == NULL) { return NGX_CONF_ERROR; } - p = scf->alpn.data; + p = sscf->alpn.data; for (i = 1; i < cf->args->nelts; i++) { *p++ = value[i].len; p = ngx_cpymem(p, value[i].data, value[i].len); } - scf->alpn.len = len; + sscf->alpn.len = len; return NGX_CONF_OK; @@ -1301,9 +1301,9 @@ ngx_stream_ssl_init(ngx_conf_t *cf) { ngx_uint_t a, p, s; ngx_stream_handler_pt *h; - ngx_stream_ssl_conf_t *sscf; ngx_stream_conf_addr_t *addr; ngx_stream_conf_port_t *port; + ngx_stream_ssl_srv_conf_t *sscf; ngx_stream_core_srv_conf_t **cscfp, *cscf; ngx_stream_core_main_conf_t *cmcf; diff --git a/src/stream/ngx_stream_ssl_module.h b/src/stream/ngx_stream_ssl_module.h --- a/src/stream/ngx_stream_ssl_module.h +++ b/src/stream/ngx_stream_ssl_module.h @@ -53,7 +53,7 @@ typedef struct { ngx_flag_t session_tickets; ngx_array_t *session_ticket_keys; -} ngx_stream_ssl_conf_t; +} ngx_stream_ssl_srv_conf_t; extern ngx_module_t ngx_stream_ssl_module; From pluknet at nginx.com Fri Dec 15 15:37:45 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 15 Dec 2023 19:37:45 +0400 Subject: [PATCH 2 of 6] Overhauled some diagnostic messages missed in 1b05b9bbcebf In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1702647536 -14400 # Fri Dec 15 17:38:56 2023 +0400 # Node ID de11f5373157db6c1e22dbad2ab4014143a5e8f8 # Parent cb377d36446e1ce22b71848a4a138564b2e38719 Overhauled some diagnostic messages missed in 1b05b9bbcebf. 
diff --git a/src/http/modules/ngx_http_referer_module.c b/src/http/modules/ngx_http_referer_module.c --- a/src/http/modules/ngx_http_referer_module.c +++ b/src/http/modules/ngx_http_referer_module.c @@ -631,7 +631,7 @@ ngx_http_add_regex_referer(ngx_conf_t *c #else ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "the using of the regex \"%V\" requires PCRE library", + "using regex \"%V\" requires PCRE library", name); return NGX_ERROR; diff --git a/src/http/modules/ngx_http_ssi_filter_module.c b/src/http/modules/ngx_http_ssi_filter_module.c --- a/src/http/modules/ngx_http_ssi_filter_module.c +++ b/src/http/modules/ngx_http_ssi_filter_module.c @@ -2001,7 +2001,7 @@ ngx_http_ssi_regex_match(ngx_http_reques #else ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, - "the using of the regex \"%V\" in SSI requires PCRE library", + "using regex \"%V\" in SSI requires PCRE library", pattern); return NGX_HTTP_SSI_ERROR; diff --git a/src/mail/ngx_mail_core_module.c b/src/mail/ngx_mail_core_module.c --- a/src/mail/ngx_mail_core_module.c +++ b/src/mail/ngx_mail_core_module.c @@ -441,7 +441,7 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx continue; #else ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "bind ipv6only is not supported " + "ipv6only is not supported " "on this platform"); return NGX_CONF_ERROR; #endif @@ -564,7 +564,7 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx } ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "the invalid \"%V\" parameter", &value[i]); + "invalid \"%V\" parameter", &value[i]); return NGX_CONF_ERROR; } diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -1008,7 +1008,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n continue; #else ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "bind ipv6only is not supported " + "ipv6only is not supported " "on this platform"); return NGX_CONF_ERROR; #endif @@ -1136,7 +1136,7 @@ ngx_stream_core_listen(ngx_conf_t *cf, n } ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "the invalid \"%V\" parameter", &value[i]); + "invalid \"%V\" parameter", &value[i]); return NGX_CONF_ERROR; } From pluknet at nginx.com Fri Dec 15 15:37:46 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 15 Dec 2023 19:37:46 +0400 Subject: [PATCH 3 of 6] Stream: reshuffled ngx_stream_listen_opt_t fields In-Reply-To: References: Message-ID: <4d90cb223fdb9e3e6c14.1702654666@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1702648226 -14400 # Fri Dec 15 17:50:26 2023 +0400 # Node ID 4d90cb223fdb9e3e6c148726e36cec7835b2f0f8 # Parent de11f5373157db6c1e22dbad2ab4014143a5e8f8 Stream: reshuffled ngx_stream_listen_opt_t fields. In preparation for adding more parameters to the listen directive, and to be in sync with the corresponding structure in the http module. No functional changes. 
diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h --- a/src/stream/ngx_stream.h +++ b/src/stream/ngx_stream.h @@ -56,18 +56,19 @@ typedef struct { unsigned reuseport:1; unsigned so_keepalive:2; unsigned proxy_protocol:1; + + int backlog; + int rcvbuf; + int sndbuf; + int type; +#if (NGX_HAVE_TCP_FASTOPEN) + int fastopen; +#endif #if (NGX_HAVE_KEEPALIVE_TUNABLE) int tcp_keepidle; int tcp_keepintvl; int tcp_keepcnt; #endif - int backlog; - int rcvbuf; - int sndbuf; -#if (NGX_HAVE_TCP_FASTOPEN) - int fastopen; -#endif - int type; } ngx_stream_listen_opt_t; From pluknet at nginx.com Fri Dec 15 15:37:47 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 15 Dec 2023 19:37:47 +0400 Subject: [PATCH 4 of 6] Stream: the "deferred" parameter of the "listen" directive In-Reply-To: References: Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1702650289 -14400 # Fri Dec 15 18:24:49 2023 +0400 # Node ID cca722e447f8beaaa6b41a620c8b4239a5d1aa7d # Parent 4d90cb223fdb9e3e6c148726e36cec7835b2f0f8 Stream: the "deferred" parameter of the "listen" directive. The Linux TCP_DEFER_ACCEPT support. diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c --- a/src/stream/ngx_stream.c +++ b/src/stream/ngx_stream.c @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, ls->keepcnt = addr->opt.tcp_keepcnt; #endif +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) + ls->deferred_accept = addr->opt.deferred_accept; +#endif + #if (NGX_HAVE_INET6) ls->ipv6only = addr->opt.ipv6only; #endif diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h --- a/src/stream/ngx_stream.h +++ b/src/stream/ngx_stream.h @@ -53,6 +53,7 @@ typedef struct { #if (NGX_HAVE_INET6) unsigned ipv6only:1; #endif + unsigned deferred_accept:1; unsigned reuseport:1; unsigned so_keepalive:2; unsigned proxy_protocol:1; diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -987,6 +987,19 @@ ngx_stream_core_listen(ngx_conf_t *cf, n continue; } + if (ngx_strcmp(value[i].data, "deferred") == 0) { +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) + lsopt.deferred_accept = 1; + lsopt.set = 1; + lsopt.bind = 1; +#else + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "the deferred accept is not supported " + "on this platform, ignored"); +#endif + continue; + } + if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) { #if (NGX_HAVE_INET6 && defined IPV6_V6ONLY) if (ngx_strcmp(&value[i].data[10], "n") == 0) { From pluknet at nginx.com Fri Dec 15 15:37:48 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 15 Dec 2023 19:37:48 +0400 Subject: [PATCH 5 of 6] Stream: the "accept_filter" parameter of the "listen" directive In-Reply-To: References: Message-ID: <9be627b7a3a35c00be13.1702654668@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1702650593 -14400 # Fri Dec 15 18:29:53 2023 +0400 # Node ID 9be627b7a3a35c00be13332f553e2d3b778877ae # Parent cca722e447f8beaaa6b41a620c8b4239a5d1aa7d Stream: the "accept_filter" parameter of the "listen" directive. The FreeBSD accept filters support. 
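With this parameter a stream server can be configured analogously to the http module, e.g. "listen 127.0.0.1:12345 accept_filter=dataready;". As a rough sketch of what the parameter boils down to on FreeBSD (illustrative, not code from this patch):

```c
/*
 * Rough illustration, not from this patch: installing an accept filter
 * on a FreeBSD listening socket; "dataready" (accf_data(9)) and
 * "httpready" (accf_http(9)) are the usual filter names.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <string.h>

static int
apply_accept_filter(int listen_fd, const char *name)
{
    struct accept_filter_arg  af;

    memset(&af, 0, sizeof(af));
    strncpy(af.af_name, name, sizeof(af.af_name) - 1);

    /* connections are only returned by accept() once the filter fires */
    return setsockopt(listen_fd, SOL_SOCKET, SO_ACCEPTFILTER,
                      &af, sizeof(af));
}
```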
diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c --- a/src/stream/ngx_stream.c +++ b/src/stream/ngx_stream.c @@ -1021,6 +1021,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, ls->keepcnt = addr->opt.tcp_keepcnt; #endif +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) + ls->accept_filter = addr->opt.accept_filter; +#endif + #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) ls->deferred_accept = addr->opt.deferred_accept; #endif diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h --- a/src/stream/ngx_stream.h +++ b/src/stream/ngx_stream.h @@ -70,6 +70,10 @@ typedef struct { int tcp_keepintvl; int tcp_keepcnt; #endif + +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) + char *accept_filter; +#endif } ngx_stream_listen_opt_t; diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -987,6 +987,20 @@ ngx_stream_core_listen(ngx_conf_t *cf, n continue; } + if (ngx_strncmp(value[i].data, "accept_filter=", 14) == 0) { +#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) + lsopt.accept_filter = (char *) &value[i].data[14]; + lsopt.set = 1; + lsopt.bind = 1; +#else + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "accept filters \"%V\" are not supported " + "on this platform, ignored", + &value[i]); +#endif + continue; + } + if (ngx_strcmp(value[i].data, "deferred") == 0) { #if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) lsopt.deferred_accept = 1; From pluknet at nginx.com Fri Dec 15 15:37:49 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 15 Dec 2023 19:37:49 +0400 Subject: [PATCH 6 of 6] Stream: the "setfib" parameter of the "listen" directive In-Reply-To: References: Message-ID: <219662ea1613ab68d4d5.1702654669@enoparse.local> # HG changeset patch # User Sergey Kandaurov # Date 1702651328 -14400 # Fri Dec 15 18:42:08 2023 +0400 # Node ID 219662ea1613ab68d4d5d4085394bba75993ae42 # Parent 9be627b7a3a35c00be13332f553e2d3b778877ae Stream: the "setfib" parameter of the "listen" directive. The FreeBSD SO_SETFIB support. 
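Usage mirrors the http module, e.g. "listen 127.0.0.1:12345 setfib=1;". Roughly, the parameter selects an alternate FreeBSD routing table (FIB) for the listening socket, along the lines of the following sketch (illustrative, not code from this patch):

```c
/*
 * Rough illustration, not from this patch: binding a FreeBSD listening
 * socket to routing table (FIB) number "fib", as given in setfib=N.
 */

#include <sys/types.h>
#include <sys/socket.h>

static int
apply_setfib(int listen_fd, int fib)
{
    return setsockopt(listen_fd, SOL_SOCKET, SO_SETFIB, &fib, sizeof(int));
}
```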
diff --git a/src/stream/ngx_stream.c b/src/stream/ngx_stream.c --- a/src/stream/ngx_stream.c +++ b/src/stream/ngx_stream.c @@ -1033,6 +1033,10 @@ ngx_stream_add_listening(ngx_conf_t *cf, ls->ipv6only = addr->opt.ipv6only; #endif +#if (NGX_HAVE_SETFIB) + ls->setfib = addr->opt.setfib; +#endif + #if (NGX_HAVE_TCP_FASTOPEN) ls->fastopen = addr->opt.fastopen; #endif diff --git a/src/stream/ngx_stream.h b/src/stream/ngx_stream.h --- a/src/stream/ngx_stream.h +++ b/src/stream/ngx_stream.h @@ -62,6 +62,9 @@ typedef struct { int rcvbuf; int sndbuf; int type; +#if (NGX_HAVE_SETFIB) + int setfib; +#endif #if (NGX_HAVE_TCP_FASTOPEN) int fastopen; #endif diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -892,6 +892,9 @@ ngx_stream_core_listen(ngx_conf_t *cf, n lsopt.type = SOCK_STREAM; lsopt.rcvbuf = -1; lsopt.sndbuf = -1; +#if (NGX_HAVE_SETFIB) + lsopt.setfib = -1; +#endif #if (NGX_HAVE_TCP_FASTOPEN) lsopt.fastopen = -1; #endif @@ -921,6 +924,22 @@ ngx_stream_core_listen(ngx_conf_t *cf, n continue; } +#if (NGX_HAVE_SETFIB) + if (ngx_strncmp(value[i].data, "setfib=", 7) == 0) { + lsopt.setfib = ngx_atoi(value[i].data + 7, value[i].len - 7); + lsopt.set = 1; + lsopt.bind = 1; + + if (lsopt.setfib == NGX_ERROR) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid setfib \"%V\"", &value[i]); + return NGX_CONF_ERROR; + } + + continue; + } +#endif + #if (NGX_HAVE_TCP_FASTOPEN) if (ngx_strncmp(value[i].data, "fastopen=", 9) == 0) { lsopt.fastopen = ngx_atoi(value[i].data + 9, value[i].len - 9); From pluknet at nginx.com Fri Dec 15 15:46:01 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 15 Dec 2023 19:46:01 +0400 Subject: [PATCH 3 of 3] Stream: ngx_stream_pass_module In-Reply-To: <3cab85fe55272835674b.1699610841@arut-laptop> References: <3cab85fe55272835674b.1699610841@arut-laptop> Message-ID: <9AE01647-8C50-4018-A7C7-F92B32DEA684@nginx.com> > On 10 Nov 2023, at 14:07, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1699543504 -14400 > # Thu Nov 09 19:25:04 2023 +0400 > # Node ID 3cab85fe55272835674b7f1c296796955256d019 > # Parent 1d3464283405a4d8ac54caae9bf1815c723f04c5 > Stream: ngx_stream_pass_module. > > The module allows to pass connections from Stream to other modules such as HTTP > or Mail, as well as back to Stream. Previously, this was only possible with > proxying. Connections with preread buffer read out from socket cannot be > passed. > > The module allows to terminate SSL selectively based on SNI. Note that terminating SSL before passing connections may be challenging and requires appropriate preparations in the Stream module itself. Notably, Stream unconditionally disables SSL buffering now, which may affect performance in the modules being passed. The following patches are to address such deficiency (and effectively, they are used to catch up differences with the http module). # HG changeset patch # User Sergey Kandaurov # Date 1702653986 -14400 # Fri Dec 15 19:26:26 2023 +0400 # Node ID aed7c0875fc690ccaa7288f66624ddca8299bff5 # Parent 219662ea1613ab68d4d5d4085394bba75993ae42 SSL: make it possible to disable SSL buffering. It is now disabled if "ssl_buffer_size 0;" is used in the configuration. Previously, such configuration was meaningless and resulted in CPU hog. 
diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -1709,7 +1709,7 @@ ngx_ssl_create_connection(ngx_ssl_t *ssl return NGX_ERROR; } - sc->buffer = ((flags & NGX_SSL_BUFFER) != 0); + sc->buffer = ((flags & NGX_SSL_BUFFER) != 0) && (ssl->buffer_size != 0); sc->buffer_size = ssl->buffer_size; sc->session_ctx = ssl->ctx; diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c +++ b/src/http/ngx_http_request.c @@ -947,6 +947,7 @@ ngx_http_ssl_servername(ngx_ssl_conn_t * sscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_ssl_module); + c->ssl->buffer = (sscf->buffer_size != 0); c->ssl->buffer_size = sscf->buffer_size; if (sscf->ssl.ctx) { # HG changeset patch # User Sergey Kandaurov # Date 1702653990 -14400 # Fri Dec 15 19:26:30 2023 +0400 # Node ID 272afdf3fe232dd9770c8644a5230830192b434d # Parent aed7c0875fc690ccaa7288f66624ddca8299bff5 Stream: the "ssl_buffer_size" directive. This change introduces behaviour change in SSL buffering, which is now enabled by default. It can be disabled with "ssl_buffer_size 0;". Previously, it was unconditionally disabled. In particular, it is useful when Stream passes SSL connections after SSL termination to other modules using ngx_stream_pass_module. diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c +++ b/src/stream/ngx_stream_ssl_module.c @@ -142,6 +142,13 @@ static ngx_command_t ngx_stream_ssl_com offsetof(ngx_stream_ssl_srv_conf_t, ciphers), NULL }, + { ngx_string("ssl_buffer_size"), + NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_size_slot, + NGX_STREAM_SRV_CONF_OFFSET, + offsetof(ngx_stream_ssl_srv_conf_t, buffer_size), + NULL }, + { ngx_string("ssl_verify_client"), NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_enum_slot, @@ -414,7 +421,7 @@ ngx_stream_ssl_init_connection(ngx_ssl_t return NGX_ERROR; } - if (ngx_ssl_create_connection(ssl, c, 0) != NGX_OK) { + if (ngx_ssl_create_connection(ssl, c, NGX_SSL_BUFFER) != NGX_OK) { return NGX_ERROR; } @@ -527,6 +534,9 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t sscf = ngx_stream_get_module_srv_conf(s, ngx_stream_ssl_module); + c->ssl->buffer = (sscf->buffer_size != 0); + c->ssl->buffer_size = sscf->buffer_size; + if (sscf->ssl.ctx) { if (SSL_set_SSL_CTX(ssl_conn, sscf->ssl.ctx) == NULL) { goto error; @@ -786,6 +796,7 @@ ngx_stream_ssl_create_srv_conf(ngx_conf_ sscf->conf_commands = NGX_CONF_UNSET_PTR; sscf->prefer_server_ciphers = NGX_CONF_UNSET; sscf->reject_handshake = NGX_CONF_UNSET; + sscf->buffer_size = NGX_CONF_UNSET_SIZE; sscf->verify = NGX_CONF_UNSET_UINT; sscf->verify_depth = NGX_CONF_UNSET_UINT; sscf->builtin_session_cache = NGX_CONF_UNSET; @@ -821,6 +832,9 @@ ngx_stream_ssl_merge_srv_conf(ngx_conf_t |NGX_SSL_TLSv1|NGX_SSL_TLSv1_1 |NGX_SSL_TLSv1_2|NGX_SSL_TLSv1_3)); + ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, + NGX_SSL_BUFSIZE); + ngx_conf_merge_uint_value(conf->verify, prev->verify, 0); ngx_conf_merge_uint_value(conf->verify_depth, prev->verify_depth, 1); @@ -930,6 +944,8 @@ ngx_stream_ssl_merge_srv_conf(ngx_conf_t } } + conf->ssl.buffer_size = conf->buffer_size; + if (conf->verify) { if (conf->client_certificate.len == 0 && conf->verify != 3) { diff --git a/src/stream/ngx_stream_ssl_module.h b/src/stream/ngx_stream_ssl_module.h --- a/src/stream/ngx_stream_ssl_module.h +++ 
b/src/stream/ngx_stream_ssl_module.h @@ -27,6 +27,8 @@ typedef struct { ngx_uint_t verify; ngx_uint_t verify_depth; + size_t buffer_size; + ssize_t builtin_session_cache; time_t session_timeout; > > stream { > server { > listen 8000 default_server; > ssl_preread on; > ... > } > > server { > listen 8000; > server_name foo.example.com; > pass 8001; # to HTTP > } > > server { > listen 8000; > server_name bar.example.com; > ... > } > } > > http { > server { > listen 8001 ssl; > ... > > location / { > root html; > } > } > } > > diff --git a/auto/modules b/auto/modules > --- a/auto/modules > +++ b/auto/modules > @@ -1166,6 +1166,16 @@ if [ $STREAM != NO ]; then > . auto/module > fi > > + if [ $STREAM_PASS = YES ]; then > + ngx_module_name=ngx_stream_pass_module > + ngx_module_deps= > + ngx_module_srcs=src/stream/ngx_stream_pass_module.c > + ngx_module_libs= > + ngx_module_link=$STREAM_PASS > + > + . auto/module > + fi > + > if [ $STREAM_SET = YES ]; then > ngx_module_name=ngx_stream_set_module > ngx_module_deps= > diff --git a/auto/options b/auto/options > --- a/auto/options > +++ b/auto/options > @@ -127,6 +127,7 @@ STREAM_GEOIP=NO > STREAM_MAP=YES > STREAM_SPLIT_CLIENTS=YES > STREAM_RETURN=YES > +STREAM_PASS=YES > STREAM_SET=YES > STREAM_UPSTREAM_HASH=YES > STREAM_UPSTREAM_LEAST_CONN=YES > @@ -337,6 +338,7 @@ use the \"--with-mail_ssl_module\" optio > --without-stream_split_clients_module) > STREAM_SPLIT_CLIENTS=NO ;; > --without-stream_return_module) STREAM_RETURN=NO ;; > + --without-stream_pass_module) STREAM_PASS=NO ;; > --without-stream_set_module) STREAM_SET=NO ;; > --without-stream_upstream_hash_module) > STREAM_UPSTREAM_HASH=NO ;; > @@ -556,6 +558,7 @@ cat << END > --without-stream_split_clients_module > disable ngx_stream_split_clients_module > --without-stream_return_module disable ngx_stream_return_module > + --without-stream_pass_module disable ngx_stream_pass_module > --without-stream_set_module disable ngx_stream_set_module > --without-stream_upstream_hash_module > disable ngx_stream_upstream_hash_module > diff --git a/src/stream/ngx_stream_pass_module.c b/src/stream/ngx_stream_pass_module.c > new file mode 100644 > --- /dev/null > +++ b/src/stream/ngx_stream_pass_module.c > @@ -0,0 +1,245 @@ > + > +/* > + * Copyright (C) Roman Arutyunyan > + * Copyright (C) Nginx, Inc. 
> + */ > + > + > +#include > +#include > +#include > + > + > +typedef struct { > + ngx_addr_t *addr; > + ngx_stream_complex_value_t *addr_value; > +} ngx_stream_pass_srv_conf_t; > + > + > +static void ngx_stream_pass_handler(ngx_stream_session_t *s); > +static void *ngx_stream_pass_create_srv_conf(ngx_conf_t *cf); > +static char *ngx_stream_pass(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); > + > + > +static ngx_command_t ngx_stream_pass_commands[] = { > + > + { ngx_string("pass"), > + NGX_STREAM_SRV_CONF|NGX_CONF_TAKE1, > + ngx_stream_pass, > + NGX_STREAM_SRV_CONF_OFFSET, > + 0, > + NULL }, > + > + ngx_null_command > +}; > + > + > +static ngx_stream_module_t ngx_stream_pass_module_ctx = { > + NULL, /* preconfiguration */ > + NULL, /* postconfiguration */ > + > + NULL, /* create main configuration */ > + NULL, /* init main configuration */ > + > + ngx_stream_pass_create_srv_conf, /* create server configuration */ > + NULL /* merge server configuration */ > +}; > + > + > +ngx_module_t ngx_stream_pass_module = { > + NGX_MODULE_V1, > + &ngx_stream_pass_module_ctx, /* module conaddr */ > + ngx_stream_pass_commands, /* module directives */ > + NGX_STREAM_MODULE, /* module type */ > + NULL, /* init master */ > + NULL, /* init module */ > + NULL, /* init process */ > + NULL, /* init thread */ > + NULL, /* exit thread */ > + NULL, /* exit process */ > + NULL, /* exit master */ > + NGX_MODULE_V1_PADDING > +}; > + > + > +static void > +ngx_stream_pass_handler(ngx_stream_session_t *s) > +{ > + ngx_url_t u; > + ngx_str_t url; > + ngx_addr_t *addr; > + ngx_uint_t i; > + ngx_listening_t *ls; > + ngx_connection_t *c; > + ngx_stream_pass_srv_conf_t *pscf; > + > + c = s->connection; > + > + c->log->action = "passing connection to another module"; > + > + if (c->buffer && c->buffer->pos != c->buffer->last) { > + ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, > + "cannot pass connection with preread data"); > + goto failed; > + } > + > + pscf = ngx_stream_get_module_srv_conf(s, ngx_stream_pass_module); > + > + addr = pscf->addr; > + > + if (addr == NULL) { > + if (ngx_stream_complex_value(s, pscf->addr_value, &url) != NGX_OK) { > + goto failed; > + } > + > + ngx_memzero(&u, sizeof(ngx_url_t)); > + > + u.url = url; > + u.listen = 1; > + u.no_resolve = 1; > + > + if (ngx_parse_url(s->connection->pool, &u) != NGX_OK) { > + if (u.err) { > + ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, > + "%s in pass \"%V\"", u.err, &u.url); > + } > + > + goto failed; > + } > + > + if (u.naddrs == 0) { > + ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, > + "no addresses in pass \"%V\"", &u.url); > + goto failed; > + } > + > + addr = &u.addrs[0]; > + } > + > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, > + "stream pass addr: \"%V\"", &addr->name); > + > + ls = ngx_cycle->listening.elts; > + > + for (i = 0; i < ngx_cycle->listening.nelts; i++) { > + if (ngx_cmp_sockaddr(ls[i].sockaddr, ls[i].socklen, > + addr->sockaddr, addr->socklen, 1) > + == NGX_OK) > + { > + c->listening = &ls[i]; > + > + c->data = NULL; > + c->buffer = NULL; > + > + *c->log = c->listening->log; > + c->log->handler = NULL; > + c->log->data = NULL; > + > + c->listening->handler(c); > + > + return; > + } > + } > + > + ngx_log_error(NGX_LOG_ERR, c->log, 0, > + "listen not found for \"%V\"", &addr->name); > + > + ngx_stream_finalize_session(s, NGX_STREAM_OK); > + > + return; > + > +failed: > + > + ngx_stream_finalize_session(s, NGX_STREAM_INTERNAL_SERVER_ERROR); > +} > + > + > +static void * > +ngx_stream_pass_create_srv_conf(ngx_conf_t *cf) > 
+{ > + ngx_stream_pass_srv_conf_t *conf; > + > + conf = ngx_pcalloc(cf->pool, sizeof(ngx_stream_pass_srv_conf_t)); > + if (conf == NULL) { > + return NULL; > + } > + > + /* > + * set by ngx_pcalloc(): > + * > + * conf->addr = NULL; > + * conf->addr_value = NULL; > + */ > + > + return conf; > +} > + > + > +static char * > +ngx_stream_pass(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > +{ > + ngx_stream_pass_srv_conf_t *pscf = conf; > + > + ngx_url_t u; > + ngx_str_t *value, *url; > + ngx_stream_complex_value_t cv; > + ngx_stream_core_srv_conf_t *cscf; > + ngx_stream_compile_complex_value_t ccv; > + > + if (pscf->addr || pscf->addr_value) { > + return "is duplicate"; > + } > + > + cscf = ngx_stream_conf_get_module_srv_conf(cf, ngx_stream_core_module); > + > + cscf->handler = ngx_stream_pass_handler; > + > + value = cf->args->elts; > + > + url = &value[1]; > + > + ngx_memzero(&ccv, sizeof(ngx_stream_compile_complex_value_t)); > + > + ccv.cf = cf; > + ccv.value = url; > + ccv.complex_value = &cv; > + > + if (ngx_stream_compile_complex_value(&ccv) != NGX_OK) { > + return NGX_CONF_ERROR; > + } > + > + if (cv.lengths) { > + pscf->addr_value = ngx_palloc(cf->pool, > + sizeof(ngx_stream_complex_value_t)); > + if (pscf->addr_value == NULL) { > + return NGX_CONF_ERROR; > + } > + > + *pscf->addr_value = cv; > + > + return NGX_CONF_OK; > + } > + > + ngx_memzero(&u, sizeof(ngx_url_t)); > + > + u.url = *url; > + u.listen = 1; > + > + if (ngx_parse_url(cf->pool, &u) != NGX_OK) { > + if (u.err) { > + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > + "%s in \"%V\" of the \"pass\" directive", > + u.err, &u.url); > + } > + > + return NGX_CONF_ERROR; > + } > + > + if (u.naddrs == 0) { > + return "has no addresses"; > + } > + > + pscf->addr = &u.addrs[0]; > + > + return NGX_CONF_OK; > +} > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Sergey Kandaurov From arut at nginx.com Fri Dec 15 16:09:03 2023 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 15 Dec 2023 20:09:03 +0400 Subject: [PATCH 6 of 6] QUIC: path revalidation after expansion failure In-Reply-To: <53998043-7D23-400E-8B67-EAB7051B4BFB@nginx.com> References: <82fa5941af6fecb4fc7f.1701342332@arut-laptop> <53998043-7D23-400E-8B67-EAB7051B4BFB@nginx.com> Message-ID: <20231215160903.6e4vmjbremv7fx4y@N00W24XTQX> Hi, On Fri, Dec 15, 2023 at 04:40:37PM +0400, Sergey Kandaurov wrote: > > > On 30 Nov 2023, at 15:05, Roman Arutyunyan wrote: > > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1701241101 -14400 > > # Wed Nov 29 10:58:21 2023 +0400 > > # Node ID 82fa5941af6fecb4fc7f0ac6308ae6c266d5e545 > > # Parent 4b7663d9146ce9baeb78fb57c3fed7368f25dae9 > > QUIC: path revalidation after expansion failure. > > > > As per RFC 9000, Section 8.2.1: > > > > When an endpoint is unable to expand the datagram size to 1200 bytes due > > to the anti-amplification limit, the path MTU will not be validated. > > To ensure that the path MTU is large enough, the endpoint MUST perform a > > second path validation by sending a PATH_CHALLENGE frame in a datagram of > > at least 1200 bytes. 
> > > > diff --git a/src/event/quic/ngx_event_quic_connection.h b/src/event/quic/ngx_event_quic_connection.h > > --- a/src/event/quic/ngx_event_quic_connection.h > > +++ b/src/event/quic/ngx_event_quic_connection.h > > @@ -111,7 +111,8 @@ struct ngx_quic_path_s { > > uint64_t mtu_pnum[NGX_QUIC_PATH_RETRIES]; > > ngx_str_t addr_text; > > u_char text[NGX_SOCKADDR_STRLEN]; > > - ngx_uint_t validated; /* unsigned validated:1; */ > > + unsigned validated:1; > > + unsigned mtu_unvalidated:1; > > }; > > > > > > diff --git a/src/event/quic/ngx_event_quic_migration.c b/src/event/quic/ngx_event_quic_migration.c > > --- a/src/event/quic/ngx_event_quic_migration.c > > +++ b/src/event/quic/ngx_event_quic_migration.c > > @@ -169,6 +169,7 @@ valid: > > > > path->mtu = prev->mtu; > > path->max_mtu = prev->max_mtu; > > + path->mtu_unvalidated = 0; > > } > > } > > > > @@ -182,6 +183,13 @@ valid: > > qc->congestion.recovery_start = ngx_current_msec; > > } > > > > + path->validated = 1; > > + > > + if (path->mtu_unvalidated) { > > + path->mtu_unvalidated = 0; > > + return ngx_quic_validate_path(c, path); > > + } > > + > > /* > > * RFC 9000, 9.3. Responding to Connection Migration > > * > > @@ -199,8 +207,6 @@ valid: > > > > ngx_quic_path_dbg(c, "is validated", path); > > > > - path->validated = 1; > > - > > ngx_quic_discover_path_mtu(c, path); > > > > return NGX_OK; > > @@ -578,7 +584,15 @@ ngx_quic_send_path_challenge(ngx_connect > > * sending a datagram of this size. > > */ > > > > - min = (ngx_quic_path_limit(c, path, 1200) < 1200) ? 0 : 1200; > > + if (path->mtu_unvalidated > > + || ngx_quic_path_limit(c, path, 1200) < 1200) > > + { > > + min = 0; > > + path->mtu_unvalidated = 1; > > + > > + } else { > > + min = 1200; > > + } > > > > if (ngx_quic_frame_sendto(c, frame, min, path) == NGX_ERROR) { > > return NGX_ERROR; > > This needs the following fixup, path->validated is now a bit-mask. > > # HG changeset patch > # User Sergey Kandaurov > # Date 1702643887 -14400 > # Fri Dec 15 16:38:07 2023 +0400 > # Node ID 763803589a36e3c67cbe39dd324b4e91fe57ecb7 > # Parent cbe1a0e8094be744b940fe1b0cc5314f99c94672 > QUIC: fixed format specifier after a6f79f044de5. > > diff --git a/src/event/quic/ngx_event_quic_migration.h b/src/event/quic/ngx_event_quic_migration.h > --- a/src/event/quic/ngx_event_quic_migration.h > +++ b/src/event/quic/ngx_event_quic_migration.h > @@ -19,7 +19,7 @@ > > #define ngx_quic_path_dbg(c, msg, path) \ > ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0, \ > - "quic path seq:%uL %s tx:%O rx:%O valid:%ui st:%d mtu:%uz",\ > + "quic path seq:%uL %s tx:%O rx:%O valid:%d st:%d mtu:%uz", \ > path->seqnum, msg, path->sent, path->received, \ > path->validated, path->state, path->mtu); > Looks ok From benjamin.p.kallus.gr at dartmouth.edu Fri Dec 15 16:28:46 2023 From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus) Date: Fri, 15 Dec 2023 11:28:46 -0500 Subject: Core: Avoid memcpy from NULL In-Reply-To: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> Message-ID: > - rewrite of `ngx_memcpy` define like here: > ``` > + #define ngx_memcpy(dst, src, n) (void) ((n) == 0 ? (dst) : memcpy(dst, src, n)) > ``` > may introduce a regression or compat issues, e. g. fully functioning codes like that may become broken hereafter: > ``` > ngx_memcpy(dst, src, ++len); // because n would be incremented twice in the macro now > ``` > Sure, `ngx_cpymem` has also the same issue, but it had that already before the "fix"... 
> Anyway, I'm always against of such macros (no matter with or without check it would be better as an inline function instead). I totally agree. I'm not a fan of function-like macros either; I introduced those extra evaluations of n and dst only because I saw that cpymem already valuates n twice. I'd be happy to update these to functions (explicitly inlined or not) in the final changeset. > - since it is very rare situation that one needs only a memcpy without to know whether previous alloc may fail > (e. g. some of pointers were NULL), me too thinks that the caller should be responsible for the check. > So I would not extend ngx_memcpy or ngx_cpymem in that way. I am inclined to agree with you on this point, but Maxim previously wrote this: > Trying to check length everywhere is just ugly and unreadable. Which I also think is reasonable. Does anyone else have a position on this? -Ben On 12/15/23, Dipl. Ing. Sergey Brester via nginx-devel wrote: > Enclosed few thoughts to the subject: > > - since it is very rare situation that one needs only a memcpy without > to know whether previous alloc may fail > (e. g. some of pointers were NULL), me too thinks that the caller > should be responsible for the check. > So I would not extend ngx_memcpy or ngx_cpymem in that way. > > - rewrite of `ngx_memcpy` define like here: > ``` > + #define ngx_memcpy(dst, src, n) (void) ((n) == 0 ? (dst) : > memcpy(dst, src, n)) > ``` > may introduce a regression or compat issues, e. g. fully functioning > codes like that may become broken hereafter: > ``` > ngx_memcpy(dst, src, ++len); // because n would be incremented twice > in the macro now > ``` > Sure, `ngx_cpymem` has also the same issue, but it had that already > before the "fix"... > Anyway, I'm always against of such macros (no matter with or without > check it would be better as an inline function instead). > > My conclusion: > a fix of affected places invoking `ngx_memcpy` and `ngx_cpymem`, and > possibly an assert in `ngx_memcpy` > and `ngx_cpymem` would be fully enough, in my opinion. > > Regards, > Sergey. > > On 15.12.2023 03:41, Maxim Dounin wrote: > >> Hello! >> >> On Wed, Dec 13, 2023 at 11:09:28AM -0500, Ben Kallus wrote: >> >> Nginx executes numerous `memcpy`s from NULL during normal >> execution. >> `memcpy`ing to or from NULL is undefined behavior. Accordingly, >> some >> compilers (gcc -O2) make optimizations that assume `memcpy` >> arguments >> are not NULL. Nginx with UBSan crashes during startup due to this >> issue. >> >> Consider the following function: >> ```C >> #include >> >> int f(int i) { >> char a[] = {'a'}; >> void *src = i ? a : NULL; >> char dst[1]; >> memcpy(dst, src, 0); >> return src == NULL; >> } >> ``` >> Here's what gcc13.2 -O2 -fno-builtin will do to it: >> ```asm >> f: >> sub rsp, 24 >> xor eax, eax >> test edi, edi >> lea rsi, [rsp+14] >> lea rdi, [rsp+15] >> mov BYTE PTR [rsp+14], 97 >> cmove rsi, rax >> xor edx, edx >> call memcpy >> xor eax, eax >> add rsp, 24 >> ret >> ``` >> Note that `f` always returns 0, regardless of the value of `i`. >> >> Feel free to try for yourself at >> https://gcc.godbolt.org/z/zfvnMMsds >> >> The reasoning here is that since memcpy from NULL is UB, the >> optimizer >> is free to assume that `src` is non-null. You might consider this >> to >> be a problem with the compiler, or the C standard, and I might >> agree. 
>> Regardless, relying on UB is inherently un-portable, and requires >> maintenance to ensure that new compiler releases don't break >> existing >> assumptions about the behavior of undefined operations. >> >> The following patch adds a check to `ngx_memcpy` and `ngx_cpymem` >> that >> makes 0-length memcpy explicitly a noop. Since all memcpying from >> NULL >> in Nginx uses n==0, this should be sufficient to avoid UB. >> >> It would be more efficient to instead add a check to every call to >> ngx_memcpy and ngx_cpymem that might be used with src==NULL, but in >> the discussion of a previous patch that proposed such a change, a >> more >> straightforward and tidy solution was desired. >> It may also be worth considering adding checks for NULL memset, >> memmove, etc. I think this is not necessary unless it is >> demonstrated >> that Nginx actually executes such undefined calls. >> >> # HG changeset patch >> # User Ben Kallus >> # Date 1702406466 18000 >> # Tue Dec 12 13:41:06 2023 -0500 >> # Node ID d270203d4ecf77cc14a2652c727e236afc659f4a >> # Parent a6f79f044de58b594563ac03139cd5e2e6a81bdb >> Add NULL check to ngx_memcpy and ngx_cpymem to satisfy UBSan. >> >> diff -r a6f79f044de5 -r d270203d4ecf src/core/ngx_string.c >> --- a/src/core/ngx_string.c Wed Nov 29 10:58:21 2023 +0400 >> +++ b/src/core/ngx_string.c Tue Dec 12 13:41:06 2023 -0500 >> @@ -2098,6 +2098,10 @@ >> ngx_debug_point(); >> } >> >> + if (n == 0) { >> + return dst; >> + } >> + >> return memcpy(dst, src, n); >> } >> >> diff -r a6f79f044de5 -r d270203d4ecf src/core/ngx_string.h >> --- a/src/core/ngx_string.h Wed Nov 29 10:58:21 2023 +0400 >> +++ b/src/core/ngx_string.h Tue Dec 12 13:41:06 2023 -0500 >> @@ -103,8 +103,9 @@ >> * gcc3 compiles memcpy(d, s, 4) to the inline "mov"es. >> * icc8 compile memcpy(d, s, 4) to the inline "mov"es or XMM >> moves. >> */ >> -#define ngx_memcpy(dst, src, n) (void) memcpy(dst, src, n) >> -#define ngx_cpymem(dst, src, n) (((u_char *) memcpy(dst, src, >> n)) + (n)) >> +#define ngx_memcpy(dst, src, n) (void) ((n) == 0 ? (dst) : >> memcpy(dst, src, n)) >> +#define ngx_cpymem(dst, src, n) >> \ >> + ((u_char *) ((n) == 0 ? (dst) : memcpy(dst, src, n)) + (n)) >> >> #endif >> >> diff -r a6f79f044de5 -r d270203d4ecf src/http/v2/ngx_http_v2.c >> --- a/src/http/v2/ngx_http_v2.c Wed Nov 29 10:58:21 2023 +0400 >> +++ b/src/http/v2/ngx_http_v2.c Tue Dec 12 13:41:06 2023 -0500 >> @@ -3998,9 +3998,7 @@ >> n = size; >> } >> >> - if (n > 0) { >> - rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); >> - } >> + rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); >> >> ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, >> "http2 request body recv %uz", n); >> >> >> For the record, I've already provided some feedback to Ben in the >> ticket here: >> >> https://trac.nginx.org/nginx/ticket/2570 >> >> And pointed to the existing thread here: >> >> https://mailman.nginx.org/pipermail/nginx-devel/2023-October/PX7VH5A273NLUGSYC7DR2AZRU75CIQ3Q.html >> https://mailman.nginx.org/pipermail/nginx-devel/2023-December/DCGUEGEFS6TSVIWNEWUEZO3FZMR6ESYZ.html >> >> Hope this helps. 
>> >> -- >> Maxim Dounin >> http://mdounin.ru/ >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> https://mailman.nginx.org/mailman/listinfo/nginx-devel >> > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > From pluknet at nginx.com Fri Dec 15 23:44:10 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Fri, 15 Dec 2023 23:44:10 +0000 Subject: [nginx] QUIC: fixed format specifier after a6f79f044de5. Message-ID: details: https://hg.nginx.org/nginx/rev/cc16989c6d61 branches: changeset: 9197:cc16989c6d61 user: Sergey Kandaurov date: Sat Dec 16 03:40:01 2023 +0400 description: QUIC: fixed format specifier after a6f79f044de5. diffstat: src/event/quic/ngx_event_quic_migration.h | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 6c8595b77e66 -r cc16989c6d61 src/event/quic/ngx_event_quic_migration.h --- a/src/event/quic/ngx_event_quic_migration.h Tue Dec 12 20:21:12 2023 +0400 +++ b/src/event/quic/ngx_event_quic_migration.h Sat Dec 16 03:40:01 2023 +0400 @@ -19,7 +19,7 @@ #define ngx_quic_path_dbg(c, msg, path) \ ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0, \ - "quic path seq:%uL %s tx:%O rx:%O valid:%ui st:%d mtu:%uz",\ + "quic path seq:%uL %s tx:%O rx:%O valid:%d st:%d mtu:%uz", \ path->seqnum, msg, path->sent, path->received, \ path->validated, path->state, path->mtu); From mdounin at mdounin.ru Sat Dec 16 02:57:28 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 16 Dec 2023 05:57:28 +0300 Subject: [PATCH 2 of 2] Win32: extended ngx_random range to 0x7fffffff In-Reply-To: References: Message-ID: Hello! On Sat, Dec 09, 2023 at 08:42:11AM +0000, J Carter wrote: > On Sat, 09 Dec 2023 07:46:10 +0000 > J Carter wrote: > > > # HG changeset patch > > # User J Carter > > # Date 1702101635 0 > > # Sat Dec 09 06:00:35 2023 +0000 > > # Node ID 1a77698f82d2580aa8b8f62ce89b4dbb6d678c5d > > # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > > Win32: extended ngx_random range to 0x7fffffff > > > > rand() is used on win32. RAND_MAX is implementation defined. win32's is > > 0x7fff. > > > > Existing uses of ngx_random rely upon 0x7fffffff range provided by > > posix implementations of random(). > > > > diff -r d9275e982a71 -r 1a77698f82d2 src/os/win32/ngx_win32_config.h > > --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > > +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 06:00:35 2023 +0000 > > @@ -280,7 +280,9 @@ > > > > #define NGX_HAVE_GETADDRINFO 1 > > > > -#define ngx_random rand > > +#define ngx_random \ > > + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > > + > > #define ngx_debug_init() > > > > > > ^ my mistake - copying error.. > > # HG changeset patch > # User J Carter > # Date 1702111094 0 > # Sat Dec 09 08:38:14 2023 +0000 > # Node ID 10ef59a412a330872fc6d46de64f42e32e997b3a > # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > Win32: extended ngx_random range to 0x7fffffff Nitpicking: Win32: extended ngx_random() range to 0x7fffffff. > > rand() is used on win32. RAND_MAX is implementation defined. win32's is > 0x7fff. > > Existing uses of ngx_random rely upon 0x7fffffff range provided by > posix implementations of random(). Thanks for catching this. As far as I can see, the only module which actually relies on the range is the random index module. 
Relying on the ngx_random() range generally looks wrong to me, and we might want to change the code to don't. OTOH, it's the only way to get a completely uniform distribution, and that's what the module tries to do. As such, it might be good enough to preserve it as is, at least till further changes to ngx_random(). Either way, wider range for ngx_random() should be beneficial in other places. > > diff -r d9275e982a71 -r 10ef59a412a3 src/os/win32/ngx_win32_config.h > --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 > @@ -280,7 +280,9 @@ > > #define NGX_HAVE_GETADDRINFO 1 > > -#define ngx_random rand > +#define ngx_random() \ Nitpicking: the "\" character should be at the 79th column (in some files at 78th). This ensures that a diff won't wrap on a standard 80-column terminal. > + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > + > #define ngx_debug_init() Using "|" for random numbers looks utterly wrong to me, even if ORed values are guaranteed to not interfere. I would rather use "^", and avoid dependency on the particular value of RAND_MAX (other than POSIX-required minimum of 32767) by using something like 0x7fffffff & ((rand() << 16) ^ (rand() << 8) ^ rand()) with proper typecasts. Something like this should work: diff --git a/src/os/win32/ngx_win32_config.h b/src/os/win32/ngx_win32_config.h --- a/src/os/win32/ngx_win32_config.h +++ b/src/os/win32/ngx_win32_config.h @@ -280,7 +280,11 @@ typedef int sig_atomic_t #define NGX_HAVE_GETADDRINFO 1 -#define ngx_random rand +#define ngx_random() \ + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ + ^ ((uint32_t) rand() << 8) \ + ^ ((uint32_t) rand()) ))) + #define ngx_debug_init() -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Sat Dec 16 04:15:57 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 16 Dec 2023 07:15:57 +0300 Subject: Core: Avoid memcpy from NULL In-Reply-To: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> Message-ID: Hello! On Fri, Dec 15, 2023 at 03:46:19PM +0100, Dipl. Ing. Sergey Brester via nginx-devel wrote: > Enclosed few thoughts to the subject: > > - since it is very rare situation that one needs only a memcpy without > to know whether previous alloc may fail > (e. g. some of pointers were NULL), me too thinks that the caller > should be responsible for the check. > So I would not extend ngx_memcpy or ngx_cpymem in that way. That's not about failed allocations, it's about ability to work with empty strings which aren't explicitly set to something other than { 0, NULL }. And you may refer to Vladimir's patch I've referenced to find out what it means in terms of the "caller should be responsible" approach even without trying to look into places not explicitly reported by the UB sanitizer. https://mailman.nginx.org/pipermail/nginx-devel/2023-October/PX7VH5A273NLUGSYC7DR2AZRU75CIQ3Q.html https://mailman.nginx.org/pipermail/nginx-devel/2023-December/DCGUEGEFS6TSVIWNEWUEZO3FZMR6ESYZ.html > - rewrite of `ngx_memcpy` define like here: > ``` > + #define ngx_memcpy(dst, src, n) (void) ((n) == 0 ? (dst) : > memcpy(dst, src, n)) > ``` > may introduce a regression or compat issues, e. g. fully functioning > codes like that may become broken hereafter: > ``` > ngx_memcpy(dst, src, ++len); // because n would be incremented twice > in the macro now > ``` > Sure, `ngx_cpymem` has also the same issue, but it had that already > before the "fix"... 
> Anyway, I'm always against of such macros (no matter with or without > check it would be better as an inline function instead). In general macro definitions in nginx are used everywhere for efficiency reasons, and macro definitions usually aren't safe. While some might prefer other approaches, writing code like "ngx_memcpy(dst, src, ++len)" in nginx is just wrong, and shouldn't be trusted to work, much like it won't work with "ngx_cpymem(dst, src, ++len)". I'm not exactly against using inline functions here, but the particular argument is at most very weak. > My conclusion: > a fix of affected places invoking `ngx_memcpy` and `ngx_cpymem`, and > possibly an assert in `ngx_memcpy` > and `ngx_cpymem` would be fully enough, in my opinion. Well, thank you for your opinion, appreciated. I don't think this approach is going to work though, see my review of Vladimir's patch. Ideally, I would prefer this to be fixed in the C standard (and GCC). But given this is not a likely option, and there is a constant stream of reports "hey, UB sanitizer reports about memcpy(dst, NULL, 0) in nginx" we might consider actually silencing this by introducing appropriate checks at the interface level. -- Maxim Dounin http://mdounin.ru/ From jordanc.carter at outlook.com Sat Dec 16 06:27:44 2023 From: jordanc.carter at outlook.com (J Carter) Date: Sat, 16 Dec 2023 06:27:44 +0000 Subject: [PATCH 2 of 2] Win32: extended ngx_random range to 0x7fffffff In-Reply-To: References: Message-ID: Hello, Thanks the review and feedback. On Sat, 16 Dec 2023 05:57:28 +0300 Maxim Dounin wrote: > Hello! > > On Sat, Dec 09, 2023 at 08:42:11AM +0000, J Carter wrote: > > > On Sat, 09 Dec 2023 07:46:10 +0000 > > J Carter wrote: > > > > > # HG changeset patch > > > # User J Carter > > > # Date 1702101635 0 > > > # Sat Dec 09 06:00:35 2023 +0000 > > > # Node ID 1a77698f82d2580aa8b8f62ce89b4dbb6d678c5d > > > # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > > > Win32: extended ngx_random range to 0x7fffffff > > > > > > rand() is used on win32. RAND_MAX is implementation defined. win32's is > > > 0x7fff. > > > > > > Existing uses of ngx_random rely upon 0x7fffffff range provided by > > > posix implementations of random(). > > > > > > diff -r d9275e982a71 -r 1a77698f82d2 src/os/win32/ngx_win32_config.h > > > --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > > > +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 06:00:35 2023 +0000 > > > @@ -280,7 +280,9 @@ > > > > > > #define NGX_HAVE_GETADDRINFO 1 > > > > > > -#define ngx_random rand > > > +#define ngx_random \ > > > + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > > > + > > > #define ngx_debug_init() > > > > > > > > > > ^ my mistake - copying error.. > > > > # HG changeset patch > > # User J Carter > > # Date 1702111094 0 > > # Sat Dec 09 08:38:14 2023 +0000 > > # Node ID 10ef59a412a330872fc6d46de64f42e32e997b3a > > # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > > Win32: extended ngx_random range to 0x7fffffff > > Nitpicking: > > Win32: extended ngx_random() range to 0x7fffffff. Ah thanks, will include the full stop in future. > > > > > rand() is used on win32. RAND_MAX is implementation defined. win32's is > > 0x7fff. > > > > Existing uses of ngx_random rely upon 0x7fffffff range provided by > > posix implementations of random(). > > Thanks for catching this. > > As far as I can see, the only module which actually relies on the > range is the random index module. 
Yes, that was the obvious one - I suspect upstream_random balancers might act strangely in extreme cases (I'm not entirely sure though). Other uses I'm not sure at all, as those are more domain specific (like resolver). > Relying on the ngx_random() range generally looks wrong to me, and > we might want to change the > code to don't. OTOH, it's the only way to get a completely > uniform distribution, and that's what the module tries to do. As > such, it might be good enough to preserve it as is, at least till > further changes to ngx_random(). Yes the alternative would seem to be #ifdefs for win32 vs unix, or adding in an 'ngx_random_max', or writing (or borrowing) a posix random implementation into nginx's codebase. > > Either way, wider range for ngx_random() should be beneficial in > other places. > > > > > diff -r d9275e982a71 -r 10ef59a412a3 src/os/win32/ngx_win32_config.h > > --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > > +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 > > @@ -280,7 +280,9 @@ > > > > #define NGX_HAVE_GETADDRINFO 1 > > > > -#define ngx_random rand > > +#define ngx_random() \ > > Nitpicking: the "\" character should be at the 79th column (in > some files at 78th). This ensures that a diff won't wrap on a > standard 80-column terminal. Thanks, will note for the future. > > > + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > > + > > #define ngx_debug_init() > > Using "|" for random numbers looks utterly wrong to me, even if > ORed values are guaranteed to not interfere. > > I would rather use "^", and avoid dependency on the particular > value of RAND_MAX (other than POSIX-required minimum of 32767) by > using something like > > 0x7fffffff & ((rand() << 16) ^ (rand() << 8) ^ rand()) > > with proper typecasts. Yep, makes sense. > > Something like this should work: > > diff --git a/src/os/win32/ngx_win32_config.h b/src/os/win32/ngx_win32_config.h > --- a/src/os/win32/ngx_win32_config.h > +++ b/src/os/win32/ngx_win32_config.h > @@ -280,7 +280,11 @@ typedef int sig_atomic_t > > #define NGX_HAVE_GETADDRINFO 1 > > -#define ngx_random rand > +#define ngx_random() \ > + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ > + ^ ((uint32_t) rand() << 8) \ > + ^ ((uint32_t) rand()) ))) > + > #define ngx_debug_init() > > > Looks good to me. From rcasey at gmail.com Sat Dec 16 07:42:18 2023 From: rcasey at gmail.com (Rob Casey) Date: Sat, 16 Dec 2023 18:42:18 +1100 Subject: [PATCH] Add ssl_client_tls_bind variable In-Reply-To: References: Message-ID: Very interesting. Thanks for these links Maxim. I would actually favour Steffen's patch over my own for the completeness of exposing both tls-unique and tls-server-end-point. I would note from the second link however that this patch was abandoned due to the limited application of channel binding where more comprehensive web infrastructure may be deployed. There is however a separate use for tls-unique (in TLSv1.2 and tls-exporter in TLSv1.3) independent of channel binding. This is in the form of demonstrating proof-of-possession within Enrollment over Secure Transport (EST). This protocol, described in RFC 7030 , describes a simple, yet functional, certificate management protocol targeting Public Key Infrastructure (PKI) clients that need to acquire client certificates and associated Certification Authority (CA) certificates. This protocol supports both client-generated public/private key pairs and those generated by the CA. 
In this protocol, the EST server can mandate the inclusion of information from the current authenticated TLS session (tls-unique) within the Certificate Signing Request (CSR). This action may be performed to establish a link between authentication identity and proof-of-possession of the private key associated with the certification request - See Section 3.5 of RFC 7030 for details. While I have an in-progress implementation of an EST module for nginx ( nginx-http-est ), this functionality (proof-of-possession) has not yet been incorporated within my code. I did however note a request for this same functionality in the Nginx forum (linked in my original post) which prompted me to create this patch - >From my perspective, I have no concern whether this patch is merged, but feel that overhead cost of incorporating these additional SSL variables is likely to be very small with respect to the benefit for, admittedly, a very small, user segment with interest in this level of SSL operations. Rob On Fri, Dec 15, 2023 at 10:28 PM Maxim Dounin wrote: > Hello! > > On Fri, Dec 15, 2023 at 06:02:45PM +1100, Rob Casey wrote: > > > First time caller, long time listener. > > > > This patch introduces the variable $ssl_client_tls_bind which provides > the > > last Finished message returned by the OpenSSL SSL_get_peer_finished() > > function. The value returned by this function may be used in TLS channel > > binding operations as described in RFC 5929 > > (TLSv1.2) and RFC 9266 > > (TLSv1.3). The bytes > > returned by this function are base64-encoded for ease-of-use as per > > suggestion on Nginx forum thread > > . > > You might be interested in a previous attempt to introduce similar > variables, here: > > https://mailman.nginx.org/pipermail/nginx-devel/2021-May/014082.html > https://mailman.nginx.org/pipermail/nginx-devel/2021-June/014090.html > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From benjamin.p.kallus.gr at dartmouth.edu Sat Dec 16 21:26:37 2023 From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus) Date: Sat, 16 Dec 2023 16:26:37 -0500 Subject: Core: Avoid memcpy from NULL In-Reply-To: References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> Message-ID: > In general macro definitions in nginx are used everywhere for > efficiency reasons Clang inlines short functions with -O1, and GCC does so with -O2 or -O1 -finline-small-functions. Are there any platforms that Nginx needs to support for which short function inlining isn't sufficient to solve this issue? > While some might prefer other approaches, writing code like > "ngx_memcpy(dst, src, ++len)" in nginx is just wrong, and > shouldn't be trusted to work, much like it won't work with > "ngx_cpymem(dst, src, ++len)". It is indeed wrong to use an expression with a side-effect in an argument to cpymem, but it's also not very obvious that it's wrong. An inlined function solves the argument reevaluation issue without performance overhead. 
-Ben From pluknet at nginx.com Mon Dec 18 22:09:10 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 19 Dec 2023 02:09:10 +0400 Subject: [PATCH] ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 In-Reply-To: <2001e73ce136d5bfc9bd.1700771381@fedora> References: <2001e73ce136d5bfc9bd.1700771381@fedora> Message-ID: <18C46B67-C82F-490A-B3AA-14AF5D9CC369@nginx.com> > On 24 Nov 2023, at 00:29, Ilya Shipitsin wrote: > > # HG changeset patch > # User Ilya Shipitsin > # Date 1700769135 -3600 > # Thu Nov 23 20:52:15 2023 +0100 > # Node ID 2001e73ce136d5bfc9bde27d338865b14b8ad436 > # Parent 7ec761f0365f418511e30b82e9adf80bc56681df > ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 style: SSL prefix should be uppercase. > > diff -r 7ec761f0365f -r 2001e73ce136 src/event/ngx_event_openssl_stapling.c > --- a/src/event/ngx_event_openssl_stapling.c Thu Oct 26 23:35:09 2023 +0300 > +++ b/src/event/ngx_event_openssl_stapling.c Thu Nov 23 20:52:15 2023 +0100 > @@ -893,7 +893,8 @@ > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > ocsp->conf = ocf; > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > +/* minimum OpenSSL 1.1.1 & LibreSSL 3.3.6 */ > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) || (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > Testing "defined(LIBRESSL_VERSION_NUMBER)" is superfluous. The macro test suffers from a very long line. The correct version test seems to be against LibreSSL 3.5.0, see https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.5.0-relnotes.txt So, the resulting change would be as follows: diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c --- a/src/event/ngx_event_openssl_stapling.c +++ b/src/event/ngx_event_openssl_stapling.c @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; ocsp->conf = ocf; -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ + && !defined LIBRESSL_VERSION_NUMBER) \ + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); On the other hand, I don't like the resulting style mudness. It may have sense just to drop old LibreSSL versions support: maintaining one or two most recent stable branches should be enough. But anyway, I don't see an obvious win over the existing code: the certificate chain is reconstructed if SSL_get0_verified_chain() is (detected to be) not present, which should be fine in most cases. That said, it doesn't seem to deserve introducing 3-line macro test, or (see OTOH note) breaking old LibreSSL support for no apparent reason. -- Sergey Kandaurov From mdounin at mdounin.ru Tue Dec 19 08:58:02 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 19 Dec 2023 11:58:02 +0300 Subject: [PATCH] ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 In-Reply-To: <18C46B67-C82F-490A-B3AA-14AF5D9CC369@nginx.com> References: <2001e73ce136d5bfc9bd.1700771381@fedora> <18C46B67-C82F-490A-B3AA-14AF5D9CC369@nginx.com> Message-ID: Hello! 
On Tue, Dec 19, 2023 at 02:09:10AM +0400, Sergey Kandaurov wrote: > > On 24 Nov 2023, at 00:29, Ilya Shipitsin wrote: > > > > # HG changeset patch > > # User Ilya Shipitsin > > # Date 1700769135 -3600 > > # Thu Nov 23 20:52:15 2023 +0100 > > # Node ID 2001e73ce136d5bfc9bde27d338865b14b8ad436 > > # Parent 7ec761f0365f418511e30b82e9adf80bc56681df > > ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 > > style: SSL prefix should be uppercase. > > > > > diff -r 7ec761f0365f -r 2001e73ce136 src/event/ngx_event_openssl_stapling.c > > --- a/src/event/ngx_event_openssl_stapling.c Thu Oct 26 23:35:09 2023 +0300 > > +++ b/src/event/ngx_event_openssl_stapling.c Thu Nov 23 20:52:15 2023 +0100 > > @@ -893,7 +893,8 @@ > > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > > ocsp->conf = ocf; > > > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > > +/* minimum OpenSSL 1.1.1 & LibreSSL 3.3.6 */ > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) || (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) > > > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > > > > Testing "defined(LIBRESSL_VERSION_NUMBER)" is superfluous. > The macro test suffers from a very long line. > > The correct version test seems to be against LibreSSL 3.5.0, see > https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.5.0-relnotes.txt > > So, the resulting change would be as follows: > > diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c > --- a/src/event/ngx_event_openssl_stapling.c > +++ b/src/event/ngx_event_openssl_stapling.c > @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > ocsp->conf = ocf; > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > + && !defined LIBRESSL_VERSION_NUMBER) \ > + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL Rather, +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ + && (!defined LIBRESSL_VERSION_NUMBER \ + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > > > On the other hand, I don't like the resulting style mudness. > It may have sense just to drop old LibreSSL versions support: > maintaining one or two most recent stable branches should be enough. +1 on this. We certainly don't want to maintain support for ancient LibreSSL versions. IMO, just two branches is more than enough (and this is what I use in my testing, which usually means latest development release and latest stable release). At most, this probably can be three branches - which seems to match LibreSSL support policy, https://www.libressl.org/releases.html: : LibreSSL transitions to a new stable release branch every 6 months : in coordination with the OpenBSD development schedule. LibreSSL : stable branches are updated for 1 year after their corresponding : OpenBSD branch is tagged for release. See below for the current : stable release branches. In either case, LibreSSL versions below 3.5.0 are already not supported. If I understand correctly, right now the oldest supported branch is 3.7.x. > But anyway, I don't see an obvious win over the existing code: > the certificate chain is reconstructed if SSL_get0_verified_chain() > is (detected to be) not present, which should be fine in most cases. 
> > That said, it doesn't seem to deserve introducing 3-line macro test, > or (see OTOH note) breaking old LibreSSL support for no apparent reason. Reconstruction of the chain implies verification of signatures along the chain and can be costly. As such, it certainly would be better to use SSL_get0_verified_chain() as long as it is available. Also, removing the "!defined LIBRESSL_VERSION_NUMBER" check might be seen as positive even without any additional benefits. Along with that, however, we might want to adjust the LIBRESSL_VERSION_NUMBER check in the ngx_event_openssl.h file, so OPENSSL_VERSION_NUMBER is set to a better value for old LibreSSL versions - for example, to only set OPENSSL_VERSION_NUMBER to 0x1010000fL for LibreSSL 3.5.0 or above. This might allow to preserve limited compatibility with ancient LibreSSL versions without additional efforts (not tested though). -- Maxim Dounin http://mdounin.ru/ From chipitsine at gmail.com Tue Dec 19 11:16:58 2023 From: chipitsine at gmail.com (=?UTF-8?B?0JjQu9GM0Y8g0KjQuNC/0LjRhtC40L0=?=) Date: Tue, 19 Dec 2023 12:16:58 +0100 Subject: [PATCH] ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 In-Reply-To: References: <2001e73ce136d5bfc9bd.1700771381@fedora> <18C46B67-C82F-490A-B3AA-14AF5D9CC369@nginx.com> Message-ID: вт, 19 дек. 2023 г. в 09:58, Maxim Dounin : > Hello! > > On Tue, Dec 19, 2023 at 02:09:10AM +0400, Sergey Kandaurov wrote: > > > > On 24 Nov 2023, at 00:29, Ilya Shipitsin wrote: > > > > > > # HG changeset patch > > > # User Ilya Shipitsin > > > # Date 1700769135 -3600 > > > # Thu Nov 23 20:52:15 2023 +0100 > > > # Node ID 2001e73ce136d5bfc9bde27d338865b14b8ad436 > > > # Parent 7ec761f0365f418511e30b82e9adf80bc56681df > > > ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 > > > > style: SSL prefix should be uppercase. > > > > > > > > diff -r 7ec761f0365f -r 2001e73ce136 > src/event/ngx_event_openssl_stapling.c > > > --- a/src/event/ngx_event_openssl_stapling.c Thu Oct 26 > 23:35:09 2023 +0300 > > > +++ b/src/event/ngx_event_openssl_stapling.c Thu Nov 23 > 20:52:15 2023 +0100 > > > @@ -893,7 +893,8 @@ > > > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > > > ocsp->conf = ocf; > > > > > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined > LIBRESSL_VERSION_NUMBER) > > > +/* minimum OpenSSL 1.1.1 & LibreSSL 3.3.6 */ > > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined > LIBRESSL_VERSION_NUMBER) || (defined(LIBRESSL_VERSION_NUMBER) && > (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) > > > > > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > > > > > > > Testing "defined(LIBRESSL_VERSION_NUMBER)" is superfluous. > > The macro test suffers from a very long line. 
> > > > The correct version test seems to be against LibreSSL 3.5.0, see > > https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.5.0-relnotes.txt > > > > So, the resulting change would be as follows: > > > > diff --git a/src/event/ngx_event_openssl_stapling.c > b/src/event/ngx_event_openssl_stapling.c > > --- a/src/event/ngx_event_openssl_stapling.c > > +++ b/src/event/ngx_event_openssl_stapling.c > > @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > > ocsp->conf = ocf; > > > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined > LIBRESSL_VERSION_NUMBER) > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > > + && !defined LIBRESSL_VERSION_NUMBER) \ > > + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL > > Rather, > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L > \ > + && (!defined LIBRESSL_VERSION_NUMBER > \ > + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) > > > > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > > > > > > On the other hand, I don't like the resulting style mudness. > > It may have sense just to drop old LibreSSL versions support: > > maintaining one or two most recent stable branches should be enough. > > +1 on this. > if we want to keep code clean, we can move "if LibreSSL >= 3.7" to "configure" level > > We certainly don't want to maintain support for ancient LibreSSL > versions. IMO, just two branches is more than enough (and this is > what I use in my testing, which usually means latest development > release and latest stable release). > > At most, this probably can be three branches - which seems to > match LibreSSL support policy, > https://www.libressl.org/releases.html: > > : LibreSSL transitions to a new stable release branch every 6 months > : in coordination with the OpenBSD development schedule. LibreSSL > : stable branches are updated for 1 year after their corresponding > : OpenBSD branch is tagged for release. See below for the current > : stable release branches. > > In either case, LibreSSL versions below 3.5.0 are already not > supported. If I understand correctly, right now the oldest > supported branch is 3.7.x. > > > But anyway, I don't see an obvious win over the existing code: > > the certificate chain is reconstructed if SSL_get0_verified_chain() > > is (detected to be) not present, which should be fine in most cases. > > > > That said, it doesn't seem to deserve introducing 3-line macro test, > > or (see OTOH note) breaking old LibreSSL support for no apparent reason. > > Reconstruction of the chain implies verification of signatures > along the chain and can be costly. As such, it certainly would be > better to use SSL_get0_verified_chain() as long as it is > available. > > Also, removing the "!defined LIBRESSL_VERSION_NUMBER" check might > be seen as positive even without any additional benefits. > > Along with that, however, we might want to adjust the > LIBRESSL_VERSION_NUMBER check in the ngx_event_openssl.h file, so > OPENSSL_VERSION_NUMBER is set to a better value for old LibreSSL > versions - for example, to only set OPENSSL_VERSION_NUMBER to > 0x1010000fL for LibreSSL 3.5.0 or above. This might allow to > preserve limited compatibility with ancient LibreSSL versions > without additional efforts (not tested though). 
> > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From zaihan at unrealasia.net Tue Dec 19 14:11:04 2023 From: zaihan at unrealasia.net (Muhammad Nuzaihan) Date: Tue, 19 Dec 2023 22:11:04 +0800 Subject: processing a request without body In-Reply-To: References: Message-ID: Thanks Maxim, Vasility, The problem i was going to solve is to i needed to run my specific function that takes the data of request URL path, Headers and request body and determine and validate that all that data is correct before sending upstream, or else i would deny the request with 4xx code errors. Handlers can only handle (from what i know) URL path and headers. Request body requires a request chain (ngx_chain_t)) to piece out the request body and handlers doesn't seem to have t ngx_chain_t unlike request body filters. Or maybe i am wrong in this case? Thank you, Muhammad Nuzaihan On Thu, Dec 14, 2023 at 4:01 AM Vasiliy Soshnikov wrote: > > Sorry I'm bad. I understood that header filters won't help you. Use the ACCESS PHASE handler, it should work fine for you. > > On Wed, Dec 13, 2023 at 10:57 PM Vasiliy Soshnikov wrote: >> >> Hello, >> >> > Is there something similar done before? >> I'm thinking that you would like to test the incoming path and execute some logic. >> >> You could use a header filter for that and also you could keep a body filter for handling the request body. >> Also pls take a look into PHASEs, I'm thinking you could try to add your own ACCESS PHASE. >> >> And the last one: for keeping your context (some variables or data) for this request between filters, phases you could use request's context. >> >> Probably, examples would help you: https://github.com/dedok/nginx-tutorials >> >> >> On Wed, Dec 13, 2023 at 10:56 AM Muhammad Nuzaihan wrote: >>> >>> Hi, >>> >>> I need to process requests with only URI path (without body) for a module. >>> >>> It seems ngx_http_request_body_filter_pt is *not* executed whenever >>> there is a request without a body (it looked like it bypassed without >>> request body) and only ngx_http_output_body_filter_pt part of the >>> code is executed. >>> >>> For example i do a request curl curl like this: >>> >>> curl -vvvv -X POST http://localhost:8080/proxy/profile/alice/comment >>> >>> and i need to validate /proxy/profile/alice/comment in my module and >>> there is no http headers and no body. Only URI path. >>> >>> Is there something similar done before? >>> >>> Thank you, >>> Muhammad Nuzaihan >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> https://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From jan.prachar at gmail.com Tue Dec 19 15:02:46 2023 From: jan.prachar at gmail.com (Jan =?UTF-8?Q?Pracha=C5=99?=) Date: Tue, 19 Dec 2023 16:02:46 +0100 Subject: Meaning of proxy_ignore_client_abort Message-ID: <00d92b9d21b932f7ae26cf40d1b64f288ffadd7e.camel@gmail.com> Hello, I have proxy module configured with proxy_ignore_client_abort on; but connection to upstream is still being closed. This is in debug log: writev() failed (32: Broken pipe) while sending to client, ... 
http write filter FFFFFFFFFFFFFFFF http copy filter: -1 ... pipe read upstream: 0 pipe buf free s:0 t:1 f:0 000055DEBEF95EC0, pos 000055DEBEF95EC0, size: 473 file: 0, size: 0 pipe buf free s:0 t:1 f:0 000055DEBEF91EB0, pos 000055DEBEF91EB0, size: 0 file: 0, size: 0 pipe length: 22594336 event timer: 23, old: 15583745, new: 15583837 http upstream downstream error finalize http upstream request: -1 finalize http proxy request close http upstream connection: 23 It seems, that check if ignore_client_abort is on, is missing here:  https://trac.nginx.org/nginx/browser/nginx/src/http/ngx_http_upstream.c#L4223 Or is there any reason why the connection is closed regardless the ignore_client_abort? Thanks, Jan From mdounin at mdounin.ru Tue Dec 19 17:45:21 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 19 Dec 2023 20:45:21 +0300 Subject: Meaning of proxy_ignore_client_abort In-Reply-To: <00d92b9d21b932f7ae26cf40d1b64f288ffadd7e.camel@gmail.com> References: <00d92b9d21b932f7ae26cf40d1b64f288ffadd7e.camel@gmail.com> Message-ID: Hello! On Tue, Dec 19, 2023 at 04:02:46PM +0100, Jan Prachař wrote: > Hello, > > I have proxy module configured with proxy_ignore_client_abort on; but connection to > upstream is still being closed. This is in debug log: > > writev() failed (32: Broken pipe) while sending to client, ... > http write filter FFFFFFFFFFFFFFFF > http copy filter: -1 ... > pipe read upstream: 0 > pipe buf free s:0 t:1 f:0 000055DEBEF95EC0, pos 000055DEBEF95EC0, size: 473 file: 0, > size: 0 > pipe buf free s:0 t:1 f:0 000055DEBEF91EB0, pos 000055DEBEF91EB0, size: 0 file: 0, size: > 0 > pipe length: 22594336 > event timer: 23, old: 15583745, new: 15583837 > http upstream downstream error > finalize http upstream request: -1 > finalize http proxy request > close http upstream connection: 23 > > It seems, that check if ignore_client_abort is on, is missing here:  > https://trac.nginx.org/nginx/browser/nginx/src/http/ngx_http_upstream.c#L4223 > > Or is there any reason why the connection is closed regardless the ignore_client_abort? When an error while sending to the client occurs, like in the log you've provided, the connection is closed regardless of the "proxy_ignore_client_abort" directive. The directive only affects nginx behaviour when nginx is waiting for a response from the upstream server: with "proxy_ignore_client_abort on;" nginx will not try to detect if the client already closed the connection and close the upstream connection accordingly. When the response is being sent, the upstream server is expected to be smart enough to recognize that the connection was closed. Note that the docs say (http://nginx.org/r/proxy_ignore_client_abort): : Determines whether the connection with a proxied server should : be closed when a client closes the connection without waiting for : a response. While it probably can be improved, it explicitly says "without waiting for a response", and nothing about "when reading a response". -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Dec 19 17:55:33 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 19 Dec 2023 20:55:33 +0300 Subject: [PATCH] ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 In-Reply-To: References: <2001e73ce136d5bfc9bd.1700771381@fedora> <18C46B67-C82F-490A-B3AA-14AF5D9CC369@nginx.com> Message-ID: Hello! On Tue, Dec 19, 2023 at 12:16:58PM +0100, Илья Шипицин wrote: > вт, 19 дек. 2023 г. в 09:58, Maxim Dounin : > > > Hello! 
> > > > On Tue, Dec 19, 2023 at 02:09:10AM +0400, Sergey Kandaurov wrote: > > > > > > On 24 Nov 2023, at 00:29, Ilya Shipitsin wrote: > > > > > > > > # HG changeset patch > > > > # User Ilya Shipitsin > > > > # Date 1700769135 -3600 > > > > # Thu Nov 23 20:52:15 2023 +0100 > > > > # Node ID 2001e73ce136d5bfc9bde27d338865b14b8ad436 > > > > # Parent 7ec761f0365f418511e30b82e9adf80bc56681df > > > > ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 > > > > > > style: SSL prefix should be uppercase. > > > > > > > > > > > diff -r 7ec761f0365f -r 2001e73ce136 > > src/event/ngx_event_openssl_stapling.c > > > > --- a/src/event/ngx_event_openssl_stapling.c Thu Oct 26 > > 23:35:09 2023 +0300 > > > > +++ b/src/event/ngx_event_openssl_stapling.c Thu Nov 23 > > 20:52:15 2023 +0100 > > > > @@ -893,7 +893,8 @@ > > > > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > > > > ocsp->conf = ocf; > > > > > > > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined > > LIBRESSL_VERSION_NUMBER) > > > > +/* minimum OpenSSL 1.1.1 & LibreSSL 3.3.6 */ > > > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined > > LIBRESSL_VERSION_NUMBER) || (defined(LIBRESSL_VERSION_NUMBER) && > > (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) > > > > > > > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > > > > > > > > > > Testing "defined(LIBRESSL_VERSION_NUMBER)" is superfluous. > > > The macro test suffers from a very long line. > > > > > > The correct version test seems to be against LibreSSL 3.5.0, see > > > https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.5.0-relnotes.txt > > > > > > So, the resulting change would be as follows: > > > > > > diff --git a/src/event/ngx_event_openssl_stapling.c > > b/src/event/ngx_event_openssl_stapling.c > > > --- a/src/event/ngx_event_openssl_stapling.c > > > +++ b/src/event/ngx_event_openssl_stapling.c > > > @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > > > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > > > ocsp->conf = ocf; > > > > > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined > > LIBRESSL_VERSION_NUMBER) > > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > > > + && !defined LIBRESSL_VERSION_NUMBER) \ > > > + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL > > > > Rather, > > > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L > > \ > > + && (!defined LIBRESSL_VERSION_NUMBER > > \ > > + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) > > > > > > > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > > > > > > > > > On the other hand, I don't like the resulting style mudness. > > > It may have sense just to drop old LibreSSL versions support: > > > maintaining one or two most recent stable branches should be enough. > > > > +1 on this. > > > > if we want to keep code clean, we can move "if LibreSSL >= 3.7" to > "configure" level I wouldn't expect such a change to be accepted. We generally don't try to test various SSL library features in configure - there are a lot of things to check, and testing appropriate defines (or version numbers, if no defines are available) is believed to work much better. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Tue Dec 19 18:44:00 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 19 Dec 2023 22:44:00 +0400 Subject: [PATCH] ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 In-Reply-To: References: <2001e73ce136d5bfc9bd.1700771381@fedora> <18C46B67-C82F-490A-B3AA-14AF5D9CC369@nginx.com> Message-ID: > On 19 Dec 2023, at 12:58, Maxim Dounin wrote: > > Hello! 
> > On Tue, Dec 19, 2023 at 02:09:10AM +0400, Sergey Kandaurov wrote: > >>> On 24 Nov 2023, at 00:29, Ilya Shipitsin wrote: >>> >>> # HG changeset patch >>> # User Ilya Shipitsin >>> # Date 1700769135 -3600 >>> # Thu Nov 23 20:52:15 2023 +0100 >>> # Node ID 2001e73ce136d5bfc9bde27d338865b14b8ad436 >>> # Parent 7ec761f0365f418511e30b82e9adf80bc56681df >>> ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 >> >> style: SSL prefix should be uppercase. >> >>> >>> diff -r 7ec761f0365f -r 2001e73ce136 src/event/ngx_event_openssl_stapling.c >>> --- a/src/event/ngx_event_openssl_stapling.c Thu Oct 26 23:35:09 2023 +0300 >>> +++ b/src/event/ngx_event_openssl_stapling.c Thu Nov 23 20:52:15 2023 +0100 >>> @@ -893,7 +893,8 @@ >>> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; >>> ocsp->conf = ocf; >>> >>> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) >>> +/* minimum OpenSSL 1.1.1 & LibreSSL 3.3.6 */ >>> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) || (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) >>> >>> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); >>> >> >> Testing "defined(LIBRESSL_VERSION_NUMBER)" is superfluous. >> The macro test suffers from a very long line. >> >> The correct version test seems to be against LibreSSL 3.5.0, see >> https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.5.0-relnotes.txt >> >> So, the resulting change would be as follows: >> >> diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c >> --- a/src/event/ngx_event_openssl_stapling.c >> +++ b/src/event/ngx_event_openssl_stapling.c >> @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * >> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; >> ocsp->conf = ocf; >> >> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) >> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ >> + && !defined LIBRESSL_VERSION_NUMBER) \ >> + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL > > Rather, > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > + && (!defined LIBRESSL_VERSION_NUMBER \ > + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) > Agree. For the sake of completeness: # HG changeset patch # User Sergey Kandaurov # Date 1703004490 -14400 # Tue Dec 19 20:48:10 2023 +0400 # Node ID 267cee796462f4f6bacf825c8fd24d13845d36f4 # Parent 7a6d52990f2e2d88460a3dc6cc84aac89b7329ea SSL: using SSL_get0_verified_chain() with LibreSSL 3.5.0+. Prodded by Ilya Shipitsin. diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c --- a/src/event/ngx_event_openssl_stapling.c +++ b/src/event/ngx_event_openssl_stapling.c @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; ocsp->conf = ocf; -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ + && (!defined LIBRESSL_VERSION_NUMBER \ + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); >> >> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); >> >> >> On the other hand, I don't like the resulting style mudness. >> It may have sense just to drop old LibreSSL versions support: >> maintaining one or two most recent stable branches should be enough. > > +1 on this. > > We certainly don't want to maintain support for ancient LibreSSL > versions. 
IMO, just two branches is more than enough (and this is > what I use in my testing, which usually means latest development > release and latest stable release). > > At most, this probably can be three branches - which seems to > match LibreSSL support policy, > https://www.libressl.org/releases.html: > > : LibreSSL transitions to a new stable release branch every 6 months > : in coordination with the OpenBSD development schedule. LibreSSL > : stable branches are updated for 1 year after their corresponding > : OpenBSD branch is tagged for release. See below for the current > : stable release branches. > > In either case, LibreSSL versions below 3.5.0 are already not > supported. If I understand correctly, right now the oldest > supported branch is 3.7.x. Agree. Also, Repology shows that modern popular distributions, such as Alpine Linux and FreeBSD, have at least LibreSSL 3.5.x: https://repology.org/project/libressl/versions > >> But anyway, I don't see an obvious win over the existing code: >> the certificate chain is reconstructed if SSL_get0_verified_chain() >> is (detected to be) not present, which should be fine in most cases. >> >> That said, it doesn't seem to deserve introducing 3-line macro test, >> or (see OTOH note) breaking old LibreSSL support for no apparent reason. > > Reconstruction of the chain implies verification of signatures > along the chain and can be costly. As such, it certainly would be > better to use SSL_get0_verified_chain() as long as it is > available. Agree. My point is that not using SSL_get0_verified_chain() should not result in a broken functionality, as in the OCSP cert validation. So, intention to start using it in LibreSSL may look an insufficient argument per se to drop old LibreSSL versions. Though, dropping them may be orthogonal to SSL_get0_verified_chain(). > > Also, removing the "!defined LIBRESSL_VERSION_NUMBER" check might > be seen as positive even without any additional benefits. > > Along with that, however, we might want to adjust the > LIBRESSL_VERSION_NUMBER check in the ngx_event_openssl.h file, so > OPENSSL_VERSION_NUMBER is set to a better value for old LibreSSL > versions - for example, to only set OPENSSL_VERSION_NUMBER to > 0x1010000fL for LibreSSL 3.5.0 or above. Sounds like a plan if we are fine to drop older LibreSSL versions. > This might allow to > preserve limited compatibility with ancient LibreSSL versions > without additional efforts (not tested though). > This won't build with any LibreSSL version in the [2.8.0, 3.5.0) range. Particularly, SSL_CTX_sess_set_get_cb() has got a const argument in LibreSSL 2.8.0, which is not backward compatible, see 7337:cab37803ebb3. Another reason is SSL was made opaque by default in 3.4.x. (Others seem not to affect building on older versions if pick up 3.5.0: - X509_up_ref appeared in LibreSSL 2.6.0, X509 made opaque in 3.5.0; - X509_get0_notAfter appeared in 2.7.0, X509_get_notAfter still there.) Personally I'm fine to drop ancient LibreSSL versions, because it has to happen someday and we don't want to maintain them eternally. Alternative patch for your consideration: # HG changeset patch # User Sergey Kandaurov # Date 1703011348 -14400 # Tue Dec 19 22:42:28 2023 +0400 # Node ID 94d4ef4a2316da66fea084952913ff2b0f84827d # Parent 7a6d52990f2e2d88460a3dc6cc84aac89b7329ea SSL: removed compatibility with LibreSSL < 3.5.0. OPENSSL_VERSION_NUMBER is now redefined to 0x1010000fL for LibreSSL 3.5.0+. 
As older versions starting from LibreSSL 2.8.0 won't build with a lesser OPENSSL_VERSION_NUMBER value (see 7337:cab37803ebb3 for details), they are now explicitly unsupported. Besides that, this allows to start using SSL_get0_verified_chain() with LibreSSL without additional macro tests. diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -45,10 +45,10 @@ #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) #undef OPENSSL_VERSION_NUMBER -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) #define OPENSSL_VERSION_NUMBER 0x1010000fL #else -#define OPENSSL_VERSION_NUMBER 0x1000107fL +#error LibreSSL too old, need at least 3.5.0 #endif #endif diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c --- a/src/event/ngx_event_openssl_stapling.c +++ b/src/event/ngx_event_openssl_stapling.c @@ -893,7 +893,7 @@ ngx_ssl_ocsp_validate(ngx_connection_t * ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; ocsp->conf = ocf; -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) +#if OPENSSL_VERSION_NUMBER >= 0x10100000L ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); -- Sergey Kandaurov From v.zhestikov at f5.com Tue Dec 19 20:51:07 2023 From: v.zhestikov at f5.com (=?utf-8?q?Vadim_Zhestikov?=) Date: Tue, 19 Dec 2023 20:51:07 +0000 Subject: [njs] Modules: fixed clear() method of a shared dictionary without timeout. Message-ID: details: https://hg.nginx.org/njs/rev/4a15613f4e8b branches: changeset: 2250:4a15613f4e8b user: Vadim Zhestikov date: Tue Dec 19 12:37:05 2023 -0800 description: Modules: fixed clear() method of a shared dictionary without timeout. This fixes #690 issue on Github. 
diffstat: nginx/ngx_js_shared_dict.c | 30 +++++++++++++++++++++++++++--- nginx/t/js_shared_dict.t | 21 +++++++++++++++++++-- 2 files changed, 46 insertions(+), 5 deletions(-) diffs (115 lines): diff -r fc1001f6801b -r 4a15613f4e8b nginx/ngx_js_shared_dict.c --- a/nginx/ngx_js_shared_dict.c Thu Dec 14 22:32:02 2023 -0800 +++ b/nginx/ngx_js_shared_dict.c Tue Dec 19 12:37:05 2023 -0800 @@ -109,6 +109,8 @@ static njs_int_t ngx_js_dict_shared_erro static ngx_int_t ngx_js_dict_init_zone(ngx_shm_zone_t *shm_zone, void *data); static njs_int_t ngx_js_shared_dict_preinit(njs_vm_t *vm); static njs_int_t ngx_js_shared_dict_init(njs_vm_t *vm); +static void ngx_js_dict_node_free(ngx_js_dict_t *dict, + ngx_js_dict_node_t *node); static njs_external_t ngx_js_ext_shared_dict[] = { @@ -454,8 +456,10 @@ static njs_int_t njs_js_ext_shared_dict_clear(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) { - ngx_js_dict_t *dict; - ngx_shm_zone_t *shm_zone; + ngx_rbtree_t *rbtree; + ngx_js_dict_t *dict; + ngx_shm_zone_t *shm_zone; + ngx_rbtree_node_t *rn, *next; shm_zone = njs_vm_external(vm, ngx_js_shared_dict_proto_id, njs_argument(args, 0)); @@ -468,7 +472,27 @@ njs_js_ext_shared_dict_clear(njs_vm_t *v ngx_rwlock_wlock(&dict->sh->rwlock); - ngx_js_dict_evict(dict, 0x7fffffff /* INT_MAX */); + if (dict->timeout) { + ngx_js_dict_evict(dict, 0x7fffffff /* INT_MAX */); + + } else { + rbtree = &dict->sh->rbtree; + + if (rbtree->root == rbtree->sentinel) { + return NJS_OK; + } + + for (rn = ngx_rbtree_min(rbtree->root, rbtree->sentinel); + rn != NULL; + rn = next) + { + next = ngx_rbtree_next(rbtree, rn); + + ngx_rbtree_delete(rbtree, rn); + + ngx_js_dict_node_free(dict, (ngx_js_dict_node_t *) rn); + } + } ngx_rwlock_unlock(&dict->sh->rwlock); diff -r fc1001f6801b -r 4a15613f4e8b nginx/t/js_shared_dict.t --- a/nginx/t/js_shared_dict.t Thu Dec 14 22:32:02 2023 -0800 +++ b/nginx/t/js_shared_dict.t Tue Dec 19 12:37:05 2023 -0800 @@ -41,6 +41,7 @@ http { js_shared_dict_zone zone=foo:32k timeout=2s evict; js_shared_dict_zone zone=bar:64k type=string; js_shared_dict_zone zone=waka:32k type=number; + js_shared_dict_zone zone=no_timeout:32k; server { listen 127.0.0.1:8080; @@ -110,6 +111,10 @@ http { js_content test.set; } + location /set_clear { + js_content test.set_clear; + } + location /size { js_content test.size; } @@ -259,16 +264,25 @@ EOF r.return(200, `size: ${dict.size()}`); } + function set_clear(r) { + var dict = ngx.shared.no_timeout; + dict.set("test", "test value"); + dict.set("test1", "test1 value"); + dict.clear(); + r.return(200, `size: ${dict.size()}`); + } + + function zones(r) { r.return(200, Object.keys(ngx.shared).sort()); } export default { add, capacity, chain, clear, del, free_space, get, has, incr, items, keys, name, njs: test_njs, pop, replace, set, - size, zones }; + set_clear, size, zones }; EOF -$t->try_run('no js_shared_dict_zone')->plan(43); +$t->try_run('no js_shared_dict_zone')->plan(44); ############################################################################### @@ -339,7 +353,10 @@ like(http_get('/pop?dict=bar&key=FOO'), like(http_get('/pop?dict=bar&key=FOO'), qr/undefined/, 'pop deleted bar.FOO'); http_get('/set?dict=foo&key=BAR&value=xxx'); like(http_get('/clear?dict=foo'), qr/undefined/, 'clear foo'); + like(http_get('/size?dict=foo'), qr/size: 0/, 'no of items in foo after clear'); +like(http_get('/set_clear'), qr/size: 0/, + 'no of items in no_timeout after clear'); 
############################################################################### From mdounin at mdounin.ru Wed Dec 20 22:40:21 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 21 Dec 2023 01:40:21 +0300 Subject: [PATCH] ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 In-Reply-To: References: <2001e73ce136d5bfc9bd.1700771381@fedora> <18C46B67-C82F-490A-B3AA-14AF5D9CC369@nginx.com> Message-ID: Hello! On Tue, Dec 19, 2023 at 10:44:00PM +0400, Sergey Kandaurov wrote: > > > On 19 Dec 2023, at 12:58, Maxim Dounin wrote: > > > > Hello! > > > > On Tue, Dec 19, 2023 at 02:09:10AM +0400, Sergey Kandaurov wrote: > > > >>> On 24 Nov 2023, at 00:29, Ilya Shipitsin wrote: > >>> > >>> # HG changeset patch > >>> # User Ilya Shipitsin > >>> # Date 1700769135 -3600 > >>> # Thu Nov 23 20:52:15 2023 +0100 > >>> # Node ID 2001e73ce136d5bfc9bde27d338865b14b8ad436 > >>> # Parent 7ec761f0365f418511e30b82e9adf80bc56681df > >>> ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 > >> > >> style: SSL prefix should be uppercase. > >> > >>> > >>> diff -r 7ec761f0365f -r 2001e73ce136 src/event/ngx_event_openssl_stapling.c > >>> --- a/src/event/ngx_event_openssl_stapling.c Thu Oct 26 23:35:09 2023 +0300 > >>> +++ b/src/event/ngx_event_openssl_stapling.c Thu Nov 23 20:52:15 2023 +0100 > >>> @@ -893,7 +893,8 @@ > >>> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > >>> ocsp->conf = ocf; > >>> > >>> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > >>> +/* minimum OpenSSL 1.1.1 & LibreSSL 3.3.6 */ > >>> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) || (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) > >>> > >>> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > >>> > >> > >> Testing "defined(LIBRESSL_VERSION_NUMBER)" is superfluous. > >> The macro test suffers from a very long line. > >> > >> The correct version test seems to be against LibreSSL 3.5.0, see > >> https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.5.0-relnotes.txt > >> > >> So, the resulting change would be as follows: > >> > >> diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c > >> --- a/src/event/ngx_event_openssl_stapling.c > >> +++ b/src/event/ngx_event_openssl_stapling.c > >> @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > >> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > >> ocsp->conf = ocf; > >> > >> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > >> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > >> + && !defined LIBRESSL_VERSION_NUMBER) \ > >> + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL > > > > Rather, > > > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > > + && (!defined LIBRESSL_VERSION_NUMBER \ > > + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) > > > > Agree. For the sake of completeness: > > # HG changeset patch > # User Sergey Kandaurov > # Date 1703004490 -14400 > # Tue Dec 19 20:48:10 2023 +0400 > # Node ID 267cee796462f4f6bacf825c8fd24d13845d36f4 > # Parent 7a6d52990f2e2d88460a3dc6cc84aac89b7329ea > SSL: using SSL_get0_verified_chain() with LibreSSL 3.5.0+. > > Prodded by Ilya Shipitsin. 
> > diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c > --- a/src/event/ngx_event_openssl_stapling.c > +++ b/src/event/ngx_event_openssl_stapling.c > @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > ocsp->conf = ocf; > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > + && (!defined LIBRESSL_VERSION_NUMBER \ > + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > > > >> > >> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > >> > >> > >> On the other hand, I don't like the resulting style mudness. > >> It may have sense just to drop old LibreSSL versions support: > >> maintaining one or two most recent stable branches should be enough. > > > > +1 on this. > > > > We certainly don't want to maintain support for ancient LibreSSL > > versions. IMO, just two branches is more than enough (and this is > > what I use in my testing, which usually means latest development > > release and latest stable release). > > > > At most, this probably can be three branches - which seems to > > match LibreSSL support policy, > > https://www.libressl.org/releases.html: > > > > : LibreSSL transitions to a new stable release branch every 6 months > > : in coordination with the OpenBSD development schedule. LibreSSL > > : stable branches are updated for 1 year after their corresponding > > : OpenBSD branch is tagged for release. See below for the current > > : stable release branches. > > > > In either case, LibreSSL versions below 3.5.0 are already not > > supported. If I understand correctly, right now the oldest > > supported branch is 3.7.x. > > Agree. Also, Repology shows that modern popular distributions, > such as Alpine Linux and FreeBSD, have at least LibreSSL 3.5.x: > https://repology.org/project/libressl/versions > > > > >> But anyway, I don't see an obvious win over the existing code: > >> the certificate chain is reconstructed if SSL_get0_verified_chain() > >> is (detected to be) not present, which should be fine in most cases. > >> > >> That said, it doesn't seem to deserve introducing 3-line macro test, > >> or (see OTOH note) breaking old LibreSSL support for no apparent reason. > > > > Reconstruction of the chain implies verification of signatures > > along the chain and can be costly. As such, it certainly would be > > better to use SSL_get0_verified_chain() as long as it is > > available. > > Agree. > My point is that not using SSL_get0_verified_chain() should not result > in a broken functionality, as in the OCSP cert validation. > So, intention to start using it in LibreSSL may look an insufficient > argument per se to drop old LibreSSL versions. > Though, dropping them may be orthogonal to SSL_get0_verified_chain(). > > > > > Also, removing the "!defined LIBRESSL_VERSION_NUMBER" check might > > be seen as positive even without any additional benefits. > > > > Along with that, however, we might want to adjust the > > LIBRESSL_VERSION_NUMBER check in the ngx_event_openssl.h file, so > > OPENSSL_VERSION_NUMBER is set to a better value for old LibreSSL > > versions - for example, to only set OPENSSL_VERSION_NUMBER to > > 0x1010000fL for LibreSSL 3.5.0 or above. > > Sounds like a plan if we are fine to drop older LibreSSL versions. 
> > > This might allow to > > preserve limited compatibility with ancient LibreSSL versions > > without additional efforts (not tested though). > > > > This won't build with any LibreSSL version in the [2.8.0, 3.5.0) range. > Particularly, SSL_CTX_sess_set_get_cb() has got a const argument in > LibreSSL 2.8.0, which is not backward compatible, see 7337:cab37803ebb3. > Another reason is SSL was made opaque by default in 3.4.x. The const argument can be easily ignored by using -Wno-error=incompatible-function-pointer-types (or just -Wno-error), which seems to be reasonable when trying to build things with ancient libraries. This makes it possible to build with LibreSSL 2.8.0-3.3.6 with minimal efforts. For LibreSSL 3.4.x, the dependency on the SSL internals can be easily eliminated by testing SSL_OP_NO_CLIENT_RENEGOTIATION, which anyway seems to be a reasonable change. > > (Others seem not to affect building on older versions if pick up 3.5.0: > - X509_up_ref appeared in LibreSSL 2.6.0, X509 made opaque in 3.5.0; > - X509_get0_notAfter appeared in 2.7.0, X509_get_notAfter still there.) > > Personally I'm fine to drop ancient LibreSSL versions, because it has > to happen someday and we don't want to maintain them eternally. > Alternative patch for your consideration: > > # HG changeset patch > # User Sergey Kandaurov > # Date 1703011348 -14400 > # Tue Dec 19 22:42:28 2023 +0400 > # Node ID 94d4ef4a2316da66fea084952913ff2b0f84827d > # Parent 7a6d52990f2e2d88460a3dc6cc84aac89b7329ea > SSL: removed compatibility with LibreSSL < 3.5.0. > > OPENSSL_VERSION_NUMBER is now redefined to 0x1010000fL for LibreSSL 3.5.0+. > As older versions starting from LibreSSL 2.8.0 won't build with a lesser > OPENSSL_VERSION_NUMBER value (see 7337:cab37803ebb3 for details), they are > now explicitly unsupported. > > Besides that, this allows to start using SSL_get0_verified_chain() > with LibreSSL without additional macro tests. > > diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h > --- a/src/event/ngx_event_openssl.h > +++ b/src/event/ngx_event_openssl.h > @@ -45,10 +45,10 @@ > > #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) > #undef OPENSSL_VERSION_NUMBER > -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) > +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) > #define OPENSSL_VERSION_NUMBER 0x1010000fL > #else > -#define OPENSSL_VERSION_NUMBER 0x1000107fL > +#error LibreSSL too old, need at least 3.5.0 > #endif > #endif I'm certainly against the idea of explicitly rejecting old versions. As demonstrated above, even versions affected by various changes can be used with minimal efforts, such as disabling -Werror. 
For the record, here is a patch I tested with: diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -1105,7 +1105,8 @@ ngx_ssl_info_callback(const ngx_ssl_conn BIO *rbio, *wbio; ngx_connection_t *c; -#ifndef SSL_OP_NO_RENEGOTIATION +#if (!defined SSL_OP_NO_RENEGOTIATION \ + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) if ((where & SSL_CB_HANDSHAKE_START) && SSL_is_server((ngx_ssl_conn_t *) ssl_conn)) @@ -1838,9 +1839,10 @@ ngx_ssl_handshake(ngx_connection_t *c) c->read->ready = 1; c->write->ready = 1; -#ifndef SSL_OP_NO_RENEGOTIATION -#if OPENSSL_VERSION_NUMBER < 0x10100000L -#ifdef SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS +#if (!defined SSL_OP_NO_RENEGOTIATION \ + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION \ + && defined SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS \ + && OPENSSL_VERSION_NUMBER < 0x10100000L) /* initial handshake done, disable renegotiation (CVE-2009-3555) */ if (c->ssl->connection->s3 && SSL_is_server(c->ssl->connection)) { @@ -1848,8 +1850,6 @@ ngx_ssl_handshake(ngx_connection_t *c) } #endif -#endif -#endif #if (defined BIO_get_ktls_send && !NGX_WIN32) @@ -2483,7 +2483,8 @@ ngx_ssl_handle_recv(ngx_connection_t *c, int sslerr; ngx_err_t err; -#ifndef SSL_OP_NO_RENEGOTIATION +#if (!defined SSL_OP_NO_RENEGOTIATION \ + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) if (c->ssl->renegotiation) { /* diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -45,7 +45,7 @@ #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) #undef OPENSSL_VERSION_NUMBER -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) #define OPENSSL_VERSION_NUMBER 0x1010000fL #else #define OPENSSL_VERSION_NUMBER 0x1000107fL diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c --- a/src/event/ngx_event_openssl_stapling.c +++ b/src/event/ngx_event_openssl_stapling.c @@ -893,7 +893,7 @@ ngx_ssl_ocsp_validate(ngx_connection_t * ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; ocsp->conf = ocf; -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Thu Dec 21 00:35:12 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 21 Dec 2023 03:35:12 +0300 Subject: Core: Avoid memcpy from NULL In-Reply-To: References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> Message-ID: Hello! On Sat, Dec 16, 2023 at 04:26:37PM -0500, Ben Kallus wrote: > > In general macro definitions in nginx are used everywhere for > > efficiency reasons > > Clang inlines short functions with -O1, and GCC does so with -O2 or > -O1 -finline-small-functions. Are there any platforms that Nginx needs > to support for which short function inlining isn't sufficient to solve > this issue? In nginx, functions expected to be inlined are marked with "ngx_inline", which normally resolves to "inline" (on unix) or "__inline" (on win32). As such, modern versions of both clang and gcc will inline corresponding functions unless optimization is disabled. Still, -O0 is often used at least during development, and it might be unreasonable to introduce extra function calls in basic primitives. Further, nginx generally supports all available platforms reasonably compatible with POSIX and C89. 
This implies that inline might be not available. > > While some might prefer other approaches, writing code like > > "ngx_memcpy(dst, src, ++len)" in nginx is just wrong, and > > shouldn't be trusted to work, much like it won't work with > > "ngx_cpymem(dst, src, ++len)". > > It is indeed wrong to use an expression with a side-effect in an > argument to cpymem, but it's also not very obvious that it's wrong. An > inlined function solves the argument reevaluation issue without > performance overhead. Sure (but see above about performance overhead; and another question is if it needs to be solved, or following existing style is enough to never see the issue). The point is: in nginx, it's anyway wrong to use arguments with side effects. And even expressions without side effects might won't work. While many macro definitions were adjusted to accept expressions instead of the bare variables (see 2f9214713666 and https://mailman.nginx.org/pipermail/nginx-devel/2020-July/013354.html for an example), some still don't or can be picky. For example, good luck with doing something like "ngx_max(foo & 0xff, bar)". As such, it's certainly not an argument against using checks in macro definitions in the particular patch. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Thu Dec 21 00:59:26 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 21 Dec 2023 03:59:26 +0300 Subject: processing a request without body In-Reply-To: References: Message-ID: Hello! On Tue, Dec 19, 2023 at 10:11:04PM +0800, Muhammad Nuzaihan wrote: > Thanks Maxim, Vasility, > > The problem i was going to solve is to i needed to run my specific > function that takes the data of request URL path, Headers and request > body and determine and validate that all that data is correct before > sending upstream, or else i would deny the request with 4xx code > errors. > > Handlers can only handle (from what i know) URL path and headers. > > Request body requires a request chain (ngx_chain_t)) to piece out the > request body and handlers doesn't seem to have t ngx_chain_t unlike > request body filters. > > Or maybe i am wrong in this case? It looks like you are trying to do something which simply cannot be done. For example, consider a configuration with "proxy_request_buffering off;" - in such a configuration request body is being read _after_ the request is passed to the upstream server, and you simply cannot validate request body before passing request headers to the upstream server. As long as you have to examine both request body and request headers, I think there can be two possible solutions: 1. Install a phase handler, in which read the request body yourself, and check both request headers and request body once it's read. See the mirror module as an example on how to read the body in a phase handler and properly resume processing after it. This will break proxying without request buffering, though might be good enough for your particular task. 2. Install a phase handler to check request headers, and a request body filter to check the request body. Do checking in both places, and abort request processing when you see that data aren't correct. This will work with proxying without request buffering, but will be generally more complex to implement. And, obviously, this in case of proxying without request buffering this won't let you to validate request body before the request headers are sent to upstream server. Hope this helps. 
-- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Thu Dec 21 13:37:02 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 21 Dec 2023 17:37:02 +0400 Subject: [PATCH 2 of 2] Win32: extended ngx_random range to 0x7fffffff In-Reply-To: References: Message-ID: <26182490-3FF6-491F-8BE9-FA7E342E54CF@nginx.com> > On 16 Dec 2023, at 06:57, Maxim Dounin wrote: > > Hello! > > On Sat, Dec 09, 2023 at 08:42:11AM +0000, J Carter wrote: > >> On Sat, 09 Dec 2023 07:46:10 +0000 >> J Carter wrote: >> >>> # HG changeset patch >>> # User J Carter >>> # Date 1702101635 0 >>> # Sat Dec 09 06:00:35 2023 +0000 >>> # Node ID 1a77698f82d2580aa8b8f62ce89b4dbb6d678c5d >>> # Parent d9275e982a7188a1ea7855295ffa93362ea9830f >>> Win32: extended ngx_random range to 0x7fffffff >>> >>> rand() is used on win32. RAND_MAX is implementation defined. win32's is >>> 0x7fff. >>> >>> Existing uses of ngx_random rely upon 0x7fffffff range provided by >>> posix implementations of random(). >>> >>> diff -r d9275e982a71 -r 1a77698f82d2 src/os/win32/ngx_win32_config.h >>> --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 >>> +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 06:00:35 2023 +0000 >>> @@ -280,7 +280,9 @@ >>> >>> #define NGX_HAVE_GETADDRINFO 1 >>> >>> -#define ngx_random rand >>> +#define ngx_random \ >>> + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) >>> + >>> #define ngx_debug_init() >>> >>> >> >> ^ my mistake - copying error.. >> >> # HG changeset patch >> # User J Carter >> # Date 1702111094 0 >> # Sat Dec 09 08:38:14 2023 +0000 >> # Node ID 10ef59a412a330872fc6d46de64f42e32e997b3a >> # Parent d9275e982a7188a1ea7855295ffa93362ea9830f >> Win32: extended ngx_random range to 0x7fffffff > > Nitpicking: > > Win32: extended ngx_random() range to 0x7fffffff. > >> >> rand() is used on win32. RAND_MAX is implementation defined. win32's is >> 0x7fff. >> >> Existing uses of ngx_random rely upon 0x7fffffff range provided by >> posix implementations of random(). > > Thanks for catching this. > > As far as I can see, the only module which actually relies on the > range is the random index module. Relying on the ngx_random() > range generally looks wrong to me, and we might want to change the > code to don't. OTOH, it's the only way to get a completely > uniform distribution, and that's what the module tries to do. As > such, it might be good enough to preserve it as is, at least till > further changes to ngx_random(). > > Either way, wider range for ngx_random() should be beneficial in > other places. > >> >> diff -r d9275e982a71 -r 10ef59a412a3 src/os/win32/ngx_win32_config.h >> --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 >> +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 >> @@ -280,7 +280,9 @@ >> >> #define NGX_HAVE_GETADDRINFO 1 >> >> -#define ngx_random rand >> +#define ngx_random() \ > > Nitpicking: the "\" character should be at the 79th column (in > some files at 78th). This ensures that a diff won't wrap on a > standard 80-column terminal. > >> + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) >> + >> #define ngx_debug_init() > > Using "|" for random numbers looks utterly wrong to me, even if > ORed values are guaranteed to not interfere. > > I would rather use "^", and avoid dependency on the particular > value of RAND_MAX (other than POSIX-required minimum of 32767) by > using something like > > 0x7fffffff & ((rand() << 16) ^ (rand() << 8) ^ rand()) > > with proper typecasts. 
> > Something like this should work: > > diff --git a/src/os/win32/ngx_win32_config.h b/src/os/win32/ngx_win32_config.h > --- a/src/os/win32/ngx_win32_config.h > +++ b/src/os/win32/ngx_win32_config.h > @@ -280,7 +280,11 @@ typedef int sig_atomic_t > > #define NGX_HAVE_GETADDRINFO 1 > > -#define ngx_random rand > +#define ngx_random() \ > + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ > + ^ ((uint32_t) rand() << 8) \ > + ^ ((uint32_t) rand()) ))) > + > #define ngx_debug_init() > > Nitpicking: you might want to re-align the "^" operator to the first symbol of the left-hand operand (similar to NGX_CONF_TAKE1234, or even NGX_UNIX_ADDRSTRLEN). Other than that, it looks good. -- Sergey Kandaurov From mdounin at mdounin.ru Thu Dec 21 16:14:40 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 21 Dec 2023 19:14:40 +0300 Subject: [PATCH 2 of 2] Win32: extended ngx_random range to 0x7fffffff In-Reply-To: <26182490-3FF6-491F-8BE9-FA7E342E54CF@nginx.com> References: <26182490-3FF6-491F-8BE9-FA7E342E54CF@nginx.com> Message-ID: Hello! On Thu, Dec 21, 2023 at 05:37:02PM +0400, Sergey Kandaurov wrote: > > On 16 Dec 2023, at 06:57, Maxim Dounin wrote: > > > > Hello! > > > > On Sat, Dec 09, 2023 at 08:42:11AM +0000, J Carter wrote: > > > >> On Sat, 09 Dec 2023 07:46:10 +0000 > >> J Carter wrote: > >> > >>> # HG changeset patch > >>> # User J Carter > >>> # Date 1702101635 0 > >>> # Sat Dec 09 06:00:35 2023 +0000 > >>> # Node ID 1a77698f82d2580aa8b8f62ce89b4dbb6d678c5d > >>> # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > >>> Win32: extended ngx_random range to 0x7fffffff > >>> > >>> rand() is used on win32. RAND_MAX is implementation defined. win32's is > >>> 0x7fff. > >>> > >>> Existing uses of ngx_random rely upon 0x7fffffff range provided by > >>> posix implementations of random(). > >>> > >>> diff -r d9275e982a71 -r 1a77698f82d2 src/os/win32/ngx_win32_config.h > >>> --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > >>> +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 06:00:35 2023 +0000 > >>> @@ -280,7 +280,9 @@ > >>> > >>> #define NGX_HAVE_GETADDRINFO 1 > >>> > >>> -#define ngx_random rand > >>> +#define ngx_random \ > >>> + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > >>> + > >>> #define ngx_debug_init() > >>> > >>> > >> > >> ^ my mistake - copying error.. > >> > >> # HG changeset patch > >> # User J Carter > >> # Date 1702111094 0 > >> # Sat Dec 09 08:38:14 2023 +0000 > >> # Node ID 10ef59a412a330872fc6d46de64f42e32e997b3a > >> # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > >> Win32: extended ngx_random range to 0x7fffffff > > > > Nitpicking: > > > > Win32: extended ngx_random() range to 0x7fffffff. > > > >> > >> rand() is used on win32. RAND_MAX is implementation defined. win32's is > >> 0x7fff. > >> > >> Existing uses of ngx_random rely upon 0x7fffffff range provided by > >> posix implementations of random(). > > > > Thanks for catching this. > > > > As far as I can see, the only module which actually relies on the > > range is the random index module. Relying on the ngx_random() > > range generally looks wrong to me, and we might want to change the > > code to don't. OTOH, it's the only way to get a completely > > uniform distribution, and that's what the module tries to do. As > > such, it might be good enough to preserve it as is, at least till > > further changes to ngx_random(). > > > > Either way, wider range for ngx_random() should be beneficial in > > other places. 
> > > >> > >> diff -r d9275e982a71 -r 10ef59a412a3 src/os/win32/ngx_win32_config.h > >> --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > >> +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 > >> @@ -280,7 +280,9 @@ > >> > >> #define NGX_HAVE_GETADDRINFO 1 > >> > >> -#define ngx_random rand > >> +#define ngx_random() \ > > > > Nitpicking: the "\" character should be at the 79th column (in > > some files at 78th). This ensures that a diff won't wrap on a > > standard 80-column terminal. > > > >> + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > >> + > >> #define ngx_debug_init() > > > > Using "|" for random numbers looks utterly wrong to me, even if > > ORed values are guaranteed to not interfere. > > > > I would rather use "^", and avoid dependency on the particular > > value of RAND_MAX (other than POSIX-required minimum of 32767) by > > using something like > > > > 0x7fffffff & ((rand() << 16) ^ (rand() << 8) ^ rand()) > > > > with proper typecasts. > > > > Something like this should work: > > > > diff --git a/src/os/win32/ngx_win32_config.h b/src/os/win32/ngx_win32_config.h > > --- a/src/os/win32/ngx_win32_config.h > > +++ b/src/os/win32/ngx_win32_config.h > > @@ -280,7 +280,11 @@ typedef int sig_atomic_t > > > > #define NGX_HAVE_GETADDRINFO 1 > > > > -#define ngx_random rand > > +#define ngx_random() \ > > + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ > > + ^ ((uint32_t) rand() << 8) \ > > + ^ ((uint32_t) rand()) ))) > > + > > #define ngx_debug_init() > > > > > > Nitpicking: you might want to re-align the "^" operator to the first > symbol of the left-hand operand (similar to NGX_CONF_TAKE1234, or > even NGX_UNIX_ADDRSTRLEN). Other than that, it looks good. It's intentionally aligned this way to simplify reading. Such style is occasionally used in complex macro definitions, see ngx_mp4_get_32value() or ngx_proxy_protocol_parse_uint32() for some examples. Just for completeness, below is the updated patch. # HG changeset patch # User J Carter # Date 1702111094 0 # Sat Dec 09 08:38:14 2023 +0000 # Node ID 92923ac5ea2a395774b28460f07d0fd2e1a2de24 # Parent cc16989c6d61385027c1ebfd43929f8369fa5f62 Win32: extended ngx_random() range to 0x7fffffff. rand() is used on win32. RAND_MAX is implementation defined. win32's is 0x7fff. Existing uses of ngx_random() rely upon 0x7fffffff range provided by POSIX implementations of random(). diff -r cc16989c6d61 -r 92923ac5ea2a src/os/win32/ngx_win32_config.h --- a/src/os/win32/ngx_win32_config.h Sat Dec 16 03:40:01 2023 +0400 +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 @@ -280,7 +280,11 @@ typedef int sig_atomic_t #define NGX_HAVE_GETADDRINFO 1 -#define ngx_random rand +#define ngx_random() \ + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ + ^ ((uint32_t) rand() << 8) \ + ^ ((uint32_t) rand()) ))) + #define ngx_debug_init() -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Fri Dec 22 13:52:30 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 22 Dec 2023 17:52:30 +0400 Subject: [PATCH 2 of 2] Win32: extended ngx_random range to 0x7fffffff In-Reply-To: References: <26182490-3FF6-491F-8BE9-FA7E342E54CF@nginx.com> Message-ID: <20231222135230.xdxaiz5lrx4625oj@Y9MQ9X2QVV> On Thu, Dec 21, 2023 at 07:14:40PM +0300, Maxim Dounin wrote: > Hello! > > On Thu, Dec 21, 2023 at 05:37:02PM +0400, Sergey Kandaurov wrote: > > > > On 16 Dec 2023, at 06:57, Maxim Dounin wrote: > > > > > > Hello! 
> > > > > > On Sat, Dec 09, 2023 at 08:42:11AM +0000, J Carter wrote: > > > > > >> On Sat, 09 Dec 2023 07:46:10 +0000 > > >> J Carter wrote: > > >> > > >>> # HG changeset patch > > >>> # User J Carter > > >>> # Date 1702101635 0 > > >>> # Sat Dec 09 06:00:35 2023 +0000 > > >>> # Node ID 1a77698f82d2580aa8b8f62ce89b4dbb6d678c5d > > >>> # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > > >>> Win32: extended ngx_random range to 0x7fffffff > > >>> > > >>> rand() is used on win32. RAND_MAX is implementation defined. win32's is > > >>> 0x7fff. > > >>> > > >>> Existing uses of ngx_random rely upon 0x7fffffff range provided by > > >>> posix implementations of random(). > > >>> > > >>> diff -r d9275e982a71 -r 1a77698f82d2 src/os/win32/ngx_win32_config.h > > >>> --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > > >>> +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 06:00:35 2023 +0000 > > >>> @@ -280,7 +280,9 @@ > > >>> > > >>> #define NGX_HAVE_GETADDRINFO 1 > > >>> > > >>> -#define ngx_random rand > > >>> +#define ngx_random \ > > >>> + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > > >>> + > > >>> #define ngx_debug_init() > > >>> > > >>> > > >> > > >> ^ my mistake - copying error.. > > >> > > >> # HG changeset patch > > >> # User J Carter > > >> # Date 1702111094 0 > > >> # Sat Dec 09 08:38:14 2023 +0000 > > >> # Node ID 10ef59a412a330872fc6d46de64f42e32e997b3a > > >> # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > > >> Win32: extended ngx_random range to 0x7fffffff > > > > > > Nitpicking: > > > > > > Win32: extended ngx_random() range to 0x7fffffff. > > > > > >> > > >> rand() is used on win32. RAND_MAX is implementation defined. win32's is > > >> 0x7fff. > > >> > > >> Existing uses of ngx_random rely upon 0x7fffffff range provided by > > >> posix implementations of random(). > > > > > > Thanks for catching this. > > > > > > As far as I can see, the only module which actually relies on the > > > range is the random index module. Relying on the ngx_random() > > > range generally looks wrong to me, and we might want to change the > > > code to don't. OTOH, it's the only way to get a completely > > > uniform distribution, and that's what the module tries to do. As > > > such, it might be good enough to preserve it as is, at least till > > > further changes to ngx_random(). > > > > > > Either way, wider range for ngx_random() should be beneficial in > > > other places. > > > > > >> > > >> diff -r d9275e982a71 -r 10ef59a412a3 src/os/win32/ngx_win32_config.h > > >> --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > > >> +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 > > >> @@ -280,7 +280,9 @@ > > >> > > >> #define NGX_HAVE_GETADDRINFO 1 > > >> > > >> -#define ngx_random rand > > >> +#define ngx_random() \ > > > > > > Nitpicking: the "\" character should be at the 79th column (in > > > some files at 78th). This ensures that a diff won't wrap on a > > > standard 80-column terminal. > > > > > >> + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > > >> + > > >> #define ngx_debug_init() > > > > > > Using "|" for random numbers looks utterly wrong to me, even if > > > ORed values are guaranteed to not interfere. > > > > > > I would rather use "^", and avoid dependency on the particular > > > value of RAND_MAX (other than POSIX-required minimum of 32767) by > > > using something like > > > > > > 0x7fffffff & ((rand() << 16) ^ (rand() << 8) ^ rand()) > > > > > > with proper typecasts. 
> > > > > > Something like this should work: > > > > > > diff --git a/src/os/win32/ngx_win32_config.h b/src/os/win32/ngx_win32_config.h > > > --- a/src/os/win32/ngx_win32_config.h > > > +++ b/src/os/win32/ngx_win32_config.h > > > @@ -280,7 +280,11 @@ typedef int sig_atomic_t > > > > > > #define NGX_HAVE_GETADDRINFO 1 > > > > > > -#define ngx_random rand > > > +#define ngx_random() \ > > > + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ > > > + ^ ((uint32_t) rand() << 8) \ > > > + ^ ((uint32_t) rand()) ))) > > > + > > > #define ngx_debug_init() > > > > > > > > > > Nitpicking: you might want to re-align the "^" operator to the first > > symbol of the left-hand operand (similar to NGX_CONF_TAKE1234, or > > even NGX_UNIX_ADDRSTRLEN). Other than that, it looks good. > > It's intentionally aligned this way to simplify reading. Such > style is occasionally used in complex macro definitions, see > ngx_mp4_get_32value() or ngx_proxy_protocol_parse_uint32() for > some examples. > > Just for completeness, below is the updated patch. > > # HG changeset patch > # User J Carter > # Date 1702111094 0 > # Sat Dec 09 08:38:14 2023 +0000 > # Node ID 92923ac5ea2a395774b28460f07d0fd2e1a2de24 > # Parent cc16989c6d61385027c1ebfd43929f8369fa5f62 > Win32: extended ngx_random() range to 0x7fffffff. > > rand() is used on win32. RAND_MAX is implementation defined. win32's is > 0x7fff. I'd unwrap this line to look more pleasant, it fits into 80 columns. > > Existing uses of ngx_random() rely upon 0x7fffffff range provided by > POSIX implementations of random(). > > diff -r cc16989c6d61 -r 92923ac5ea2a src/os/win32/ngx_win32_config.h > --- a/src/os/win32/ngx_win32_config.h Sat Dec 16 03:40:01 2023 +0400 > +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 > @@ -280,7 +280,11 @@ typedef int sig_atomic_t > > #define NGX_HAVE_GETADDRINFO 1 > > -#define ngx_random rand > +#define ngx_random() \ > + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ If I'm not mistaken, indentation is now 5 spaces. Otherwise, looks good. > + ^ ((uint32_t) rand() << 8) \ > + ^ ((uint32_t) rand()) ))) > + > #define ngx_debug_init() > > > From pluknet at nginx.com Fri Dec 22 13:53:30 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 22 Dec 2023 17:53:30 +0400 Subject: [PATCH] ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 In-Reply-To: References: <2001e73ce136d5bfc9bd.1700771381@fedora> <18C46B67-C82F-490A-B3AA-14AF5D9CC369@nginx.com> Message-ID: > On 21 Dec 2023, at 02:40, Maxim Dounin wrote: > > Hello! > > On Tue, Dec 19, 2023 at 10:44:00PM +0400, Sergey Kandaurov wrote: > >> >>> On 19 Dec 2023, at 12:58, Maxim Dounin wrote: >>> >>> Hello! >>> >>> On Tue, Dec 19, 2023 at 02:09:10AM +0400, Sergey Kandaurov wrote: >>> >>>>> On 24 Nov 2023, at 00:29, Ilya Shipitsin wrote: >>>>> >>>>> # HG changeset patch >>>>> # User Ilya Shipitsin >>>>> # Date 1700769135 -3600 >>>>> # Thu Nov 23 20:52:15 2023 +0100 >>>>> # Node ID 2001e73ce136d5bfc9bde27d338865b14b8ad436 >>>>> # Parent 7ec761f0365f418511e30b82e9adf80bc56681df >>>>> ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 >>>> >>>> style: SSL prefix should be uppercase. 
>>>> >>>>> >>>>> diff -r 7ec761f0365f -r 2001e73ce136 src/event/ngx_event_openssl_stapling.c >>>>> --- a/src/event/ngx_event_openssl_stapling.c Thu Oct 26 23:35:09 2023 +0300 >>>>> +++ b/src/event/ngx_event_openssl_stapling.c Thu Nov 23 20:52:15 2023 +0100 >>>>> @@ -893,7 +893,8 @@ >>>>> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; >>>>> ocsp->conf = ocf; >>>>> >>>>> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) >>>>> +/* minimum OpenSSL 1.1.1 & LibreSSL 3.3.6 */ >>>>> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) || (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) >>>>> >>>>> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); >>>>> >>>> >>>> Testing "defined(LIBRESSL_VERSION_NUMBER)" is superfluous. >>>> The macro test suffers from a very long line. >>>> >>>> The correct version test seems to be against LibreSSL 3.5.0, see >>>> https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.5.0-relnotes.txt >>>> >>>> So, the resulting change would be as follows: >>>> >>>> diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c >>>> --- a/src/event/ngx_event_openssl_stapling.c >>>> +++ b/src/event/ngx_event_openssl_stapling.c >>>> @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * >>>> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; >>>> ocsp->conf = ocf; >>>> >>>> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) >>>> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ >>>> + && !defined LIBRESSL_VERSION_NUMBER) \ >>>> + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL >>> >>> Rather, >>> >>> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ >>> + && (!defined LIBRESSL_VERSION_NUMBER \ >>> + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) >>> >> >> Agree. For the sake of completeness: >> >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1703004490 -14400 >> # Tue Dec 19 20:48:10 2023 +0400 >> # Node ID 267cee796462f4f6bacf825c8fd24d13845d36f4 >> # Parent 7a6d52990f2e2d88460a3dc6cc84aac89b7329ea >> SSL: using SSL_get0_verified_chain() with LibreSSL 3.5.0+. >> >> Prodded by Ilya Shipitsin. >> >> diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c >> --- a/src/event/ngx_event_openssl_stapling.c >> +++ b/src/event/ngx_event_openssl_stapling.c >> @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * >> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; >> ocsp->conf = ocf; >> >> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) >> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ >> + && (!defined LIBRESSL_VERSION_NUMBER \ >> + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) >> >> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); >> >> >>>> >>>> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); >>>> >>>> >>>> On the other hand, I don't like the resulting style mudness. >>>> It may have sense just to drop old LibreSSL versions support: >>>> maintaining one or two most recent stable branches should be enough. >>> >>> +1 on this. >>> >>> We certainly don't want to maintain support for ancient LibreSSL >>> versions. IMO, just two branches is more than enough (and this is >>> what I use in my testing, which usually means latest development >>> release and latest stable release). 
>>> >>> At most, this probably can be three branches - which seems to >>> match LibreSSL support policy, >>> https://www.libressl.org/releases.html: >>> >>> : LibreSSL transitions to a new stable release branch every 6 months >>> : in coordination with the OpenBSD development schedule. LibreSSL >>> : stable branches are updated for 1 year after their corresponding >>> : OpenBSD branch is tagged for release. See below for the current >>> : stable release branches. >>> >>> In either case, LibreSSL versions below 3.5.0 are already not >>> supported. If I understand correctly, right now the oldest >>> supported branch is 3.7.x. >> >> Agree. Also, Repology shows that modern popular distributions, >> such as Alpine Linux and FreeBSD, have at least LibreSSL 3.5.x: >> https://repology.org/project/libressl/versions >> >>> >>>> But anyway, I don't see an obvious win over the existing code: >>>> the certificate chain is reconstructed if SSL_get0_verified_chain() >>>> is (detected to be) not present, which should be fine in most cases. >>>> >>>> That said, it doesn't seem to deserve introducing 3-line macro test, >>>> or (see OTOH note) breaking old LibreSSL support for no apparent reason. >>> >>> Reconstruction of the chain implies verification of signatures >>> along the chain and can be costly. As such, it certainly would be >>> better to use SSL_get0_verified_chain() as long as it is >>> available. >> >> Agree. >> My point is that not using SSL_get0_verified_chain() should not result >> in a broken functionality, as in the OCSP cert validation. >> So, intention to start using it in LibreSSL may look an insufficient >> argument per se to drop old LibreSSL versions. >> Though, dropping them may be orthogonal to SSL_get0_verified_chain(). >> >>> >>> Also, removing the "!defined LIBRESSL_VERSION_NUMBER" check might >>> be seen as positive even without any additional benefits. >>> >>> Along with that, however, we might want to adjust the >>> LIBRESSL_VERSION_NUMBER check in the ngx_event_openssl.h file, so >>> OPENSSL_VERSION_NUMBER is set to a better value for old LibreSSL >>> versions - for example, to only set OPENSSL_VERSION_NUMBER to >>> 0x1010000fL for LibreSSL 3.5.0 or above. >> >> Sounds like a plan if we are fine to drop older LibreSSL versions. >> >>> This might allow to >>> preserve limited compatibility with ancient LibreSSL versions >>> without additional efforts (not tested though). >>> >> >> This won't build with any LibreSSL version in the [2.8.0, 3.5.0) range. >> Particularly, SSL_CTX_sess_set_get_cb() has got a const argument in >> LibreSSL 2.8.0, which is not backward compatible, see 7337:cab37803ebb3. >> Another reason is SSL was made opaque by default in 3.4.x. > > The const argument can be easily ignored by using > -Wno-error=incompatible-function-pointer-types (or just > -Wno-error), which seems to be reasonable when trying to build > things with ancient libraries. This makes it possible to build > with LibreSSL 2.8.0-3.3.6 with minimal efforts. Ok, that makes sense. For the record, GCC uses another warning option name: src/event/ngx_event_openssl.c:3770:43: error: passing argument 2 of 'SSL_CTX_sess_set_get_cb' from incompatible pointer type [-Werror=incompatible-pointer-type] There is a typo in GCC 12 diagnostics, the actual option name appears to be -Wno-incompatible-pointer-types (note "s"). BTW, it appears to be a valid option name in Clang as well to suppress such warnings. 
> > For LibreSSL 3.4.x, the dependency on the SSL internals can be > easily eliminated by testing SSL_OP_NO_CLIENT_RENEGOTIATION, which > anyway seems to be a reasonable change. Agree, this is really true as tested on various LibreSSL versions, from 2.5.0 to 3.8.2 (SSL_OP_NO_CLIENT_RENEGOTIATION appeared in 2.6.0). > >> >> (Others seem not to affect building on older versions if pick up 3.5.0: >> - X509_up_ref appeared in LibreSSL 2.6.0, X509 made opaque in 3.5.0; >> - X509_get0_notAfter appeared in 2.7.0, X509_get_notAfter still there.) >> >> Personally I'm fine to drop ancient LibreSSL versions, because it has >> to happen someday and we don't want to maintain them eternally. >> Alternative patch for your consideration: >> >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1703011348 -14400 >> # Tue Dec 19 22:42:28 2023 +0400 >> # Node ID 94d4ef4a2316da66fea084952913ff2b0f84827d >> # Parent 7a6d52990f2e2d88460a3dc6cc84aac89b7329ea >> SSL: removed compatibility with LibreSSL < 3.5.0. >> >> OPENSSL_VERSION_NUMBER is now redefined to 0x1010000fL for LibreSSL 3.5.0+. >> As older versions starting from LibreSSL 2.8.0 won't build with a lesser >> OPENSSL_VERSION_NUMBER value (see 7337:cab37803ebb3 for details), they are >> now explicitly unsupported. >> >> Besides that, this allows to start using SSL_get0_verified_chain() >> with LibreSSL without additional macro tests. >> >> diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h >> --- a/src/event/ngx_event_openssl.h >> +++ b/src/event/ngx_event_openssl.h >> @@ -45,10 +45,10 @@ >> >> #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) >> #undef OPENSSL_VERSION_NUMBER >> -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) >> +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) >> #define OPENSSL_VERSION_NUMBER 0x1010000fL >> #else >> -#define OPENSSL_VERSION_NUMBER 0x1000107fL >> +#error LibreSSL too old, need at least 3.5.0 >> #endif >> #endif > > I'm certainly against the idea of explicitly rejecting old > versions. As demonstrated above, even versions affected by > various changes can be used with minimal efforts, such as > disabling -Werror. > > For the record, here is a patch I tested with: > > diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c > --- a/src/event/ngx_event_openssl.c > +++ b/src/event/ngx_event_openssl.c > @@ -1105,7 +1105,8 @@ ngx_ssl_info_callback(const ngx_ssl_conn > BIO *rbio, *wbio; > ngx_connection_t *c; > > -#ifndef SSL_OP_NO_RENEGOTIATION > +#if (!defined SSL_OP_NO_RENEGOTIATION \ > + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) > > if ((where & SSL_CB_HANDSHAKE_START) > && SSL_is_server((ngx_ssl_conn_t *) ssl_conn)) > @@ -1838,9 +1839,10 @@ ngx_ssl_handshake(ngx_connection_t *c) > c->read->ready = 1; > c->write->ready = 1; > > -#ifndef SSL_OP_NO_RENEGOTIATION > -#if OPENSSL_VERSION_NUMBER < 0x10100000L > -#ifdef SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS > +#if (!defined SSL_OP_NO_RENEGOTIATION \ > + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION \ > + && defined SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS \ > + && OPENSSL_VERSION_NUMBER < 0x10100000L) Keeping macro tests nested allows to add/remove them with minimal diff. I don't mind though to combine primaries into a single test, this is what nginx happens to use in the rest style. The only justified exceptions I found that use nested tests are "#if (NGX_GNU_HURD)" and recently introduced "#if (NGX_QUIC)" in ngx_event_openssl.h. 
> > /* initial handshake done, disable renegotiation (CVE-2009-3555) */ > if (c->ssl->connection->s3 && SSL_is_server(c->ssl->connection)) { > @@ -1848,8 +1850,6 @@ ngx_ssl_handshake(ngx_connection_t *c) > } > > #endif > -#endif > -#endif > > #if (defined BIO_get_ktls_send && !NGX_WIN32) > > @@ -2483,7 +2483,8 @@ ngx_ssl_handle_recv(ngx_connection_t *c, > int sslerr; > ngx_err_t err; > > -#ifndef SSL_OP_NO_RENEGOTIATION > +#if (!defined SSL_OP_NO_RENEGOTIATION \ > + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) > > if (c->ssl->renegotiation) { > /* > diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h > --- a/src/event/ngx_event_openssl.h > +++ b/src/event/ngx_event_openssl.h > @@ -45,7 +45,7 @@ > > #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) > #undef OPENSSL_VERSION_NUMBER > -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) > +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) > #define OPENSSL_VERSION_NUMBER 0x1010000fL > #else > #define OPENSSL_VERSION_NUMBER 0x1000107fL > diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c > --- a/src/event/ngx_event_openssl_stapling.c > +++ b/src/event/ngx_event_openssl_stapling.c > @@ -893,7 +893,7 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > ocsp->conf = ocf; > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) Not sure if brackets are needed here, since it's no longer combined. Currently, there are 17 occurrences in nginx that don't use brackets, and 3 simple cases that do. > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > Overall, things look good to me. As discussed, broke this into two changes: # HG changeset patch # User Sergey Kandaurov # Date 1703252997 -14400 # Fri Dec 22 17:49:57 2023 +0400 # Node ID 7fc017b776047b26c7e42b355a1bf142cf968537 # Parent 500071f3265d259eb1917cd8367828834ff0ae14 SSL: disabled renegotiation checks with LibreSSL. Similar to 7356:e3ba4026c02d, as long as SSL_OP_NO_CLIENT_RENEGOTIATION is defined, it is the library responsibility to prevent renegotiation. Additionally, this allows to raise LibreSSL version used to redefine OPENSSL_VERSION_NUMBER to 0x1010000fL, such that this won't result in attempts to dereference SSL objects made opaque in LibreSSL 3.4.0. Patch by Maxim Dounin. 
diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -1105,7 +1105,8 @@ ngx_ssl_info_callback(const ngx_ssl_conn BIO *rbio, *wbio; ngx_connection_t *c; -#ifndef SSL_OP_NO_RENEGOTIATION +#if (!defined SSL_OP_NO_RENEGOTIATION \ + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) if ((where & SSL_CB_HANDSHAKE_START) && SSL_is_server((ngx_ssl_conn_t *) ssl_conn)) @@ -1838,9 +1839,10 @@ ngx_ssl_handshake(ngx_connection_t *c) c->read->ready = 1; c->write->ready = 1; -#ifndef SSL_OP_NO_RENEGOTIATION -#if OPENSSL_VERSION_NUMBER < 0x10100000L -#ifdef SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS +#if (!defined SSL_OP_NO_RENEGOTIATION \ + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION \ + && defined SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS \ + && OPENSSL_VERSION_NUMBER < 0x10100000L) /* initial handshake done, disable renegotiation (CVE-2009-3555) */ if (c->ssl->connection->s3 && SSL_is_server(c->ssl->connection)) { @@ -1848,8 +1850,6 @@ ngx_ssl_handshake(ngx_connection_t *c) } #endif -#endif -#endif #if (defined BIO_get_ktls_send && !NGX_WIN32) @@ -2483,7 +2483,8 @@ ngx_ssl_handle_recv(ngx_connection_t *c, int sslerr; ngx_err_t err; -#ifndef SSL_OP_NO_RENEGOTIATION +#if (!defined SSL_OP_NO_RENEGOTIATION \ + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) if (c->ssl->renegotiation) { /* # HG changeset patch # User Sergey Kandaurov # Date 1703253041 -14400 # Fri Dec 22 17:50:41 2023 +0400 # Node ID 44266e0651c44f530c4aa66e68c1b9464a9acee7 # Parent 7fc017b776047b26c7e42b355a1bf142cf968537 SSL: reasonable version for LibreSSL adjusted. OPENSSL_VERSION_NUMBER is now redefined to 0x1010000fL for LibreSSL 3.5.0 and above. Building with older LibreSSL versions, such as 2.8.0, may now produce warnings (see cab37803ebb3) and may require appropriate compiler options to suppress them. Notably, this allows to start using SSL_get0_verified_chain() appeared in OpenSSL 1.1.0 and LibreSSL 3.5.0, without additional macro tests. Prodded by Ilya Shipitsin. diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -45,7 +45,7 @@ #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) #undef OPENSSL_VERSION_NUMBER -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) #define OPENSSL_VERSION_NUMBER 0x1010000fL #else #define OPENSSL_VERSION_NUMBER 0x1000107fL diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c --- a/src/event/ngx_event_openssl_stapling.c +++ b/src/event/ngx_event_openssl_stapling.c @@ -893,7 +893,7 @@ ngx_ssl_ocsp_validate(ngx_connection_t * ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; ocsp->conf = ocf; -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) +#if OPENSSL_VERSION_NUMBER >= 0x10100000L ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); -- Sergey Kandaurov From pluknet at nginx.com Fri Dec 22 14:28:34 2023 From: pluknet at nginx.com (=?iso-8859-1?q?Sergey_Kandaurov?=) Date: Fri, 22 Dec 2023 18:28:34 +0400 Subject: [PATCH] SSL: raised limit for upstream session size Message-ID: # HG changeset patch # User Sergey Kandaurov # Date 1703255284 -14400 # Fri Dec 22 18:28:04 2023 +0400 # Node ID a463fb67e143c051fd373d1df94e5813a37d5cea # Parent 44266e0651c44f530c4aa66e68c1b9464a9acee7 SSL: raised limit for upstream session size. 
Unlike shared session cache used to store multiple client SSL sessions and which may be per a single SSL connection, sessions saved from upstream are per upstream server peer, so there is no such multiplier effect, but they may be of noticeably larger size due to session tickets being used. It was observed that session tickets sent from JVM backends may result in a decoded session size nearly the previous maximum session size limit of 4096 or slightly beyond. Raising the limit allows to save such sessions. diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -137,7 +137,8 @@ struct ngx_ssl_connection_s { #define NGX_SSL_DFLT_BUILTIN_SCACHE -5 -#define NGX_SSL_MAX_SESSION_SIZE 4096 +#define NGX_SSL_MAX_SESSION_SIZE 4096 +#define NGX_SSL_CLIENT_MAX_SESSION_SIZE 8192 typedef struct ngx_ssl_sess_id_s ngx_ssl_sess_id_t; diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c --- a/src/http/ngx_http_upstream_round_robin.c +++ b/src/http/ngx_http_upstream_round_robin.c @@ -685,7 +685,7 @@ ngx_http_upstream_set_round_robin_peer_s int len; const u_char *p; ngx_http_upstream_rr_peers_t *peers; - u_char buf[NGX_SSL_MAX_SESSION_SIZE]; + u_char buf[NGX_SSL_CLIENT_MAX_SESSION_SIZE]; #endif peer = rrp->current; @@ -747,7 +747,7 @@ ngx_http_upstream_save_round_robin_peer_ int len; u_char *p; ngx_http_upstream_rr_peers_t *peers; - u_char buf[NGX_SSL_MAX_SESSION_SIZE]; + u_char buf[NGX_SSL_CLIENT_MAX_SESSION_SIZE]; #endif #if (NGX_HTTP_UPSTREAM_ZONE) @@ -768,7 +768,7 @@ ngx_http_upstream_save_round_robin_peer_ /* do not cache too big session */ - if (len > NGX_SSL_MAX_SESSION_SIZE) { + if (len > NGX_SSL_CLIENT_MAX_SESSION_SIZE) { return; } diff --git a/src/stream/ngx_stream_upstream_round_robin.c b/src/stream/ngx_stream_upstream_round_robin.c --- a/src/stream/ngx_stream_upstream_round_robin.c +++ b/src/stream/ngx_stream_upstream_round_robin.c @@ -717,7 +717,7 @@ ngx_stream_upstream_set_round_robin_peer int len; const u_char *p; ngx_stream_upstream_rr_peers_t *peers; - u_char buf[NGX_SSL_MAX_SESSION_SIZE]; + u_char buf[NGX_SSL_CLIENT_MAX_SESSION_SIZE]; #endif peer = rrp->current; @@ -779,7 +779,7 @@ ngx_stream_upstream_save_round_robin_pee int len; u_char *p; ngx_stream_upstream_rr_peers_t *peers; - u_char buf[NGX_SSL_MAX_SESSION_SIZE]; + u_char buf[NGX_SSL_CLIENT_MAX_SESSION_SIZE]; #endif #if (NGX_STREAM_UPSTREAM_ZONE) @@ -800,7 +800,7 @@ ngx_stream_upstream_save_round_robin_pee /* do not cache too big session */ - if (len > NGX_SSL_MAX_SESSION_SIZE) { + if (len > NGX_SSL_CLIENT_MAX_SESSION_SIZE) { return; } From mdounin at mdounin.ru Fri Dec 22 21:30:26 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 23 Dec 2023 00:30:26 +0300 Subject: [PATCH 2 of 2] Win32: extended ngx_random range to 0x7fffffff In-Reply-To: <20231222135230.xdxaiz5lrx4625oj@Y9MQ9X2QVV> References: <26182490-3FF6-491F-8BE9-FA7E342E54CF@nginx.com> <20231222135230.xdxaiz5lrx4625oj@Y9MQ9X2QVV> Message-ID: Hello! On Fri, Dec 22, 2023 at 05:52:30PM +0400, Sergey Kandaurov wrote: > On Thu, Dec 21, 2023 at 07:14:40PM +0300, Maxim Dounin wrote: > > Hello! > > > > On Thu, Dec 21, 2023 at 05:37:02PM +0400, Sergey Kandaurov wrote: > > > > > > On 16 Dec 2023, at 06:57, Maxim Dounin wrote: > > > > > > > > Hello! 
> > > > > > > > On Sat, Dec 09, 2023 at 08:42:11AM +0000, J Carter wrote: > > > > > > > >> On Sat, 09 Dec 2023 07:46:10 +0000 > > > >> J Carter wrote: > > > >> > > > >>> # HG changeset patch > > > >>> # User J Carter > > > >>> # Date 1702101635 0 > > > >>> # Sat Dec 09 06:00:35 2023 +0000 > > > >>> # Node ID 1a77698f82d2580aa8b8f62ce89b4dbb6d678c5d > > > >>> # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > > > >>> Win32: extended ngx_random range to 0x7fffffff > > > >>> > > > >>> rand() is used on win32. RAND_MAX is implementation defined. win32's is > > > >>> 0x7fff. > > > >>> > > > >>> Existing uses of ngx_random rely upon 0x7fffffff range provided by > > > >>> posix implementations of random(). > > > >>> > > > >>> diff -r d9275e982a71 -r 1a77698f82d2 src/os/win32/ngx_win32_config.h > > > >>> --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > > > >>> +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 06:00:35 2023 +0000 > > > >>> @@ -280,7 +280,9 @@ > > > >>> > > > >>> #define NGX_HAVE_GETADDRINFO 1 > > > >>> > > > >>> -#define ngx_random rand > > > >>> +#define ngx_random \ > > > >>> + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > > > >>> + > > > >>> #define ngx_debug_init() > > > >>> > > > >>> > > > >> > > > >> ^ my mistake - copying error.. > > > >> > > > >> # HG changeset patch > > > >> # User J Carter > > > >> # Date 1702111094 0 > > > >> # Sat Dec 09 08:38:14 2023 +0000 > > > >> # Node ID 10ef59a412a330872fc6d46de64f42e32e997b3a > > > >> # Parent d9275e982a7188a1ea7855295ffa93362ea9830f > > > >> Win32: extended ngx_random range to 0x7fffffff > > > > > > > > Nitpicking: > > > > > > > > Win32: extended ngx_random() range to 0x7fffffff. > > > > > > > >> > > > >> rand() is used on win32. RAND_MAX is implementation defined. win32's is > > > >> 0x7fff. > > > >> > > > >> Existing uses of ngx_random rely upon 0x7fffffff range provided by > > > >> posix implementations of random(). > > > > > > > > Thanks for catching this. > > > > > > > > As far as I can see, the only module which actually relies on the > > > > range is the random index module. Relying on the ngx_random() > > > > range generally looks wrong to me, and we might want to change the > > > > code to don't. OTOH, it's the only way to get a completely > > > > uniform distribution, and that's what the module tries to do. As > > > > such, it might be good enough to preserve it as is, at least till > > > > further changes to ngx_random(). > > > > > > > > Either way, wider range for ngx_random() should be beneficial in > > > > other places. > > > > > > > >> > > > >> diff -r d9275e982a71 -r 10ef59a412a3 src/os/win32/ngx_win32_config.h > > > >> --- a/src/os/win32/ngx_win32_config.h Sat Dec 09 05:09:07 2023 +0000 > > > >> +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 > > > >> @@ -280,7 +280,9 @@ > > > >> > > > >> #define NGX_HAVE_GETADDRINFO 1 > > > >> > > > >> -#define ngx_random rand > > > >> +#define ngx_random() \ > > > > > > > > Nitpicking: the "\" character should be at the 79th column (in > > > > some files at 78th). This ensures that a diff won't wrap on a > > > > standard 80-column terminal. > > > > > > > >> + ((rand() << 16) | (rand() << 1) | (rand() >> 14)) > > > >> + > > > >> #define ngx_debug_init() > > > > > > > > Using "|" for random numbers looks utterly wrong to me, even if > > > > ORed values are guaranteed to not interfere. 
> > > > > > > > I would rather use "^", and avoid dependency on the particular > > > > value of RAND_MAX (other than POSIX-required minimum of 32767) by > > > > using something like > > > > > > > > 0x7fffffff & ((rand() << 16) ^ (rand() << 8) ^ rand()) > > > > > > > > with proper typecasts. > > > > > > > > Something like this should work: > > > > > > > > diff --git a/src/os/win32/ngx_win32_config.h b/src/os/win32/ngx_win32_config.h > > > > --- a/src/os/win32/ngx_win32_config.h > > > > +++ b/src/os/win32/ngx_win32_config.h > > > > @@ -280,7 +280,11 @@ typedef int sig_atomic_t > > > > > > > > #define NGX_HAVE_GETADDRINFO 1 > > > > > > > > -#define ngx_random rand > > > > +#define ngx_random() \ > > > > + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ > > > > + ^ ((uint32_t) rand() << 8) \ > > > > + ^ ((uint32_t) rand()) ))) > > > > + > > > > #define ngx_debug_init() > > > > > > > > > > > > > > Nitpicking: you might want to re-align the "^" operator to the first > > > symbol of the left-hand operand (similar to NGX_CONF_TAKE1234, or > > > even NGX_UNIX_ADDRSTRLEN). Other than that, it looks good. > > > > It's intentionally aligned this way to simplify reading. Such > > style is occasionally used in complex macro definitions, see > > ngx_mp4_get_32value() or ngx_proxy_protocol_parse_uint32() for > > some examples. > > > > Just for completeness, below is the updated patch. > > > > # HG changeset patch > > # User J Carter > > # Date 1702111094 0 > > # Sat Dec 09 08:38:14 2023 +0000 > > # Node ID 92923ac5ea2a395774b28460f07d0fd2e1a2de24 > > # Parent cc16989c6d61385027c1ebfd43929f8369fa5f62 > > Win32: extended ngx_random() range to 0x7fffffff. > > > > rand() is used on win32. RAND_MAX is implementation defined. win32's is > > 0x7fff. > > I'd unwrap this line to look more pleasant, it fits into 80 columns. That's from the J Carter's original commit log, unmodified. While I can't say it cannot be improved, I've tried to refrain from changes. Also, wrapping here looks correct to me, 72 chars line width is perfectly fine unless there are reasons to use something different, and it is in line with wrapping of the rest of the commit log. > > > > > Existing uses of ngx_random() rely upon 0x7fffffff range provided by > > POSIX implementations of random(). > > > > diff -r cc16989c6d61 -r 92923ac5ea2a src/os/win32/ngx_win32_config.h > > --- a/src/os/win32/ngx_win32_config.h Sat Dec 16 03:40:01 2023 +0400 > > +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 > > @@ -280,7 +280,11 @@ typedef int sig_atomic_t > > > > #define NGX_HAVE_GETADDRINFO 1 > > > > -#define ngx_random rand > > +#define ngx_random() \ > > + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ > > If I'm not mistaken, indentation is now 5 spaces. > Otherwise, looks good. Yep, thanks for catching, slipped in from an earlier version of the patch. Fixed and pushed to http://mdounin.ru/hg/nginx, thanks for looking. [...] -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Fri Dec 22 21:46:47 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 23 Dec 2023 00:46:47 +0300 Subject: [PATCH] SSL: raised limit for upstream session size In-Reply-To: References: Message-ID: Hello! On Fri, Dec 22, 2023 at 06:28:34PM +0400, Sergey Kandaurov wrote: > # HG changeset patch > # User Sergey Kandaurov > # Date 1703255284 -14400 > # Fri Dec 22 18:28:04 2023 +0400 > # Node ID a463fb67e143c051fd373d1df94e5813a37d5cea > # Parent 44266e0651c44f530c4aa66e68c1b9464a9acee7 > SSL: raised limit for upstream session size. 
> > Unlike shared session cache used to store multiple client SSL sessions and > which may be per a single SSL connection, sessions saved from upstream are > per upstream server peer, so there is no such multiplier effect, but they > may be of noticeably larger size due to session tickets being used. > > It was observed that session tickets sent from JVM backends may result in > a decoded session size nearly the previous maximum session size limit of > 4096 or slightly beyond. Raising the limit allows to save such sessions. Session tickets are not expected to be larger than sessions itself, except by several bytes used for key identification and encryption overhead. I see no reasons why the limit should be different in different places. And 4096 for an SSL session looks a lot. The only justification I can assume here is an SSL session with the client certificate (or even certificate chain) being saved into the session. It might worth looking into what actually happens here. [...] -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Sat Dec 23 21:30:32 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 24 Dec 2023 00:30:32 +0300 Subject: [PATCH] ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 In-Reply-To: References: <2001e73ce136d5bfc9bd.1700771381@fedora> <18C46B67-C82F-490A-B3AA-14AF5D9CC369@nginx.com> Message-ID: Hello! On Fri, Dec 22, 2023 at 05:53:30PM +0400, Sergey Kandaurov wrote: > > > On 21 Dec 2023, at 02:40, Maxim Dounin wrote: > > > > Hello! > > > > On Tue, Dec 19, 2023 at 10:44:00PM +0400, Sergey Kandaurov wrote: > > > >> > >>> On 19 Dec 2023, at 12:58, Maxim Dounin wrote: > >>> > >>> Hello! > >>> > >>> On Tue, Dec 19, 2023 at 02:09:10AM +0400, Sergey Kandaurov wrote: > >>> > >>>>> On 24 Nov 2023, at 00:29, Ilya Shipitsin wrote: > >>>>> > >>>>> # HG changeset patch > >>>>> # User Ilya Shipitsin > >>>>> # Date 1700769135 -3600 > >>>>> # Thu Nov 23 20:52:15 2023 +0100 > >>>>> # Node ID 2001e73ce136d5bfc9bde27d338865b14b8ad436 > >>>>> # Parent 7ec761f0365f418511e30b82e9adf80bc56681df > >>>>> ssl: SSL_get0_verified_chain is available for LibreSSL >= 3.3.6 > >>>> > >>>> style: SSL prefix should be uppercase. > >>>> > >>>>> > >>>>> diff -r 7ec761f0365f -r 2001e73ce136 src/event/ngx_event_openssl_stapling.c > >>>>> --- a/src/event/ngx_event_openssl_stapling.c Thu Oct 26 23:35:09 2023 +0300 > >>>>> +++ b/src/event/ngx_event_openssl_stapling.c Thu Nov 23 20:52:15 2023 +0100 > >>>>> @@ -893,7 +893,8 @@ > >>>>> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > >>>>> ocsp->conf = ocf; > >>>>> > >>>>> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > >>>>> +/* minimum OpenSSL 1.1.1 & LibreSSL 3.3.6 */ > >>>>> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) || (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) > >>>>> > >>>>> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > >>>>> > >>>> > >>>> Testing "defined(LIBRESSL_VERSION_NUMBER)" is superfluous. > >>>> The macro test suffers from a very long line. 
> >>>> > >>>> The correct version test seems to be against LibreSSL 3.5.0, see > >>>> https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.5.0-relnotes.txt > >>>> > >>>> So, the resulting change would be as follows: > >>>> > >>>> diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c > >>>> --- a/src/event/ngx_event_openssl_stapling.c > >>>> +++ b/src/event/ngx_event_openssl_stapling.c > >>>> @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > >>>> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > >>>> ocsp->conf = ocf; > >>>> > >>>> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > >>>> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > >>>> + && !defined LIBRESSL_VERSION_NUMBER) \ > >>>> + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL > >>> > >>> Rather, > >>> > >>> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > >>> + && (!defined LIBRESSL_VERSION_NUMBER \ > >>> + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) > >>> > >> > >> Agree. For the sake of completeness: > >> > >> # HG changeset patch > >> # User Sergey Kandaurov > >> # Date 1703004490 -14400 > >> # Tue Dec 19 20:48:10 2023 +0400 > >> # Node ID 267cee796462f4f6bacf825c8fd24d13845d36f4 > >> # Parent 7a6d52990f2e2d88460a3dc6cc84aac89b7329ea > >> SSL: using SSL_get0_verified_chain() with LibreSSL 3.5.0+. > >> > >> Prodded by Ilya Shipitsin. > >> > >> diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c > >> --- a/src/event/ngx_event_openssl_stapling.c > >> +++ b/src/event/ngx_event_openssl_stapling.c > >> @@ -893,7 +893,9 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > >> ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > >> ocsp->conf = ocf; > >> > >> -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > >> +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L \ > >> + && (!defined LIBRESSL_VERSION_NUMBER \ > >> + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL)) > >> > >> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > >> > >> > >>>> > >>>> ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > >>>> > >>>> > >>>> On the other hand, I don't like the resulting style mudness. > >>>> It may have sense just to drop old LibreSSL versions support: > >>>> maintaining one or two most recent stable branches should be enough. > >>> > >>> +1 on this. > >>> > >>> We certainly don't want to maintain support for ancient LibreSSL > >>> versions. IMO, just two branches is more than enough (and this is > >>> what I use in my testing, which usually means latest development > >>> release and latest stable release). > >>> > >>> At most, this probably can be three branches - which seems to > >>> match LibreSSL support policy, > >>> https://www.libressl.org/releases.html: > >>> > >>> : LibreSSL transitions to a new stable release branch every 6 months > >>> : in coordination with the OpenBSD development schedule. LibreSSL > >>> : stable branches are updated for 1 year after their corresponding > >>> : OpenBSD branch is tagged for release. See below for the current > >>> : stable release branches. > >>> > >>> In either case, LibreSSL versions below 3.5.0 are already not > >>> supported. If I understand correctly, right now the oldest > >>> supported branch is 3.7.x. > >> > >> Agree. 
Also, Repology shows that modern popular distributions, > >> such as Alpine Linux and FreeBSD, have at least LibreSSL 3.5.x: > >> https://repology.org/project/libressl/versions > >> > >>> > >>>> But anyway, I don't see an obvious win over the existing code: > >>>> the certificate chain is reconstructed if SSL_get0_verified_chain() > >>>> is (detected to be) not present, which should be fine in most cases. > >>>> > >>>> That said, it doesn't seem to deserve introducing 3-line macro test, > >>>> or (see OTOH note) breaking old LibreSSL support for no apparent reason. > >>> > >>> Reconstruction of the chain implies verification of signatures > >>> along the chain and can be costly. As such, it certainly would be > >>> better to use SSL_get0_verified_chain() as long as it is > >>> available. > >> > >> Agree. > >> My point is that not using SSL_get0_verified_chain() should not result > >> in a broken functionality, as in the OCSP cert validation. > >> So, intention to start using it in LibreSSL may look an insufficient > >> argument per se to drop old LibreSSL versions. > >> Though, dropping them may be orthogonal to SSL_get0_verified_chain(). > >> > >>> > >>> Also, removing the "!defined LIBRESSL_VERSION_NUMBER" check might > >>> be seen as positive even without any additional benefits. > >>> > >>> Along with that, however, we might want to adjust the > >>> LIBRESSL_VERSION_NUMBER check in the ngx_event_openssl.h file, so > >>> OPENSSL_VERSION_NUMBER is set to a better value for old LibreSSL > >>> versions - for example, to only set OPENSSL_VERSION_NUMBER to > >>> 0x1010000fL for LibreSSL 3.5.0 or above. > >> > >> Sounds like a plan if we are fine to drop older LibreSSL versions. > >> > >>> This might allow to > >>> preserve limited compatibility with ancient LibreSSL versions > >>> without additional efforts (not tested though). > >>> > >> > >> This won't build with any LibreSSL version in the [2.8.0, 3.5.0) range. > >> Particularly, SSL_CTX_sess_set_get_cb() has got a const argument in > >> LibreSSL 2.8.0, which is not backward compatible, see 7337:cab37803ebb3. > >> Another reason is SSL was made opaque by default in 3.4.x. > > > > The const argument can be easily ignored by using > > -Wno-error=incompatible-function-pointer-types (or just > > -Wno-error), which seems to be reasonable when trying to build > > things with ancient libraries. This makes it possible to build > > with LibreSSL 2.8.0-3.3.6 with minimal efforts. > > Ok, that makes sense. > For the record, GCC uses another warning option name: > > src/event/ngx_event_openssl.c:3770:43: error: passing argument 2 of 'SSL_CTX_sess_set_get_cb' from incompatible pointer type [-Werror=incompatible-pointer-type] > > There is a typo in GCC 12 diagnostics, the actual option name > appears to be -Wno-incompatible-pointer-types (note "s"). > BTW, it appears to be a valid option name in Clang as well > to suppress such warnings. Yep, obviously enough required options might be different in different compilers. > > For LibreSSL 3.4.x, the dependency on the SSL internals can be > > easily eliminated by testing SSL_OP_NO_CLIENT_RENEGOTIATION, which > > anyway seems to be a reasonable change. > > Agree, this is really true as tested on various LibreSSL versions, > from 2.5.0 to 3.8.2 (SSL_OP_NO_CLIENT_RENEGOTIATION appeared in 2.6.0). 
> > > > >> > >> (Others seem not to affect building on older versions if pick up 3.5.0: > >> - X509_up_ref appeared in LibreSSL 2.6.0, X509 made opaque in 3.5.0; > >> - X509_get0_notAfter appeared in 2.7.0, X509_get_notAfter still there.) > >> > >> Personally I'm fine to drop ancient LibreSSL versions, because it has > >> to happen someday and we don't want to maintain them eternally. > >> Alternative patch for your consideration: > >> > >> # HG changeset patch > >> # User Sergey Kandaurov > >> # Date 1703011348 -14400 > >> # Tue Dec 19 22:42:28 2023 +0400 > >> # Node ID 94d4ef4a2316da66fea084952913ff2b0f84827d > >> # Parent 7a6d52990f2e2d88460a3dc6cc84aac89b7329ea > >> SSL: removed compatibility with LibreSSL < 3.5.0. > >> > >> OPENSSL_VERSION_NUMBER is now redefined to 0x1010000fL for LibreSSL 3.5.0+. > >> As older versions starting from LibreSSL 2.8.0 won't build with a lesser > >> OPENSSL_VERSION_NUMBER value (see 7337:cab37803ebb3 for details), they are > >> now explicitly unsupported. > >> > >> Besides that, this allows to start using SSL_get0_verified_chain() > >> with LibreSSL without additional macro tests. > >> > >> diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h > >> --- a/src/event/ngx_event_openssl.h > >> +++ b/src/event/ngx_event_openssl.h > >> @@ -45,10 +45,10 @@ > >> > >> #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) > >> #undef OPENSSL_VERSION_NUMBER > >> -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) > >> +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) > >> #define OPENSSL_VERSION_NUMBER 0x1010000fL > >> #else > >> -#define OPENSSL_VERSION_NUMBER 0x1000107fL > >> +#error LibreSSL too old, need at least 3.5.0 > >> #endif > >> #endif > > > > I'm certainly against the idea of explicitly rejecting old > > versions. As demonstrated above, even versions affected by > > various changes can be used with minimal efforts, such as > > disabling -Werror. > > > > For the record, here is a patch I tested with: > > > > diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c > > --- a/src/event/ngx_event_openssl.c > > +++ b/src/event/ngx_event_openssl.c > > @@ -1105,7 +1105,8 @@ ngx_ssl_info_callback(const ngx_ssl_conn > > BIO *rbio, *wbio; > > ngx_connection_t *c; > > > > -#ifndef SSL_OP_NO_RENEGOTIATION > > +#if (!defined SSL_OP_NO_RENEGOTIATION \ > > + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) > > > > if ((where & SSL_CB_HANDSHAKE_START) > > && SSL_is_server((ngx_ssl_conn_t *) ssl_conn)) > > @@ -1838,9 +1839,10 @@ ngx_ssl_handshake(ngx_connection_t *c) > > c->read->ready = 1; > > c->write->ready = 1; > > > > -#ifndef SSL_OP_NO_RENEGOTIATION > > -#if OPENSSL_VERSION_NUMBER < 0x10100000L > > -#ifdef SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS > > +#if (!defined SSL_OP_NO_RENEGOTIATION \ > > + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION \ > > + && defined SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS \ > > + && OPENSSL_VERSION_NUMBER < 0x10100000L) > > Keeping macro tests nested allows to add/remove them with minimal diff. > I don't mind though to combine primaries into a single test, this is > what nginx happens to use in the rest style. The only justified > exceptions I found that use nested tests are "#if (NGX_GNU_HURD)" > and recently introduced "#if (NGX_QUIC)" in ngx_event_openssl.h. Here nesting is mostly historic due to different tests added over time. Given there are 4 tests now, and SSL_OP_NO_RENEGOTIATION is combined with SSL_OP_NO_CLIENT_RENEGOTIATION in other places, combining all the tests looks like the way to go. 
> > > > > /* initial handshake done, disable renegotiation (CVE-2009-3555) */ > > if (c->ssl->connection->s3 && SSL_is_server(c->ssl->connection)) { > > @@ -1848,8 +1850,6 @@ ngx_ssl_handshake(ngx_connection_t *c) > > } > > > > #endif > > -#endif > > -#endif > > > > #if (defined BIO_get_ktls_send && !NGX_WIN32) > > > > @@ -2483,7 +2483,8 @@ ngx_ssl_handle_recv(ngx_connection_t *c, > > int sslerr; > > ngx_err_t err; > > > > -#ifndef SSL_OP_NO_RENEGOTIATION > > +#if (!defined SSL_OP_NO_RENEGOTIATION \ > > + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) > > > > if (c->ssl->renegotiation) { > > /* > > diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h > > --- a/src/event/ngx_event_openssl.h > > +++ b/src/event/ngx_event_openssl.h > > @@ -45,7 +45,7 @@ > > > > #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) > > #undef OPENSSL_VERSION_NUMBER > > -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) > > +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) > > #define OPENSSL_VERSION_NUMBER 0x1010000fL > > #else > > #define OPENSSL_VERSION_NUMBER 0x1000107fL > > diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c > > --- a/src/event/ngx_event_openssl_stapling.c > > +++ b/src/event/ngx_event_openssl_stapling.c > > @@ -893,7 +893,7 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > > ocsp->conf = ocf; > > > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > > +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) > > Not sure if brackets are needed here, since it's no longer combined. > Currently, there are 17 occurrences in nginx that don't use brackets, > and 3 simple cases that do. I think that brackets are generally needed, but OPENSSL_VERSION_NUMBER used to be checked without brackets in most cases. As such, I don't object either variant. If needed, brackets can be added to all affected #if's by a separate style-only commit. > > > > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > > > > Overall, things look good to me. > As discussed, broke this into two changes: > > # HG changeset patch > # User Sergey Kandaurov > # Date 1703252997 -14400 > # Fri Dec 22 17:49:57 2023 +0400 > # Node ID 7fc017b776047b26c7e42b355a1bf142cf968537 > # Parent 500071f3265d259eb1917cd8367828834ff0ae14 > SSL: disabled renegotiation checks with LibreSSL. > > Similar to 7356:e3ba4026c02d, as long as SSL_OP_NO_CLIENT_RENEGOTIATION > is defined, it is the library responsibility to prevent renegotiation. > > Additionally, this allows to raise LibreSSL version used to redefine > OPENSSL_VERSION_NUMBER to 0x1010000fL, such that this won't result in > attempts to dereference SSL objects made opaque in LibreSSL 3.4.0. > > Patch by Maxim Dounin. 
> > diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c > --- a/src/event/ngx_event_openssl.c > +++ b/src/event/ngx_event_openssl.c > @@ -1105,7 +1105,8 @@ ngx_ssl_info_callback(const ngx_ssl_conn > BIO *rbio, *wbio; > ngx_connection_t *c; > > -#ifndef SSL_OP_NO_RENEGOTIATION > +#if (!defined SSL_OP_NO_RENEGOTIATION \ > + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) > > if ((where & SSL_CB_HANDSHAKE_START) > && SSL_is_server((ngx_ssl_conn_t *) ssl_conn)) > @@ -1838,9 +1839,10 @@ ngx_ssl_handshake(ngx_connection_t *c) > c->read->ready = 1; > c->write->ready = 1; > > -#ifndef SSL_OP_NO_RENEGOTIATION > -#if OPENSSL_VERSION_NUMBER < 0x10100000L > -#ifdef SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS > +#if (!defined SSL_OP_NO_RENEGOTIATION \ > + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION \ > + && defined SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS \ > + && OPENSSL_VERSION_NUMBER < 0x10100000L) > > /* initial handshake done, disable renegotiation (CVE-2009-3555) */ > if (c->ssl->connection->s3 && SSL_is_server(c->ssl->connection)) { > @@ -1848,8 +1850,6 @@ ngx_ssl_handshake(ngx_connection_t *c) > } > > #endif > -#endif > -#endif > > #if (defined BIO_get_ktls_send && !NGX_WIN32) > > @@ -2483,7 +2483,8 @@ ngx_ssl_handle_recv(ngx_connection_t *c, > int sslerr; > ngx_err_t err; > > -#ifndef SSL_OP_NO_RENEGOTIATION > +#if (!defined SSL_OP_NO_RENEGOTIATION \ > + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) > > if (c->ssl->renegotiation) { > /* > # HG changeset patch > # User Sergey Kandaurov > # Date 1703253041 -14400 > # Fri Dec 22 17:50:41 2023 +0400 > # Node ID 44266e0651c44f530c4aa66e68c1b9464a9acee7 > # Parent 7fc017b776047b26c7e42b355a1bf142cf968537 > SSL: reasonable version for LibreSSL adjusted. > > OPENSSL_VERSION_NUMBER is now redefined to 0x1010000fL for LibreSSL 3.5.0 > and above. Building with older LibreSSL versions, such as 2.8.0, may now > produce warnings (see cab37803ebb3) and may require appropriate compiler > options to suppress them. > > Notably, this allows to start using SSL_get0_verified_chain() appeared > in OpenSSL 1.1.0 and LibreSSL 3.5.0, without additional macro tests. > > Prodded by Ilya Shipitsin. > > diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h > --- a/src/event/ngx_event_openssl.h > +++ b/src/event/ngx_event_openssl.h > @@ -45,7 +45,7 @@ > > #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) > #undef OPENSSL_VERSION_NUMBER > -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) > +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) > #define OPENSSL_VERSION_NUMBER 0x1010000fL > #else > #define OPENSSL_VERSION_NUMBER 0x1000107fL > diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c > --- a/src/event/ngx_event_openssl_stapling.c > +++ b/src/event/ngx_event_openssl_stapling.c > @@ -893,7 +893,7 @@ ngx_ssl_ocsp_validate(ngx_connection_t * > ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; > ocsp->conf = ocf; > > -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) > +#if OPENSSL_VERSION_NUMBER >= 0x10100000L > > ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); > Looks good. 
-- Maxim Dounin http://mdounin.ru/ From vl at inspert.ru Mon Dec 25 16:52:42 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:42 +0300 Subject: [PATCH 01 of 12] QUIC: fixed accounting of in-flight PING frames In-Reply-To: References: Message-ID: <5ea917e44e03e88a2b6b.1703523162@vlws> Previously, such frames were not accounted as in-flight, and they were not stored in sent queue. This prevented proper PTO calculation and ACK handling. src/event/quic/ngx_event_quic_ack.c | 62 +++++++++++++++++++++++++----------- 1 files changed, 43 insertions(+), 19 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-01.patch Type: text/x-patch Size: 3221 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:41 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:41 +0300 Subject: [PATCH 00 of 12] HTTP/3 proxying to upstreams Message-ID: Hello, everyone, and Merry Christmas to all! I'm a developer of an nginx fork Angie. Recently we implemented an HTTP/3 proxy support in our fork [1]. We'd like to contribute this functionality to nginx OSS community. Hence here is a patch series backported from Angie to the current head of nginx mainline branch (1.25.3) If you find patching and building nginx from source irritating in order to test the feature, you can use the prebuilt packages of Angie [2] [1] https://angie.software/en/http_proxy/#proxy-http-version [2] https://angie.software/en/install/ Your feedback is welcome! __. .--, *-/___, ,-/___,-/___,-/___,-/___, _.-.=,{\/ _/ /`) `\ _ ),-/___,-/___,-/___,-/___, ) _..-'`-(`._(_.;` / /< \\=`\ _ )`\ _ )`\ _ )`\ _ )<`--''` (__\_________/___, /< <\ directives is available that configure quic settings. For the interop testing purposes, HQ support is available. Below are technical details about the current state of the patch set. *** TESTS *** The patchset includes tests which are added to the "t" directory for convenience. Copy them to nginx-tests and run them as usual. Most of them are proxy tests adapted for use with HTTP/3. *** LIMITATIONS *** The following features are NOT implemented: * Trailers: it requires full trailers support in nginx first * Connection migration: does not seem necessary for proxying scenarios * 0RTT: currently not supported The SSL library requirements are the same as for the server-side support. There are some interoperability issues when using different libraries on client and server: the combination of client + openssl/compat and server + boringssl leads to a handshake failure with an error: >> SSL_do_handshake() failed (SSL: error:10000132:SSL routines: >> OPENSSL_internal:UNEXPECTED_COMPATIBILITY_MODE) *** MULTIPLEXING *** With keepalive disabled, the HTTP/3 connection to backend is very similar to a normal TCP SSL connection: the connection is established, handshake is performed, the request stream is created and everything is closed when the request is completed. With keepalive enabled, the underlying QUIC connection is cached, and can be reused by another client. Each client is using its own QUIC connection. Theoretically, it is possible to use only one quic connection to each backend and use separate HTTP/3 streams to make requests. This is NOT currently implemented, as it requires more changes to the upstream and keepalive modules and has security implications. 
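As a rough illustration of the keepalive behaviour described above, a
configuration along the following lines should exercise it (a sketch only:
the proxy_http_version/proxy_pass usage mirrors the example given with
patch 11 below, the upstream block and the "keepalive" directive are stock
nginx, and the fork-specific quic tuning directives are not shown):

    upstream h3_backend {
        server http3-server.example.com:4433;
        keepalive 4;    # cache idle QUIC connections to the backend
    }

    server {
        listen 8080;

        location /foo {
            proxy_http_version 3;
            proxy_pass https://h3_backend;
        }
    }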
*** INTERNALS *** This is a first attempt to integrating the HTTP/3 proxy into nginx, so all currently exposed interfaces are not final. Probably, the HTTP/3 proxy should be implemented in a separate module. Currently it is a patch to the HTTP proxy module to minimize boilerplate. Things that need improvement: - client interface: the way to create client, start handshake and create first stream to use for request; The way SSL sessions are supported doesn't look good. - upstreams interface: one way is to hide quic details and make it feel more SSL-like, maybe even kind of SSL module. Probably need a separate keepalive module for HTTP/3 to allow some controlled level of multiplexing. - connection termination is quite tricky due to the handling of the underlying quic UDP connection and stream requests. Closing an HTTP/3 connection may be incorrect in some cases. - Some interop tests still fail. This is partly due to the nature of the tests. This part requires more work with hard-to-reproduce cases. From vl at inspert.ru Mon Dec 25 16:52:44 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:44 +0300 Subject: [PATCH 03 of 12] QUIC: added a structure for stream limits/counters In-Reply-To: References: Message-ID: To simplify code dealing with stream states when both client and server are supported, instead of 8 named fields, use two structures split into uni/bidi and client/server. src/event/quic/ngx_event_quic.c | 8 ++-- src/event/quic/ngx_event_quic_ack.c | 4 +- src/event/quic/ngx_event_quic_connection.h | 23 +++++++---- src/event/quic/ngx_event_quic_streams.c | 60 +++++++++++++++--------------- 4 files changed, 50 insertions(+), 45 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-03.patch Type: text/x-patch Size: 10179 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:43 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:43 +0300 Subject: [PATCH 02 of 12] QUIC: renamed "ctp" to "peer_tp" In-Reply-To: References: Message-ID: The "ctp" refers to "client transport parameters", but in the code that supports both client and server, the name is confusing, thus rename. src/event/quic/ngx_event_quic.c | 41 +++++++++++++------------ src/event/quic/ngx_event_quic_ack.c | 8 ++-- src/event/quic/ngx_event_quic_connection.h | 5 +- src/event/quic/ngx_event_quic_connid.c | 4 +- src/event/quic/ngx_event_quic_migration.c | 7 ++- src/event/quic/ngx_event_quic_openssl_compat.c | 11 +++--- src/event/quic/ngx_event_quic_ssl.c | 9 +++-- src/event/quic/ngx_event_quic_streams.c | 9 +++-- 8 files changed, 51 insertions(+), 43 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-02.patch Type: text/x-patch Size: 12766 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:45 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:45 +0300 Subject: [PATCH 04 of 12] QUIC: client support In-Reply-To: References: Message-ID: Patch subject is complete summary. 
src/event/quic/ngx_event_quic.c | 617 ++++++++++++++++++++++++- src/event/quic/ngx_event_quic.h | 11 + src/event/quic/ngx_event_quic_ack.c | 13 + src/event/quic/ngx_event_quic_ack.h | 2 + src/event/quic/ngx_event_quic_connection.h | 10 + src/event/quic/ngx_event_quic_connid.c | 64 ++- src/event/quic/ngx_event_quic_connid.h | 7 +- src/event/quic/ngx_event_quic_migration.c | 10 +- src/event/quic/ngx_event_quic_openssl_compat.c | 4 + src/event/quic/ngx_event_quic_output.c | 109 +++- src/event/quic/ngx_event_quic_protection.c | 101 ++- src/event/quic/ngx_event_quic_protection.h | 3 + src/event/quic/ngx_event_quic_socket.c | 71 ++- src/event/quic/ngx_event_quic_socket.h | 4 +- src/event/quic/ngx_event_quic_ssl.c | 172 +++++- src/event/quic/ngx_event_quic_ssl.h | 3 + src/event/quic/ngx_event_quic_streams.c | 550 +++++++++++++++------ src/event/quic/ngx_event_quic_streams.h | 3 + src/event/quic/ngx_event_quic_tokens.c | 48 + src/event/quic/ngx_event_quic_tokens.h | 9 + src/event/quic/ngx_event_quic_transport.c | 214 ++++++-- src/event/quic/ngx_event_quic_transport.h | 7 +- 22 files changed, 1677 insertions(+), 355 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-04.patch Type: text/x-patch Size: 98195 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:47 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:47 +0300 Subject: [PATCH 06 of 12] HTTP/3: make http/3 request defines available In-Reply-To: References: Message-ID: Patch subject is complete summary. src/http/v3/ngx_http_v3.h | 20 ++++++++++++++++++++ src/http/v3/ngx_http_v3_filter_module.c | 20 +------------------- 2 files changed, 21 insertions(+), 19 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-06.patch Type: text/x-patch Size: 3100 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:46 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:46 +0300 Subject: [PATCH 05 of 12] QUIC: client loss detection updates In-Reply-To: References: Message-ID: Patch subject is complete summary. src/event/quic/ngx_event_quic_ack.c | 69 +++++++++++++++++++++++++++++++++++- 1 files changed, 66 insertions(+), 3 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-05.patch Type: text/x-patch Size: 4103 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:48 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:48 +0300 Subject: [PATCH 07 of 12] Upstream: refactored upstream initialization In-Reply-To: References: Message-ID: No functional changes. This will be used by the following patches. src/http/ngx_http_upstream.c | 133 +++++++++++++++++++++++------------------- 1 files changed, 74 insertions(+), 59 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-07.patch Type: text/x-patch Size: 5960 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:49 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:49 +0300 Subject: [PATCH 08 of 12] Upstream: separate function to handle upstream connection closing In-Reply-To: References: Message-ID: <1fb8eae095661a3fa1ce.1703523169@vlws> No functional changes. 
src/http/ngx_http_upstream.c | 91 ++++++++++++++++++++----------------------- 1 files changed, 43 insertions(+), 48 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-08.patch Type: text/x-patch Size: 4039 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:50 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:50 +0300 Subject: [PATCH 09 of 12] HTTP/3: added $quic_connection variable In-Reply-To: References: Message-ID: <183d5a20c159a380d9a7.1703523170@vlws> The variable contains number of main quic connection (shared between streams). This is useful for keepalive tests. src/http/v3/ngx_http_v3_module.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 files changed, 40 insertions(+), 0 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-09.patch Type: text/x-patch Size: 2266 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:51 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:51 +0300 Subject: [PATCH 10 of 12] Added host/host_set logic to proxy module In-Reply-To: References: Message-ID: Patch is to be merged with next. This is basically a copy from grpc proxy. src/http/modules/ngx_http_proxy_module.c | 67 ++++++++++++++++++++++++++++++++ 1 files changed, 67 insertions(+), 0 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-10.patch Type: text/x-patch Size: 4438 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:52 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:52 +0300 Subject: [PATCH 11 of 12] Proxy: HTTP/3 support In-Reply-To: References: Message-ID: <6150bf13f72af4f2ecc9.1703523172@vlws> Example configuration: location /foo { proxy_http_version 3; proxy_pass https://http3-server.example.com:4433; } src/http/modules/ngx_http_proxy_module.c | 2276 ++++++++++++++++- src/http/modules/ngx_http_upstream_keepalive_module.c | 47 +- src/http/ngx_http_header_filter_module.c | 50 + src/http/ngx_http_request.h | 2 + src/http/ngx_http_upstream.c | 556 ++++- src/http/ngx_http_upstream.h | 14 + src/http/v3/ngx_http_v3.h | 7 + src/http/v3/ngx_http_v3_parse.c | 36 +- src/http/v3/ngx_http_v3_request.c | 23 + src/http/v3/ngx_http_v3_uni.c | 45 +- 10 files changed, 3018 insertions(+), 38 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-11.patch Type: text/x-patch Size: 100946 bytes Desc: not available URL: From vl at inspert.ru Mon Dec 25 16:52:53 2023 From: vl at inspert.ru (=?iso-8859-1?q?Vladimir_Homutov?=) Date: Mon, 25 Dec 2023 19:52:53 +0300 Subject: [PATCH 12 of 12] Tests: HTTP/3 proxy tests In-Reply-To: References: Message-ID: Added to patchset for for convenience. Contents of the "t" directory is to be put into nginx-tests repository. 
t/proxy_h3_bind.t | 129 ++++ t/proxy_h3_cache.t | 171 ++++++ t/proxy_h3_cache_bypass.t | 126 ++++ t/proxy_h3_cache_control.t | 302 ++++++++++ t/proxy_h3_cache_convert_head.t | 125 ++++ t/proxy_h3_cache_error.t | 113 +++ t/proxy_h3_cache_lock_ssi.t | 132 ++++ t/proxy_h3_cache_max_range_offset.t | 150 +++++ t/proxy_h3_cache_min_free.t | 106 +++ t/proxy_h3_cache_path.t | 115 ++++ t/proxy_h3_cache_range.t | 149 +++++ t/proxy_h3_cache_revalidate.t | 195 ++++++ t/proxy_h3_cache_use_stale.t | 331 +++++++++++ t/proxy_h3_cache_valid.t | 160 +++++ t/proxy_h3_cache_variables.t | 121 ++++ t/proxy_h3_cache_vary.t | 359 ++++++++++++ t/proxy_h3_client.t | 883 +++++++++++++++++++++++++++++++ t/proxy_h3_cookie.t | 149 +++++ t/proxy_h3_cookie_flags.t | 172 ++++++ t/proxy_h3_force_ranges.t | 157 +++++ t/proxy_h3_if.t | 257 +++++++++ t/proxy_h3_intercept_errors.t | 131 ++++ t/proxy_h3_max_temp_file_size.t | 111 +++ t/proxy_h3_next_upstream.t | 184 ++++++ t/proxy_h3_non_idempotent.t | 163 +++++ t/proxy_h3_redirect.t | 185 ++++++ t/proxy_h3_request_buffering.t | 316 +++++++++++ t/proxy_h3_request_buffering_chunked.t | 367 ++++++++++++ t/proxy_h3_request_buffering_keepalive.t | 113 +++ t/proxy_h3_request_buffering_ssl.t | 321 +++++++++++ t/proxy_h3_ssl_certificate.t | 152 +++++ t/proxy_h3_ssl_certificate_empty.t | 113 +++ t/proxy_h3_ssl_certificate_vars.t | 154 +++++ t/proxy_h3_ssl_name.t | 165 +++++ t/proxy_h3_ssl_verify.t | 176 ++++++ t/proxy_h3_upstream_cookie.t | 145 +++++ 36 files changed, 7198 insertions(+), 0 deletions(-) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-12.patch Type: text/x-patch Size: 192145 bytes Desc: not available URL: From pluknet at nginx.com Mon Dec 25 17:15:26 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Mon, 25 Dec 2023 17:15:26 +0000 Subject: [nginx] Win32: extended ngx_random() range to 0x7fffffff. Message-ID: details: https://hg.nginx.org/nginx/rev/514c518b9d6c branches: changeset: 9198:514c518b9d6c user: J Carter date: Sat Dec 09 08:38:14 2023 +0000 description: Win32: extended ngx_random() range to 0x7fffffff. rand() is used on win32. RAND_MAX is implementation defined. win32's is 0x7fff. Existing uses of ngx_random() rely upon 0x7fffffff range provided by POSIX implementations of random(). diffstat: src/os/win32/ngx_win32_config.h | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diffs (16 lines): diff -r cc16989c6d61 -r 514c518b9d6c src/os/win32/ngx_win32_config.h --- a/src/os/win32/ngx_win32_config.h Sat Dec 16 03:40:01 2023 +0400 +++ b/src/os/win32/ngx_win32_config.h Sat Dec 09 08:38:14 2023 +0000 @@ -280,7 +280,11 @@ typedef int sig_atomic_t #define NGX_HAVE_GETADDRINFO 1 -#define ngx_random rand +#define ngx_random() \ + ((long) (0x7fffffff & ( ((uint32_t) rand() << 16) \ + ^ ((uint32_t) rand() << 8) \ + ^ ((uint32_t) rand()) ))) + #define ngx_debug_init() From pluknet at nginx.com Mon Dec 25 18:52:03 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Mon, 25 Dec 2023 18:52:03 +0000 Subject: [nginx] SSL: disabled renegotiation checks with LibreSSL. Message-ID: details: https://hg.nginx.org/nginx/rev/875cd36b8617 branches: changeset: 9199:875cd36b8617 user: Sergey Kandaurov date: Mon Dec 25 21:15:47 2023 +0400 description: SSL: disabled renegotiation checks with LibreSSL. Similar to 7356:e3ba4026c02d, as long as SSL_OP_NO_CLIENT_RENEGOTIATION is defined, it is the library responsibility to prevent renegotiation. 
Additionally, this allows to raise LibreSSL version used to redefine OPENSSL_VERSION_NUMBER to 0x1010000fL, such that this won't result in attempts to dereference SSL objects made opaque in LibreSSL 3.4.0. Patch by Maxim Dounin. diffstat: src/event/ngx_event_openssl.c | 15 ++++++++------- 1 files changed, 8 insertions(+), 7 deletions(-) diffs (46 lines): diff -r 514c518b9d6c -r 875cd36b8617 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Sat Dec 09 08:38:14 2023 +0000 +++ b/src/event/ngx_event_openssl.c Mon Dec 25 21:15:47 2023 +0400 @@ -1105,7 +1105,8 @@ ngx_ssl_info_callback(const ngx_ssl_conn BIO *rbio, *wbio; ngx_connection_t *c; -#ifndef SSL_OP_NO_RENEGOTIATION +#if (!defined SSL_OP_NO_RENEGOTIATION \ + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) if ((where & SSL_CB_HANDSHAKE_START) && SSL_is_server((ngx_ssl_conn_t *) ssl_conn)) @@ -1838,9 +1839,10 @@ ngx_ssl_handshake(ngx_connection_t *c) c->read->ready = 1; c->write->ready = 1; -#ifndef SSL_OP_NO_RENEGOTIATION -#if OPENSSL_VERSION_NUMBER < 0x10100000L -#ifdef SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS +#if (!defined SSL_OP_NO_RENEGOTIATION \ + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION \ + && defined SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS \ + && OPENSSL_VERSION_NUMBER < 0x10100000L) /* initial handshake done, disable renegotiation (CVE-2009-3555) */ if (c->ssl->connection->s3 && SSL_is_server(c->ssl->connection)) { @@ -1848,8 +1850,6 @@ ngx_ssl_handshake(ngx_connection_t *c) } #endif -#endif -#endif #if (defined BIO_get_ktls_send && !NGX_WIN32) @@ -2483,7 +2483,8 @@ ngx_ssl_handle_recv(ngx_connection_t *c, int sslerr; ngx_err_t err; -#ifndef SSL_OP_NO_RENEGOTIATION +#if (!defined SSL_OP_NO_RENEGOTIATION \ + && !defined SSL_OP_NO_CLIENT_RENEGOTIATION) if (c->ssl->renegotiation) { /* From pluknet at nginx.com Mon Dec 25 18:52:06 2023 From: pluknet at nginx.com (=?utf-8?q?Sergey_Kandaurov?=) Date: Mon, 25 Dec 2023 18:52:06 +0000 Subject: [nginx] SSL: reasonable version for LibreSSL adjusted. Message-ID: details: https://hg.nginx.org/nginx/rev/ee40e2b1d083 branches: changeset: 9200:ee40e2b1d083 user: Sergey Kandaurov date: Mon Dec 25 21:15:48 2023 +0400 description: SSL: reasonable version for LibreSSL adjusted. OPENSSL_VERSION_NUMBER is now redefined to 0x1010000fL for LibreSSL 3.5.0 and above. Building with older LibreSSL versions, such as 2.8.0, may now produce warnings (see cab37803ebb3) and may require appropriate compiler options to suppress them. Notably, this allows to start using SSL_get0_verified_chain() appeared in OpenSSL 1.1.0 and LibreSSL 3.5.0, without additional macro tests. Prodded by Ilya Shipitsin. 
diffstat: src/event/ngx_event_openssl.h | 2 +- src/event/ngx_event_openssl_stapling.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diffs (24 lines): diff -r 875cd36b8617 -r ee40e2b1d083 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Mon Dec 25 21:15:47 2023 +0400 +++ b/src/event/ngx_event_openssl.h Mon Dec 25 21:15:48 2023 +0400 @@ -45,7 +45,7 @@ #if (defined LIBRESSL_VERSION_NUMBER && OPENSSL_VERSION_NUMBER == 0x20000000L) #undef OPENSSL_VERSION_NUMBER -#if (LIBRESSL_VERSION_NUMBER >= 0x2080000fL) +#if (LIBRESSL_VERSION_NUMBER >= 0x3050000fL) #define OPENSSL_VERSION_NUMBER 0x1010000fL #else #define OPENSSL_VERSION_NUMBER 0x1000107fL diff -r 875cd36b8617 -r ee40e2b1d083 src/event/ngx_event_openssl_stapling.c --- a/src/event/ngx_event_openssl_stapling.c Mon Dec 25 21:15:47 2023 +0400 +++ b/src/event/ngx_event_openssl_stapling.c Mon Dec 25 21:15:48 2023 +0400 @@ -893,7 +893,7 @@ ngx_ssl_ocsp_validate(ngx_connection_t * ocsp->cert_status = V_OCSP_CERTSTATUS_GOOD; ocsp->conf = ocf; -#if (OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined LIBRESSL_VERSION_NUMBER) +#if OPENSSL_VERSION_NUMBER >= 0x10100000L ocsp->certs = SSL_get0_verified_chain(c->ssl->connection); From pluknet at nginx.com Mon Dec 25 20:29:54 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 26 Dec 2023 00:29:54 +0400 Subject: [PATCH] SSL: raised limit for upstream session size In-Reply-To: References: Message-ID: > On 23 Dec 2023, at 01:46, Maxim Dounin wrote: > > Hello! > > On Fri, Dec 22, 2023 at 06:28:34PM +0400, Sergey Kandaurov wrote: > >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1703255284 -14400 >> # Fri Dec 22 18:28:04 2023 +0400 >> # Node ID a463fb67e143c051fd373d1df94e5813a37d5cea >> # Parent 44266e0651c44f530c4aa66e68c1b9464a9acee7 >> SSL: raised limit for upstream session size. >> >> Unlike shared session cache used to store multiple client SSL sessions and >> which may be per a single SSL connection, sessions saved from upstream are >> per upstream server peer, so there is no such multiplier effect, but they >> may be of noticeably larger size due to session tickets being used. >> >> It was observed that session tickets sent from JVM backends may result in >> a decoded session size nearly the previous maximum session size limit of >> 4096 or slightly beyond. Raising the limit allows to save such sessions. > > Session tickets are not expected to be larger than sessions > itself, except by several bytes used for key identification and > encryption overhead. I see no reasons why the limit should be > different in different places. > > And 4096 for an SSL session looks a lot. The only justification I > can assume here is an SSL session with the client certificate (or > even certificate chain) being saved into the session. It might > worth looking into what actually happens here. > Indeed. Both local and peer certificate chains are serialized and encrypted as part of constructing a session ticket. 
Per the original change to support tickets, this is hardcoded and may not be adjusted: https://hg.openjdk.org/jdk/jdk/rev/c2398053ee90#l4.352 https://hg.openjdk.org/jdk/jdk/rev/c2398053ee90#l10.261 -- Sergey Kandaurov From pluknet at nginx.com Tue Dec 26 15:07:37 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 26 Dec 2023 19:07:37 +0400 Subject: [PATCH] Proxy: altered limit_rate to support variables In-Reply-To: References: Message-ID: <59ACCD59-ABEA-49AB-95EE-5A2B4EE30F5C@nginx.com> > On 26 Nov 2023, at 03:37, J Carter wrote: > > # HG changeset patch > # User J Carter > # Date 1700949429 0 > # Sat Nov 25 21:57:09 2023 +0000 > # Node ID 98306e705015758eab0a05103d90e6bdb1da2819 > # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade > Proxy: altered limit_rate to support variables Since it changes the upstream interface, I'd rather say: Upstream: variables support in proxy_limit_rate and friends. The change looks good to me, also it compliments variables support in the stream proxy module's directives. Any particular use-cases you'd like to share for this functionality? > > diff -r f366007dd23a -r 98306e705015 src/http/modules/ngx_http_fastcgi_module.c > --- a/src/http/modules/ngx_http_fastcgi_module.c Tue Nov 14 15:26:02 2023 +0400 > +++ b/src/http/modules/ngx_http_fastcgi_module.c Sat Nov 25 21:57:09 2023 +0000 > @@ -375,7 +375,7 @@ > > { ngx_string("fastcgi_limit_rate"), > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > - ngx_conf_set_size_slot, > + ngx_http_set_complex_value_size_slot, > NGX_HTTP_LOC_CONF_OFFSET, > offsetof(ngx_http_fastcgi_loc_conf_t, upstream.limit_rate), > NULL }, > @@ -2898,7 +2898,7 @@ > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > - conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > + conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > @@ -3015,8 +3015,8 @@ > prev->upstream.buffer_size, > (size_t) ngx_pagesize); > > - ngx_conf_merge_size_value(conf->upstream.limit_rate, > - prev->upstream.limit_rate, 0); > + ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > + prev->upstream.limit_rate, NULL); > > > ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > diff -r f366007dd23a -r 98306e705015 src/http/modules/ngx_http_proxy_module.c > --- a/src/http/modules/ngx_http_proxy_module.c Tue Nov 14 15:26:02 2023 +0400 > +++ b/src/http/modules/ngx_http_proxy_module.c Sat Nov 25 21:57:09 2023 +0000 > @@ -494,7 +494,7 @@ > > { ngx_string("proxy_limit_rate"), > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > - ngx_conf_set_size_slot, > + ngx_http_set_complex_value_size_slot, > NGX_HTTP_LOC_CONF_OFFSET, > offsetof(ngx_http_proxy_loc_conf_t, upstream.limit_rate), > NULL }, > @@ -3371,7 +3371,7 @@ > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > - conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > + conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > @@ -3515,8 +3515,8 @@ > prev->upstream.buffer_size, > (size_t) ngx_pagesize); > > - ngx_conf_merge_size_value(conf->upstream.limit_rate, > - prev->upstream.limit_rate, 0); > + ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > + prev->upstream.limit_rate, NULL); > > 
ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > 8, ngx_pagesize); > diff -r f366007dd23a -r 98306e705015 src/http/modules/ngx_http_scgi_module.c > --- a/src/http/modules/ngx_http_scgi_module.c Tue Nov 14 15:26:02 2023 +0400 > +++ b/src/http/modules/ngx_http_scgi_module.c Sat Nov 25 21:57:09 2023 +0000 > @@ -223,7 +223,7 @@ > > { ngx_string("scgi_limit_rate"), > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > - ngx_conf_set_size_slot, > + ngx_http_set_complex_value_size_slot, > NGX_HTTP_LOC_CONF_OFFSET, > offsetof(ngx_http_scgi_loc_conf_t, upstream.limit_rate), > NULL }, > @@ -1301,7 +1301,7 @@ > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > - conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > + conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > @@ -1413,8 +1413,8 @@ > prev->upstream.buffer_size, > (size_t) ngx_pagesize); > > - ngx_conf_merge_size_value(conf->upstream.limit_rate, > - prev->upstream.limit_rate, 0); > + ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > + prev->upstream.limit_rate, NULL); > > > ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > diff -r f366007dd23a -r 98306e705015 src/http/modules/ngx_http_uwsgi_module.c > --- a/src/http/modules/ngx_http_uwsgi_module.c Tue Nov 14 15:26:02 2023 +0400 > +++ b/src/http/modules/ngx_http_uwsgi_module.c Sat Nov 25 21:57:09 2023 +0000 > @@ -289,7 +289,7 @@ > > { ngx_string("uwsgi_limit_rate"), > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > - ngx_conf_set_size_slot, > + ngx_http_set_complex_value_size_slot, > NGX_HTTP_LOC_CONF_OFFSET, > offsetof(ngx_http_uwsgi_loc_conf_t, upstream.limit_rate), > NULL }, > @@ -1532,7 +1532,7 @@ > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > - conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > + conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > @@ -1656,8 +1656,8 @@ > prev->upstream.buffer_size, > (size_t) ngx_pagesize); > > - ngx_conf_merge_size_value(conf->upstream.limit_rate, > - prev->upstream.limit_rate, 0); > + ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > + prev->upstream.limit_rate, NULL); > > > ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > diff -r f366007dd23a -r 98306e705015 src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c Tue Nov 14 15:26:02 2023 +0400 > +++ b/src/http/ngx_http_upstream.c Sat Nov 25 21:57:09 2023 +0000 > @@ -3236,7 +3236,7 @@ > p->downstream = c; > p->pool = r->pool; > p->log = c->log; > - p->limit_rate = u->conf->limit_rate; > + p->limit_rate = ngx_http_complex_value_size(r, u->conf->limit_rate, 0); > p->start_sec = ngx_time(); > > p->cacheable = u->cacheable || u->store; > diff -r f366007dd23a -r 98306e705015 src/http/ngx_http_upstream.h > --- a/src/http/ngx_http_upstream.h Tue Nov 14 15:26:02 2023 +0400 > +++ b/src/http/ngx_http_upstream.h Sat Nov 25 21:57:09 2023 +0000 > @@ -156,7 +156,7 @@ > > size_t send_lowat; > size_t buffer_size; > - size_t limit_rate; > + ngx_http_complex_value_t *limit_rate; > > size_t busy_buffers_size; > size_t max_temp_file_size; -- Sergey Kandaurov From jiuzhoucui at 163.com Wed Dec 27 02:56:44 2023 From: jiuzhoucui at 163.com 
(Jiuzhou Cui) Date: Wed, 27 Dec 2023 10:56:44 +0800 (CST) Subject: Don't delete timer of write event when it's delayed. Message-ID: <23161f56.1ea5.18ca933f853.Coremail.jiuzhoucui@163.com> Hello! # HG changeset patch # User Jiuzhou Cui # Date 1703645578 -28800 # Wed Dec 27 10:52:58 2023 +0800 # Node ID 474ae07e47272e435d81c0ca9e4867aae35c30ab # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 Don't delete timer of write event when it's delayed. This will make download speed alway zero when limit_rate in body filter. diff -r ee40e2b1d083 -r 474ae07e4727 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Mon Dec 25 21:15:48 2023 +0400 +++ b/src/http/ngx_http_upstream.c Wed Dec 27 10:52:58 2023 +0800 @@ -3787,11 +3787,13 @@ } } - if (downstream->write->active && !downstream->write->ready) { - ngx_add_timer(downstream->write, clcf->send_timeout); - - } else if (downstream->write->timer_set) { - ngx_del_timer(downstream->write); + if (!downstream->write->delayed) { + if (downstream->write->active && !downstream->write->ready) { + ngx_add_timer(downstream->write, clcf->send_timeout); + + } else if (downstream->write->timer_set) { + ngx_del_timer(downstream->write); + } } if (upstream->read->eof || upstream->read->error) { -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Wed Dec 27 11:19:01 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 27 Dec 2023 14:19:01 +0300 Subject: Don't delete timer of write event when it's delayed. In-Reply-To: <23161f56.1ea5.18ca933f853.Coremail.jiuzhoucui@163.com> References: <23161f56.1ea5.18ca933f853.Coremail.jiuzhoucui@163.com> Message-ID: Hello! On Wed, Dec 27, 2023 at 10:56:44AM +0800, Jiuzhou Cui wrote: > Hello! > > > # HG changeset patch > # User Jiuzhou Cui > # Date 1703645578 -28800 > # Wed Dec 27 10:52:58 2023 +0800 > # Node ID 474ae07e47272e435d81c0ca9e4867aae35c30ab > # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 > Don't delete timer of write event when it's delayed. > > > This will make download speed alway zero when limit_rate in body filter. > > > diff -r ee40e2b1d083 -r 474ae07e4727 src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c Mon Dec 25 21:15:48 2023 +0400 > +++ b/src/http/ngx_http_upstream.c Wed Dec 27 10:52:58 2023 +0800 > @@ -3787,11 +3787,13 @@ > } > } > > > - if (downstream->write->active && !downstream->write->ready) { > - ngx_add_timer(downstream->write, clcf->send_timeout); > - > - } else if (downstream->write->timer_set) { > - ngx_del_timer(downstream->write); > + if (!downstream->write->delayed) { > + if (downstream->write->active && !downstream->write->ready) { > + ngx_add_timer(downstream->write, clcf->send_timeout); > + > + } else if (downstream->write->timer_set) { > + ngx_del_timer(downstream->write); > + } > } > > > if (upstream->read->eof || upstream->read->error) { Thank you for the patch. You are patching the ngx_http_upstream_process_non_buffered_request() function, which is, as can be correctly concluded from the function name, is used for non-buffered proxying. Non-buffered proxying is specifically designed to return responses as long as they are available, and is not compatible with limit_rate. 
Moreover, limit_rate is explicitly disabled in the ngx_http_upstream_send_response() function when the relevant handers are set: u->read_event_handler = ngx_http_upstream_process_non_buffered_upstream; r->write_event_handler = ngx_http_upstream_process_non_buffered_downstream; r->limit_rate = 0; r->limit_rate_set = 1; (https://hg.nginx.org/nginx/file/release-1.25.3/src/http/ngx_http_upstream.c#l3092) As such, the issue you are trying to fix is not expected to appear. Could you please clarify the configuration you are seeing the issue with, and steps to reproduce the issue? -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Wed Dec 27 11:48:04 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 27 Dec 2023 14:48:04 +0300 Subject: [PATCH 00 of 12] HTTP/3 proxying to upstreams In-Reply-To: References: Message-ID: Hello! On Mon, Dec 25, 2023 at 07:52:41PM +0300, Vladimir Homutov via nginx-devel wrote: > Hello, everyone, > > and Merry Christmas to all! > > I'm a developer of an nginx fork Angie. Recently we implemented > an HTTP/3 proxy support in our fork [1]. > > We'd like to contribute this functionality to nginx OSS community. > Hence here is a patch series backported from Angie to the current > head of nginx mainline branch (1.25.3) Thank you for the patches. Are there any expected benefits from HTTP/3 being used as a protocol to upstream servers? [...] > Probably, the HTTP/3 proxy should be implemented in a separate module. > Currently it is a patch to the HTTP proxy module to minimize boilerplate. Sure. I'm very much against the idea of mixing different upstream protocols in a single protocol module. (OTOH, there are some uncertain plans to make proxy module able to work with other protocols based on the scheme, such as in "proxy_pass fastcgi://127.0.0.1:9000;". This is mostly irrelevant though, and might never happen.) [...] -- Maxim Dounin http://mdounin.ru/ From jiuzhoucui at 163.com Wed Dec 27 12:38:15 2023 From: jiuzhoucui at 163.com (Jiuzhou Cui) Date: Wed, 27 Dec 2023 20:38:15 +0800 (CST) Subject: Don't delete timer of write event when it's delayed. In-Reply-To: References: <23161f56.1ea5.18ca933f853.Coremail.jiuzhoucui@163.com> Message-ID: <7f7ba8ac.6da7.18cab485bf1.Coremail.jiuzhoucui@163.com> Thank you for your reply. Firstly, we meet the problem. And this patch works for me. My scenario is after send response body about 10-20MB, we just set: 1. limit_rate = 1KB 2. limit_rate_after = body_bytes_sent 3. proxy_buffering = "on" (I think this is the key issue) At the request begining, we didn't set proxy_buffering = "on" and limit_rate. At 2023-12-27 19:19:01, "Maxim Dounin" wrote: >Hello! > >On Wed, Dec 27, 2023 at 10:56:44AM +0800, Jiuzhou Cui wrote: > >> Hello! >> >> >> # HG changeset patch >> # User Jiuzhou Cui >> # Date 1703645578 -28800 >> # Wed Dec 27 10:52:58 2023 +0800 >> # Node ID 474ae07e47272e435d81c0ca9e4867aae35c30ab >> # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 >> Don't delete timer of write event when it's delayed. >> >> >> This will make download speed alway zero when limit_rate in body filter. 
>> >> >> diff -r ee40e2b1d083 -r 474ae07e4727 src/http/ngx_http_upstream.c >> --- a/src/http/ngx_http_upstream.c Mon Dec 25 21:15:48 2023 +0400 >> +++ b/src/http/ngx_http_upstream.c Wed Dec 27 10:52:58 2023 +0800 >> @@ -3787,11 +3787,13 @@ >> } >> } >> >> >> - if (downstream->write->active && !downstream->write->ready) { >> - ngx_add_timer(downstream->write, clcf->send_timeout); >> - >> - } else if (downstream->write->timer_set) { >> - ngx_del_timer(downstream->write); >> + if (!downstream->write->delayed) { >> + if (downstream->write->active && !downstream->write->ready) { >> + ngx_add_timer(downstream->write, clcf->send_timeout); >> + >> + } else if (downstream->write->timer_set) { >> + ngx_del_timer(downstream->write); >> + } >> } >> >> >> if (upstream->read->eof || upstream->read->error) { > >Thank you for the patch. > >You are patching the >ngx_http_upstream_process_non_buffered_request() function, which >is, as can be correctly concluded from the function name, is used >for non-buffered proxying. Non-buffered proxying is specifically >designed to return responses as long as they are available, and is >not compatible with limit_rate. Moreover, limit_rate is >explicitly disabled in the ngx_http_upstream_send_response() >function when the relevant handers are set: > > u->read_event_handler = ngx_http_upstream_process_non_buffered_upstream; > r->write_event_handler = > ngx_http_upstream_process_non_buffered_downstream; > > r->limit_rate = 0; > r->limit_rate_set = 1; > >(https://hg.nginx.org/nginx/file/release-1.25.3/src/http/ngx_http_upstream.c#l3092) > >As such, the issue you are trying to fix is not expected to >appear. > >Could you please clarify the configuration you are seeing the >issue with, and steps to reproduce the issue? > >-- >Maxim Dounin >http://mdounin.ru/ >_______________________________________________ >nginx-devel mailing list >nginx-devel at nginx.org >https://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: From vl at inspert.ru Wed Dec 27 13:17:38 2023 From: vl at inspert.ru (Vladimir Homutov) Date: Wed, 27 Dec 2023 16:17:38 +0300 Subject: [PATCH 00 of 12] HTTP/3 proxying to upstreams In-Reply-To: References: Message-ID: On Wed, Dec 27, 2023 at 02:48:04PM +0300, Maxim Dounin wrote: > Hello! > > On Mon, Dec 25, 2023 at 07:52:41PM +0300, Vladimir Homutov via nginx-devel wrote: > > > Hello, everyone, > > > > and Merry Christmas to all! > > > > I'm a developer of an nginx fork Angie. Recently we implemented > > an HTTP/3 proxy support in our fork [1]. > > > > We'd like to contribute this functionality to nginx OSS community. > > Hence here is a patch series backported from Angie to the current > > head of nginx mainline branch (1.25.3) > > Thank you for the patches. > > Are there any expected benefits from HTTP/3 being used as a > protocol to upstream servers? Personally, I don't see much. Probably, faster connection establishing to due 0RTT support (need to be implemented) and better multiplexing (again, if implemented wisely). I have made some simple benchmarks, and it looks more or less similar to usual SSL connections. > > [...] > > > Probably, the HTTP/3 proxy should be implemented in a separate module. > > Currently it is a patch to the HTTP proxy module to minimize boilerplate. > > Sure. I'm very much against the idea of mixing different upstream > protocols in a single protocol module. noted. 
> (OTOH, there are some uncertain plans to make proxy module able to > work with other protocols based on the scheme, such as in > "proxy_pass fastcgi://127.0.0.1:9000;". This is mostly irrelevant > though, and might never happen.) well, currently we have separate proxying modules that are similar enough to think about merging them like suggested. Not sure if one big module with methods will worth it, as semantics is slightly different. proxy modules are already addons on top of upstream module, which does the heavy lifting. What requires improvement is probably the configuration that makes user to remember many similar directives doing the same thing but for different protocols. From mdounin at mdounin.ru Wed Dec 27 13:58:48 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 27 Dec 2023 16:58:48 +0300 Subject: Don't delete timer of write event when it's delayed. In-Reply-To: <7f7ba8ac.6da7.18cab485bf1.Coremail.jiuzhoucui@163.com> References: <23161f56.1ea5.18ca933f853.Coremail.jiuzhoucui@163.com> <7f7ba8ac.6da7.18cab485bf1.Coremail.jiuzhoucui@163.com> Message-ID: Hello! On Wed, Dec 27, 2023 at 08:38:15PM +0800, Jiuzhou Cui wrote: > Thank you for your reply. > > Firstly, we meet the problem. And this patch works for me. > > My scenario is after send response body about 10-20MB, we just set: > 1. limit_rate = 1KB > 2. limit_rate_after = body_bytes_sent > 3. proxy_buffering = "on" (I think this is the key issue) > > At the request begining, we didn't set proxy_buffering = "on" and limit_rate. Sorry, not sure what you are trying to say. You modify r->limit_rate and r->limit_rate_after from your module after sending some parts of the response? This is not expected to work due to the mentioned design limitation of non-buffered proxying, and generally looks like a bug in your module, it shouldn't do this. Further, it is not possible to change upstream buffering after nginx started sending the response. It's a one-time choice, and modifications of r->upstream->buffering won't do anything (though also incorrect, as it's not something expected to be modified by modules). Or I understood something incorrectly? -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Wed Dec 27 14:34:58 2023 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 27 Dec 2023 18:34:58 +0400 Subject: [PATCH 1 of 3] Stream: socket peek in preread phase In-Reply-To: <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> References: <966331bb4936888ef2f0.1699610839@arut-laptop> <20231213140659.nt4kcbem26hkyrsd@N00W24XTQX> Message-ID: <20231227143458.n36haxmr57zfhdua@Y9MQ9X2QVV> On Wed, Dec 13, 2023 at 06:06:59PM +0400, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1702476295 -14400 > # Wed Dec 13 18:04:55 2023 +0400 > # Node ID 844486cdd43a32d10b78493d7e7b80e9e2239d7e > # Parent 6c8595b77e667bd58fd28186939ed820f2e55e0e > Stream: socket peek in preread phase. > > Previously, preread buffer was always read out from socket, which made it > impossible to terminate SSL on the connection without introducing additional > SSL BIOs. The following patches will rely on this. > > Now, when possible, recv(MSG_PEEK) is used instead, which keeps data in socket. > It's called if SSL is not already terminated and if an egde-triggered event > method is used. For epoll, EPOLLRDHUP support is also required. 
> > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -10,6 +10,10 @@ > #include > > > +static ngx_int_t ngx_stream_preread_peek(ngx_stream_session_t *s, > + ngx_stream_phase_handler_t *ph); > +static ngx_int_t ngx_stream_preread(ngx_stream_session_t *s, > + ngx_stream_phase_handler_t *ph); > static ngx_int_t ngx_stream_core_preconfiguration(ngx_conf_t *cf); > static void *ngx_stream_core_create_main_conf(ngx_conf_t *cf); > static char *ngx_stream_core_init_main_conf(ngx_conf_t *cf, void *conf); > @@ -203,8 +207,6 @@ ngx_int_t > ngx_stream_core_preread_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph) > { > - size_t size; > - ssize_t n; > ngx_int_t rc; > ngx_connection_t *c; > ngx_stream_core_srv_conf_t *cscf; > @@ -217,56 +219,40 @@ ngx_stream_core_preread_phase(ngx_stream > > if (c->read->timedout) { > rc = NGX_STREAM_OK; > + goto done; > + } > > - } else if (c->read->timer_set) { > - rc = NGX_AGAIN; > + if (!c->read->timer_set) { > + rc = ph->handler(s); > > - } else { > - rc = ph->handler(s); > + if (rc != NGX_AGAIN) { > + goto done; > + } > } > > - while (rc == NGX_AGAIN) { > - > + if (c->buffer == NULL) { > + c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > if (c->buffer == NULL) { > - c->buffer = ngx_create_temp_buf(c->pool, cscf->preread_buffer_size); > - if (c->buffer == NULL) { > - rc = NGX_ERROR; > - break; > - } > + rc = NGX_ERROR; > + goto done; > } > - > - size = c->buffer->end - c->buffer->last; > - > - if (size == 0) { > - ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > - rc = NGX_STREAM_BAD_REQUEST; > - break; > - } > + } > > - if (c->read->eof) { > - rc = NGX_STREAM_OK; > - break; > - } > - > - if (!c->read->ready) { > - break; > - } > - > - n = c->recv(c, c->buffer->last, size); > + if (c->ssl == NULL > + && (ngx_event_flags & NGX_USE_CLEAR_EVENT) > + && ((ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0 > +#if (NGX_HAVE_EPOLLRDHUP) > + || ngx_use_epoll_rdhup > +#endif BTW, c->ssl needs to be guarded under an appropriate macro test. Probably, it makes sense to rewrite this in a more readable way. 
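(A standalone illustration of the peek technique described above; this is not part of the patch and the helper name is hypothetical. MSG_PEEK copies the pending bytes but leaves them queued in the socket, so a later SSL handshake or a plain recv() still sees them:

    #include <sys/types.h>
    #include <sys/socket.h>

    /* Illustrative only: look at bytes pending on a socket without
     * consuming them.  Returns the number of bytes copied into buf,
     * 0 on EOF, or -1 on error (including EAGAIN when nothing has
     * arrived yet on a non-blocking socket). */
    static ssize_t
    preread_peek(int fd, unsigned char *buf, size_t size)
    {
        return recv(fd, buf, size, MSG_PEEK);
    }

The real handling of EAGAIN, EOF and a full preread buffer is in ngx_stream_preread_peek() in the diff below.)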
For example: : peak = 0; : : #if (NGX_HAVE_KQUEUE) : if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { : peak = 1; : } : #endif : : #if (NGX_HAVE_EPOLLRDHUP) : if ((ngx_event_flags & NGX_USE_EPOLL_EVENT) && ngx_use_epoll_rdhup) { : peak = 1; : } : #endif : : #if (NGX_STREAM_SSL) : if (c->ssl) { : peak = 0; : } : #endif > + )) > + { > + rc = ngx_stream_preread_peek(s, ph); > > - if (n == NGX_ERROR || n == 0) { > - rc = NGX_STREAM_OK; > - break; > - } > + } else { > + rc = ngx_stream_preread(s, ph); > + } > > - if (n == NGX_AGAIN) { > - break; > - } > - > - c->buffer->last += n; > - > - rc = ph->handler(s); > - } > +done: > > if (rc == NGX_AGAIN) { > if (ngx_handle_read_event(c->read, 0) != NGX_OK) { > @@ -311,6 +297,100 @@ ngx_stream_core_preread_phase(ngx_stream > } > > > +static ngx_int_t > +ngx_stream_preread_peek(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > +{ > + ssize_t n; > + ngx_int_t rc; > + ngx_err_t err; > + ngx_connection_t *c; > + > + c = s->connection; > + > + n = recv(c->fd, (char *) c->buffer->last, > + c->buffer->end - c->buffer->last, MSG_PEEK); > + > + err = ngx_socket_errno; > + > + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0, "stream recv(): %z", n); > + > + if (n == -1) { > + if (err == NGX_EAGAIN) { > + c->read->ready = 0; > + return NGX_AGAIN; > + } > + > + ngx_connection_error(c, err, "recv() failed"); > + return NGX_STREAM_OK; > + } > + > + if (n == 0) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last += n; > + > + rc = ph->handler(s); > + > + if (rc != NGX_AGAIN) { > + c->buffer->last = c->buffer->pos; > + return rc; > + } > + > + if (c->buffer->last == c->buffer->end) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > + return NGX_STREAM_BAD_REQUEST; > + } > + > + if (c->read->pending_eof) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last = c->buffer->pos; > + > + return NGX_AGAIN; > +} > + > + > +static ngx_int_t > +ngx_stream_preread(ngx_stream_session_t *s, ngx_stream_phase_handler_t *ph) > +{ > + ssize_t n; > + ngx_int_t rc; > + ngx_connection_t *c; > + > + c = s->connection; > + > + while (c->read->ready) { > + > + n = c->recv(c, c->buffer->last, c->buffer->end - c->buffer->last); > + > + if (n == NGX_AGAIN) { > + return NGX_AGAIN; > + } > + > + if (n == NGX_ERROR || n == 0) { > + return NGX_STREAM_OK; > + } > + > + c->buffer->last += n; > + > + rc = ph->handler(s); > + > + if (rc != NGX_AGAIN) { > + return rc; > + } > + > + if (c->buffer->last == c->buffer->end) { > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "preread buffer full"); > + return NGX_STREAM_BAD_REQUEST; > + } > + } > + > + return NGX_AGAIN; > +} > + > + > ngx_int_t > ngx_stream_core_content_phase(ngx_stream_session_t *s, > ngx_stream_phase_handler_t *ph) From jiuzhoucui at 163.com Thu Dec 28 03:03:36 2023 From: jiuzhoucui at 163.com (Jiuzhou Cui) Date: Thu, 28 Dec 2023 11:03:36 +0800 (CST) Subject: Don't delete timer of write event when it's delayed. In-Reply-To: References: <23161f56.1ea5.18ca933f853.Coremail.jiuzhoucui@163.com> <7f7ba8ac.6da7.18cab485bf1.Coremail.jiuzhoucui@163.com> Message-ID: <7473e6ab.20f9.18cae609f16.Coremail.jiuzhoucui@163.com> >You modify r->limit_rate and r->limit_rate_after from your module >after sending some parts of the response? Yes. This isn't a bug but a requirement, so we need handle it. OK, I got your idea and no more questions. Thanks. At 2023-12-27 21:58:48, "Maxim Dounin" wrote: >Hello! > >On Wed, Dec 27, 2023 at 08:38:15PM +0800, Jiuzhou Cui wrote: > >> Thank you for your reply. 
>> >> Firstly, we meet the problem. And this patch works for me. >> >> My scenario is after send response body about 10-20MB, we just set: >> 1. limit_rate = 1KB >> 2. limit_rate_after = body_bytes_sent >> 3. proxy_buffering = "on" (I think this is the key issue) >> >> At the request begining, we didn't set proxy_buffering = "on" and limit_rate. > >Sorry, not sure what you are trying to say. > >You modify r->limit_rate and r->limit_rate_after from your module >after sending some parts of the response? This is not expected to >work due to the mentioned design limitation of non-buffered >proxying, and generally looks like a bug in your module, it >shouldn't do this. > >Further, it is not possible to change upstream buffering after >nginx started sending the response. It's a one-time choice, and >modifications of r->upstream->buffering won't do anything (though >also incorrect, as it's not something expected to be modified by >modules). > >Or I understood something incorrectly? > >-- >Maxim Dounin >http://mdounin.ru/ >_______________________________________________ >nginx-devel mailing list >nginx-devel at nginx.org >https://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: From jordanc.carter at outlook.com Thu Dec 28 10:48:20 2023 From: jordanc.carter at outlook.com (J Carter) Date: Thu, 28 Dec 2023 10:48:20 +0000 Subject: [PATCH] Proxy: altered limit_rate to support variables In-Reply-To: <59ACCD59-ABEA-49AB-95EE-5A2B4EE30F5C@nginx.com> References: <59ACCD59-ABEA-49AB-95EE-5A2B4EE30F5C@nginx.com> Message-ID: Hello, Thanks for the review and feedback. On Tue, 26 Dec 2023 19:07:37 +0400 Sergey Kandaurov wrote: > > On 26 Nov 2023, at 03:37, J Carter wrote: > > > > # HG changeset patch > > # User J Carter > > # Date 1700949429 0 > > # Sat Nov 25 21:57:09 2023 +0000 > > # Node ID 98306e705015758eab0a05103d90e6bdb1da2819 > > # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade > > Proxy: altered limit_rate to support variables > > Since it changes the upstream interface, I'd rather say: > > Upstream: variables support in proxy_limit_rate and friends. > Yep, makes sense. > > The change looks good to me, also it compliments variables support > in the stream proxy module's directives. > Any particular use-cases you'd like to share for this functionality? > There are several, many are quite 'involved'. A variable proxy_limit_rate (driven by external means), combined with max_conns set on upstream servers gives a reasonable method of dynamically capping upstream bandwidth utilization. One scenario in which this has come up is when nginx is used a caching server, as often upstream servers cannot support 100% of the 'real' traffic load. When the cache is cleared or many items expire concurrently, requests reaching the upstream can spike and it is necessary to adjust (down) proxy_limit_rate in a semi continuous fashion to prevent overloading upstreams as they serve responses. A novel way of showing this using only core modules is: upstream backend { server 127.0.0.1:8080 max_conns=100; ... } ... server { listen 80; location / { ... proxy_pass http://backend; proxy_hide_header rate; proxy_limit_rate $upstream_http_rate; } } ... server { listen 8080; location / { #serve a file ... add_header rate $my_rate; } } Of course, this is most useful where 'backend' isn't nginx, and doesn't have it's own client side bandwidth limiting (ie. limit_rate, limit_rate_after, limit_conn) - I've just used it here for this example for convenience. 
> > > > diff -r f366007dd23a -r 98306e705015 src/http/modules/ngx_http_fastcgi_module.c > > --- a/src/http/modules/ngx_http_fastcgi_module.c Tue Nov 14 15:26:02 2023 +0400 > > +++ b/src/http/modules/ngx_http_fastcgi_module.c Sat Nov 25 21:57:09 2023 +0000 > > @@ -375,7 +375,7 @@ > > > > { ngx_string("fastcgi_limit_rate"), > > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > - ngx_conf_set_size_slot, > > + ngx_http_set_complex_value_size_slot, > > NGX_HTTP_LOC_CONF_OFFSET, > > offsetof(ngx_http_fastcgi_loc_conf_t, upstream.limit_rate), > > NULL }, > > @@ -2898,7 +2898,7 @@ > > > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > > - conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > > + conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > > @@ -3015,8 +3015,8 @@ > > prev->upstream.buffer_size, > > (size_t) ngx_pagesize); > > > > - ngx_conf_merge_size_value(conf->upstream.limit_rate, > > - prev->upstream.limit_rate, 0); > > + ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > > + prev->upstream.limit_rate, NULL); > > > > > > ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > > diff -r f366007dd23a -r 98306e705015 src/http/modules/ngx_http_proxy_module.c > > --- a/src/http/modules/ngx_http_proxy_module.c Tue Nov 14 15:26:02 2023 +0400 > > +++ b/src/http/modules/ngx_http_proxy_module.c Sat Nov 25 21:57:09 2023 +0000 > > @@ -494,7 +494,7 @@ > > > > { ngx_string("proxy_limit_rate"), > > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > - ngx_conf_set_size_slot, > > + ngx_http_set_complex_value_size_slot, > > NGX_HTTP_LOC_CONF_OFFSET, > > offsetof(ngx_http_proxy_loc_conf_t, upstream.limit_rate), > > NULL }, > > @@ -3371,7 +3371,7 @@ > > > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > > - conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > > + conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > > @@ -3515,8 +3515,8 @@ > > prev->upstream.buffer_size, > > (size_t) ngx_pagesize); > > > > - ngx_conf_merge_size_value(conf->upstream.limit_rate, > > - prev->upstream.limit_rate, 0); > > + ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > > + prev->upstream.limit_rate, NULL); > > > > ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > > 8, ngx_pagesize); > > diff -r f366007dd23a -r 98306e705015 src/http/modules/ngx_http_scgi_module.c > > --- a/src/http/modules/ngx_http_scgi_module.c Tue Nov 14 15:26:02 2023 +0400 > > +++ b/src/http/modules/ngx_http_scgi_module.c Sat Nov 25 21:57:09 2023 +0000 > > @@ -223,7 +223,7 @@ > > > > { ngx_string("scgi_limit_rate"), > > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > - ngx_conf_set_size_slot, > > + ngx_http_set_complex_value_size_slot, > > NGX_HTTP_LOC_CONF_OFFSET, > > offsetof(ngx_http_scgi_loc_conf_t, upstream.limit_rate), > > NULL }, > > @@ -1301,7 +1301,7 @@ > > > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > > - conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > > + conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > > 
conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > > @@ -1413,8 +1413,8 @@ > > prev->upstream.buffer_size, > > (size_t) ngx_pagesize); > > > > - ngx_conf_merge_size_value(conf->upstream.limit_rate, > > - prev->upstream.limit_rate, 0); > > + ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > > + prev->upstream.limit_rate, NULL); > > > > > > ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > > diff -r f366007dd23a -r 98306e705015 src/http/modules/ngx_http_uwsgi_module.c > > --- a/src/http/modules/ngx_http_uwsgi_module.c Tue Nov 14 15:26:02 2023 +0400 > > +++ b/src/http/modules/ngx_http_uwsgi_module.c Sat Nov 25 21:57:09 2023 +0000 > > @@ -289,7 +289,7 @@ > > > > { ngx_string("uwsgi_limit_rate"), > > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > - ngx_conf_set_size_slot, > > + ngx_http_set_complex_value_size_slot, > > NGX_HTTP_LOC_CONF_OFFSET, > > offsetof(ngx_http_uwsgi_loc_conf_t, upstream.limit_rate), > > NULL }, > > @@ -1532,7 +1532,7 @@ > > > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > > - conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > > + conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > > @@ -1656,8 +1656,8 @@ > > prev->upstream.buffer_size, > > (size_t) ngx_pagesize); > > > > - ngx_conf_merge_size_value(conf->upstream.limit_rate, > > - prev->upstream.limit_rate, 0); > > + ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > > + prev->upstream.limit_rate, NULL); > > > > > > ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > > diff -r f366007dd23a -r 98306e705015 src/http/ngx_http_upstream.c > > --- a/src/http/ngx_http_upstream.c Tue Nov 14 15:26:02 2023 +0400 > > +++ b/src/http/ngx_http_upstream.c Sat Nov 25 21:57:09 2023 +0000 > > @@ -3236,7 +3236,7 @@ > > p->downstream = c; > > p->pool = r->pool; > > p->log = c->log; > > - p->limit_rate = u->conf->limit_rate; > > + p->limit_rate = ngx_http_complex_value_size(r, u->conf->limit_rate, 0); > > p->start_sec = ngx_time(); > > > > p->cacheable = u->cacheable || u->store; > > diff -r f366007dd23a -r 98306e705015 src/http/ngx_http_upstream.h > > --- a/src/http/ngx_http_upstream.h Tue Nov 14 15:26:02 2023 +0400 > > +++ b/src/http/ngx_http_upstream.h Sat Nov 25 21:57:09 2023 +0000 > > @@ -156,7 +156,7 @@ > > > > size_t send_lowat; > > size_t buffer_size; > > - size_t limit_rate; > > + ngx_http_complex_value_t *limit_rate; > > > > size_t busy_buffers_size; > > size_t max_temp_file_size; > From mdounin at mdounin.ru Thu Dec 28 13:31:41 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 28 Dec 2023 16:31:41 +0300 Subject: [PATCH 00 of 12] HTTP/3 proxying to upstreams In-Reply-To: References: Message-ID: Hello! On Wed, Dec 27, 2023 at 04:17:38PM +0300, Vladimir Homutov via nginx-devel wrote: > On Wed, Dec 27, 2023 at 02:48:04PM +0300, Maxim Dounin wrote: > > Hello! > > > > On Mon, Dec 25, 2023 at 07:52:41PM +0300, Vladimir Homutov via nginx-devel wrote: > > > > > Hello, everyone, > > > > > > and Merry Christmas to all! > > > > > > I'm a developer of an nginx fork Angie. Recently we implemented > > > an HTTP/3 proxy support in our fork [1]. > > > > > > We'd like to contribute this functionality to nginx OSS community. 
> > > Hence here is a patch series backported from Angie to the current > > > head of nginx mainline branch (1.25.3) > > > > Thank you for the patches. > > > > Are there any expected benefits from HTTP/3 being used as a > > protocol to upstream servers? > > Personally, I don't see much. > > Probably, faster connection establishing to due 0RTT support (need to be > implemented) and better multiplexing (again, if implemented wisely). > I have made some simple benchmarks, and it looks more or less similar > to usual SSL connections. Thanks for the details. Multiplexing is available since introduction of the FastCGI protocol, yet to see it working in upstream connections. As for 0-RTT, using keepalive connections is probably more efficient anyway (and not really needed for upstream connections in most cases as well). > > > > [...] > > > > > Probably, the HTTP/3 proxy should be implemented in a separate module. > > > Currently it is a patch to the HTTP proxy module to minimize boilerplate. > > > > Sure. I'm very much against the idea of mixing different upstream > > protocols in a single protocol module. > > noted. > > > (OTOH, there are some uncertain plans to make proxy module able to > > work with other protocols based on the scheme, such as in > > "proxy_pass fastcgi://127.0.0.1:9000;". This is mostly irrelevant > > though, and might never happen.) > > well, currently we have separate proxying modules that are similar enough to > think about merging them like suggested. Not sure if one big module with > methods will worth it, as semantics is slightly different. > > proxy modules are already addons on top of upstream module, which does > the heavy lifting. What requires improvement is probably the > configuration that makes user to remember many similar directives doing > the same thing but for different protocols. Yep, making things easier to configure (and modify, if something related to configuration directives is changed or additional protocol is added) is the main motivator. Still, there are indeed differences between protocol modules, and this makes single module inconvenient sometimes. As such, plans are uncertain (and the previous attempt to do this failed miserably). -- Maxim Dounin http://mdounin.ru/ From vl at inspert.ru Thu Dec 28 14:23:38 2023 From: vl at inspert.ru (Vladimir Homutov) Date: Thu, 28 Dec 2023 17:23:38 +0300 Subject: [PATCH 00 of 12] HTTP/3 proxying to upstreams In-Reply-To: References: Message-ID: On Thu, Dec 28, 2023 at 04:31:41PM +0300, Maxim Dounin wrote: > Hello! > > On Wed, Dec 27, 2023 at 04:17:38PM +0300, Vladimir Homutov via nginx-devel wrote: > > > On Wed, Dec 27, 2023 at 02:48:04PM +0300, Maxim Dounin wrote: > > > Hello! > > > > > > On Mon, Dec 25, 2023 at 07:52:41PM +0300, Vladimir Homutov via nginx-devel wrote: > > > > > > > Hello, everyone, > > > > > > > > and Merry Christmas to all! > > > > > > > > I'm a developer of an nginx fork Angie. Recently we implemented > > > > an HTTP/3 proxy support in our fork [1]. > > > > > > > > We'd like to contribute this functionality to nginx OSS community. > > > > Hence here is a patch series backported from Angie to the current > > > > head of nginx mainline branch (1.25.3) > > > > > > Thank you for the patches. > > > > > > Are there any expected benefits from HTTP/3 being used as a > > > protocol to upstream servers? > > > > Personally, I don't see much. > > > > Probably, faster connection establishing to due 0RTT support (need to be > > implemented) and better multiplexing (again, if implemented wisely). 
> > I have made some simple benchmarks, and it looks more or less similar > > to usual SSL connections. > > Thanks for the details. > > Multiplexing is available since introduction of the FastCGI > protocol, yet to see it working in upstream connections. As for > 0-RTT, using keepalive connections is probably more efficient > anyway (and not really needed for upstream connections in most > cases as well). With HTTP/3 and keepalive we can have just one quic "connection" per upstream server (in extreme). We perform heavy handshake once, and leave it open. Next we just create HTTP/3 streams to perform request. They can perfectly run in parallel and use same quic connection. Probably, this is something worth implementing, with limitations of course: we don't want to mix requests from different (classes of) clients in same connection, we don't want eternal life of such connection and we need means to control level of such multiplexing. > > > > > > > [...] > > > > > > > Probably, the HTTP/3 proxy should be implemented in a separate module. > > > > Currently it is a patch to the HTTP proxy module to minimize boilerplate. > > > > > > Sure. I'm very much against the idea of mixing different upstream > > > protocols in a single protocol module. > > > > noted. > > > > > (OTOH, there are some uncertain plans to make proxy module able to > > > work with other protocols based on the scheme, such as in > > > "proxy_pass fastcgi://127.0.0.1:9000;". This is mostly irrelevant > > > though, and might never happen.) > > > > well, currently we have separate proxying modules that are similar enough to > > think about merging them like suggested. Not sure if one big module with > > methods will worth it, as semantics is slightly different. > > > > proxy modules are already addons on top of upstream module, which does > > the heavy lifting. What requires improvement is probably the > > configuration that makes user to remember many similar directives doing > > the same thing but for different protocols. > > Yep, making things easier to configure (and modify, if something > related to configuration directives is changed or additional > protocol is added) is the main motivator. Still, there are indeed > differences between protocol modules, and this makes single module > inconvenient sometimes. As such, plans are uncertain (and the > previous attempt to do this failed miserably). > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From jordanc.carter at outlook.com Thu Dec 28 15:59:28 2023 From: jordanc.carter at outlook.com (J Carter) Date: Thu, 28 Dec 2023 15:59:28 +0000 Subject: [PATCH 00 of 12] HTTP/3 proxying to upstreams In-Reply-To: References: Message-ID: On Thu, 28 Dec 2023 17:23:38 +0300 Vladimir Homutov via nginx-devel wrote: > On Thu, Dec 28, 2023 at 04:31:41PM +0300, Maxim Dounin wrote: > > Hello! > > > > On Wed, Dec 27, 2023 at 04:17:38PM +0300, Vladimir Homutov via nginx-devel wrote: > > > > > On Wed, Dec 27, 2023 at 02:48:04PM +0300, Maxim Dounin wrote: > > > > Hello! > > > > > > > > On Mon, Dec 25, 2023 at 07:52:41PM +0300, Vladimir Homutov via nginx-devel wrote: > > > > > > > > > Hello, everyone, > > > > > > > > > > and Merry Christmas to all! > > > > > > > > > > I'm a developer of an nginx fork Angie. Recently we implemented > > > > > an HTTP/3 proxy support in our fork [1]. 
> > > > > > > > > > We'd like to contribute this functionality to nginx OSS community. > > > > > Hence here is a patch series backported from Angie to the current > > > > > head of nginx mainline branch (1.25.3) > > > > > > > > Thank you for the patches. > > > > > > > > Are there any expected benefits from HTTP/3 being used as a > > > > protocol to upstream servers? > > > > > > Personally, I don't see much. > > > > > > Probably, faster connection establishing to due 0RTT support (need to be > > > implemented) and better multiplexing (again, if implemented wisely). > > > I have made some simple benchmarks, and it looks more or less similar > > > to usual SSL connections. > > > > Thanks for the details. > > > > Multiplexing is available since introduction of the FastCGI > > protocol, yet to see it working in upstream connections. As for > > 0-RTT, using keepalive connections is probably more efficient > > anyway (and not really needed for upstream connections in most > > cases as well). > > With HTTP/3 and keepalive we can have just one quic "connection" per upstream > server (in extreme). We perform heavy handshake once, and leave it open. > Next we just create HTTP/3 streams to perform request. They can perfectly > run in parallel and use same quic connection. Probably, this is something > worth implementing, with limitations of course: we don't want to mix > requests from different (classes of) clients in same connection, we > don't want eternal life of such connection and we need means to control > level of such multiplexing. > Those heavy handshakes wouldn't be the only concern either... Lack of upstream multiplexing has come up as a concern in the past with the grpc module (which lacks it) due to that amplification effect of client side h2 connections and streams being translated into x*y upstream connections. This poses a danger of ephemeral port exhaustion when targeting relatively few upstream servers (such as proxying to an L4 load balancer instead of direct to application servers). This necessitates provisioning a ton of VIPs and using proxy_bind (which isn't always practical / is a pain). It would be exactly the same for h3 (and more so once grpc over h3 eventually becomes solid, especially bidi). > > > > > > > > > > [...] > > > > > > > > > Probably, the HTTP/3 proxy should be implemented in a separate module. > > > > > Currently it is a patch to the HTTP proxy module to minimize boilerplate. > > > > > > > > Sure. I'm very much against the idea of mixing different upstream > > > > protocols in a single protocol module. > > > > > > noted. > > > > > > > (OTOH, there are some uncertain plans to make proxy module able to > > > > work with other protocols based on the scheme, such as in > > > > "proxy_pass fastcgi://127.0.0.1:9000;". This is mostly irrelevant > > > > though, and might never happen.) > > > > > > well, currently we have separate proxying modules that are similar enough to > > > think about merging them like suggested. Not sure if one big module with > > > methods will worth it, as semantics is slightly different. > > > > > > proxy modules are already addons on top of upstream module, which does > > > the heavy lifting. What requires improvement is probably the > > > configuration that makes user to remember many similar directives doing > > > the same thing but for different protocols. > > > > Yep, making things easier to configure (and modify, if something > > related to configuration directives is changed or additional > > protocol is added) is the main motivator. 
Still, there are indeed > > differences between protocol modules, and this makes single module > > inconvenient sometimes. As such, plans are uncertain (and the > > previous attempt to do this failed miserably). > > > > From mdounin at mdounin.ru Fri Dec 29 11:44:18 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 29 Dec 2023 14:44:18 +0300 Subject: [PATCH 00 of 12] HTTP/3 proxying to upstreams In-Reply-To: References: Message-ID: Hello! On Thu, Dec 28, 2023 at 05:23:38PM +0300, Vladimir Homutov via nginx-devel wrote: > On Thu, Dec 28, 2023 at 04:31:41PM +0300, Maxim Dounin wrote: > > Hello! > > > > On Wed, Dec 27, 2023 at 04:17:38PM +0300, Vladimir Homutov via nginx-devel wrote: > > > > > On Wed, Dec 27, 2023 at 02:48:04PM +0300, Maxim Dounin wrote: > > > > Hello! > > > > > > > > On Mon, Dec 25, 2023 at 07:52:41PM +0300, Vladimir Homutov via nginx-devel wrote: > > > > > > > > > Hello, everyone, > > > > > > > > > > and Merry Christmas to all! > > > > > > > > > > I'm a developer of an nginx fork Angie. Recently we implemented > > > > > an HTTP/3 proxy support in our fork [1]. > > > > > > > > > > We'd like to contribute this functionality to nginx OSS community. > > > > > Hence here is a patch series backported from Angie to the current > > > > > head of nginx mainline branch (1.25.3) > > > > > > > > Thank you for the patches. > > > > > > > > Are there any expected benefits from HTTP/3 being used as a > > > > protocol to upstream servers? > > > > > > Personally, I don't see much. > > > > > > Probably, faster connection establishing to due 0RTT support (need to be > > > implemented) and better multiplexing (again, if implemented wisely). > > > I have made some simple benchmarks, and it looks more or less similar > > > to usual SSL connections. > > > > Thanks for the details. > > > > Multiplexing is available since introduction of the FastCGI > > protocol, yet to see it working in upstream connections. As for > > 0-RTT, using keepalive connections is probably more efficient > > anyway (and not really needed for upstream connections in most > > cases as well). > > With HTTP/3 and keepalive we can have just one quic "connection" per upstream > server (in extreme). We perform heavy handshake once, and leave it open. > Next we just create HTTP/3 streams to perform request. They can perfectly > run in parallel and use same quic connection. Probably, this is something > worth implementing, with limitations of course: we don't want to mix > requests from different (classes of) clients in same connection, we > don't want eternal life of such connection and we need means to control > level of such multiplexing. Multiplexing has various downsides: already mentioned security implications, issues with balancing requests between upstream entities not directly visible to the client (such as different worker processes), added complexity. And, as already mentioned, it is not something new in HTTP/3. [...] -- Maxim Dounin http://mdounin.ru/ From benjamin.p.kallus.gr at dartmouth.edu Fri Dec 29 16:50:36 2023 From: benjamin.p.kallus.gr at dartmouth.edu (Ben Kallus) Date: Fri, 29 Dec 2023 16:50:36 +0000 Subject: Core: Avoid memcpy from NULL In-Reply-To: References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> Message-ID: > Still, -O0 is often used at least during development, and it might > be unreasonable to introduce extra function calls in basic > primitives. I don't think this is a major cause for concern. 
It is perfectly reasonable for ngx_memcpy be a wrapper function around memcpy; I think most people would assume that from the name. In fact, it's *already* implemented as a function when NGX_MEMCPY_LIMIT is defined. > Further, nginx generally supports all available platforms > reasonably compatible with POSIX and C89. This implies that > inline might be not available. On such platforms, moving ngx_memcpy to a function may introduce some performance overhead. The question is whether slightly better performance on obscure systems is worth the mental overhead of working with function-like macros. > Sure (but see above about performance overhead; and another > question is if it needs to be solved, or following existing style > is enough to never see the issue). A little extra performance overhead on C89 systems, or builds with -O0 is a very small price to pay. ngx_resolver.c:4283 contains direct evidence that function-like macros incur mental overhead: ``` ngx_memcpy(sin6->sin6_addr.s6_addr, addr6[j++].s6_addr, 16); ``` Here we have an expression with a side-effect being passed into a function-like macro. As luck would have it, the second argument to ngx_memcpy is evaluated only once, so this is coincidentally okay. This particular landmine has been armed for a decade (see https://github.com/nginx/nginx/blob/769eded73267274e018f460dd76b417538aa5934/src/core/ngx_resolver.c#L2902). Thus, the existing style guidelines are not enough to prevent issues with function-like macros from arising in nginx. Inline functions solve this problem near-optimally. > good luck with doing something like "ngx_max(foo & 0xff, bar)". Nginx is littered with uses of expressions in ngx_max and ngx_min, it just happens that none of those expressions use operators of lower precedence than < and >. This seems like an invitation for human error. Thus, I argue that the best solution to the memcpy-from-NULL problem is to replace ngx_memcpy and ngx_cpymem with inline functions with the appropriate checks for n==0. Going forward, it's probably smart to consider transitioning away from function-like macros more generally. -Ben From jordanc.carter at outlook.com Sun Dec 31 03:51:40 2023 From: jordanc.carter at outlook.com (=?iso-8859-1?q?J_Carter?=) Date: Sun, 31 Dec 2023 03:51:40 +0000 Subject: [PATCH] Image filter: fixed sharpen parsing Message-ID: # HG changeset patch # User J Carter # Date 1703993644 0 # Sun Dec 31 03:34:04 2023 +0000 # Node ID 1eaf9f76184f849fb21c5d2d543f2aa2df3be40c # Parent ee40e2b1d0833b46128a357fbc84c6e23be9be07 Image filter: fixed sharpen parsing. Previously non numeric values were read as 0. This patch makes such values an error. diff -r ee40e2b1d083 -r 1eaf9f76184f src/http/modules/ngx_http_image_filter_module.c --- a/src/http/modules/ngx_http_image_filter_module.c Mon Dec 25 21:15:48 2023 +0400 +++ b/src/http/modules/ngx_http_image_filter_module.c Sun Dec 31 03:34:04 2023 +0000 @@ -1639,7 +1639,7 @@ } if (cv.lengths == NULL) { - n = ngx_http_image_filter_value(&value[1]); + n = ngx_atoi(value[1].data, value[1].len); if (n < 0) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, From mdounin at mdounin.ru Sun Dec 31 19:53:22 2023 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 31 Dec 2023 22:53:22 +0300 Subject: Core: Avoid memcpy from NULL In-Reply-To: References: <6fb91d26f5e60149d7b98c3ad37a0683@sebres.de> Message-ID: Hello! 
On Fri, Dec 29, 2023 at 04:50:36PM +0000, Ben Kallus wrote: > > Still, -O0 is often used at least during development, and it might > > be unreasonable to introduce extra function calls in basic > > primitives. > > I don't think this is a major cause for concern. It is perfectly > reasonable for ngx_memcpy be a wrapper function around memcpy; I think > most people would assume that from the name. In fact, it's *already* > implemented as a function when NGX_MEMCPY_LIMIT is defined. The NGX_MEMCPY_LIMIT is a very specific debugging define, which implies additional overhead for memcpy calls. And it's certainly not an excuse for changing all the macro definitions to function calls. Whether or not ngx_memcpy() can be implemented as a wrapper function is a separate question. > > Further, nginx generally supports all available platforms > > reasonably compatible with POSIX and C89. This implies that > > inline might be not available. > > On such platforms, moving ngx_memcpy to a function may introduce some > performance overhead. The question is whether slightly better > performance on obscure systems is worth the mental overhead of working > with function-like macros. As said earlier in the thread, while some might prefer other approaches, function-like macros are used everywhere in nginx. > > Sure (but see above about performance overhead; and another > > question is if it needs to be solved, or following existing style > > is enough to never see the issue). > > A little extra performance overhead on C89 systems, or builds with -O0 > is a very small price to pay. ngx_resolver.c:4283 contains direct > evidence that function-like macros incur mental overhead: > ``` > ngx_memcpy(sin6->sin6_addr.s6_addr, addr6[j++].s6_addr, 16); > ``` > Here we have an expression with a side-effect being passed into a > function-like macro. As luck would have it, the second argument to > ngx_memcpy is evaluated only once, so this is coincidentally okay. > This particular landmine has been armed for a decade (see > https://github.com/nginx/nginx/blob/769eded73267274e018f460dd76b417538aa5934/src/core/ngx_resolver.c#L2902). > Thus, the existing style guidelines are not enough to prevent issues > with function-like macros from arising in nginx. Inline functions > solve this problem near-optimally. The mentioned call specifically assumes that the second argument is only evaluated once, and it was certainly checked that it is only evaluated once when the call was written. That is, there is no issue here. Still, general style guidelines suggests that the code shouldn't be written this way, and the only reason for j++ in the line in question is that it mimics corresponding IPv4 code. > > good luck with doing something like "ngx_max(foo & 0xff, bar)". > > Nginx is littered with uses of expressions in ngx_max and ngx_min, it > just happens that none of those expressions use operators of lower > precedence than < and >. This seems like an invitation for human > error. It's not "just happens". > Thus, I argue that the best solution to the memcpy-from-NULL problem > is to replace ngx_memcpy and ngx_cpymem with inline functions with the > appropriate checks for n==0. Going forward, it's probably smart to > consider transitioning away from function-like macros more generally. As I said earlier in this thread, I'm not exactly against using inline functions here, and I think a solution with inline functions can be accepted provided it is demonstrated that it introduces no measurable performance degradation. 
Still, I'm very sceptical about the idea of "transitioning away from function-like macros". -- Maxim Dounin http://mdounin.ru/