From xeioex at nginx.com Mon Nov 2 12:43:40 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 02 Nov 2020 12:43:40 +0000 Subject: [njs] Types: fixed signature of subrequest() with detached: true. Message-ID: details: https://hg.nginx.org/njs/rev/7ae9a0502fa1 branches: changeset: 1556:7ae9a0502fa1 user: Jakub Jirutka date: Sat Oct 31 22:38:22 2020 +0100 description: Types: fixed signature of subrequest() with detached: true. Order of method overloading declaration matters. diffstat: test/ts/test.ts | 10 +++++++--- ts/ngx_http_js_module.d.ts | 4 ++-- 2 files changed, 9 insertions(+), 5 deletions(-) diffs (44 lines): diff -r fa3ffb3a159e -r 7ae9a0502fa1 test/ts/test.ts --- a/test/ts/test.ts Thu Oct 29 12:51:21 2020 +0000 +++ b/test/ts/test.ts Sat Oct 31 22:38:22 2020 +0100 @@ -5,6 +5,7 @@ import crypto from 'crypto'; function http_module(r: NginxHTTPRequest) { var bs: NjsByteString; var s: string; + var vod: void; // builtin string vs NjsByteString @@ -57,9 +58,12 @@ function http_module(r: NginxHTTPRequest // r.subrequest r.subrequest('/p/sub1').then(reply => r.return(reply.status)); - r.subrequest('/p/sub2', reply => r.return(reply.status)); - r.subrequest('/p/sub3', {detached:true}); - r.subrequest('/p/sub4', 'a=1&b=2').then(reply => r.return(reply.status, + r.subrequest('/p/sub2', {method:'POST'}).then(reply => r.return(reply.status)); + vod = r.subrequest('/p/sub3', reply => r.return(reply.status)); + vod = r.subrequest('/p/sub4', {method:'POST'}, reply => r.return(reply.status)); + vod = r.subrequest('/p/sub5', {detached:true}); + // Warning: vod = r.subrequest('/p/sub9', {detached:true}, reply => r.return(reply.status)); + r.subrequest('/p/sub6', 'a=1&b=2').then(reply => r.return(reply.status, JSON.stringify(JSON.parse(reply.responseBody ?? 
'')))); } diff -r fa3ffb3a159e -r 7ae9a0502fa1 ts/ngx_http_js_module.d.ts --- a/ts/ngx_http_js_module.d.ts Thu Oct 29 12:51:21 2020 +0000 +++ b/ts/ngx_http_js_module.d.ts Sat Oct 31 22:38:22 2020 +0100 @@ -346,11 +346,11 @@ interface NginxHTTPRequest { * @param options Subrequest options. * @param callback Completion callback. */ + subrequest(uri: NjsStringLike, options: NginxSubrequestOptions & { detached: true }): void; subrequest(uri: NjsStringLike, options?: NginxSubrequestOptions | string): Promise; - subrequest(uri: NjsStringLike, options: NginxSubrequestOptions | string, + subrequest(uri: NjsStringLike, options: NginxSubrequestOptions & { detached?: false } | string, callback:(reply:NginxHTTPRequest) => void): void; subrequest(uri: NjsStringLike, callback:(reply:NginxHTTPRequest) => void): void; - subrequest(uri: NjsStringLike, options: NginxSubrequestOptions & { detached: true }): void; /** * Current URI in request, normalized. */ From pluknet at nginx.com Mon Nov 2 13:39:39 2020 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 2 Nov 2020 13:39:39 +0000 Subject: HTTP/3 with multiple server blocks In-Reply-To: References: Message-ID: <202A0091-2682-410E-9129-4665C207BE9C@nginx.com> > On 30 Oct 2020, at 22:21, Jonny Barnes wrote: > > Can we do this? > > First issue is if I add the line `listen 443 http3 resueport` to more than one server {} block the nginx conf test gives the following error: > > > nginx: [emerg] duplicate listen options for 0.0.0.0:443 in /usr/local/nginx/conf/sites-enabled/legolas:2 > > nginx: configuration file /usr/local/nginx/conf/nginx.conf test failed That's expected behaviour, see for detailed explanation: https://trac.nginx.org/nginx/ticket/1912 > > It seemingly works if i only have it for one of my server blocks. But I can?t get my firefox browser to connect with HTTP/3. It does connect to quic.nginx.org over HTTP/3. You may want to hint Firefox about QUIC protocol using Alt-Svc. 
-- Sergey Kandaurov From xeioex at nginx.com Mon Nov 2 19:10:20 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 02 Nov 2020 19:10:20 +0000 Subject: [njs] Fixed querystring.stringify(). Message-ID: details: https://hg.nginx.org/njs/rev/b523bbbd8e6d branches: changeset: 1557:b523bbbd8e6d user: Artem S. Povalyukhin date: Sat Oct 31 23:00:03 2020 +0300 description: Fixed querystring.stringify(). diffstat: src/njs_query_string.c | 36 ++++++++++++++++++++++++------------ src/test/njs_unit_test.c | 44 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 13 deletions(-) diffs (140 lines): diff -r 7ae9a0502fa1 -r b523bbbd8e6d src/njs_query_string.c --- a/src/njs_query_string.c Sat Oct 31 22:38:22 2020 +0100 +++ b/src/njs_query_string.c Sat Oct 31 23:00:03 2020 +0300 @@ -549,8 +549,8 @@ njs_inline njs_int_t njs_query_string_push(njs_vm_t *vm, njs_chb_t *chain, njs_value_t *key, njs_value_t *value, njs_string_prop_t *eq, njs_function_t *encoder) { + double num; njs_int_t ret, length; - njs_str_t str; length = 0; @@ -561,25 +561,37 @@ njs_query_string_push(njs_vm_t *vm, njs_ length += ret; - if (!njs_is_string(value)) { - ret = njs_value_to_string(vm, value, value); + njs_chb_append(chain, eq->start, eq->size); + length += eq->length; + + switch (value->type) { + case NJS_NUMBER: + num = njs_number(value); + if (njs_slow_path(isnan(num) || isinf(num))) { + break; + } + + /* Fall through. */ + + case NJS_BOOLEAN: + ret = njs_primitive_value_to_string(vm, value, value); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } - } - - njs_string_get(value, &str); - if (str.length > 0) { - njs_chb_append(chain, eq->start, eq->size); - length += eq->length; + /* Fall through. 
*/ + case NJS_STRING: ret = njs_query_string_encoder_call(vm, chain, encoder, value); if (njs_slow_path(ret < 0)) { return NJS_ERROR; } length += ret; + break; + + default: + break; } return length; @@ -673,7 +685,7 @@ njs_query_string_stringify(njs_vm_t *vm, njs_chb_init(&chain, vm->mem_pool); keys = njs_value_own_enumerate(vm, object, NJS_ENUM_KEYS, NJS_ENUM_STRING, - 1); + 0); if (njs_slow_path(keys == NULL)) { return NJS_ERROR; } @@ -692,7 +704,7 @@ njs_query_string_stringify(njs_vm_t *vm, array = njs_array(&value); for (i = 0; i < array->length; i++) { - if (i != 0) { + if (chain.last != NULL) { njs_chb_append(&chain, sep.start, sep.size); length += sep.length; } @@ -721,7 +733,7 @@ njs_query_string_stringify(njs_vm_t *vm, goto failed; } - if (i != 0) { + if (chain.last != NULL) { njs_chb_append(&chain, sep.start, sep.size); length += sep.length; } diff -r 7ae9a0502fa1 -r b523bbbd8e6d src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Sat Oct 31 22:38:22 2020 +0100 +++ b/src/test/njs_unit_test.c Sat Oct 31 23:00:03 2020 +0300 @@ -18233,7 +18233,49 @@ static njs_unit_test_t njs_test[] = { njs_str("var qs = require('querystring');" "qs.stringify({X:{toString(){return 3}}})"), - njs_str("X=3") }, + njs_str("X=") }, + + { njs_str("var qs = require('querystring');" + "qs.stringify({ name: undefined, age: 12 })"), + njs_str("name=&age=12") }, + + { njs_str("var qs = require('querystring');" + "qs.stringify(Object.create({ name: undefined, age: 12 }))"), + njs_str("") }, + + { njs_str("var qs = require('querystring');" + "qs.stringify([])"), + njs_str("") }, + + { njs_str("var qs = require('querystring');" + "qs.stringify(['','',''])"), + njs_str("0=&1=&2=") }, + + { njs_str("var qs = require('querystring');" + "qs.stringify([undefined, null, Symbol(), Object(0), Object('test'), Object(false),,,])"), + njs_str("0=&1=&2=&3=&4=&5=") }, + +#if 0 + { njs_str("var qs = require('querystring');" + "qs.stringify([NaN, Infinity, -Infinity, 2**69, 2**70])"), + 
njs_str("0=&1=&2=&3=590295810358705700000&4=1.1805916207174113e%2B21") }, +#else + { njs_str("var qs = require('querystring');" + "qs.stringify([NaN, Infinity, -Infinity, 2**69, 2**70])"), + njs_str("0=&1=&2=&3=590295810358705700000&4=1.1805916207174114e%2B21") }, +#endif + + { njs_str("var qs = require('querystring');" + "qs.stringify([[1,2,3],[4,5,6]])"), + njs_str("0=1&0=2&0=3&1=4&1=5&1=6") }, + + { njs_str("var qs = require('querystring');" + "qs.stringify([['a',,,],['b',,,]])"), + njs_str("0=a&0=&0=&1=b&1=&1=") }, + + { njs_str("var qs = require('querystring');" + "qs.stringify([[,'a','b',,]])"), + njs_str("0=&0=a&0=b&0=") }, { njs_str("var qs = require('querystring');" "qs.escape('abc????def')"), From jonnybarnes at gmail.com Mon Nov 2 20:00:42 2020 From: jonnybarnes at gmail.com (Jonny Barnes) Date: Mon, 2 Nov 2020 20:00:42 +0000 Subject: HTTP/3 with multiple server blocks In-Reply-To: <202A0091-2682-410E-9129-4665C207BE9C@nginx.com> References: <202A0091-2682-410E-9129-4665C207BE9C@nginx.com> Message-ID: If I understand the link correctly, I want to only set options on the `default_server`. So I know have three vhosts setup, the first is a generic https redirector and looks like: >server { > listen 80; > listen [::]:80; > return 301 https://$host$request_uri; >} The next starts: >server { > listen 443 http3 default_server reuseport; > listen [::]:443 http3 default_server reuseport; > > listen 443 ssl http2; > listen [::]:443 ssl http2; > > server_name foo; > > add_header Alt-Svc '$http3=":443"; ma=15'; And the second: >server { > listen 443; > listen [::]:443; > > server_name bar; > > add_header Alt-Svc '$http3=":443"; ma=15'; The nginx config test passes, but everything is served over HTTP/2. Is there something obvious I?m missing here? On Mon, 2 Nov 2020 at 13:39, Sergey Kandaurov wrote: > > > On 30 Oct 2020, at 22:21, Jonny Barnes wrote: > > > > Can we do this? 
> > > > First issue is if I add the line `listen 443 http3 resueport` to more > than one server {} block the nginx conf test gives the following error: > > > > > nginx: [emerg] duplicate listen options for 0.0.0.0:443 in > /usr/local/nginx/conf/sites-enabled/legolas:2 > > > nginx: configuration file /usr/local/nginx/conf/nginx.conf test failed > > That's expected behaviour, see for detailed explanation: > https://trac.nginx.org/nginx/ticket/1912 > > > > > It seemingly works if i only have it for one of my server blocks. But I > can?t get my firefox browser to connect with HTTP/3. It does connect to > quic.nginx.org over HTTP/3. > > You may want to hint Firefox about QUIC protocol using Alt-Svc. > > -- > Sergey Kandaurov > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: From jonnybarnes at gmail.com Mon Nov 2 20:14:09 2020 From: jonnybarnes at gmail.com (Jonny Barnes) Date: Mon, 2 Nov 2020 20:14:09 +0000 Subject: HTTP/3 with multiple server blocks In-Reply-To: References: <202A0091-2682-410E-9129-4665C207BE9C@nginx.com> Message-ID: Okay, progress, fyi for anyone adding HTTP/3 support, make sure your server?s firewall has port 443 open for udp as well as tcp! :face_palm: First server worked after opening the port To get the second server I seemed to need to repeat myself slightly. I needed: > listen 443 http3; > listen [::]:443 http3; > > listen 443; > listen [::]:443; Is this to get it to listen on both udp and tcp? On Mon, 2 Nov 2020 at 20:00, Jonny Barnes wrote: > If I understand the link correctly, I want to only set options on the > `default_server`. 
> > So I know have three vhosts setup, the first is a generic https redirector > and looks like: > >server { > > listen 80; > > listen [::]:80; > > return 301 https://$host$request_uri; > >} > > The next starts: > >server { > > listen 443 http3 default_server reuseport; > > listen [::]:443 http3 default_server reuseport; > > > > listen 443 ssl http2; > > listen [::]:443 ssl http2; > > > > server_name foo; > > > > add_header Alt-Svc '$http3=":443"; ma=15'; > > And the second: > >server { > > listen 443; > > listen [::]:443; > > > > server_name bar; > > > > add_header Alt-Svc '$http3=":443"; ma=15'; > > The nginx config test passes, but everything is served over HTTP/2. > > Is there something obvious I?m missing here? > > On Mon, 2 Nov 2020 at 13:39, Sergey Kandaurov wrote: > >> >> > On 30 Oct 2020, at 22:21, Jonny Barnes wrote: >> > >> > Can we do this? >> > >> > First issue is if I add the line `listen 443 http3 resueport` to more >> than one server {} block the nginx conf test gives the following error: >> > >> > > nginx: [emerg] duplicate listen options for 0.0.0.0:443 in >> /usr/local/nginx/conf/sites-enabled/legolas:2 >> > > nginx: configuration file /usr/local/nginx/conf/nginx.conf test failed >> >> That's expected behaviour, see for detailed explanation: >> https://trac.nginx.org/nginx/ticket/1912 >> >> > >> > It seemingly works if i only have it for one of my server blocks. But I >> can?t get my firefox browser to connect with HTTP/3. It does connect to >> quic.nginx.org over HTTP/3. >> >> You may want to hint Firefox about QUIC protocol using Alt-Svc. >> >> -- >> Sergey Kandaurov >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From alexander.borisov at nginx.com Tue Nov 3 17:17:33 2020 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Tue, 03 Nov 2020 17:17:33 +0000 Subject: [njs] Promise: fixed the catch handler for Promise.prototype.finally(). Message-ID: details: https://hg.nginx.org/njs/rev/c791e3943df1 branches: changeset: 1558:c791e3943df1 user: Alexander Borisov date: Tue Nov 03 20:14:33 2020 +0300 description: Promise: fixed the catch handler for Promise.prototype.finally(). By spec, the catch handler for the .finally() should always return an exception. The issue was introduced in 61bf7a31e685. diffstat: src/njs_promise.c | 6 +++++- test/js/promise_then_throw_finally_catch.js | 4 ++++ test/njs_expect_test.exp | 3 +++ 3 files changed, 12 insertions(+), 1 deletions(-) diffs (35 lines): diff -r b523bbbd8e6d -r c791e3943df1 src/njs_promise.c --- a/src/njs_promise.c Sat Oct 31 23:00:03 2020 +0300 +++ b/src/njs_promise.c Tue Nov 03 20:14:33 2020 +0300 @@ -1023,7 +1023,11 @@ static njs_int_t njs_promise_catch_finally_function(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { - return njs_promise_then_finally_function(vm, args, nargs, unused); + (void) njs_promise_then_finally_function(vm, args, nargs, unused); + + njs_vm_retval_set(vm, njs_arg(args, nargs, 1)); + + return NJS_ERROR; } diff -r b523bbbd8e6d -r c791e3943df1 test/js/promise_then_throw_finally_catch.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_then_throw_finally_catch.js Tue Nov 03 20:14:33 2020 +0300 @@ -0,0 +1,4 @@ +Promise.resolve() +.then(() => {nonExsisting()}) +.finally(() => {}) +.catch(() => {console.log("Done")}); \ No newline at end of file diff -r b523bbbd8e6d -r c791e3943df1 test/njs_expect_test.exp --- a/test/njs_expect_test.exp Sat Oct 31 23:00:03 2020 +0300 +++ b/test/njs_expect_test.exp Tue Nov 03 20:14:33 2020 +0300 @@ -1044,3 +1044,6 @@ njs_run {"./test/js/fs_promises_008.js"} njs_run {"./test/js/fs_promises_009.js"} \ "test recursive 
fs.rmdirSync" + +njs_run {"./test/js/promise_then_throw_finally_catch.js"} \ +"Done" From cherrychenjie at didiglobal.com Wed Nov 4 12:58:51 2020 From: cherrychenjie at didiglobal.com (=?utf-8?B?6ZmI5rSBIENqaHVzdCBDaGVu?=) Date: Wed, 4 Nov 2020 12:58:51 +0000 Subject: improve the first selection of SWRR algorithm Message-ID: Hi: We improve the Smooth Weighted Round-Robin?SWRR? algorithm to successfully resolve the problem in the following situations. Situation 1? upstream backend-server { server 1.1.1.1:8000 weight=100; server 2.2.2.2:8000 weight=101; server 3.3.3.3:8000 weight=100; } 1. When each machine in the cluster mode executes "-s reload" at the same time , the first selection of each machine is the machine 2.2.2.2:8000 having higher weight , which will lead to 300%+ increase of 2.2.2.2:8000 traffic. 2. More and more companies are implementing service discovery based on nginx. Adding or removing machine will also lead to 300%+ increase of 2.2.2.2:8000 traffic. Situation 2? upstream backend-server { server 1.1.1.1:8000 weight=100; server 2.2.2.2:8000 weight=100; server 3.3.3.3:8000 weight=100; } 1. When each machine in the cluster mode executes "-s reload" at the same time , the first selection of each machine is the first machine 1.1.1.1:8000, which will lead to 300%+ increase of 1.1.1.1:8000 traffic. 2. More and more companies are implementing service discovery based on nginx. Adding or removing machine will also lead to 300%+ increase of 1.1.1.1:8000 traffic. 
# HG changeset patch # User Jie Chen > # Date 1599813602 -28800 # Fri Sep 11 16:40:02 2020 +0800 # Node ID 931b0c055626657d68f886781c193ffb09245a2e # Parent da5e3f5b16733167142b599b6af3ce9469a07d52 improve the first selection of SWRR algorithm diff -r da5e3f5b1673 -r 931b0c055626 src/http/ngx_http_upstream_round_robin.c --- a/src/http/ngx_http_upstream_round_robin.c Wed Sep 02 23:13:36 2020 +0300 +++ b/src/http/ngx_http_upstream_round_robin.c Fri Sep 11 16:40:02 2020 +0800 @@ -91,7 +91,7 @@ peer[n].name = server[i].addrs[j].name; peer[n].weight = server[i].weight; peer[n].effective_weight = server[i].weight; - peer[n].current_weight = 0; + peer[n].current_weight = 0 - ngx_random() % peers->total_weight; peer[n].max_conns = server[i].max_conns; peer[n].max_fails = server[i].max_fails; peer[n].fail_timeout = server[i].fail_timeout; @@ -155,7 +155,7 @@ peer[n].name = server[i].addrs[j].name; peer[n].weight = server[i].weight; peer[n].effective_weight = server[i].weight; - peer[n].current_weight = 0; + peer[n].current_weight = 0 - ngx_random() % peers->total_weight; peer[n].max_conns = server[i].max_conns; peer[n].max_fails = server[i].max_fails; peer[n].fail_timeout = server[i].fail_timeout; Thanks~ cherrychenjie at didiglobal.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From alexander.borisov at nginx.com Thu Nov 5 10:16:11 2020 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Thu, 05 Nov 2020 10:16:11 +0000 Subject: [njs] Promise: tracking unhandled promise rejection. Message-ID: details: https://hg.nginx.org/njs/rev/2d1abd2d38b4 branches: changeset: 1559:2d1abd2d38b4 user: Alexander Borisov date: Tue Nov 03 15:31:41 2020 +0300 description: Promise: tracking unhandled promise rejection. By default, promises should finish processing normally for .then(), .catch(), .finally() and so on. The patch adds the ability to report unhandled exception from promises to the user. This closes #346 issue on GitHub. 
diffstat: nginx/ngx_http_js_module.c | 1 + nginx/ngx_stream_js_module.c | 1 + src/njs.h | 23 ++++++--- src/njs_promise.c | 74 ++++++++++++++++++++++++++++++- src/njs_shell.c | 8 +++ src/njs_vm.c | 22 +++++++++ src/njs_vm.h | 2 + test/js/promise_catch_then_throw_catch.js | 5 ++ test/js/promise_catch_throw.js | 3 + test/js/promise_finally_throw.js | 2 + test/js/promise_finally_throw_catch.js | 3 + test/js/promise_reject_catch.js | 1 + test/js/promise_reject_post_catch.js | 2 + test/js/promise_then_throw.js | 2 + test/js/promise_then_throw_catch.js | 3 + test/js/promise_two_first_then_throw.js | 6 ++ test/js/promise_two_then_throw.js | 5 ++ test/njs_expect_test.exp | 30 ++++++++++++ 18 files changed, 184 insertions(+), 9 deletions(-) diffs (400 lines): diff -r c791e3943df1 -r 2d1abd2d38b4 nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Tue Nov 03 20:14:33 2020 +0300 +++ b/nginx/ngx_http_js_module.c Tue Nov 03 15:31:41 2020 +0300 @@ -3033,6 +3033,7 @@ ngx_http_js_init_main_conf(ngx_conf_t *c njs_vm_opt_init(&options); options.backtrace = 1; + options.unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; options.ops = &ngx_http_js_ops; options.argv = ngx_argv; options.argc = ngx_argc; diff -r c791e3943df1 -r 2d1abd2d38b4 nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Tue Nov 03 20:14:33 2020 +0300 +++ b/nginx/ngx_stream_js_module.c Tue Nov 03 15:31:41 2020 +0300 @@ -1478,6 +1478,7 @@ ngx_stream_js_init_main_conf(ngx_conf_t njs_vm_opt_init(&options); options.backtrace = 1; + options.unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; options.ops = &ngx_stream_js_ops; options.argv = ngx_argv; options.argc = ngx_argc; diff -r c791e3943df1 -r 2d1abd2d38b4 src/njs.h --- a/src/njs.h Tue Nov 03 20:14:33 2020 +0300 +++ b/src/njs.h Tue Nov 03 15:31:41 2020 +0300 @@ -195,18 +195,24 @@ typedef struct { char **argv; njs_uint_t argc; +#define NJS_VM_OPT_UNHANDLED_REJECTION_IGNORE 0 +#define NJS_VM_OPT_UNHANDLED_REJECTION_THROW 1 + /* - * 
accumulative - enables "accumulative" mode to support incremental compiling. + * accumulative - enables "accumulative" mode to support incremental compiling. * (REPL). Allows starting parent VM without cloning. - * disassemble - enables disassemble. - * backtrace - enables backtraces. - * quiet - removes filenames from backtraces. To produce comparable + * disassemble - enables disassemble. + * backtrace - enables backtraces. + * quiet - removes filenames from backtraces. To produce comparable test262 diffs. - * sandbox - "sandbox" mode. Disables file access. - * unsafe - enables unsafe language features: + * sandbox - "sandbox" mode. Disables file access. + * unsafe - enables unsafe language features: * - Function constructors. - * module - ES6 "module" mode. Script mode is default. - * ast - print AST. + * module - ES6 "module" mode. Script mode is default. + * ast - print AST. + * unhandled_rejection IGNORE | THROW - tracks unhandled promise rejections: + * - throwing inside a Promise without a catch block. + * - throwing inside in a finally or catch block. 
*/ uint8_t trailer; /* 1 bit */ @@ -219,6 +225,7 @@ typedef struct { uint8_t unsafe; /* 1 bit */ uint8_t module; /* 1 bit */ uint8_t ast; /* 1 bit */ + uint8_t unhandled_rejection; } njs_vm_opt_t; diff -r c791e3943df1 -r 2d1abd2d38b4 src/njs_promise.c --- a/src/njs_promise.c Tue Nov 03 20:14:33 2020 +0300 +++ b/src/njs_promise.c Tue Nov 03 15:31:41 2020 +0300 @@ -12,6 +12,11 @@ typedef enum { NJS_PROMISE_REJECTED } njs_promise_type_t; +typedef enum { + NJS_PROMISE_HANDLE = 0, + NJS_PROMISE_REJECT +} njs_promise_rejection_type_t; + typedef struct { njs_promise_type_t state; njs_value_t result; @@ -51,6 +56,8 @@ static njs_int_t njs_promise_value_const njs_value_t *dst); static njs_int_t njs_promise_capability_executor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t retval); +static njs_int_t njs_promise_host_rejection_tracker(njs_vm_t *vm, + njs_promise_t *promise, njs_promise_rejection_type_t operation); static njs_int_t njs_promise_resolve_function(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t retval); static njs_promise_t *njs_promise_resolve(njs_vm_t *vm, @@ -497,6 +504,7 @@ njs_promise_fulfill(njs_vm_t *vm, njs_pr njs_inline njs_value_t * njs_promise_reject(njs_vm_t *vm, njs_promise_t *promise, njs_value_t *reason) { + njs_int_t ret; njs_queue_t queue; njs_promise_data_t *data; @@ -505,6 +513,14 @@ njs_promise_reject(njs_vm_t *vm, njs_pro data->result = *reason; data->state = NJS_PROMISE_REJECTED; + if (!data->is_handled) { + ret = njs_promise_host_rejection_tracker(vm, promise, + NJS_PROMISE_REJECT); + if (njs_slow_path(ret != NJS_OK)) { + return njs_value_arg(&njs_value_null); + } + } + if (njs_queue_is_empty(&data->reject_queue)) { return njs_value_arg(&njs_value_undefined); @@ -523,6 +539,58 @@ njs_promise_reject(njs_vm_t *vm, njs_pro static njs_int_t +njs_promise_host_rejection_tracker(njs_vm_t *vm, njs_promise_t *promise, + njs_promise_rejection_type_t operation) +{ + uint32_t i, length; + njs_value_t *value; + 
njs_promise_data_t *data; + + if (vm->options.unhandled_rejection + == NJS_VM_OPT_UNHANDLED_REJECTION_IGNORE) + { + return NJS_OK; + } + + if (vm->promise_reason == NULL) { + vm->promise_reason = njs_array_alloc(vm, 1, 0, NJS_ARRAY_SPARE); + if (njs_slow_path(vm->promise_reason == NULL)) { + return NJS_ERROR; + } + } + + data = njs_data(&promise->value); + + if (operation == NJS_PROMISE_REJECT) { + if (vm->promise_reason != NULL) { + return njs_array_add(vm, vm->promise_reason, &data->result); + } + + } else { + value = vm->promise_reason->start; + length = vm->promise_reason->length; + + for (i = 0; i < length; i++) { + if (memcmp(&value[i], &data->result, sizeof(njs_value_t)) == 0) { + length--; + + if (i < length) { + memmove(&value[i], &value[i + 1], + sizeof(njs_value_t) * (length - i)); + } + + break; + } + } + + vm->promise_reason->length = length; + } + + return NJS_OK; +} + + +static njs_int_t njs_promise_invoke_then(njs_vm_t *vm, njs_value_t *promise, njs_value_t *args, njs_int_t nargs) { @@ -880,7 +948,11 @@ njs_promise_perform_then(njs_vm_t *vm, n if (data->state == NJS_PROMISE_REJECTED) { njs_set_data(&arguments[0], rejected_reaction, 0); - /* TODO: HostPromiseRejectionTracker */ + ret = njs_promise_host_rejection_tracker(vm, promise, + NJS_PROMISE_HANDLE); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } } else { njs_set_data(&arguments[0], fulfilled_reaction, 0); diff -r c791e3943df1 -r 2d1abd2d38b4 src/njs_shell.c --- a/src/njs_shell.c Tue Nov 03 20:14:33 2020 +0300 +++ b/src/njs_shell.c Tue Nov 03 15:31:41 2020 +0300 @@ -35,6 +35,7 @@ typedef struct { uint8_t safe; uint8_t version; uint8_t ast; + uint8_t unhandled_rejection; char *file; char *command; @@ -270,6 +271,7 @@ main(int argc, char **argv) vm_options.argv = opts.argv; vm_options.argc = opts.argc; vm_options.ast = opts.ast; + vm_options.unhandled_rejection = opts.unhandled_rejection; if (opts.interactive) { ret = njs_interactive_shell(&opts, &vm_options); @@ -315,6 +317,7 @@ 
njs_get_options(njs_opts_t *opts, int ar " -f disabled denormals mode.\n" " -p set path prefix for modules.\n" " -q disable interactive introduction prompt.\n" + " -r ignore unhandled promise rejection.\n" " -s sandbox mode.\n" " -t script|module source code type (script is default).\n" " -v print njs version and exit.\n" @@ -324,6 +327,7 @@ njs_get_options(njs_opts_t *opts, int ar ret = NJS_DONE; opts->denormals = 1; + opts->unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; for (i = 1; i < argc; i++) { @@ -393,6 +397,10 @@ njs_get_options(njs_opts_t *opts, int ar opts->quiet = 1; break; + case 'r': + opts->unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_IGNORE; + break; + case 's': opts->sandbox = 1; break; diff -r c791e3943df1 -r 2d1abd2d38b4 src/njs_vm.c --- a/src/njs_vm.c Tue Nov 03 20:14:33 2020 +0300 +++ b/src/njs_vm.c Tue Nov 03 15:31:41 2020 +0300 @@ -505,6 +505,8 @@ static njs_int_t njs_vm_handle_events(njs_vm_t *vm) { njs_int_t ret; + njs_str_t str; + njs_value_t string; njs_event_t *ev; njs_queue_t *promise_events, *posted_events; njs_queue_link_t *link; @@ -530,6 +532,26 @@ njs_vm_handle_events(njs_vm_t *vm) } } + if (vm->options.unhandled_rejection + == NJS_VM_OPT_UNHANDLED_REJECTION_THROW) + { + if (vm->promise_reason != NULL && vm->promise_reason->length != 0) { + ret = njs_value_to_string(vm, &string, + &vm->promise_reason->start[0]); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + njs_string_get(&string, &str); + njs_vm_error(vm, "unhandled promise rejection: %V", &str); + + njs_mp_free(vm->mem_pool, vm->promise_reason); + vm->promise_reason = NULL; + + return NJS_ERROR; + } + } + for ( ;; ) { link = njs_queue_first(posted_events); diff -r c791e3943df1 -r 2d1abd2d38b4 src/njs_vm.h --- a/src/njs_vm.h Tue Nov 03 20:14:33 2020 +0300 +++ b/src/njs_vm.h Tue Nov 03 15:31:41 2020 +0300 @@ -220,6 +220,8 @@ struct njs_vm_s { njs_regex_context_t *regex_context; njs_regex_match_data_t *single_match_data; + njs_array_t 
*promise_reason; + /* * MemoryError is statically allocated immutable Error object * with the InternalError prototype. diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_catch_then_throw_catch.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_catch_then_throw_catch.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,5 @@ +Promise.resolve() +.then(() => {}) +.catch(() => {}) +.then(() => {nonExsisting()}) +.catch(() => {console.log("Done")}); \ No newline at end of file diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_catch_throw.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_catch_throw.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,3 @@ +Promise.resolve() +.then(() => {nonExsisting()}) +.catch(() => {nonExsistingInCatch()}); \ No newline at end of file diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_finally_throw.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_finally_throw.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,2 @@ +Promise.resolve() +.finally(() => {nonExsistingInFinally()}); \ No newline at end of file diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_finally_throw_catch.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_finally_throw_catch.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,3 @@ +Promise.resolve() +.finally(() => {nonExsistingInFinally()}) +.catch(() => {console.log("Done")}); \ No newline at end of file diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_reject_catch.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_reject_catch.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,1 @@ +Promise.reject("test").catch((x) => console.log('rejected', x)); \ No newline at end of file diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_reject_post_catch.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_reject_post_catch.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,2 @@ +var p = Promise.reject(); +setImmediate(() => {p.catch(() 
=> {})}); \ No newline at end of file diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_then_throw.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_then_throw.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,2 @@ +Promise.resolve() +.then(() => {nonExsisting()}); \ No newline at end of file diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_then_throw_catch.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_then_throw_catch.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,3 @@ +Promise.resolve() +.then(() => {nonExsisting()}) +.catch(() => {console.log("Done")}); \ No newline at end of file diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_two_first_then_throw.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_two_first_then_throw.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,6 @@ +Promise.resolve() +.then(() => {nonExsistingOne()}); + +Promise.resolve() +.then(() => {nonExsistingTwo()}) +.catch(() => {}); diff -r c791e3943df1 -r 2d1abd2d38b4 test/js/promise_two_then_throw.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_two_then_throw.js Tue Nov 03 15:31:41 2020 +0300 @@ -0,0 +1,5 @@ +Promise.resolve() +.then(() => {nonExsistingOne()}); + +Promise.resolve() +.then(() => {nonExsistingTwo()}); \ No newline at end of file diff -r c791e3943df1 -r 2d1abd2d38b4 test/njs_expect_test.exp --- a/test/njs_expect_test.exp Tue Nov 03 20:14:33 2020 +0300 +++ b/test/njs_expect_test.exp Tue Nov 03 15:31:41 2020 +0300 @@ -1047,3 +1047,33 @@ njs_run {"./test/js/fs_promises_009.js"} njs_run {"./test/js/promise_then_throw_finally_catch.js"} \ "Done" + +njs_run {"./test/js/promise_catch_throw.js"} \ +"Error: unhandled promise rejection: ReferenceError: \"nonExsistingInCatch\" is not defined" + +njs_run {"./test/js/promise_then_throw.js"} \ +"Error: unhandled promise rejection: ReferenceError: \"nonExsisting\" is not defined" + +njs_run {"./test/js/promise_then_throw_catch.js"} \ +"Done" + +njs_run 
{"./test/js/promise_catch_then_throw_catch.js"} \ +"Done" + +njs_run {"./test/js/promise_finally_throw.js"} \ +"Error: unhandled promise rejection: ReferenceError: \"nonExsistingInFinally\" is not defined" + +njs_run {"./test/js/promise_finally_throw_catch.js"} \ +"Done" + +njs_run {"./test/js/promise_two_then_throw.js"} \ +"Error: unhandled promise rejection: ReferenceError: \"nonExsistingOne\" is not defined" + +njs_run {"./test/js/promise_two_first_then_throw.js"} \ +"Error: unhandled promise rejection: ReferenceError: \"nonExsistingOne\" is not defined" + +njs_run {"./test/js/promise_reject_catch.js"} \ +"rejected test" + +njs_run {"./test/js/promise_reject_post_catch.js"} \ +"Error: unhandled promise rejection: undefined" From xeioex at nginx.com Fri Nov 6 11:42:28 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Fri, 06 Nov 2020 11:42:28 +0000 Subject: [njs] Fixed querystring.parse(). Message-ID: details: https://hg.nginx.org/njs/rev/3e7f9e326219 branches: changeset: 1560:3e7f9e326219 user: Dmitry Volyntsev date: Fri Nov 06 11:41:32 2020 +0000 description: Fixed querystring.parse(). The issue happened when the first eq symbol is located after the separator, whereas it should be looked for only in the string segment before the separator. This fixes #349 issue on Github. 
diffstat: src/njs_query_string.c | 4 ++-- src/test/njs_unit_test.c | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diffs (36 lines): diff -r 2d1abd2d38b4 -r 3e7f9e326219 src/njs_query_string.c --- a/src/njs_query_string.c Tue Nov 03 15:31:41 2020 +0300 +++ b/src/njs_query_string.c Fri Nov 06 11:41:32 2020 +0000 @@ -269,7 +269,7 @@ njs_query_string_match(u_char *p, u_char return p; } - while (p < (end - length)) { + while (p <= (end - length)) { if (memcmp(p, v->start, length) == 0) { return p; } @@ -402,7 +402,7 @@ njs_query_string_parse(njs_vm_t *vm, njs goto next; } - val = njs_query_string_match(key, end, &eq); + val = njs_query_string_match(key, part, &eq); size = val - key; diff -r 2d1abd2d38b4 -r 3e7f9e326219 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Tue Nov 03 15:31:41 2020 +0300 +++ b/src/test/njs_unit_test.c Fri Nov 06 11:41:32 2020 +0000 @@ -17987,6 +17987,11 @@ static njs_unit_test_t njs_test[] = njs_str("{freespace:''}") }, { njs_str("var qs = require('querystring');" + "var obj = qs.parse('name&value=12');" + "njs.dump(obj)"), + njs_str("{name:'',value:'12'}") }, + + { njs_str("var qs = require('querystring');" "var obj = qs.parse('baz=fuz&muz=tax', 'fuz');" "njs.dump(obj)"), njs_str("{baz:'',&muz:'tax'}") }, From xeioex at nginx.com Fri Nov 6 13:48:51 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Fri, 06 Nov 2020 13:48:51 +0000 Subject: [njs] Introduced initial iterator support. Message-ID: details: https://hg.nginx.org/njs/rev/1c2313826b2b branches: changeset: 1561:1c2313826b2b user: Artem S. Povalyukhin date: Sun Oct 25 18:29:15 2020 +0300 description: Introduced initial iterator support. 
diffstat: auto/sources | 1 + src/njs_array.c | 54 ++++++++ src/njs_builtin.c | 9 + src/njs_iterator.c | 299 +++++++++++++++++++++++++++++++++++++++++++++++ src/njs_iterator.h | 21 +++ src/njs_main.h | 1 + src/njs_string.c | 27 ++++ src/njs_typed_array.c | 59 +++++++++ src/njs_value.h | 1 + src/njs_vm.h | 4 +- src/test/njs_unit_test.c | 132 ++++++++++++++++++++ 11 files changed, 607 insertions(+), 1 deletions(-) diffs (769 lines): diff -r 3e7f9e326219 -r 1c2313826b2b auto/sources --- a/auto/sources Fri Nov 06 11:41:32 2020 +0000 +++ b/auto/sources Sun Oct 25 18:29:15 2020 +0300 @@ -58,6 +58,7 @@ NJS_LIB_SRCS=" \ src/njs_query_string.c \ src/njs_encoding.c \ src/njs_buffer.c \ + src/njs_iterator.c \ " NJS_LIB_TEST_SRCS=" \ diff -r 3e7f9e326219 -r 1c2313826b2b src/njs_array.c --- a/src/njs_array.c Fri Nov 06 11:41:32 2020 +0000 +++ b/src/njs_array.c Sun Oct 25 18:29:15 2020 +0300 @@ -3351,6 +3351,24 @@ njs_array_prototype_copy_within(njs_vm_t } +static njs_int_t +njs_array_prototype_iterator_obj(njs_vm_t *vm, njs_value_t *args, + njs_uint_t nargs, njs_index_t kind) +{ + njs_int_t ret; + njs_value_t *this; + + this = njs_argument(args, 0); + + ret = njs_value_to_object(vm, this); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + return njs_array_iterator_create(vm, this, &vm->retval, kind); +} + + static const njs_object_prop_t njs_array_prototype_properties[] = { { @@ -3386,6 +3404,15 @@ static const njs_object_prop_t njs_arra { .type = NJS_PROPERTY, + .name = njs_string("entries"), + .value = njs_native_function2(njs_array_prototype_iterator_obj, 0, + NJS_ENUM_BOTH), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, .name = njs_string("every"), .value = njs_native_function2(njs_array_prototype_iterator, 1, njs_array_func(NJS_ARRAY_EVERY)), @@ -3465,6 +3492,15 @@ static const njs_object_prop_t njs_arra { .type = NJS_PROPERTY, + .name = njs_string("keys"), + .value = njs_native_function2(njs_array_prototype_iterator_obj, 0, + 
NJS_ENUM_KEYS), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, .name = njs_string("lastIndexOf"), .value = njs_native_function2(njs_array_prototype_reverse_iterator, 1, NJS_ARRAY_LAST_INDEX_OF), @@ -3579,6 +3615,24 @@ static const njs_object_prop_t njs_arra .writable = 1, .configurable = 1, }, + + { + .type = NJS_PROPERTY, + .name = njs_string("values"), + .value = njs_native_function2(njs_array_prototype_iterator_obj, 0, + NJS_ENUM_VALUES), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_wellknown_symbol(NJS_SYMBOL_ITERATOR), + .value = njs_native_function2(njs_array_prototype_iterator_obj, 0, + NJS_ENUM_VALUES), + .writable = 1, + .configurable = 1, + }, }; diff -r 3e7f9e326219 -r 1c2313826b2b src/njs_builtin.c --- a/src/njs_builtin.c Fri Nov 06 11:41:32 2020 +0000 +++ b/src/njs_builtin.c Sun Oct 25 18:29:15 2020 +0300 @@ -85,6 +85,8 @@ static const njs_object_type_init_t *con /* Hidden types. */ + &njs_iterator_type_init, + &njs_array_iterator_type_init, &njs_dirent_type_init, &njs_hash_type_init, &njs_hmac_type_init, @@ -294,6 +296,10 @@ njs_builtin_objects_create(njs_vm_t *vm) constructor = shared->constructors; for (i = NJS_OBJ_TYPE_OBJECT; i < NJS_OBJ_TYPE_MAX; i++) { + if (njs_object_type_init[i]->constructor_props == NULL) { + continue; + } + constructor[i] = njs_object_type_init[i]->constructor; constructor[i].object.shared = 0; @@ -360,6 +366,9 @@ njs_builtin_objects_clone(njs_vm_t *vm, vm->prototypes[i].object.__proto__ = typed_array_prototype; } + vm->prototypes[NJS_OBJ_TYPE_ARRAY_ITERATOR].object.__proto__ = + &vm->prototypes[NJS_OBJ_TYPE_ITERATOR].object; + vm->prototypes[NJS_OBJ_TYPE_BUFFER].object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_UINT8_ARRAY].object; diff -r 3e7f9e326219 -r 1c2313826b2b src/njs_iterator.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/njs_iterator.c Sun Oct 25 18:29:15 2020 +0300 @@ -0,0 +1,299 @@ + +/* + * Copyright (C) Artem S. 
Povalyukhin + * Copyright (C) NGINX, Inc. + */ + + +#include + + +struct njs_value_iterator_s { + njs_value_t target; + int64_t next; + njs_object_enum_t kind; +}; + + +typedef struct njs_value_iterator_s njs_array_iterator_t; + + +static const njs_value_t string_done = njs_string("done"); +static const njs_value_t string_value = njs_string("value"); + + +njs_int_t +njs_array_iterator_create(njs_vm_t *vm, const njs_value_t *target, + njs_value_t *retval, njs_object_enum_t kind) +{ + njs_object_value_t *ov; + njs_array_iterator_t *it; + + ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t)); + if (njs_slow_path(ov == NULL)) { + njs_memory_error(vm); + return NJS_ERROR; + } + + njs_lvlhsh_init(&ov->object.hash); + njs_lvlhsh_init(&ov->object.shared_hash); + ov->object.type = NJS_OBJECT_VALUE; + ov->object.shared = 0; + ov->object.extensible = 1; + ov->object.error_data = 0; + ov->object.fast_array = 0; + + ov->object.__proto__ = + &vm->prototypes[NJS_OBJ_TYPE_ARRAY_ITERATOR].object; + ov->object.slots = NULL; + + it = njs_mp_alloc(vm->mem_pool, sizeof(njs_array_iterator_t)); + if (njs_slow_path(it == NULL)) { + njs_memory_error(vm); + return NJS_ERROR; + } + + /* GC retain it->target */ + it->target = *target; + it->next = 0; + it->kind = kind; + + njs_set_data(&ov->value, it, NJS_DATA_TAG_ARRAY_ITERATOR); + njs_set_object_value(retval, ov); + + return NJS_OK; +} + + +njs_int_t +njs_array_iterator_next(njs_vm_t *vm, njs_value_t *iterator, + njs_value_t *retval) +{ + int64_t length; + njs_int_t ret; + njs_array_t *array, *entry; + njs_typed_array_t *tarray; + const njs_value_t *value; + njs_array_iterator_t *it; + + if (njs_slow_path(!njs_is_valid(njs_object_value(iterator)))) { + return NJS_DECLINED; + } + + it = njs_object_data(iterator); + value = &njs_value_undefined; + + if (njs_is_fast_array(&it->target)) { + array = njs_array(&it->target); + length = array->length; + + if (it->next >= length) { + goto release; + } + + if (it->kind > NJS_ENUM_KEYS && 
njs_is_valid(&array->start[it->next])) { + value = &array->start[it->next]; + } + + } else if (njs_is_typed_array(&it->target)) { + tarray = njs_typed_array(&it->target); + + if (njs_slow_path(njs_is_detached_buffer(tarray->buffer))) { + njs_type_error(vm, "detached buffer"); + return NJS_ERROR; + } + + length = njs_typed_array_length(tarray); + + if (it->next >= length) { + goto release; + } + + if (it->kind > NJS_ENUM_KEYS) { + njs_set_number(retval, njs_typed_array_prop(tarray, it->next)); + value = retval; + } + + } else { + ret = njs_object_length(vm, &it->target, &length); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + if (it->next >= length) { + goto release; + } + + if (it->kind > NJS_ENUM_KEYS) { + ret = njs_value_property_i64(vm, &it->target, it->next, retval); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + value = njs_is_valid(retval) ? retval + : &njs_value_undefined; + } + } + + switch (it->kind) { + case NJS_ENUM_KEYS: + njs_set_number(retval, it->next++); + break; + + case NJS_ENUM_VALUES: + it->next++; + *retval = *value; + break; + + case NJS_ENUM_BOTH: + entry = njs_array_alloc(vm, 0, 2, 0); + if (njs_slow_path(entry == NULL)) { + return NJS_ERROR; + } + + njs_set_number(&entry->start[0], it->next++); + entry->start[1] = *value; + + njs_set_array(retval, entry); + break; + + default: + njs_internal_error(vm, "invalid enum kind"); + return NJS_ERROR; + } + + return NJS_OK; + +release: + + /* GC release it->target */ + njs_mp_free(vm->mem_pool, it); + njs_set_invalid(njs_object_value(iterator)); + + return NJS_DECLINED; +} + + +static njs_int_t +njs_iterator_prototype_get_this(njs_vm_t *vm, njs_value_t *args, + njs_uint_t nargs, njs_index_t unused) +{ + vm->retval = args[0]; + + return NJS_OK; +} + + +static const njs_object_prop_t njs_iterator_prototype_properties[] = +{ + { + .type = NJS_PROPERTY, + .name = njs_wellknown_symbol(NJS_SYMBOL_ITERATOR), + .value = njs_native_function(njs_iterator_prototype_get_this, 0), 
+ .configurable = 1, + .writable = 1, + }, +}; + + +static const njs_object_init_t njs_iterator_prototype_init = { + njs_iterator_prototype_properties, + njs_nitems(njs_iterator_prototype_properties), +}; + + +const njs_object_type_init_t njs_iterator_type_init = { + .prototype_props = &njs_iterator_prototype_init, + .prototype_value = { .object = { .type = NJS_OBJECT } }, +}; + + +static njs_int_t +njs_array_iterator_prototype_next(njs_vm_t *vm, njs_value_t *args, + njs_uint_t nargs, njs_index_t tag) +{ + njs_int_t ret; + njs_bool_t check; + njs_value_t *this; + njs_object_t *object; + njs_object_prop_t *prop_value, *prop_done; + + this = njs_argument(args, 0); + + check = njs_is_object_value(this) + && (njs_is_object_data(this, NJS_DATA_TAG_ARRAY_ITERATOR) + || !njs_is_valid(njs_object_value(this))); + + if (njs_slow_path(!check)) { + njs_type_error(vm, "Method [Array Iterator].prototype.next" + " called on incompatible receiver"); + return NJS_ERROR; + } + + object = njs_object_alloc(vm); + if (njs_slow_path(object == NULL)) { + return NJS_ERROR; + } + + njs_set_object(&vm->retval, object); + + prop_value = njs_object_property_add(vm, &vm->retval, + njs_value_arg(&string_value), 0); + if (njs_slow_path(prop_value == NULL)) { + return NJS_ERROR; + } + + prop_done = njs_object_property_add(vm, &vm->retval, + njs_value_arg(&string_done), 0); + if (njs_slow_path(prop_done == NULL)) { + return NJS_ERROR; + } + + ret = njs_array_iterator_next(vm, this, &prop_value->value); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + if (njs_slow_path(ret == NJS_DECLINED)) { + njs_set_undefined(&prop_value->value); + njs_set_boolean(&prop_done->value, 1); + + return NJS_OK; + } + + njs_set_boolean(&prop_done->value, 0); + + return NJS_OK; +} + + +static const njs_object_prop_t njs_array_iterator_prototype_properties[] = +{ + { + .type = NJS_PROPERTY, + .name = njs_string("next"), + .value = njs_native_function2(njs_array_iterator_prototype_next, 0, + 
NJS_DATA_TAG_ARRAY_ITERATOR), + .configurable = 1, + .writable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_wellknown_symbol(NJS_SYMBOL_TO_STRING_TAG), + .value = njs_string("Array Iterator"), + .configurable = 1, + }, +}; + + +static const njs_object_init_t njs_array_iterator_prototype_init = { + njs_array_iterator_prototype_properties, + njs_nitems(njs_array_iterator_prototype_properties), +}; + + +const njs_object_type_init_t njs_array_iterator_type_init = { + .prototype_props = &njs_array_iterator_prototype_init, + .prototype_value = { .object = { .type = NJS_OBJECT } }, +}; diff -r 3e7f9e326219 -r 1c2313826b2b src/njs_iterator.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/njs_iterator.h Sun Oct 25 18:29:15 2020 +0300 @@ -0,0 +1,21 @@ + +/* + * Copyright (C) Artem S. Povalyukhin + * Copyright (C) NGINX, Inc. + */ + +#ifndef _NJS_ITERATOR_H_INCLUDED_ +#define _NJS_ITERATOR_H_INCLUDED_ + + +njs_int_t njs_array_iterator_create(njs_vm_t *vm, const njs_value_t *src, + njs_value_t *dst, njs_object_enum_t kind); + +njs_int_t njs_array_iterator_next(njs_vm_t *vm, njs_value_t *iterator, + njs_value_t *retval); + + +extern const njs_object_type_init_t njs_iterator_type_init; +extern const njs_object_type_init_t njs_array_iterator_type_init; + +#endif /* _NJS_ITERATOR_H_INCLUDED_ */ diff -r 3e7f9e326219 -r 1c2313826b2b src/njs_main.h --- a/src/njs_main.h Fri Nov 06 11:41:32 2020 +0000 +++ b/src/njs_main.h Sun Oct 25 18:29:15 2020 +0300 @@ -71,6 +71,7 @@ #include #include #include +#include #include #include diff -r 3e7f9e326219 -r 1c2313826b2b src/njs_string.c --- a/src/njs_string.c Fri Nov 06 11:41:32 2020 +0000 +++ b/src/njs_string.c Sun Oct 25 18:29:15 2020 +0300 @@ -3845,6 +3845,24 @@ njs_string_prototype_replace(njs_vm_t *v } +static njs_int_t +njs_string_prototype_iterator_obj(njs_vm_t *vm, njs_value_t *args, + njs_uint_t nargs, njs_index_t kind) +{ + njs_int_t ret; + njs_value_t *this; + + this = njs_argument(args, 0); + + ret = 
njs_string_object_validate(vm, this); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + return njs_array_iterator_create(vm, this, &vm->retval, kind); +} + + double njs_string_to_number(const njs_value_t *value, njs_bool_t parse_float) { @@ -4327,6 +4345,15 @@ static const njs_object_prop_t njs_stri .writable = 1, .configurable = 1, }, + + { + .type = NJS_PROPERTY, + .name = njs_wellknown_symbol(NJS_SYMBOL_ITERATOR), + .value = njs_native_function2(njs_string_prototype_iterator_obj, 0, + NJS_ENUM_VALUES), + .writable = 1, + .configurable = 1, + }, }; diff -r 3e7f9e326219 -r 1c2313826b2b src/njs_typed_array.c --- a/src/njs_typed_array.c Fri Nov 06 11:41:32 2020 +0000 +++ b/src/njs_typed_array.c Sun Oct 25 18:29:15 2020 +0300 @@ -2184,6 +2184,29 @@ njs_typed_array_prototype_join(njs_vm_t static njs_int_t +njs_typed_array_prototype_iterator_obj(njs_vm_t *vm, njs_value_t *args, + njs_uint_t nargs, njs_index_t kind) +{ + njs_value_t *this; + njs_typed_array_t *array; + + this = njs_argument(args, 0); + if (njs_slow_path(!njs_is_typed_array(this))) { + njs_type_error(vm, "this is not a typed array"); + return NJS_ERROR; + } + + array = njs_typed_array(this); + if (njs_slow_path(njs_is_detached_buffer(array->buffer))) { + njs_type_error(vm, "detached buffer"); + return NJS_ERROR; + } + + return njs_array_iterator_create(vm, this, &vm->retval, kind); +} + + +static njs_int_t njs_typed_array_constructor_intrinsic(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { @@ -2326,6 +2349,15 @@ static const njs_object_prop_t njs_type { .type = NJS_PROPERTY, + .name = njs_string("entries"), + .value = njs_native_function2(njs_typed_array_prototype_iterator_obj, 0, + NJS_ENUM_BOTH), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, .name = njs_string("every"), .value = njs_native_function2(njs_typed_array_prototype_iterator, 1, NJS_ARRAY_EVERY), @@ -2403,6 +2435,15 @@ static const njs_object_prop_t njs_type { .type = NJS_PROPERTY, 
+ .name = njs_string("keys"), + .value = njs_native_function2(njs_typed_array_prototype_iterator_obj, 0, + NJS_ENUM_KEYS), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, .name = njs_string("lastIndexOf"), .value = njs_native_function2(njs_typed_array_prototype_index_of, 1, 2), .writable = 1, @@ -2490,6 +2531,24 @@ static const njs_object_prop_t njs_type .writable = 1, .configurable = 1, }, + + { + .type = NJS_PROPERTY, + .name = njs_string("values"), + .value = njs_native_function2(njs_typed_array_prototype_iterator_obj, 0, + NJS_ENUM_VALUES), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_wellknown_symbol(NJS_SYMBOL_ITERATOR), + .value = njs_native_function2(njs_typed_array_prototype_iterator_obj, 0, + NJS_ENUM_VALUES), + .writable = 1, + .configurable = 1, + }, }; diff -r 3e7f9e326219 -r 1c2313826b2b src/njs_value.h --- a/src/njs_value.h Fri Nov 06 11:41:32 2020 +0000 +++ b/src/njs_value.h Sun Oct 25 18:29:15 2020 +0300 @@ -84,6 +84,7 @@ typedef enum { NJS_DATA_TAG_CRYPTO_HMAC, NJS_DATA_TAG_TEXT_ENCODER, NJS_DATA_TAG_TEXT_DECODER, + NJS_DATA_TAG_ARRAY_ITERATOR, NJS_DATA_TAG_MAX } njs_data_tag_t; diff -r 3e7f9e326219 -r 1c2313826b2b src/njs_vm.h --- a/src/njs_vm.h Fri Nov 06 11:41:32 2020 +0000 +++ b/src/njs_vm.h Sun Oct 25 18:29:15 2020 +0300 @@ -89,7 +89,9 @@ typedef enum { NJS_OBJ_TYPE_TEXT_ENCODER, NJS_OBJ_TYPE_BUFFER, -#define NJS_OBJ_TYPE_HIDDEN_MIN (NJS_OBJ_TYPE_FS_DIRENT) +#define NJS_OBJ_TYPE_HIDDEN_MIN (NJS_OBJ_TYPE_ITERATOR) + NJS_OBJ_TYPE_ITERATOR, + NJS_OBJ_TYPE_ARRAY_ITERATOR, NJS_OBJ_TYPE_FS_DIRENT, NJS_OBJ_TYPE_CRYPTO_HASH, NJS_OBJ_TYPE_CRYPTO_HMAC, diff -r 3e7f9e326219 -r 1c2313826b2b src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Fri Nov 06 11:41:32 2020 +0000 +++ b/src/test/njs_unit_test.c Sun Oct 25 18:29:15 2020 +0300 @@ -6864,6 +6864,138 @@ static njs_unit_test_t njs_test[] = { njs_str("[1,2].sort(1)"), njs_str("TypeError: comparefn must be callable or undefined") }, + 
/* + Array.prototype.keys() + Array.prototype.values() + Array.prototype.entries() + */ + + { njs_str("['keys', 'values', 'entries', Symbol.iterator]" + ".every((x) => typeof Array.prototype[x] == 'function')"), + njs_str("true") }, + + { njs_str("['keys', 'values', 'entries', Symbol.iterator]" + ".every((x) => Array.prototype[x].length === 0)"), + njs_str("true") }, + +#if 0 + { njs_str("Array.prototype[Symbol.iterator] === Array.prototype.values"), + njs_str("true") }, +#endif + + { njs_str("['keys', 'values', 'entries', Symbol.iterator]" + ".every((x) => typeof [][x]() == 'object')"), + njs_str("true") }, + + { njs_str("['keys', 'values', 'entries', Symbol.iterator]" + ".every((x) => typeof [][x]().next == 'function')"), + njs_str("true") }, + + { njs_str("var i = [1,2,3].keys();" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), + njs_str("0,1,2,") }, + + { njs_str("var i = [1,2,3].values();" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), + njs_str("1,2,3,") }, + + { njs_str("var a = [], i = a.values();" + "a.push(1); a.push(2); a.push(3);" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), + njs_str("1,2,3,") }, + + { njs_str("var a = [], i = a.values(); i.next();" + "a.push(1); a.push(2); a.push(3);" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), + njs_str(",,,") }, + + { njs_str("var i = [1,2,3].entries();" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), + njs_str("0,1,1,2,2,3,") }, + + { njs_str("var i = Array.prototype.keys.call('abc');" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.done)"), + njs_str("false,false,false,true") }, + + { njs_str("var i = Array.prototype.values.call('abc');" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), + njs_str("a,b,c,") }, + + { njs_str("var x = [true, 1, Symbol()];" + "x.map((x) => Array.prototype.keys.call(x).next()).every((x) => x.done)"), + njs_str("true") }, + + { njs_str("var x = [true, 1, 
Symbol()];" + "x.forEach((x) => Object.getPrototypeOf(Object(x)).length = 1);" + "x.map((x) => Array.prototype.keys.call(x).next()).every((x) => !x.done)"), + njs_str("true") }, + + /* + TypedArray.prototype.keys() + TypedArray.prototype.values() + TypedArray.prototype.entries() + */ + + { njs_str("['keys', 'values', 'entries', Symbol.iterator]" + ".every((x) => typeof Buffer.prototype[x] == 'function')"), + njs_str("true") }, + + { njs_str("var i = Buffer.from([1,2,3]).keys();" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), + njs_str("0,1,2,") }, + + { njs_str("var i = Buffer.from([1,2,3]).values();" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), + njs_str("1,2,3,") }, + + { njs_str("var i = Buffer.from([1,2,3]).entries();" + "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), + njs_str("0,1,1,2,2,3,") }, + + { njs_str("[true, 1, Symbol(), 'test', [], { length: 1 }]" + ".map((x) => { try { Buffer.prototype.keys.call(x); return x; } catch (e) { return e; } })" + ".every((x) => x instanceof TypeError)"), + njs_str("true") }, + + /* %IteratorPrototype% */ + + { njs_str("var x = Object.getPrototypeOf(Object.getPrototypeOf([].keys()));" + "typeof x[Symbol.iterator] == 'function'"), + njs_str("true") }, + + { njs_str("var x = Object.getPrototypeOf(Object.getPrototypeOf([].keys()));" + "x[Symbol.iterator]() === x"), + njs_str("true") }, + + /* %ArrayIteratorPrototype% */ + + { njs_str("var x = Object.getPrototypeOf([].keys());" + "typeof x.next == 'function'"), + njs_str("true") }, + + { njs_str("var x = Object.getPrototypeOf([].keys());" + "x[Symbol.toStringTag] == 'Array Iterator'"), + njs_str("true") }, + + /* %StringIteratorPrototype% */ + + { njs_str("typeof String.prototype[Symbol.iterator] == 'function'"), + njs_str("true") }, + + { njs_str("var x = Object.getPrototypeOf(''[Symbol.iterator]());" + "typeof x.next == 'function'"), + njs_str("true") }, + +#if 0 + { njs_str("var x = 
Object.getPrototypeOf(''[Symbol.iterator]());" + "x[Symbol.toStringTag] == 'String Iterator'"), + njs_str("true") }, +#else + { njs_str("var x = Object.getPrototypeOf(''[Symbol.iterator]());" + "x[Symbol.toStringTag] == 'Array Iterator'"), + njs_str("true") }, +#endif + /* Template literal. */ { njs_str("`"), From vl at nginx.com Fri Nov 6 16:44:30 2020 From: vl at nginx.com (Vladimir Homutov) Date: Fri, 06 Nov 2020 16:44:30 +0000 Subject: [nginx] Version bump. Message-ID: details: https://hg.nginx.org/nginx/rev/908f48bd3c2f branches: changeset: 7735:908f48bd3c2f user: Vladimir Homutov date: Thu Nov 05 22:37:27 2020 +0300 description: Version bump. diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r df41a6b6c2aa -r 908f48bd3c2f src/core/nginx.h --- a/src/core/nginx.h Tue Oct 27 18:09:20 2020 +0300 +++ b/src/core/nginx.h Thu Nov 05 22:37:27 2020 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1019004 -#define NGINX_VERSION "1.19.4" +#define nginx_version 1019005 +#define NGINX_VERSION "1.19.5" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From vl at nginx.com Fri Nov 6 16:44:33 2020 From: vl at nginx.com (Vladimir Homutov) Date: Fri, 06 Nov 2020 16:44:33 +0000 Subject: [nginx] Core: added format specifiers to output binary data as hex. Message-ID: details: https://hg.nginx.org/nginx/rev/a46fcf101cfc branches: changeset: 7736:a46fcf101cfc user: Vladimir Homutov date: Wed Oct 28 10:56:11 2020 +0300 description: Core: added format specifiers to output binary data as hex. Now "s", "V", and "v" format specifiers may be prefixed with "x" (lowercase) or "X" (uppercase) to output corresponding data in hexadecimal format. In collaboration with Maxim Dounin. 
diffstat: src/core/ngx_string.c | 87 +++++++++++++++++++++++++------- src/event/ngx_event_openssl.c | 16 ++--- src/event/ngx_event_openssl_stapling.c | 12 +--- src/http/modules/ngx_http_grpc_module.c | 38 +++---------- 4 files changed, 86 insertions(+), 67 deletions(-) diffs (262 lines): diff -r 908f48bd3c2f -r a46fcf101cfc src/core/ngx_string.c --- a/src/core/ngx_string.c Thu Nov 05 22:37:27 2020 +0300 +++ b/src/core/ngx_string.c Wed Oct 28 10:56:11 2020 +0300 @@ -11,6 +11,8 @@ static u_char *ngx_sprintf_num(u_char *buf, u_char *last, uint64_t ui64, u_char zero, ngx_uint_t hexadecimal, ngx_uint_t width); +static u_char *ngx_sprintf_str(u_char *buf, u_char *last, u_char *src, + size_t len, ngx_uint_t hexadecimal); static void ngx_encode_base64_internal(ngx_str_t *dst, ngx_str_t *src, const u_char *basis, ngx_uint_t padding); static ngx_int_t ngx_decode_base64_internal(ngx_str_t *dst, ngx_str_t *src, @@ -101,10 +103,10 @@ ngx_pstrdup(ngx_pool_t *pool, ngx_str_t * %M ngx_msec_t * %r rlim_t * %p void * - * %V ngx_str_t * - * %v ngx_variable_value_t * - * %s null-terminated string - * %*s length and string + * %[x|X]V ngx_str_t * + * %[x|X]v ngx_variable_value_t * + * %[x|X]s null-terminated string + * %*[x|X]s length and string * %Z '\0' * %N '\n' * %c char @@ -165,7 +167,7 @@ ngx_vslprintf(u_char *buf, u_char *last, u_char *p, zero; int d; double f; - size_t len, slen; + size_t slen; int64_t i64; uint64_t ui64, frac; ngx_msec_t ms; @@ -250,8 +252,7 @@ ngx_vslprintf(u_char *buf, u_char *last, case 'V': v = va_arg(args, ngx_str_t *); - len = ngx_min(((size_t) (last - buf)), v->len); - buf = ngx_cpymem(buf, v->data, len); + buf = ngx_sprintf_str(buf, last, v->data, v->len, hex); fmt++; continue; @@ -259,8 +260,7 @@ ngx_vslprintf(u_char *buf, u_char *last, case 'v': vv = va_arg(args, ngx_variable_value_t *); - len = ngx_min(((size_t) (last - buf)), vv->len); - buf = ngx_cpymem(buf, vv->data, len); + buf = ngx_sprintf_str(buf, last, vv->data, vv->len, hex); fmt++; 
continue; @@ -268,16 +268,7 @@ ngx_vslprintf(u_char *buf, u_char *last, case 's': p = va_arg(args, u_char *); - if (slen == (size_t) -1) { - while (*p && buf < last) { - *buf++ = *p++; - } - - } else { - len = ngx_min(((size_t) (last - buf)), slen); - buf = ngx_cpymem(buf, p, len); - } - + buf = ngx_sprintf_str(buf, last, p, slen, hex); fmt++; continue; @@ -576,6 +567,64 @@ ngx_sprintf_num(u_char *buf, u_char *las } +static u_char * +ngx_sprintf_str(u_char *buf, u_char *last, u_char *src, size_t len, + ngx_uint_t hexadecimal) +{ + static u_char hex[] = "0123456789abcdef"; + static u_char HEX[] = "0123456789ABCDEF"; + + if (hexadecimal == 0) { + + if (len == (size_t) -1) { + while (*src && buf < last) { + *buf++ = *src++; + } + + } else { + len = ngx_min((size_t) (last - buf), len); + buf = ngx_cpymem(buf, src, len); + } + + } else if (hexadecimal == 1) { + + if (len == (size_t) -1) { + + while (*src && buf < last - 1) { + *buf++ = hex[*src >> 4]; + *buf++ = hex[*src++ & 0xf]; + } + + } else { + + while (len-- && buf < last - 1) { + *buf++ = hex[*src >> 4]; + *buf++ = hex[*src++ & 0xf]; + } + } + + } else { /* hexadecimal == 2 */ + + if (len == (size_t) -1) { + + while (*src && buf < last - 1) { + *buf++ = HEX[*src >> 4]; + *buf++ = HEX[*src++ & 0xf]; + } + + } else { + + while (len-- && buf < last - 1) { + *buf++ = HEX[*src >> 4]; + *buf++ = HEX[*src++ & 0xf]; + } + } + } + + return buf; +} + + /* * We use ngx_strcasecmp()/ngx_strncasecmp() for 7-bit ASCII strings only, * and implement our own ngx_strcasecmp()/ngx_strncasecmp() diff -r 908f48bd3c2f -r a46fcf101cfc src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Thu Nov 05 22:37:27 2020 +0300 +++ b/src/event/ngx_event_openssl.c Wed Oct 28 10:56:11 2020 +0300 @@ -4057,9 +4057,6 @@ ngx_ssl_session_ticket_key_callback(ngx_ ngx_ssl_session_ticket_key_t *key; const EVP_MD *digest; const EVP_CIPHER *cipher; -#if (NGX_DEBUG) - u_char buf[32]; -#endif c = ngx_ssl_get_connection(ssl_conn); ssl_ctx = 
c->ssl->session_ctx; @@ -4081,8 +4078,8 @@ ngx_ssl_session_ticket_key_callback(ngx_ /* encrypt session ticket */ ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "ssl session ticket encrypt, key: \"%*s\" (%s session)", - ngx_hex_dump(buf, key[0].name, 16) - buf, buf, + "ssl session ticket encrypt, key: \"%*xs\" (%s session)", + (size_t) 16, key[0].name, SSL_session_reused(ssl_conn) ? "reused" : "new"); if (key[0].size == 48) { @@ -4128,17 +4125,16 @@ ngx_ssl_session_ticket_key_callback(ngx_ } ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, - "ssl session ticket decrypt, key: \"%*s\" not found", - ngx_hex_dump(buf, name, 16) - buf, buf); + "ssl session ticket decrypt, key: \"%*xs\" not found", + (size_t) 16, name); return 0; found: ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, - "ssl session ticket decrypt, key: \"%*s\"%s", - ngx_hex_dump(buf, key[i].name, 16) - buf, buf, - (i == 0) ? " (default)" : ""); + "ssl session ticket decrypt, key: \"%*xs\"%s", + (size_t) 16, key[i].name, (i == 0) ? 
" (default)" : ""); if (key[i].size == 48) { cipher = EVP_aes_128_cbc(); diff -r 908f48bd3c2f -r a46fcf101cfc src/event/ngx_event_openssl_stapling.c --- a/src/event/ngx_event_openssl_stapling.c Thu Nov 05 22:37:27 2020 +0300 +++ b/src/event/ngx_event_openssl_stapling.c Wed Oct 28 10:56:11 2020 +0300 @@ -2662,16 +2662,8 @@ ngx_ssl_ocsp_create_key(ngx_ssl_ocsp_ctx p = ngx_cpymem(p, serial->data, serial->length); ngx_memzero(p, 20 - serial->length); -#if (NGX_DEBUG) - { - u_char buf[120]; - - ngx_hex_dump(buf, ctx->key.data, ctx->key.len); - - ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ctx->log, 0, - "ssl ocsp key %*s", sizeof(buf), buf); - } -#endif + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp key %xV", &ctx->key); return NGX_OK; } diff -r 908f48bd3c2f -r a46fcf101cfc src/http/modules/ngx_http_grpc_module.c --- a/src/http/modules/ngx_http_grpc_module.c Thu Nov 05 22:37:27 2020 +0300 +++ b/src/http/modules/ngx_http_grpc_module.c Wed Oct 28 10:56:11 2020 +0300 @@ -1141,20 +1141,11 @@ ngx_http_grpc_create_request(ngx_http_re f->flags |= NGX_HTTP_V2_END_HEADERS_FLAG; -#if (NGX_DEBUG) - if (r->connection->log->log_level & NGX_LOG_DEBUG_HTTP) { - u_char buf[512]; - size_t n, m; - - n = ngx_min(b->last - b->pos, 256); - m = ngx_hex_dump(buf, b->pos, n) - buf; - - ngx_log_debug4(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "grpc header: %*s%s, len: %uz", - m, buf, b->last - b->pos > 256 ? "..." : "", - b->last - b->pos); - } -#endif + ngx_log_debug4(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "grpc header: %*xs%s, len: %uz", + (size_t) ngx_min(b->last - b->pos, 256), b->pos, + b->last - b->pos > 256 ? "..." 
: "", + b->last - b->pos); if (r->request_body_no_buffering) { @@ -1604,20 +1595,11 @@ ngx_http_grpc_process_header(ngx_http_re u = r->upstream; b = &u->buffer; -#if (NGX_DEBUG) - if (r->connection->log->log_level & NGX_LOG_DEBUG_HTTP) { - u_char buf[512]; - size_t n, m; - - n = ngx_min(b->last - b->pos, 256); - m = ngx_hex_dump(buf, b->pos, n) - buf; - - ngx_log_debug4(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "grpc response: %*s%s, len: %uz", - m, buf, b->last - b->pos > 256 ? "..." : "", - b->last - b->pos); - } -#endif + ngx_log_debug4(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "grpc response: %*xs%s, len: %uz", + (size_t) ngx_min(b->last - b->pos, 256), + b->pos, b->last - b->pos > 256 ? "..." : "", + b->last - b->pos); ctx = ngx_http_grpc_get_ctx(r); From ru at nginx.com Fri Nov 6 20:45:19 2020 From: ru at nginx.com (Ruslan Ermilov) Date: Fri, 06 Nov 2020 20:45:19 +0000 Subject: [nginx] Removed dead code from ngx_http_set_keepalive(). Message-ID: details: https://hg.nginx.org/nginx/rev/ed17a2a95c8d branches: changeset: 7737:ed17a2a95c8d user: Ruslan Ermilov date: Fri Nov 06 23:44:47 2020 +0300 description: Removed dead code from ngx_http_set_keepalive(). The code removed became dead after 98f03cd8d6cc (0.8.14), circa when the request reference counting was introduced. 
diffstat: src/http/ngx_http_request.c | 7 ------- 1 files changed, 0 insertions(+), 7 deletions(-) diffs (17 lines): diff -r a46fcf101cfc -r ed17a2a95c8d src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Wed Oct 28 10:56:11 2020 +0300 +++ b/src/http/ngx_http_request.c Fri Nov 06 23:44:47 2020 +0300 @@ -3039,13 +3039,6 @@ ngx_http_set_keepalive(ngx_http_request_ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "set http keepalive handler"); - if (r->discard_body) { - r->write_event_handler = ngx_http_request_empty_handler; - r->lingering_time = ngx_time() + (time_t) (clcf->lingering_time / 1000); - ngx_add_timer(rev, clcf->lingering_timeout); - return; - } - c->log->action = "closing request"; hc = r->http_connection; From ru at nginx.com Fri Nov 6 20:45:22 2020 From: ru at nginx.com (Ruslan Ermilov) Date: Fri, 06 Nov 2020 20:45:22 +0000 Subject: [nginx] SSL: fixed non-working SSL shutdown on lingering close. Message-ID: details: https://hg.nginx.org/nginx/rev/554c6ae25ffc branches: changeset: 7738:554c6ae25ffc user: Ruslan Ermilov date: Fri Nov 06 23:44:54 2020 +0300 description: SSL: fixed non-working SSL shutdown on lingering close. When doing lingering close, the socket was first shut down for writing, so SSL shutdown initiated after lingering close was not able to send the close_notify alerts (ticket #2056). The fix is to call ngx_ssl_shutdown() before shutting down the socket. 
diffstat: src/http/ngx_http_request.c | 39 +++++++++++++++++++++++++++++------- src/http/ngx_http_request_body.c | 1 + src/http/v2/ngx_http_v2.c | 42 +++++++++++++++++++++++++++++++-------- 3 files changed, 65 insertions(+), 17 deletions(-) diffs (185 lines): diff -r ed17a2a95c8d -r 554c6ae25ffc src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Fri Nov 06 23:44:47 2020 +0300 +++ b/src/http/ngx_http_request.c Fri Nov 06 23:44:54 2020 +0300 @@ -49,7 +49,7 @@ static void ngx_http_request_finalizer(n static void ngx_http_set_keepalive(ngx_http_request_t *r); static void ngx_http_keepalive_handler(ngx_event_t *ev); -static void ngx_http_set_lingering_close(ngx_http_request_t *r); +static void ngx_http_set_lingering_close(ngx_connection_t *c); static void ngx_http_lingering_close_handler(ngx_event_t *ev); static ngx_int_t ngx_http_post_action(ngx_http_request_t *r); static void ngx_http_close_request(ngx_http_request_t *r, ngx_int_t error); @@ -2754,7 +2754,7 @@ ngx_http_finalize_connection(ngx_http_re || r->header_in->pos < r->header_in->last || r->connection->read->ready))) { - ngx_http_set_lingering_close(r); + ngx_http_set_lingering_close(r->connection); return; } @@ -3368,22 +3368,43 @@ ngx_http_keepalive_handler(ngx_event_t * static void -ngx_http_set_lingering_close(ngx_http_request_t *r) +ngx_http_set_lingering_close(ngx_connection_t *c) { ngx_event_t *rev, *wev; - ngx_connection_t *c; + ngx_http_request_t *r; ngx_http_core_loc_conf_t *clcf; - c = r->connection; + r = c->data; clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + if (r->lingering_time == 0) { + r->lingering_time = ngx_time() + (time_t) (clcf->lingering_time / 1000); + } + +#if (NGX_HTTP_SSL) + if (c->ssl) { + ngx_int_t rc; + + rc = ngx_ssl_shutdown(c); + + if (rc == NGX_ERROR) { + ngx_http_close_request(r, 0); + return; + } + + if (rc == NGX_AGAIN) { + c->ssl->handler = ngx_http_set_lingering_close; + return; + } + + c->recv = ngx_recv; + } +#endif + rev = c->read; 
rev->handler = ngx_http_lingering_close_handler; - r->lingering_time = ngx_time() + (time_t) (clcf->lingering_time / 1000); - ngx_add_timer(rev, clcf->lingering_timeout); - if (ngx_handle_read_event(rev, 0) != NGX_OK) { ngx_http_close_request(r, 0); return; @@ -3406,6 +3427,8 @@ ngx_http_set_lingering_close(ngx_http_re return; } + ngx_add_timer(rev, clcf->lingering_timeout); + if (rev->ready) { ngx_http_lingering_close_handler(rev); } diff -r ed17a2a95c8d -r 554c6ae25ffc src/http/ngx_http_request_body.c --- a/src/http/ngx_http_request_body.c Fri Nov 06 23:44:47 2020 +0300 +++ b/src/http/ngx_http_request_body.c Fri Nov 06 23:44:54 2020 +0300 @@ -674,6 +674,7 @@ ngx_http_discarded_request_body_handler( if (rc == NGX_OK) { r->discard_body = 0; r->lingering_close = 0; + r->lingering_time = 0; ngx_http_finalize_request(r, NGX_DONE); return; } diff -r ed17a2a95c8d -r 554c6ae25ffc src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Fri Nov 06 23:44:47 2020 +0300 +++ b/src/http/v2/ngx_http_v2.c Fri Nov 06 23:44:54 2020 +0300 @@ -60,7 +60,7 @@ typedef struct { static void ngx_http_v2_read_handler(ngx_event_t *rev); static void ngx_http_v2_write_handler(ngx_event_t *wev); static void ngx_http_v2_handle_connection(ngx_http_v2_connection_t *h2c); -static void ngx_http_v2_lingering_close(ngx_http_v2_connection_t *h2c); +static void ngx_http_v2_lingering_close(ngx_connection_t *c); static void ngx_http_v2_lingering_close_handler(ngx_event_t *rev); static u_char *ngx_http_v2_state_proxy_protocol(ngx_http_v2_connection_t *h2c, @@ -664,7 +664,7 @@ ngx_http_v2_handle_connection(ngx_http_v } if (h2c->goaway) { - ngx_http_v2_lingering_close(h2c); + ngx_http_v2_lingering_close(c); return; } @@ -703,13 +703,13 @@ ngx_http_v2_handle_connection(ngx_http_v static void -ngx_http_v2_lingering_close(ngx_http_v2_connection_t *h2c) +ngx_http_v2_lingering_close(ngx_connection_t *c) { ngx_event_t *rev, *wev; - ngx_connection_t *c; + ngx_http_v2_connection_t *h2c; ngx_http_core_loc_conf_t 
*clcf; - c = h2c->connection; + h2c = c->data; clcf = ngx_http_get_module_loc_conf(h2c->http_connection->conf_ctx, ngx_http_core_module); @@ -719,12 +719,34 @@ ngx_http_v2_lingering_close(ngx_http_v2_ return; } + if (h2c->lingering_time == 0) { + h2c->lingering_time = ngx_time() + + (time_t) (clcf->lingering_time / 1000); + } + +#if (NGX_HTTP_SSL) + if (c->ssl) { + ngx_int_t rc; + + rc = ngx_ssl_shutdown(c); + + if (rc == NGX_ERROR) { + ngx_http_close_connection(c); + return; + } + + if (rc == NGX_AGAIN) { + c->ssl->handler = ngx_http_v2_lingering_close; + return; + } + + c->recv = ngx_recv; + } +#endif + rev = c->read; rev->handler = ngx_http_v2_lingering_close_handler; - h2c->lingering_time = ngx_time() + (time_t) (clcf->lingering_time / 1000); - ngx_add_timer(rev, clcf->lingering_timeout); - if (ngx_handle_read_event(rev, 0) != NGX_OK) { ngx_http_close_connection(c); return; @@ -747,6 +769,8 @@ ngx_http_v2_lingering_close(ngx_http_v2_ return; } + ngx_add_timer(rev, clcf->lingering_timeout); + if (rev->ready) { ngx_http_v2_lingering_close_handler(rev); } @@ -4757,7 +4781,7 @@ done: return; } - ngx_http_v2_lingering_close(h2c); + ngx_http_v2_lingering_close(c); } From vbt at anche.no Sat Nov 7 08:05:34 2020 From: vbt at anche.no (vbt at anche.no) Date: Sat, 07 Nov 2020 13:35:34 +0530 Subject: [PATCH] autoindex : Add viewport meta tag for responsive scaling to narrow screen devices Message-ID: # HG changeset patch # User Var Bhat # Date 1604734673 -19800 # Sat Nov 07 13:07:53 2020 +0530 # Node ID 703eb6ae6cbf2ad87620d8ca4e0788be7e121538 # Parent 554c6ae25ffc634e29adae8b51bef6ddabebea39 ngx_http_autoindex_module: Added viewport meta tag to autoindex html rendering Previously , Auto Indexing Directory didn't include viewport meta tag which caused html not to scale to the screen width . This caused Narrow screen devices (e.g. 
mobiles) render pages in a virtual window or viewport, which is usually wider than the screen, and then shrink the rendered result down so it can all be seen at once. This required frequent zooming and scrolling as things looked small. This made autoindexing unresponsive on mobile and other narrow screen devices. This patch makes autoindexing mobile responsive. diff -r 554c6ae25ffc -r 703eb6ae6cbf src/http/modules/ngx_http_autoindex_module.c --- a/src/http/modules/ngx_http_autoindex_module.c Fri Nov 06 23:44:54 2020 +0300 +++ b/src/http/modules/ngx_http_autoindex_module.c Sat Nov 07 13:07:53 2020 +0530 @@ -445,7 +445,8 @@ static u_char title[] = "" CRLF - "Index of " + "<head><meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">" CRLF + "<title>Index of " ; static u_char header[] = From xeioex at nginx.com Mon Nov 9 11:55:18 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 09 Nov 2020 11:55:18 +0000 Subject: [njs] Removed unused key field from njs_hmac_t. Message-ID: <hg.6dd867b1f0fd.1604922918.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/6dd867b1f0fd branches: changeset: 1562:6dd867b1f0fd user: Dmitry Volyntsev <xeioex at nginx.com> date: Mon Nov 09 11:54:55 2020 +0000 description: Removed unused key field from njs_hmac_t. diffstat: src/njs_crypto.c | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diffs (11 lines): diff -r 1c2313826b2b -r 6dd867b1f0fd src/njs_crypto.c --- a/src/njs_crypto.c Sun Oct 25 18:29:15 2020 +0300 +++ b/src/njs_crypto.c Mon Nov 09 11:54:55 2020 +0000 @@ -36,7 +36,6 @@ typedef struct { } njs_digest_t; typedef struct { - njs_str_t key; u_char opad[64]; union { From mdounin at mdounin.ru Mon Nov 9 21:21:39 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 09 Nov 2020 21:21:39 +0000 Subject: [nginx] Request body: improved logging. 
Message-ID: <hg.4d5b04daeaff.1604956899.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/4d5b04daeaff branches: changeset: 7739:4d5b04daeaff user: Maxim Dounin <mdounin at mdounin.ru> date: Mon Nov 09 22:40:53 2020 +0300 description: Request body: improved logging. Added logging before returning NGX_HTTP_INTERNAL_SERVER_ERROR if there are busy buffers after a request body flush. This should never happen with current code, though bugs can be introduced by 3rd party modules. Make sure debugging will be easy enough. diffstat: src/http/ngx_http_request_body.c | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diffs (13 lines): diff -r 554c6ae25ffc -r 4d5b04daeaff src/http/ngx_http_request_body.c --- a/src/http/ngx_http_request_body.c Fri Nov 06 23:44:54 2020 +0300 +++ b/src/http/ngx_http_request_body.c Mon Nov 09 22:40:53 2020 +0300 @@ -305,6 +305,9 @@ ngx_http_do_read_client_request_body(ngx return NGX_AGAIN; } + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "busy buffers after request body flush"); + return NGX_HTTP_INTERNAL_SERVER_ERROR; } From mdounin at mdounin.ru Mon Nov 9 21:21:42 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 09 Nov 2020 21:21:42 +0000 Subject: [nginx] Request body: removed error assumption (ticket #2058). Message-ID: <hg.967cfa6e2ff8.1604956902.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/967cfa6e2ff8 branches: changeset: 7740:967cfa6e2ff8 user: Maxim Dounin <mdounin at mdounin.ru> date: Mon Nov 09 22:41:54 2020 +0300 description: Request body: removed error assumption (ticket #2058). Before introduction of request body filter in 42d9beeb22db, the only possible return code from the ngx_http_request_body_filter() call without actual buffers was NGX_HTTP_INTERNAL_SERVER_ERROR, and the code in ngx_http_read_client_request_body() hardcoded the only possible error to simplify the code of initial call to set rb->rest. 
This is no longer true after introduction of request body filters though, as a request body filter might need to return other errors, such as 403. Fix is to preserve the error code actually returned by the call instead of assuming 500. diffstat: src/http/ngx_http_request_body.c | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diffs (15 lines): diff -r 4d5b04daeaff -r 967cfa6e2ff8 src/http/ngx_http_request_body.c --- a/src/http/ngx_http_request_body.c Mon Nov 09 22:40:53 2020 +0300 +++ b/src/http/ngx_http_request_body.c Mon Nov 09 22:41:54 2020 +0300 @@ -137,8 +137,9 @@ ngx_http_read_client_request_body(ngx_ht } else { /* set rb->rest */ - if (ngx_http_request_body_filter(r, NULL) != NGX_OK) { - rc = NGX_HTTP_INTERNAL_SERVER_ERROR; + rc = ngx_http_request_body_filter(r, NULL); + + if (rc != NGX_OK) { goto done; } } From jmheisz at gmail.com Tue Nov 10 01:54:43 2020 From: jmheisz at gmail.com (Jeff Heisz) Date: Mon, 9 Nov 2020 20:54:43 -0500 Subject: One last try - large long-running worker tasks Message-ID: <CAHdKRjtQtRYemLpazN=9Z8exmSndG6qsvioajZ2zvgdQKn6xHA@mail.gmail.com> Hi all, I've asked this before with no response, trying one last time before I just make something work. I'm making a custom module for nginx that does a number of things but one of the actions is a long-running (in the nginx sense) task that could produce a large response. I've already got proper processing around using worker tasks for the other long-running operations that have small datasets, but worry about accumulating a large amount of memory in a buffer chain for the response. Ideally it would drain as fast as the client can consume it and throttle appropriately, there could conceivably be gigabytes of content. 
My choices (besides blowing all of the memory in the system) are: - write to a temporary file and attach a file buffer as the response, less than ideal as it's essentially translating a file to begin with, so it's a lot of disk I/O and performance will be less than stellar. >From what I can tell, this is one of the models for the various CGI systems for caching, although in my case caching is not of use - somehow hook into the eventing system of nginx to detect the write transitions and implement flow control directly using threading conditionals. I've tried this for a few weeks but can't figure out the 'right' thing to make the hooks work in a separate module without changing the core nginx code, which I'm loathe to do (unless you are looking for someone to contribute such a solution, but I'd probably need some initial guidance) - attach a kernel pipe object (yah yah, won't work on Windows, don't care) to each of my worker instances and somehow 'connect' that as an upstream-like resource, so that the nginx event loop handles the read/write consumption and the thread automatically blocks when full on the kernel pipe. Would need some jiggery to handle reuse and start/end markers. Also not clear if I can override the connection model for the upstream without again changing core nginx server code Any thoughts? Not looking for code here (although telling me to look at the blah-blah-blah that does exactly this would be awesome), but if someone who is more familiar with the inner workings of the nginx data flow could just say which solution is a non-starter (so I don't waste time trying to make it work) or even which would be a suitable solution would be awesome! jmh From serg.brester at sebres.de Tue Nov 10 12:48:29 2020 From: serg.brester at sebres.de (Dipl. Ing. 
Sergey Brester) Date: Tue, 10 Nov 2020 13:48:29 +0100 Subject: One last try - large long-running worker tasks In-Reply-To: <CAHdKRjtQtRYemLpazN=9Z8exmSndG6qsvioajZ2zvgdQKn6xHA@mail.gmail.com> References: <CAHdKRjtQtRYemLpazN=9Z8exmSndG6qsvioajZ2zvgdQKn6xHA@mail.gmail.com> Message-ID: <20b8d00abb04ba4ca7f733180e028b54@sebres.de> You could do it similar proxy module is buffering the response, for instance see proxy_buffering [2] directive: _When buffering is enabled, nginx receives a response from the proxied server as soon as possible, saving it into the buffers set by the proxy_buffer_size [3] and proxy_buffers [4] directives. If the whole response does not fit into memory, a part of it can be saved to a temporary file [5] on the disk. Writing to temporary files is controlled by the proxy_max_temp_file_size [6] and proxy_temp_file_write_size [7] directives._ This or other communicating modules (like fcgi, scgi or uwsgi) using upstream buffering of response. The handling around buffering of upstream is almost the same in all modules. This is already event-driven - handler is called on readable, by incoming response chunk (or on writable of downstream). Basically depending on how your module architecture is built, you could: * either use default upstream buffering mechanism (if you have something like upstream or can simulate that). In thin case you have to set certain properties of r->upstream: buffering, buffer_size, bufs.num and bufs.size, temp_file_write_size and max_temp_file_size and of course register the handler reading the upstream pipe. * or organize your own response buffering as it is implemented in ngx_event_pipe.c and ngx_http_upstream.c, take a look there for implementation details. As for performance (disk I/O, etc) - it depends (buffer size, system cache, mount type of temp storage, speed of clients downstream, etc). 
But if you would configure the buffers large enough, nginx could use it as long as possible and the storing in temp file can be considered as safe on demand fallback to smooth out the peak of load, to avoid OOM situation. Usage a kernel pipe buffers could be surely faster, but indirect you'd just relocate the potential OOM issue from nginx process to the system. Regards, Sergey 10.11.2020 02:54, Jeff Heisz wrote: > Hi all, I've asked this before with no response, trying one last time > before I just make something work. > > I'm making a custom module for nginx that does a number of things but > one of the actions is a long-running (in the nginx sense) task that > could produce a large response. I've already got proper processing > around using worker tasks for the other long-running operations that > have small datasets, but worry about accumulating a large amount of > memory in a buffer chain for the response. Ideally it would drain as > fast as the client can consume it and throttle appropriately, there > could conceivably be gigabytes of content. > > My choices (besides blowing all of the memory in the system) are: > > - write to a temporary file and attach a file buffer as the response, > less than ideal as it's essentially translating a file to begin with, > so it's a lot of disk I/O and performance will be less than stellar. > From what I can tell, this is one of the models for the various CGI > systems for caching, although in my case caching is not of use > > - somehow hook into the eventing system of nginx to detect the write > transitions and implement flow control directly using threading > conditionals. 
I've tried this for a few weeks but can't figure out > the 'right' thing to make the hooks work in a separate module without > changing the core nginx code, which I'm loathe to do (unless you are > looking for someone to contribute such a solution, but I'd probably > need some initial guidance) > > - attach a kernel pipe object (yah yah, won't work on Windows, don't > care) to each of my worker instances and somehow 'connect' that as an > upstream-like resource, so that the nginx event loop handles the > read/write consumption and the thread automatically blocks when full > on the kernel pipe. Would need some jiggery to handle reuse and > start/end markers. Also not clear if I can override the connection > model for the upstream without again changing core nginx server code > > Any thoughts? Not looking for code here (although telling me to look > at the blah-blah-blah that does exactly this would be awesome), but if > someone who is more familiar with the inner workings of the nginx data > flow could just say which solution is a non-starter (so I don't waste > time trying to make it work) or even which would be a suitable > solution would be awesome! > > jmh > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel [1] Links: ------ [1] http://mailman.nginx.org/mailman/listinfo/nginx-devel [2] http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering [3] http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size [4] http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers [5] http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_path [6] http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_max_temp_file_size [7] http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_file_write_size -------------- next part -------------- An HTML attachment was scrubbed... 
URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201110/71df6da4/attachment.htm> From jmheisz at gmail.com Tue Nov 10 13:34:03 2020 From: jmheisz at gmail.com (Jeff Heisz) Date: Tue, 10 Nov 2020 08:34:03 -0500 Subject: One last try - large long-running worker tasks In-Reply-To: <20b8d00abb04ba4ca7f733180e028b54@sebres.de> References: <CAHdKRjtQtRYemLpazN=9Z8exmSndG6qsvioajZ2zvgdQKn6xHA@mail.gmail.com> <20b8d00abb04ba4ca7f733180e028b54@sebres.de> Message-ID: <CAHdKRjtpkxjEu76KvPrcrEWdX=T_JSC6A1i_aj_A7Fta1Z-BtA@mail.gmail.com> Thanks Sergey, that's kind of where I was heading and I've looked at those, but this is where I get tripped up. Unlike the 'real' upstream-dependent instances (which are reading from an upstream socket instance and can process incoming data in the main event loop, no locking required), the processing in my module is a bunch of code doing direct API accesses (no file descriptors exposed) in the task handler method and generating content. The final response for the smaller cases is written in the completion handler. But because that worker task is running in a separate thread, there's no safe mechanism I can see to "start" the response and then stream content from the task handler and the main outbound HTTP response writer. What would be ideal (but doesn't exist in nginx core today) is an 'always' callback in the event loop, in which I could manage the streaming, but a post task can't insert another post task for the next loop, it'll get processed (I guess I could use a 0 millisecond timer, but ugly). This is why I was looking at a kernel pipe, I could use that to simulate the upstream, even writing data through it and the pipe would enable data transfer between the task thread and the main event thread for responding... jmh On Tue, Nov 10, 2020 at 7:48 AM Dipl. Ing. 
Sergey Brester <serg.brester at sebres.de> wrote: > > You could do it similar proxy module is buffering the response, for instance see proxy_buffering directive: > > When buffering is enabled, nginx receives a response from the proxied server as soon as possible, saving it into the buffers set by the proxy_buffer_size and proxy_buffers directives. If the whole response does not fit into memory, a part of it can be saved to a temporary file on the disk. Writing to temporary files is controlled by the proxy_max_temp_file_size and proxy_temp_file_write_size directives. > > This or other communicating modules (like fcgi, scgi or uwsgi) using upstream buffering of response. The handling around buffering of upstream is almost the same in all modules. > This is already event-driven - handler is called on readable, by incoming response chunk (or on writable of downstream). > > Basically depending on how your module architecture is built, you could: > > either use default upstream buffering mechanism (if you have something like upstream or can simulate that). In thin case you have to set certain properties of r->upstream: buffering, buffer_size, bufs.num and bufs.size, temp_file_write_size and max_temp_file_size and of course register the handler reading the upstream pipe. > or organize your own response buffering as it is implemented in ngx_event_pipe.c and ngx_http_upstream.c, take a look there for implementation details. > > As for performance (disk I/O, etc) - it depends (buffer size, system cache, mount type of temp storage, speed of clients downstream, etc). But if you would configure the buffers large enough, nginx could use it as long as possible and the storing in temp file can be considered as safe on demand fallback to smooth out the peak of load, to avoid OOM situation. > Usage a kernel pipe buffers could be surely faster, but indirect you'd just relocate the potential OOM issue from nginx process to the system. 
> > Regards, > Sergey > > 10.11.2020 02:54, Jeff Heisz wrote: > > Hi all, I've asked this before with no response, trying one last time > before I just make something work. > > I'm making a custom module for nginx that does a number of things but > one of the actions is a long-running (in the nginx sense) task that > could produce a large response. I've already got proper processing > around using worker tasks for the other long-running operations that > have small datasets, but worry about accumulating a large amount of > memory in a buffer chain for the response. Ideally it would drain as > fast as the client can consume it and throttle appropriately, there > could conceivably be gigabytes of content. > > My choices (besides blowing all of the memory in the system) are: > > - write to a temporary file and attach a file buffer as the response, > less than ideal as it's essentially translating a file to begin with, > so it's a lot of disk I/O and performance will be less than stellar. > From what I can tell, this is one of the models for the various CGI > systems for caching, although in my case caching is not of use > > - somehow hook into the eventing system of nginx to detect the write > transitions and implement flow control directly using threading > conditionals. I've tried this for a few weeks but can't figure out > the 'right' thing to make the hooks work in a separate module without > changing the core nginx code, which I'm loathe to do (unless you are > looking for someone to contribute such a solution, but I'd probably > need some initial guidance) > > - attach a kernel pipe object (yah yah, won't work on Windows, don't > care) to each of my worker instances and somehow 'connect' that as an > upstream-like resource, so that the nginx event loop handles the > read/write consumption and the thread automatically blocks when full > on the kernel pipe. Would need some jiggery to handle reuse and > start/end markers. 
Also not clear if I can override the connection > model for the upstream without again changing core nginx server code > > Any thoughts? Not looking for code here (although telling me to look > at the blah-blah-blah that does exactly this would be awesome), but if > someone who is more familiar with the inner workings of the nginx data > flow could just say which solution is a non-starter (so I don't waste > time trying to make it work) or even which would be a suitable > solution would be awesome! > > jmh > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Tue Nov 10 15:50:17 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Nov 2020 15:50:17 +0000 Subject: [nginx] Configure: style. Message-ID: <hg.f0110b3663b7.1605023417.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/f0110b3663b7 branches: changeset: 7741:f0110b3663b7 user: Maxim Dounin <mdounin at mdounin.ru> date: Tue Nov 10 17:13:14 2020 +0300 description: Configure: style. 
diffstat: auto/make | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diffs (48 lines): diff -r 967cfa6e2ff8 -r f0110b3663b7 auto/make --- a/auto/make Mon Nov 09 22:41:54 2020 +0300 +++ b/auto/make Tue Nov 10 17:13:14 2020 +0300 @@ -313,7 +313,7 @@ END END fi - done + done fi @@ -343,7 +343,7 @@ if [ $MAIL = YES ]; then $ngx_cc$ngx_tab$ngx_objout$ngx_obj$ngx_tab$ngx_src$NGX_AUX END - done + done fi @@ -373,7 +373,7 @@ if [ $STREAM = YES ]; then $ngx_cc$ngx_tab$ngx_objout$ngx_obj$ngx_tab$ngx_src$NGX_AUX END - done + done fi @@ -399,7 +399,7 @@ if test -n "$MISC_SRCS"; then $ngx_cc$ngx_tab$ngx_objout$ngx_obj$ngx_tab$ngx_src$NGX_AUX END - done + done fi @@ -431,7 +431,7 @@ if test -n "$NGX_ADDON_SRCS"; then $ngx_cc$ngx_tab$ngx_objout$ngx_obj$ngx_tab$ngx_src$NGX_AUX END - done + done fi From mdounin at mdounin.ru Tue Nov 10 15:50:20 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Nov 2020 15:50:20 +0000 Subject: [nginx] Configure: initialization of NGX_ADDON_SRCS. Message-ID: <hg.4a69fec53b2f.1605023420.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/4a69fec53b2f branches: changeset: 7742:4a69fec53b2f user: Maxim Dounin <mdounin at mdounin.ru> date: Tue Nov 10 17:13:17 2020 +0300 description: Configure: initialization of NGX_ADDON_SRCS. diffstat: auto/options | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (11 lines): diff -r f0110b3663b7 -r 4a69fec53b2f auto/options --- a/auto/options Tue Nov 10 17:13:14 2020 +0300 +++ b/auto/options Tue Nov 10 17:13:17 2020 +0300 @@ -134,6 +134,7 @@ STREAM_SSL_PREREAD=NO DYNAMIC_MODULES= NGX_ADDONS= +NGX_ADDON_SRCS= NGX_ADDON_DEPS= DYNAMIC_ADDONS= From mdounin at mdounin.ru Tue Nov 10 15:50:23 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Nov 2020 15:50:23 +0000 Subject: [nginx] Configure: shared sources for addon modules. 
Message-ID: <hg.4b1299b1856a.1605023423.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/4b1299b1856a branches: changeset: 7743:4b1299b1856a user: Maxim Dounin <mdounin at mdounin.ru> date: Tue Nov 10 17:13:20 2020 +0300 description: Configure: shared sources for addon modules. Addon modules, both dynamic and static, can now use shared source files. Shared sources result in only one make rule even if specified several times in different modules. diffstat: auto/make | 3 ++- auto/module | 44 ++++++++++++++++++++++++++++++++++++++++++-- auto/options | 1 + 3 files changed, 45 insertions(+), 3 deletions(-) diffs (99 lines): diff -r 4a69fec53b2f -r 4b1299b1856a auto/make --- a/auto/make Tue Nov 10 17:13:17 2020 +0300 +++ b/auto/make Tue Nov 10 17:13:20 2020 +0300 @@ -502,6 +502,7 @@ fi for ngx_module in $DYNAMIC_MODULES do eval ngx_module_srcs="\$${ngx_module}_SRCS" + eval ngx_module_shrd="\$${ngx_module}_SHRD" eval eval ngx_module_libs="\\\"\$${ngx_module}_LIBS\\\"" eval ngx_module_modules="\$${ngx_module}_MODULES" @@ -567,7 +568,7 @@ END | sed -e "s/\(.*\.\)c/\1$ngx_objext/"` ngx_module_objs= - for ngx_src in $ngx_module_srcs + for ngx_src in $ngx_module_srcs $ngx_module_shrd do case "$ngx_src" in src/*) diff -r 4a69fec53b2f -r 4b1299b1856a auto/module --- a/auto/module Tue Nov 10 17:13:17 2020 +0300 +++ b/auto/module Tue Nov 10 17:13:20 2020 +0300 @@ -17,7 +17,6 @@ if [ "$ngx_module_link" = DYNAMIC ]; the done DYNAMIC_MODULES="$DYNAMIC_MODULES $ngx_module" - eval ${ngx_module}_SRCS=\"$ngx_module_srcs\" eval ${ngx_module}_MODULES=\"$ngx_module_name\" @@ -31,6 +30,30 @@ if [ "$ngx_module_link" = DYNAMIC ]; the eval ${ngx_module}_ORDER=\"$ngx_module_order\" fi + srcs= + shrd= + for src in $ngx_module_srcs + do + found=no + for old in $DYNAMIC_MODULES_SRCS + do + if [ $src = $old ]; then + found=yes + break + fi + done + + if [ $found = no ]; then + srcs="$srcs $src" + else + shrd="$shrd $src" + fi + done + eval ${ngx_module}_SRCS=\"$srcs\" + eval 
${ngx_module}_SHRD=\"$shrd\" + + DYNAMIC_MODULES_SRCS="$DYNAMIC_MODULES_SRCS $srcs" + if test -n "$ngx_module_incs"; then CORE_INCS="$CORE_INCS $ngx_module_incs" fi @@ -107,7 +130,24 @@ elif [ "$ngx_module_link" = ADDON ]; the eval ${ngx_module_type}_MODULES=\"\$${ngx_module_type}_MODULES \ $ngx_module_name\" - NGX_ADDON_SRCS="$NGX_ADDON_SRCS $ngx_module_srcs" + srcs= + for src in $ngx_module_srcs + do + found=no + for old in $NGX_ADDON_SRCS + do + if [ $src = $old ]; then + found=yes + break + fi + done + + if [ $found = no ]; then + srcs="$srcs $src" + fi + done + + NGX_ADDON_SRCS="$NGX_ADDON_SRCS $srcs" if test -n "$ngx_module_incs"; then eval ${ngx_var}_INCS=\"\$${ngx_var}_INCS $ngx_module_incs\" diff -r 4a69fec53b2f -r 4b1299b1856a auto/options --- a/auto/options Tue Nov 10 17:13:17 2020 +0300 +++ b/auto/options Tue Nov 10 17:13:20 2020 +0300 @@ -132,6 +132,7 @@ STREAM_UPSTREAM_ZONE=YES STREAM_SSL_PREREAD=NO DYNAMIC_MODULES= +DYNAMIC_MODULES_SRCS= NGX_ADDONS= NGX_ADDON_SRCS= From mdounin at mdounin.ru Tue Nov 10 16:41:04 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Nov 2020 19:41:04 +0300 Subject: improve the first selection of SWRR algorithm In-Reply-To: <E4CA0F19-F77A-4F6A-9681-292D228021C4@didiglobal.com> References: <E4CA0F19-F77A-4F6A-9681-292D228021C4@didiglobal.com> Message-ID: <20201110164104.GS1147@mdounin.ru> Hello! On Wed, Nov 04, 2020 at 12:58:51PM +0000, ?? Cjhust Chen wrote: > Hi: > We improve the Smooth Weighted Round-Robin?SWRR? > algorithm to successfully resolve the problem in the > following situations. > > Situation 1? > upstream backend-server { > server 1.1.1.1:8000 weight=100; > server 2.2.2.2:8000 weight=101; > server 3.3.3.3:8000 weight=100; > } > > 1. When each machine in the cluster mode executes "-s reload" at > the same time , the first selection of each machine is the > machine 2.2.2.2:8000 having higher weight , which will lead to > 300%+ increase of 2.2.2.2:8000 traffic. > 2. 
More and more companies are implementing service discovery > based on nginx. Adding or removing machine will also lead to > 300%+ increase of 2.2.2.2:8000 traffic. > > > > Situation 2: > upstream backend-server { > server 1.1.1.1:8000 weight=100; > server 2.2.2.2:8000 weight=100; > server 3.3.3.3:8000 weight=100; > } > > 1. When each machine in the cluster mode executes "-s reload" at > the same time , the first selection of each machine is the first > machine 1.1.1.1:8000, which will lead to 300%+ increase of > 1.1.1.1:8000 traffic. > 2. More and more companies are implementing service discovery > based on nginx. Adding or removing machine will also lead to > 300%+ increase of 1.1.1.1:8000 traffic. > > > > > > # HG changeset patch > # User Jie Chen <cherrychenjie at didiglobal.com<mailto:cherrychenjie at didiglobal.com>> > # Date 1599813602 -28800 > # Fri Sep 11 16:40:02 2020 +0800 > # Node ID 931b0c055626657d68f886781c193ffb09245a2e > # Parent da5e3f5b16733167142b599b6af3ce9469a07d52 > improve the first selection of SWRR algorithm > > diff -r da5e3f5b1673 -r 931b0c055626 src/http/ngx_http_upstream_round_robin.c > --- a/src/http/ngx_http_upstream_round_robin.c Wed Sep 02 23:13:36 2020 +0300 > +++ b/src/http/ngx_http_upstream_round_robin.c Fri Sep 11 16:40:02 2020 +0800 > @@ -91,7 +91,7 @@ > peer[n].name = server[i].addrs[j].name; > peer[n].weight = server[i].weight; > peer[n].effective_weight = server[i].weight; > - peer[n].current_weight = 0; > + peer[n].current_weight = 0 - ngx_random() % peers->total_weight; > peer[n].max_conns = server[i].max_conns; > peer[n].max_fails = server[i].max_fails; > peer[n].fail_timeout = server[i].fail_timeout; > @@ -155,7 +155,7 @@ > peer[n].name = server[i].addrs[j].name; > peer[n].weight = server[i].weight; > peer[n].effective_weight = server[i].weight; > - peer[n].current_weight = 0; > + peer[n].current_weight = 0 - ngx_random() % peers->total_weight; > peer[n].max_conns = server[i].max_conns; > peer[n].max_fails = 
server[i].max_fails; > peer[n].fail_timeout = server[i].fail_timeout; > > Thank you for your patch. In no particular order: - Traffic on a particular server is not expected to be noticeably increased after nginx restart / configuration reload unless there are very few requests. - Further, given that a reload happens at some random time, adding another random is not going to help. That is, the patch seems to only improve things if nginx is reloaded after a small non-random amount of requests. - Using "peers->total_weight" for backup peers is wrong. - Using the same current_weight for all worker processes is essentially the same problem as the one you are trying to solve. - The patch breaks the "sum of all current weights is 0" invariant. This is not fatal, yet complicates things for no obvious reasons. - In general, it might be a better idea to use the random balancer if you are indeed facing the problems described (http://nginx.org/r/random). -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Tue Nov 10 18:10:24 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 10 Nov 2020 18:10:24 +0000 Subject: [njs] Introduced a library of common functionality for the modules. Message-ID: <hg.a3f5ebcd7ef7.1605031824.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/a3f5ebcd7ef7 branches: changeset: 1563:a3f5ebcd7ef7 user: Dmitry Volyntsev <xeioex at nginx.com> date: Tue Nov 10 17:46:01 2020 +0000 description: Introduced a library of common functionality for the modules. 
diffstat: nginx/config | 15 +++- nginx/ngx_http_js_module.c | 147 ++++++++---------------------------------- nginx/ngx_js.c | 95 +++++++++++++++++++++++++++ nginx/ngx_js.h | 28 ++++++++ nginx/ngx_stream_js_module.c | 102 +++++----------------------- 5 files changed, 183 insertions(+), 204 deletions(-) diffs (733 lines): diff -r 6dd867b1f0fd -r a3f5ebcd7ef7 nginx/config --- a/nginx/config Mon Nov 09 11:54:55 2020 +0000 +++ b/nginx/config Tue Nov 10 17:46:01 2020 +0000 @@ -1,22 +1,29 @@ ngx_addon_name="ngx_js_module" +NJS_DEPS="$ngx_addon_dir/ngx_js.h" +NJS_SRCS="$ngx_addon_dir/ngx_js.c" + if [ $HTTP != NO ]; then ngx_module_type=HTTP ngx_module_name=ngx_http_js_module ngx_module_incs="$ngx_addon_dir/../src $ngx_addon_dir/../build" - ngx_module_deps="$ngx_addon_dir/../build/libnjs.a" - ngx_module_srcs="$ngx_addon_dir/ngx_http_js_module.c" + ngx_module_deps="$ngx_addon_dir/../build/libnjs.a $NJS_DEPS" + ngx_module_srcs="$ngx_addon_dir/ngx_http_js_module.c $NJS_SRCS" ngx_module_libs="PCRE $ngx_addon_dir/../build/libnjs.a -lm" . auto/module + + if [ "$ngx_module_link" != DYNAMIC ]; then + NJS_SRCS= + fi fi if [ $STREAM != NO ]; then ngx_module_type=STREAM ngx_module_name=ngx_stream_js_module ngx_module_incs="$ngx_addon_dir/../src $ngx_addon_dir/../build" - ngx_module_deps="$ngx_addon_dir/../build/libnjs.a" - ngx_module_srcs="$ngx_addon_dir/ngx_stream_js_module.c" + ngx_module_deps="$ngx_addon_dir/../build/libnjs.a $NJS_DEPS" + ngx_module_srcs="$ngx_addon_dir/ngx_stream_js_module.c $NJS_SRCS" ngx_module_libs="PCRE $ngx_addon_dir/../build/libnjs.a -lm" . 
auto/module diff -r 6dd867b1f0fd -r a3f5ebcd7ef7 nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Mon Nov 09 11:54:55 2020 +0000 +++ b/nginx/ngx_http_js_module.c Tue Nov 10 17:46:01 2020 +0000 @@ -9,8 +9,7 @@ #include <ngx_config.h> #include <ngx_core.h> #include <ngx_http.h> - -#include <njs.h> +#include "ngx_js.h" typedef struct { @@ -77,9 +76,6 @@ static ngx_int_t ngx_http_js_init_vm(ngx static void ngx_http_js_cleanup_ctx(void *data); static void ngx_http_js_cleanup_vm(void *data); -static njs_int_t ngx_http_js_ext_get_string(njs_vm_t *vm, - njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, - njs_value_t *retval); static njs_int_t ngx_http_js_ext_keys_header(njs_vm_t *vm, njs_value_t *value, njs_value_t *keys, ngx_list_t *headers); static ngx_table_elt_t *ngx_http_js_get_header(ngx_list_part_t *part, @@ -182,8 +178,6 @@ static void ngx_http_js_clear_timer(njs_ static void ngx_http_js_timer_handler(ngx_event_t *ev); static void ngx_http_js_handle_event(ngx_http_request_t *r, njs_vm_event_t vm_event, njs_value_t *args, njs_uint_t nargs); -static njs_int_t ngx_http_js_string(njs_vm_t *vm, njs_value_t *value, - njs_str_t *str); static char *ngx_http_js_include(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); @@ -286,7 +280,7 @@ static njs_external_t ngx_http_js_ext_r .name.string = njs_str("uri"), .enumerable = 1, .u.property = { - .handler = ngx_http_js_ext_get_string, + .handler = ngx_js_ext_string, .magic32 = offsetof(ngx_http_request_t, uri), } }, @@ -296,7 +290,7 @@ static njs_external_t ngx_http_js_ext_r .name.string = njs_str("method"), .enumerable = 1, .u.property = { - .handler = ngx_http_js_ext_get_string, + .handler = ngx_js_ext_string, .magic32 = offsetof(ngx_http_request_t, method_name), } }, @@ -551,8 +545,6 @@ static void ngx_http_js_content_event_handler(ngx_http_request_t *r) { ngx_int_t rc; - njs_str_t name, exception; - njs_function_t *func; ngx_http_js_ctx_t *ctx; ngx_http_js_loc_conf_t *jlcf; @@ -573,17 +565,6 @@ 
ngx_http_js_content_event_handler(ngx_ht ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); - name.start = jlcf->content.data; - name.length = jlcf->content.len; - - func = njs_vm_function(ctx->vm, &name); - if (func == NULL) { - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "js function \"%V\" not found", &jlcf->content); - ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); - return; - } - /* * status is expected to be overriden by finish(), return() or * internalRedirect() methods, otherwise the content handler is @@ -592,17 +573,15 @@ ngx_http_js_content_event_handler(ngx_ht ctx->status = NGX_HTTP_INTERNAL_SERVER_ERROR; - if (njs_vm_call(ctx->vm, func, njs_value_arg(&ctx->request), 1) != NJS_OK) { - njs_vm_retval_string(ctx->vm, &exception); - - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "js exception: %*s", exception.length, exception.start); - + rc = ngx_js_call(ctx->vm, &jlcf->content, &ctx->request, + r->connection->log); + + if (rc == NGX_ERROR) { ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); return; } - if (njs_vm_pending(ctx->vm)) { + if (rc == NGX_AGAIN) { r->write_event_handler = ngx_http_js_content_write_event_handler; return; } @@ -691,8 +670,7 @@ ngx_http_js_variable(ngx_http_request_t ngx_int_t rc; njs_int_t pending; - njs_str_t name, value, exception; - njs_function_t *func; + njs_str_t value; ngx_http_js_ctx_t *ctx; rc = ngx_http_js_init_vm(r); @@ -711,39 +689,25 @@ ngx_http_js_variable(ngx_http_request_t ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); - name.start = fname->data; - name.length = fname->len; - - func = njs_vm_function(ctx->vm, &name); - if (func == NULL) { - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "js function \"%V\" not found", fname); + pending = njs_vm_pending(ctx->vm); + + rc = ngx_js_call(ctx->vm, fname, &ctx->request, r->connection->log); + + if (rc == NGX_ERROR) { v->not_found = 1; return NGX_OK; } - pending = njs_vm_pending(ctx->vm); - - if (njs_vm_call(ctx->vm, 
func, njs_value_arg(&ctx->request), 1) != NJS_OK) { - njs_vm_retval_string(ctx->vm, &exception); - + if (!pending && rc == NGX_AGAIN) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "js exception: %*s", exception.length, exception.start); - - v->not_found = 1; - return NGX_OK; + "async operation inside \"%V\" variable handler", fname); + return NGX_ERROR; } if (njs_vm_retval_string(ctx->vm, &value) != NJS_OK) { return NGX_ERROR; } - if (!pending && njs_vm_pending(ctx->vm)) { - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "async operation inside \"%V\" variable handler", fname); - return NGX_ERROR; - } - v->len = value.length; v->valid = 1; v->no_cacheable = 0; @@ -840,25 +804,6 @@ ngx_http_js_cleanup_vm(void *data) static njs_int_t -ngx_http_js_ext_get_string(njs_vm_t *vm, njs_object_prop_t *prop, - njs_value_t *value, njs_value_t *setval, njs_value_t *retval) -{ - char *p; - ngx_str_t *field; - - p = njs_vm_external(vm, value); - if (p == NULL) { - njs_value_undefined_set(retval); - return NJS_DECLINED; - } - - field = (ngx_str_t *) (p + njs_vm_prop_magic32(prop)); - - return njs_vm_value_string_set(vm, retval, field->data, field->len); -} - - -static njs_int_t ngx_http_js_ext_keys_header(njs_vm_t *vm, njs_value_t *value, njs_value_t *keys, ngx_list_t *headers) { @@ -1144,8 +1089,7 @@ ngx_http_js_header_out_special(njs_vm_t setval = njs_vm_array_prop(vm, setval, length - 1, &lvalue); } - rc = ngx_http_js_string(vm, setval, &s); - if (rc != NJS_OK) { + if (ngx_js_string(vm, setval, &s) != NGX_OK) { return NJS_ERROR; } @@ -1389,8 +1333,7 @@ ngx_http_js_header_generic(njs_vm_t *vm, setval = njs_vm_array_prop(vm, array, i, &lvalue); } - rc = ngx_http_js_string(vm, setval, &s); - if (rc != NJS_OK) { + if (ngx_js_string(vm, setval, &s) != NGX_OK) { return NJS_ERROR; } @@ -1527,8 +1470,7 @@ ngx_http_js_content_type(njs_vm_t *vm, n setval = njs_vm_array_prop(vm, setval, length - 1, &lvalue); } - rc = ngx_http_js_string(vm, setval, &s); - if (rc != NJS_OK) { + 
if (ngx_js_string(vm, setval, &s) != NGX_OK) { return NJS_ERROR; } @@ -1595,9 +1537,7 @@ static njs_int_t ngx_http_js_ext_status(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval) { - njs_int_t rc; ngx_int_t n; - njs_str_t s; ngx_http_request_t *r; r = njs_vm_external(vm, value); @@ -1611,13 +1551,7 @@ ngx_http_js_ext_status(njs_vm_t *vm, njs return NJS_OK; } - rc = ngx_http_js_string(vm, setval, &s); - if (rc != NJS_OK) { - return NJS_ERROR; - } - - n = ngx_atoi(s.start, s.length); - if (n == NGX_ERROR) { + if (ngx_js_integer(vm, setval, &n) != NGX_OK) { return NJS_ERROR; } @@ -1763,7 +1697,6 @@ ngx_http_js_ext_return(njs_vm_t *vm, njs { njs_str_t text; ngx_int_t status; - njs_value_t *value; ngx_http_js_ctx_t *ctx; ngx_http_request_t *r; ngx_http_complex_value_t cv; @@ -1774,20 +1707,16 @@ ngx_http_js_ext_return(njs_vm_t *vm, njs return NJS_ERROR; } - value = njs_arg(args, nargs, 1); - if (!njs_value_is_valid_number(value)) { - njs_vm_error(vm, "code is not a number"); + if (ngx_js_integer(vm, njs_arg(args, nargs, 1), &status) != NGX_OK) { return NJS_ERROR; } - status = njs_value_number(value); - if (status < 0 || status > 999) { njs_vm_error(vm, "code is out of range"); return NJS_ERROR; } - if (ngx_http_js_string(vm, njs_arg(args, nargs, 2), &text) != NJS_OK) { + if (ngx_js_string(vm, njs_arg(args, nargs, 2), &text) != NGX_OK) { njs_vm_error(vm, "failed to convert text"); return NJS_ERROR; } @@ -1838,7 +1767,7 @@ ngx_http_js_ext_internal_redirect(njs_vm ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); - if (ngx_http_js_string(vm, njs_arg(args, nargs, 1), &uri) != NJS_OK) { + if (ngx_js_string(vm, njs_arg(args, nargs, 1), &uri) != NGX_OK) { njs_vm_error(vm, "failed to convert uri arg"); return NJS_ERROR; } @@ -2326,8 +2255,7 @@ ngx_http_js_ext_variables(njs_vm_t *vm, return NJS_ERROR; } - rc = ngx_http_js_string(vm, setval, &s); - if (rc != NJS_OK) { + if (ngx_js_string(vm, setval, &s) != NGX_OK) { return 
NJS_ERROR; } @@ -2445,7 +2373,7 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, return NJS_ERROR; } - if (ngx_http_js_string(vm, njs_arg(args, nargs, 1), &uri_arg) != NJS_OK) { + if (ngx_js_string(vm, njs_arg(args, nargs, 1), &uri_arg) != NGX_OK) { njs_vm_error(vm, "failed to convert uri arg"); return NJS_ERROR; } @@ -2489,7 +2417,7 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, if (options != NULL) { value = njs_vm_object_prop(vm, options, &args_key, &lvalue); if (value != NULL) { - if (ngx_http_js_string(vm, value, &args_arg) != NJS_OK) { + if (ngx_js_string(vm, value, &args_arg) != NGX_OK) { njs_vm_error(vm, "failed to convert options.args"); return NJS_ERROR; } @@ -2502,7 +2430,7 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, value = njs_vm_object_prop(vm, options, &method_key, &lvalue); if (value != NULL) { - if (ngx_http_js_string(vm, value, &method_name) != NJS_OK) { + if (ngx_js_string(vm, value, &method_name) != NGX_OK) { njs_vm_error(vm, "failed to convert options.method"); return NJS_ERROR; } @@ -2522,7 +2450,7 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, value = njs_vm_object_prop(vm, options, &body_key, &lvalue); if (value != NULL) { - if (ngx_http_js_string(vm, value, &body_arg) != NJS_OK) { + if (ngx_js_string(vm, value, &body_arg) != NGX_OK) { njs_vm_error(vm, "failed to convert options.body"); return NJS_ERROR; } @@ -2905,23 +2833,6 @@ ngx_http_js_handle_event(ngx_http_reques } -static njs_int_t -ngx_http_js_string(njs_vm_t *vm, njs_value_t *value, njs_str_t *str) -{ - if (value != NULL && !njs_value_is_null_or_undefined(value)) { - if (njs_vm_value_to_string(vm, str, value) == NJS_ERROR) { - return NJS_ERROR; - } - - } else { - str->start = NULL; - str->length = 0; - } - - return NJS_OK; -} - - static char * ngx_http_js_init_main_conf(ngx_conf_t *cf, void *conf) { diff -r 6dd867b1f0fd -r a3f5ebcd7ef7 nginx/ngx_js.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/nginx/ngx_js.c Tue Nov 10 17:46:01 2020 +0000 @@ -0,0 +1,95 @@ + +/* + * Copyright (C) Roman 
Arutyunyan + * Copyright (C) Dmitry Volyntsev + * Copyright (C) NGINX, Inc. + */ + + +#include <ngx_config.h> +#include <ngx_core.h> +#include "ngx_js.h" + + +ngx_int_t +ngx_js_call(njs_vm_t *vm, ngx_str_t *fname, njs_opaque_value_t *value, + ngx_log_t *log) +{ + njs_str_t name, exception; + njs_function_t *func; + + name.start = fname->data; + name.length = fname->len; + + func = njs_vm_function(vm, &name); + if (func == NULL) { + ngx_log_error(NGX_LOG_ERR, log, 0, + "js function \"%V\" not found", fname); + return NGX_ERROR; + } + + if (njs_vm_call(vm, func, njs_value_arg(value), 1) != NJS_OK) { + njs_vm_retval_string(vm, &exception); + + ngx_log_error(NGX_LOG_ERR, log, 0, + "js exception: %*s", exception.length, exception.start); + + return NGX_ERROR; + } + + if (njs_vm_pending(vm)) { + return NGX_AGAIN; + } + + return NGX_OK; +} + + +njs_int_t +ngx_js_ext_string(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, + njs_value_t *setval, njs_value_t *retval) +{ + char *p; + ngx_str_t *field; + + p = njs_vm_external(vm, value); + if (p == NULL) { + njs_value_undefined_set(retval); + return NJS_DECLINED; + } + + field = (ngx_str_t *) (p + njs_vm_prop_magic32(prop)); + + return njs_vm_value_string_set(vm, retval, field->data, field->len); +} + + +ngx_int_t +ngx_js_integer(njs_vm_t *vm, njs_value_t *value, ngx_int_t *n) +{ + if (!njs_value_is_valid_number(value)) { + njs_vm_error(vm, "is not a number"); + return NGX_ERROR; + } + + *n = njs_value_number(value); + + return NGX_OK; +} + + +ngx_int_t +ngx_js_string(njs_vm_t *vm, njs_value_t *value, njs_str_t *str) +{ + if (value != NULL && !njs_value_is_null_or_undefined(value)) { + if (njs_vm_value_to_string(vm, str, value) == NJS_ERROR) { + return NGX_ERROR; + } + + } else { + str->start = NULL; + str->length = 0; + } + + return NGX_OK; +} diff -r 6dd867b1f0fd -r a3f5ebcd7ef7 nginx/ngx_js.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/nginx/ngx_js.h Tue Nov 10 17:46:01 2020 +0000 @@ -0,0 +1,28 @@ + +/* + * 
Copyright (C) Roman Arutyunyan + * Copyright (C) Dmitry Volyntsev + * Copyright (C) NGINX, Inc. + */ + + +#ifndef _NGX_JS_H_INCLUDED_ +#define _NGX_JS_H_INCLUDED_ + + +#include <ngx_config.h> +#include <ngx_core.h> +#include <njs.h> + + +ngx_int_t ngx_js_call(njs_vm_t *vm, ngx_str_t *s, njs_opaque_value_t *value, + ngx_log_t *log); + +njs_int_t ngx_js_ext_string(njs_vm_t *vm, njs_object_prop_t *prop, + njs_value_t *value, njs_value_t *setval, njs_value_t *retval); + +ngx_int_t ngx_js_string(njs_vm_t *vm, njs_value_t *value, njs_str_t *str); +ngx_int_t ngx_js_integer(njs_vm_t *vm, njs_value_t *value, ngx_int_t *n); + + +#endif /* _NGX_JS_H_INCLUDED_ */ diff -r 6dd867b1f0fd -r a3f5ebcd7ef7 nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Mon Nov 09 11:54:55 2020 +0000 +++ b/nginx/ngx_stream_js_module.c Tue Nov 10 17:46:01 2020 +0000 @@ -9,8 +9,7 @@ #include <ngx_config.h> #include <ngx_core.h> #include <ngx_stream.h> - -#include <njs.h> +#include "ngx_js.h" typedef struct { @@ -110,8 +109,6 @@ static void ngx_stream_js_clear_timer(nj static void ngx_stream_js_timer_handler(ngx_event_t *ev); static void ngx_stream_js_handle_event(ngx_stream_session_t *s, njs_vm_event_t vm_event, njs_value_t *args, njs_uint_t nargs); -static njs_int_t ngx_stream_js_string(njs_vm_t *vm, njs_value_t *value, - njs_str_t *str); static char *ngx_stream_js_include(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); @@ -399,10 +396,9 @@ ngx_stream_js_preread_handler(ngx_stream static ngx_int_t ngx_stream_js_phase_handler(ngx_stream_session_t *s, ngx_str_t *name) { - njs_str_t fname, exception; + njs_str_t exception; njs_int_t ret; ngx_int_t rc; - njs_function_t *func; ngx_connection_t *c; ngx_stream_js_ctx_t *ctx; @@ -423,17 +419,6 @@ ngx_stream_js_phase_handler(ngx_stream_s ctx = ngx_stream_get_module_ctx(s, ngx_stream_js_module); if (!ctx->in_progress) { - fname.start = name->data; - fname.length = name->len; - - func = njs_vm_function(ctx->vm, &fname); - - if (func == NULL) { - 
ngx_log_error(NGX_LOG_ERR, c->log, 0, - "js function \"%V\" not found", name); - return NGX_ERROR; - } - /* * status is expected to be overriden by allow(), deny(), decline() or * done() methods. @@ -441,9 +426,9 @@ ngx_stream_js_phase_handler(ngx_stream_s ctx->status = NGX_ERROR; - ret = njs_vm_call(ctx->vm, func, njs_value_arg(&ctx->args), 1); - if (ret != NJS_OK) { - goto exception; + rc = ngx_js_call(ctx->vm, name, &ctx->args[0], c->log); + if (rc == NGX_ERROR) { + return rc; } } @@ -500,11 +485,10 @@ static ngx_int_t ngx_stream_js_body_filter(ngx_stream_session_t *s, ngx_chain_t *in, ngx_uint_t from_upstream) { - njs_str_t name, exception; + njs_str_t exception; njs_int_t ret; ngx_int_t rc; ngx_chain_t *out, *cl; - njs_function_t *func; ngx_connection_t *c; ngx_stream_js_ctx_t *ctx; ngx_stream_js_srv_conf_t *jscf; @@ -532,20 +516,9 @@ ngx_stream_js_body_filter(ngx_stream_ses ctx = ngx_stream_get_module_ctx(s, ngx_stream_js_module); if (!ctx->filter) { - name.start = jscf->filter.data; - name.length = jscf->filter.len; - - func = njs_vm_function(ctx->vm, &name); - - if (func == NULL) { - ngx_log_error(NGX_LOG_ERR, c->log, 0, - "js function \"%V\" not found", &jscf->filter); - return NGX_ERROR; - } - - ret = njs_vm_call(ctx->vm, func, njs_value_arg(&ctx->args), 1); - if (ret != NJS_OK) { - goto exception; + rc = ngx_js_call(ctx->vm, &jscf->filter, &ctx->args[0], c->log); + if (rc == NGX_ERROR) { + return rc; } } @@ -626,8 +599,7 @@ ngx_stream_js_variable(ngx_stream_sessio ngx_int_t rc; njs_int_t pending; - njs_str_t name, value, exception; - njs_function_t *func; + njs_str_t value; ngx_stream_js_ctx_t *ctx; rc = ngx_stream_js_init_vm(s); @@ -646,39 +618,25 @@ ngx_stream_js_variable(ngx_stream_sessio ctx = ngx_stream_get_module_ctx(s, ngx_stream_js_module); - name.start = fname->data; - name.length = fname->len; + pending = njs_vm_pending(ctx->vm); - func = njs_vm_function(ctx->vm, &name); - if (func == NULL) { - ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, 
- "js function \"%V\" not found", fname); + rc = ngx_js_call(ctx->vm, fname, &ctx->args[0], s->connection->log); + + if (rc == NGX_ERROR) { v->not_found = 1; return NGX_OK; } - pending = njs_vm_pending(ctx->vm); - - if (njs_vm_call(ctx->vm, func, njs_value_arg(&ctx->args), 1) != NJS_OK) { - njs_vm_retval_string(ctx->vm, &exception); - + if (!pending && rc == NGX_AGAIN) { ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, - "js exception: %*s", exception.length, exception.start); - - v->not_found = 1; - return NGX_OK; + "async operation inside \"%V\" variable handler", fname); + return NGX_ERROR; } if (njs_vm_retval_string(ctx->vm, &value) != NJS_OK) { return NGX_ERROR; } - if (!pending && njs_vm_pending(ctx->vm)) { - ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, - "async operation inside \"%V\" variable handler", fname); - return NGX_ERROR; - } - v->len = value.length; v->valid = 1; v->no_cacheable = 0; @@ -926,12 +884,10 @@ ngx_stream_js_ext_done(njs_vm_t *vm, njs code = njs_arg(args, nargs, 1); if (!njs_value_is_undefined(code)) { - if (!njs_value_is_valid_number(code)) { - njs_vm_error(vm, "code is not a number"); + if (ngx_js_integer(vm, code, &status) != NGX_OK) { return NJS_ERROR; } - status = njs_value_number(code); if (status < NGX_ABORT || status > NGX_STREAM_SERVICE_UNAVAILABLE) { njs_vm_error(vm, "code is out of range"); return NJS_ERROR; @@ -1116,7 +1072,7 @@ ngx_stream_js_ext_send(njs_vm_t *vm, njs return NJS_ERROR; } - if (ngx_stream_js_string(vm, njs_arg(args, nargs, 1), &buffer) != NJS_OK) { + if (ngx_js_string(vm, njs_arg(args, nargs, 1), &buffer) != NGX_OK) { njs_vm_error(vm, "failed to get buffer arg"); return NJS_ERROR; } @@ -1218,8 +1174,7 @@ ngx_stream_js_ext_variables(njs_vm_t *vm return NJS_ERROR; } - rc = ngx_stream_js_string(vm, setval, &val); - if (rc != NJS_OK) { + if (ngx_js_string(vm, setval, &val) != NGX_OK) { return NJS_ERROR; } @@ -1350,23 +1305,6 @@ ngx_stream_js_handle_event(ngx_stream_se } -static njs_int_t 
-ngx_stream_js_string(njs_vm_t *vm, njs_value_t *value, njs_str_t *str) -{ - if (!njs_value_is_null_or_undefined(value)) { - if (njs_vm_value_to_string(vm, str, value) == NJS_ERROR) { - return NJS_ERROR; - } - - } else { - str->start = NULL; - str->length = 0; - } - - return NJS_OK; -} - - static char * ngx_stream_js_init_main_conf(ngx_conf_t *cf, void *conf) { From mdounin at mdounin.ru Tue Nov 10 19:25:58 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Nov 2020 22:25:58 +0300 Subject: [PATCH] autoindex : Add viewport meta tag for responsive scaling to narrow screen devices In-Reply-To: <aaffe643fe0d81a257000542294d442f@anche.no> References: <aaffe643fe0d81a257000542294d442f@anche.no> Message-ID: <20201110192558.GT1147@mdounin.ru> Hello! On Sat, Nov 07, 2020 at 01:35:34PM +0530, vbt at anche.no wrote: > # HG changeset patch > # User Var Bhat <vbt at anche.no> > # Date 1604734673 -19800 > # Sat Nov 07 13:07:53 2020 +0530 > # Node ID 703eb6ae6cbf2ad87620d8ca4e0788be7e121538 > # Parent 554c6ae25ffc634e29adae8b51bef6ddabebea39 > ngx_http_autoindex_module: Added viewport meta tag to autoindex html > rendering > > Previously , Auto Indexing Directory didn't include viewport meta tag > which caused html not to scale to the screen width . > This caused Narrow screen devices (e.g. mobiles) render pages in a > virtual window or viewport, > which is usually wider than the screen, and then shrink the rendered > result down so it can all be seen at once. > This required frequent zooming and scrolling as things looked small. > This made autoindexing unresponsive on mobile and other narrow screen > devices. > > This patch makes autoindexing mobile responsive. 
> > diff -r 554c6ae25ffc -r 703eb6ae6cbf > src/http/modules/ngx_http_autoindex_module.c > --- a/src/http/modules/ngx_http_autoindex_module.c Fri Nov 06 23:44:54 > 2020 +0300 > +++ b/src/http/modules/ngx_http_autoindex_module.c Sat Nov 07 13:07:53 > 2020 +0530 > @@ -445,7 +445,8 @@ > > static u_char title[] = > "<html>" CRLF > - "<head><title>Index of " > + "<head><meta name=\"viewport\" content=\"width=device-width, > initial-scale=1.0\">" CRLF > + "<title>Index of " > ; > > static u_char header[] = Thank you for the patch. I don't think it is a good idea to hardcode non-standard meta tags. Also, I'm not really sure the result looks better on mobile devices, since it essentially hides the size and modification time from the listing. To make arbitrary design changes to autoindex output, consider using autoindex_format xml coupled with XSLT processing, see http://nginx.org/r/autoindex_format. Also, quick-and-dirty approach might be to use sub_filter (http//nginx.org/r/sub_filiter), for example: sub_filter '<head>' '<head><meta name="viewport" content="width=device-width, initial-scale=1.0">'; If there is a need for some easier means to make autoidex more flexible, we can consider introducing something like "autoindex_head" to make it possible to add arbitrary code to autoindex html output. -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Fri Nov 13 15:09:16 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Fri, 13 Nov 2020 15:09:16 +0000 Subject: [njs] Configure: improved dependencies tracking. Message-ID: <hg.fb059e00a887.1605280156.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/fb059e00a887 branches: changeset: 1564:fb059e00a887 user: Dmitry Volyntsev <xeioex at nginx.com> date: Fri Nov 13 15:08:05 2020 +0000 description: Configure: improved dependencies tracking. Using nginx Makefile as a dependency for libnjs.a to ensure libnjs.a is rebuilt after auto/configure invocation. 
diffstat: nginx/config.make | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (11 lines): diff -r a3f5ebcd7ef7 -r fb059e00a887 nginx/config.make --- a/nginx/config.make Tue Nov 10 17:46:01 2020 +0000 +++ b/nginx/config.make Fri Nov 13 15:08:05 2020 +0000 @@ -1,6 +1,6 @@ cat << END >> $NGX_MAKEFILE -$ngx_addon_dir/../build/libnjs.a: +$ngx_addon_dir/../build/libnjs.a: $NGX_MAKEFILE cd $ngx_addon_dir/.. \\ && if [ -f build/Makefile ]; then \$(MAKE) clean; fi \\ && CFLAGS="\$(CFLAGS)" CC="\$(CC)" ./configure \\ From xeioex at nginx.com Fri Nov 13 18:18:20 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Fri, 13 Nov 2020 18:18:20 +0000 Subject: [njs] Modules: introduced global "ngx" object. Message-ID: <hg.d7f6e719af98.1605291500.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/d7f6e719af98 branches: changeset: 1565:d7f6e719af98 user: Dmitry Volyntsev <xeioex at nginx.com> date: Fri Oct 02 18:38:12 2020 +0000 description: Modules: introduced global "ngx" object. 
diffstat: nginx/ngx_http_js_module.c | 62 +++++---------- nginx/ngx_js.c | 172 ++++++++++++++++++++++++++++++++++++++---- nginx/ngx_js.h | 11 ++ nginx/ngx_stream_js_module.c | 56 +++---------- src/njs.h | 8 ++ src/njs_extern.c | 9 ++- src/njs_vm.c | 14 +++ 7 files changed, 230 insertions(+), 102 deletions(-) diffs (520 lines): diff -r fb059e00a887 -r d7f6e719af98 nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Fri Nov 13 15:08:05 2020 +0000 +++ b/nginx/ngx_http_js_module.c Fri Oct 02 18:38:12 2020 +0000 @@ -122,9 +122,6 @@ static njs_int_t ngx_http_js_ext_return( static njs_int_t ngx_http_js_ext_internal_redirect(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused); -static njs_int_t ngx_http_js_ext_log(njs_vm_t *vm, njs_value_t *args, - njs_uint_t nargs, njs_index_t level); - static njs_int_t ngx_http_js_ext_get_http_version(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval); @@ -429,7 +426,7 @@ static njs_external_t ngx_http_js_ext_r .configurable = 1, .enumerable = 1, .u.method = { - .native = ngx_http_js_ext_log, + .native = ngx_js_ext_log, .magic8 = NGX_LOG_INFO, } }, @@ -441,7 +438,7 @@ static njs_external_t ngx_http_js_ext_r .configurable = 1, .enumerable = 1, .u.method = { - .native = ngx_http_js_ext_log, + .native = ngx_js_ext_log, .magic8 = NGX_LOG_WARN, } }, @@ -453,7 +450,7 @@ static njs_external_t ngx_http_js_ext_r .configurable = 1, .enumerable = 1, .u.method = { - .native = ngx_http_js_ext_log, + .native = ngx_js_ext_log, .magic8 = NGX_LOG_ERR, } }, @@ -522,6 +519,17 @@ static njs_vm_ops_t ngx_http_js_ops = { }; +static uintptr_t ngx_http_js_uptr[] = { + offsetof(ngx_http_request_t, connection), +}; + + +static njs_vm_meta_t ngx_http_js_metas = { + .size = 1, + .values = ngx_http_js_uptr +}; + + static ngx_int_t ngx_http_js_content_handler(ngx_http_request_t *r) { @@ -1789,41 +1797,6 @@ ngx_http_js_ext_internal_redirect(njs_vm static njs_int_t 
-ngx_http_js_ext_log(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, - njs_index_t level) -{ - njs_str_t msg; - ngx_connection_t *c; - ngx_log_handler_pt handler; - ngx_http_request_t *r; - - r = njs_vm_external(vm, njs_arg(args, nargs, 0)); - if (r == NULL) { - return NJS_ERROR; - } - - c = r->connection; - - if (njs_vm_value_to_string(vm, &msg, njs_arg(args, nargs, 1)) - == NJS_ERROR) - { - return NJS_ERROR; - } - - handler = c->log->handler; - c->log->handler = NULL; - - ngx_log_error(level, c->log, 0, "js: %*s", msg.length, msg.start); - - c->log->handler = handler; - - njs_value_undefined_set(njs_vm_retval(vm)); - - return NJS_OK; -} - - -static njs_int_t ngx_http_js_ext_get_http_version(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval) { @@ -2946,6 +2919,7 @@ ngx_http_js_init_main_conf(ngx_conf_t *c options.backtrace = 1; options.unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; options.ops = &ngx_http_js_ops; + options.metas = &ngx_http_js_metas; options.argv = ngx_argv; options.argc = ngx_argc; @@ -3011,6 +2985,12 @@ ngx_http_js_init_main_conf(ngx_conf_t *c } jmcf->req_proto = proto; + + rc = ngx_js_core_init(jmcf->vm, cf->log); + if (njs_slow_path(rc != NJS_OK)) { + return NGX_CONF_ERROR; + } + end = start + size; rc = njs_vm_compile(jmcf->vm, &start, end); diff -r fb059e00a887 -r d7f6e719af98 nginx/ngx_js.c --- a/nginx/ngx_js.c Fri Nov 13 15:08:05 2020 +0000 +++ b/nginx/ngx_js.c Fri Oct 02 18:38:12 2020 +0000 @@ -11,6 +11,48 @@ #include "ngx_js.h" +static njs_external_t ngx_js_ext_core[] = { + + { + .flags = NJS_EXTERN_METHOD, + .name.string = njs_str("log"), + .writable = 1, + .configurable = 1, + .enumerable = 1, + .u.method = { + .native = ngx_js_ext_log, + } + }, + + { + .flags = NJS_EXTERN_PROPERTY, + .name.string = njs_str("INFO"), + .u.property = { + .handler = ngx_js_ext_constant, + .magic32 = NGX_LOG_INFO, + } + }, + + { + .flags = NJS_EXTERN_PROPERTY, + .name.string = 
njs_str("WARN"), + .u.property = { + .handler = ngx_js_ext_constant, + .magic32 = NGX_LOG_WARN, + } + }, + + { + .flags = NJS_EXTERN_PROPERTY, + .name.string = njs_str("ERR"), + .u.property = { + .handler = ngx_js_ext_constant, + .magic32 = NGX_LOG_ERR, + } + }, +}; + + ngx_int_t ngx_js_call(njs_vm_t *vm, ngx_str_t *fname, njs_opaque_value_t *value, ngx_log_t *log) @@ -45,25 +87,6 @@ ngx_js_call(njs_vm_t *vm, ngx_str_t *fna } -njs_int_t -ngx_js_ext_string(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, - njs_value_t *setval, njs_value_t *retval) -{ - char *p; - ngx_str_t *field; - - p = njs_vm_external(vm, value); - if (p == NULL) { - njs_value_undefined_set(retval); - return NJS_DECLINED; - } - - field = (ngx_str_t *) (p + njs_vm_prop_magic32(prop)); - - return njs_vm_value_string_set(vm, retval, field->data, field->len); -} - - ngx_int_t ngx_js_integer(njs_vm_t *vm, njs_value_t *value, ngx_int_t *n) { @@ -93,3 +116,114 @@ ngx_js_string(njs_vm_t *vm, njs_value_t return NGX_OK; } + + +ngx_int_t +ngx_js_core_init(njs_vm_t *vm, ngx_log_t *log) +{ + njs_int_t ret; + njs_str_t name; + njs_opaque_value_t value; + njs_external_proto_t proto; + + proto = njs_vm_external_prototype(vm, ngx_js_ext_core, + njs_nitems(ngx_js_ext_core)); + if (proto == NULL) { + ngx_log_error(NGX_LOG_EMERG, log, 0, "failed to add js core proto"); + return NGX_ERROR; + } + + ret = njs_vm_external_create(vm, njs_value_arg(&value), proto, NULL, 1); + if (njs_slow_path(ret != NJS_OK)) { + ngx_log_error(NGX_LOG_EMERG, log, 0, + "njs_vm_external_create() failed\n"); + return NGX_ERROR; + } + + name.length = 3; + name.start = (u_char *) "ngx"; + + ret = njs_vm_bind(vm, &name, njs_value_arg(&value), 1); + if (njs_slow_path(ret != NJS_OK)) { + ngx_log_error(NGX_LOG_EMERG, log, 0, "njs_vm_bind() failed\n"); + return NGX_ERROR; + } + + return NGX_OK; +} + + +njs_int_t +ngx_js_ext_string(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, + njs_value_t *setval, njs_value_t *retval) +{ + 
char *p; + ngx_str_t *field; + + p = njs_vm_external(vm, value); + if (p == NULL) { + njs_value_undefined_set(retval); + return NJS_DECLINED; + } + + field = (ngx_str_t *) (p + njs_vm_prop_magic32(prop)); + + return njs_vm_value_string_set(vm, retval, field->data, field->len); +} + + +njs_int_t +ngx_js_ext_constant(njs_vm_t *vm, njs_object_prop_t *prop, + njs_value_t *value, njs_value_t *setval, njs_value_t *retval) +{ + njs_value_number_set(retval, njs_vm_prop_magic32(prop)); + + return NJS_OK; +} + + +njs_int_t +ngx_js_ext_log(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t level) +{ + char *p; + ngx_int_t lvl; + njs_str_t msg; + njs_value_t *value; + ngx_connection_t *c; + ngx_log_handler_pt handler; + + p = njs_vm_external(vm, njs_arg(args, nargs, 0)); + if (p == NULL) { + njs_vm_error(vm, "\"this\" is not an external"); + return NJS_ERROR; + } + + value = njs_arg(args, nargs, (level != 0) ? 1 : 2); + + if (level == 0) { + if (ngx_js_integer(vm, njs_arg(args, nargs, 1), &lvl) != NGX_OK) { + return NJS_ERROR; + } + + level = lvl; + } + + if (ngx_js_string(vm, value, &msg) != NGX_OK) { + return NJS_ERROR; + } + + c = ngx_external_connection(vm, p); + handler = c->log->handler; + c->log->handler = NULL; + + ngx_log_error(level, c->log, 0, "js: %*s", msg.length, msg.start); + + c->log->handler = handler; + + njs_value_undefined_set(njs_vm_retval(vm)); + + return NJS_OK; +} + + diff -r fb059e00a887 -r d7f6e719af98 nginx/ngx_js.h --- a/nginx/ngx_js.h Fri Nov 13 15:08:05 2020 +0000 +++ b/nginx/ngx_js.h Fri Oct 02 18:38:12 2020 +0000 @@ -15,11 +15,22 @@ #include <njs.h> +#define ngx_external_connection(vm, ext) \ + (*((ngx_connection_t **) ((u_char *) ext + njs_vm_meta(vm, 0)))) + + ngx_int_t ngx_js_call(njs_vm_t *vm, ngx_str_t *s, njs_opaque_value_t *value, ngx_log_t *log); +njs_int_t ngx_js_ext_log(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t level); + njs_int_t ngx_js_ext_string(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t 
*value, njs_value_t *setval, njs_value_t *retval); +njs_int_t ngx_js_ext_constant(njs_vm_t *vm, njs_object_prop_t *prop, + njs_value_t *value, njs_value_t *setval, njs_value_t *retval); + +ngx_int_t ngx_js_core_init(njs_vm_t *vm, ngx_log_t *log); ngx_int_t ngx_js_string(njs_vm_t *vm, njs_value_t *value, njs_str_t *str); ngx_int_t ngx_js_integer(njs_vm_t *vm, njs_value_t *value, ngx_int_t *n); diff -r fb059e00a887 -r d7f6e719af98 nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Fri Nov 13 15:08:05 2020 +0000 +++ b/nginx/ngx_stream_js_module.c Fri Oct 02 18:38:12 2020 +0000 @@ -89,8 +89,6 @@ static njs_int_t ngx_stream_js_ext_get_r static njs_int_t ngx_stream_js_ext_done(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused); -static njs_int_t ngx_stream_js_ext_log(njs_vm_t *vm, njs_value_t *args, - njs_uint_t nargs, njs_index_t unused); static njs_int_t ngx_stream_js_ext_on(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused); static njs_int_t ngx_stream_js_ext_off(njs_vm_t *vm, njs_value_t *args, @@ -291,7 +289,7 @@ static njs_external_t ngx_stream_js_ext .configurable = 1, .enumerable = 1, .u.method = { - .native = ngx_stream_js_ext_log, + .native = ngx_js_ext_log, .magic8 = NGX_LOG_INFO, } }, @@ -303,7 +301,7 @@ static njs_external_t ngx_stream_js_ext .configurable = 1, .enumerable = 1, .u.method = { - .native = ngx_stream_js_ext_log, + .native = ngx_js_ext_log, .magic8 = NGX_LOG_WARN, } }, @@ -315,7 +313,7 @@ static njs_external_t ngx_stream_js_ext .configurable = 1, .enumerable = 1, .u.method = { - .native = ngx_stream_js_ext_log, + .native = ngx_js_ext_log, .magic8 = NGX_LOG_ERR, } }, @@ -362,6 +360,17 @@ static njs_vm_ops_t ngx_stream_js_ops = }; +static uintptr_t ngx_stream_js_uptr[] = { + offsetof(ngx_stream_session_t, connection), +}; + + +static njs_vm_meta_t ngx_stream_js_metas = { + .size = 1, + .values = ngx_stream_js_uptr +}; + + static ngx_stream_filter_pt ngx_stream_next_filter; @@ -919,42 +928,6 @@ 
ngx_stream_js_ext_done(njs_vm_t *vm, njs static njs_int_t -ngx_stream_js_ext_log(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, - njs_index_t level) -{ - njs_str_t msg; - ngx_connection_t *c; - ngx_log_handler_pt handler; - ngx_stream_session_t *s; - - s = njs_vm_external(vm, njs_arg(args, nargs, 0)); - if (s == NULL) { - njs_vm_error(vm, "\"this\" is not an external"); - return NJS_ERROR; - } - - c = s->connection; - - if (njs_vm_value_to_string(vm, &msg, njs_arg(args, nargs, 1)) - == NJS_ERROR) - { - return NJS_ERROR; - } - - handler = c->log->handler; - c->log->handler = NULL; - - ngx_log_error(level, c->log, 0, "js: %*s", msg.length, msg.start); - - c->log->handler = handler; - - njs_value_undefined_set(njs_vm_retval(vm)); - - return NJS_OK; -} - - -static njs_int_t ngx_stream_js_ext_on(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { @@ -1418,6 +1391,7 @@ ngx_stream_js_init_main_conf(ngx_conf_t options.backtrace = 1; options.unhandled_rejection = NJS_VM_OPT_UNHANDLED_REJECTION_THROW; options.ops = &ngx_stream_js_ops; + options.metas = &ngx_stream_js_metas; options.argv = ngx_argv; options.argc = ngx_argc; diff -r fb059e00a887 -r d7f6e719af98 src/njs.h --- a/src/njs.h Fri Nov 13 15:08:05 2020 +0000 +++ b/src/njs.h Fri Oct 02 18:38:12 2020 +0000 @@ -187,9 +187,16 @@ typedef struct { typedef struct { + size_t size; + uintptr_t *values; +} njs_vm_meta_t; + + +typedef struct { njs_external_ptr_t external; njs_vm_shared_t *shared; njs_vm_ops_t *ops; + njs_vm_meta_t *metas; njs_str_t file; char **argv; @@ -295,6 +302,7 @@ NJS_EXPORT njs_int_t njs_vm_external_cre njs_external_proto_t proto, njs_external_ptr_t external, njs_bool_t shared); NJS_EXPORT njs_external_ptr_t njs_vm_external(njs_vm_t *vm, const njs_value_t *value); +NJS_EXPORT uintptr_t njs_vm_meta(njs_vm_t *vm, njs_uint_t index); NJS_EXPORT njs_function_t *njs_vm_function_alloc(njs_vm_t *vm, njs_function_native_t native); diff -r fb059e00a887 -r d7f6e719af98 src/njs_extern.c --- 
a/src/njs_extern.c Fri Nov 13 15:08:05 2020 +0000 +++ b/src/njs_extern.c Fri Oct 02 18:38:12 2020 +0000 @@ -322,8 +322,15 @@ njs_vm_external_create(njs_vm_t *vm, njs njs_external_ptr_t njs_vm_external(njs_vm_t *vm, const njs_value_t *value) { + njs_external_ptr_t external; + if (njs_fast_path(njs_is_object_data(value, NJS_DATA_TAG_EXTERNAL))) { - return njs_object_data(value); + external = njs_object_data(value); + if (external == NULL) { + external = vm->external; + } + + return external; } return NULL; diff -r fb059e00a887 -r d7f6e719af98 src/njs_vm.c --- a/src/njs_vm.c Fri Nov 13 15:08:05 2020 +0000 +++ b/src/njs_vm.c Fri Oct 02 18:38:12 2020 +0000 @@ -612,6 +612,20 @@ njs_vm_retval(njs_vm_t *vm) } +uintptr_t +njs_vm_meta(njs_vm_t *vm, njs_uint_t index) +{ + njs_vm_meta_t *metas; + + metas = vm->options.metas; + if (njs_slow_path(metas == NULL || metas->size <= index)) { + return -1; + } + + return metas->values[index]; +} + + void njs_vm_retval_set(njs_vm_t *vm, const njs_value_t *value) { From qiao.liu at intel.com Tue Nov 17 00:34:38 2020 From: qiao.liu at intel.com (Liu, Qiao) Date: Tue, 17 Nov 2020 00:34:38 +0000 Subject: [PATCH] Use BPF to distribute packet to different work thread. In-Reply-To: <SN6PR11MB351734F2EEDC51AB02E12EBF87390@SN6PR11MB3517.namprd11.prod.outlook.com> References: <SN6PR11MB35172BC6F818934BACB7FA3A87270@SN6PR11MB3517.namprd11.prod.outlook.com> <20200910101132.GA38727@vl.krasnogorsk.ru> <SN6PR11MB351752D836F2B1659E8FF67787240@SN6PR11MB3517.namprd11.prod.outlook.com> <20200913233936.GV18881@mdounin.ru> <SN6PR11MB3517F8A7E457669E49D3483C87230@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB3517E97F77028B2A11C3B9EF87200@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB351734F2EEDC51AB02E12EBF87390@SN6PR11MB3517.namprd11.prod.outlook.com> Message-ID: <SN6PR11MB3517ECE17C51097465A4A5B187E20@SN6PR11MB3517.namprd11.prod.outlook.com> Hi, what is the result of this patch set now? 
Thanks LQ -----Original Message----- From: Liu, Qiao Sent: Thursday, September 24, 2020 8:59 AM To: nginx-devel at nginx.org Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. Remove printf # HG changeset patch # User Liu Qiao <qiao.liu at intel.com> # Date 1599735293 14400 # Thu Sep 10 06:54:53 2020 -0400 # Node ID c2eabe9168d0cbefc030807a0808568d86c93e4f # Parent da5e3f5b16733167142b599b6af3ce9469a07d52 Use BPF to distribute packet to different work thread. Use Berkeley Packet Filter to get packet queue_mapping number, and use this queue_mapping number to distribute the packet to different work thread, this will improve CPU utilization and http latency. Author: Samudrala, Sridhar <sridhar.samudrala at intel.com> diff -r da5e3f5b1673 -r c2eabe9168d0 auto/os/linux --- a/auto/os/linux Wed Sep 02 23:13:36 2020 +0300 +++ b/auto/os/linux Thu Sep 10 06:54:53 2020 -0400 @@ -32,6 +32,10 @@ have=NGX_HAVE_POSIX_FADVISE . auto/nohave fi +if [ $version -lt 263680 ]; then + have=NGX_HAVE_REUSEPORT_CBPF . auto/nohave fi + # epoll, EPOLLET version ngx_feature="epoll" diff -r da5e3f5b1673 -r c2eabe9168d0 auto/unix --- a/auto/unix Wed Sep 02 23:13:36 2020 +0300 +++ b/auto/unix Thu Sep 10 06:54:53 2020 -0400 @@ -331,6 +331,17 @@ ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_REUSEPORT, NULL, 0)" . auto/feature +ngx_feature="SO_REUSEPORT_CBPF" +ngx_feature_name="NGX_HAVE_REUSEPORT_CBPF" +ngx_feature_run=no +ngx_feature_incs="#include <sys/socket.h> + #include <linux/filter.h> + #include <error.h>" +ngx_feature_path= +ngx_feature_libs= +ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, NULL, 0)" +. 
auto/feature + ngx_feature="SO_ACCEPTFILTER" ngx_feature_name="NGX_HAVE_DEFERRED_ACCEPT" diff -r da5e3f5b1673 -r c2eabe9168d0 src/core/ngx_connection.c --- a/src/core/ngx_connection.c Wed Sep 02 23:13:36 2020 +0300 +++ b/src/core/ngx_connection.c Thu Sep 10 06:54:53 2020 -0400 @@ -8,7 +8,10 @@ #include <ngx_config.h> #include <ngx_core.h> #include <ngx_event.h> - +#if (NGX_HAVE_REUSEPORT_CBPF) +#include <linux/filter.h> +#include <error.h> +#endif ngx_os_io_t ngx_io; @@ -708,6 +711,35 @@ return NGX_OK; } +#if(NGX_HAVE_REUSEPORT) +#if(NGX_HAVE_REUSEPORT_CBPF) +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif + +static ngx_int_t attach_bpf(int fd, uint16_t n) { + struct sock_filter code[] = { + /* A = skb->queue_mapping */ + { BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_QUEUE }, + /* A = A % n */ + { BPF_ALU | BPF_MOD, 0, 0, n }, + /* return A */ + { BPF_RET | BPF_A, 0, 0, 0 }, + }; + struct sock_fprog p = { + .len = ARRAY_SIZE(code), + .filter = code, + }; + + if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &p, sizeof(p))) + return NGX_ERROR; + else + return NGX_OK; +} +#endif +#endif + void ngx_configure_listening_sockets(ngx_cycle_t *cycle) @@ -719,6 +751,11 @@ #if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) struct accept_filter_arg af; #endif +#if (NGX_HAVE_REUSEPORT) +#if (NGX_HAVE_REUSEPORT_CBPF) + ngx_core_conf_t* ccf ; +#endif +#endif ls = cycle->listening.elts; for (i = 0; i < cycle->listening.nelts; i++) { @@ -1011,6 +1048,31 @@ } #endif +#if (NGX_HAVE_REUSEPORT) +#if (NGX_HAVE_REUSEPORT_CBPF) + if(ls[i].reuseport) + { + ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx,ngx_core_module); + if(ccf) + { + if( NGX_OK == attach_bpf(ls[i].fd, ccf->worker_processes) ) + { + ngx_log_error(NGX_LOG_INFO,cycle->log ,ngx_socket_errno,\ + "bpf prog attached to fd:%d\n", ls[i].fd); + } + else + { + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ + "failed to set SO_ATTACH_REUSEPORT_CBPF\n"); + } 
+ } + else + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ + "can not get config, attach bpf failed\n"); + + } +#endif +#endif } return; -----Original Message----- From: Liu, Qiao Sent: Tuesday, September 15, 2020 10:09 AM To: nginx-devel at nginx.org Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. Below is 5 times test result compare, 112 threads, 10000 connections, 1M object http request. Seems P99 have great improvement, and Max is also reduced AVG Stdev Max P99 test 1 1.32s 447.09ms 5.48s 2.82s BPF test 2 1.39s 513.8ms 9.42s 3.1s test 3 1.4s 341.38ms 5.63s 2.55s test 4 1.41s 407.45ms 6.96s 2.77s test 5 1.29s 644.81ms 9.45s 3.74s Average 1.362s 470.906ms 7.388s 2.996s NonBPF test 1 1.48s 916.88ms 9.44s 5.08s test 2 1.43s 658.48ms 9.54s 3.92s test 3 1.41s 650.38ms 8.63s 3.59s test 4 1.29s 1010ms 10s 5.21s test 5 1.31s 875.01ms 9.53s 4.39s Average 1.384s 822.15ms 9.428s 4.438s Thanks LQ -----Original Message----- From: nginx-devel <nginx-devel-bounces at nginx.org> On Behalf Of Liu, Qiao Sent: Monday, September 14, 2020 9:18 AM To: nginx-devel at nginx.org Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. Hi, Maxim Dounin: Thanks for your reply, this server is random selected, we just do BPF and no-BPF test, I think the latency based on server configuration, not related with BPF patch, also the NIC of the server is Mellanox, not ADQ capable hardware , we will do more test Thanks LQ -----Original Message----- From: nginx-devel <nginx-devel-bounces at nginx.org> On Behalf Of Maxim Dounin Sent: Monday, September 14, 2020 7:40 AM To: nginx-devel at nginx.org Subject: Re: [PATCH] Use BPF to distribute packet to different work thread. Hello! 
On Fri, Sep 11, 2020 at 05:41:47AM +0000, Liu, Qiao wrote: > Hi, Vladimir Homutov: > The below is our WRK test result output with BPF enable > > 112 threads and 10000 connections > Thread Stats Avg Stdev Max +/- Stdev > Latency 608.23ms 820.71ms 10.00s 87.48% > Connect 16.52ms 54.53ms 1.99s 94.73% > Delay 153.13ms 182.17ms 2.00s 90.74% > Req/Sec 244.79 142.32 1.99k 68.40% > Latency Distribution > 50.00% 293.50ms > 75.00% 778.33ms > 90.00% 1.61s > 99.00% 3.71s > 99.90% 7.03s > 99.99% 8.94s > Connect Distribution > 50.00% 1.93ms > 75.00% 2.85ms > 90.00% 55.76ms > 99.00% 229.19ms > 99.90% 656.79ms > 99.99% 1.43s > Delay Distribution > 50.00% 110.96ms > 75.00% 193.67ms > 90.00% 321.77ms > 99.00% 959.27ms > 99.90% 1.57s > 99.99% 1.91s > Compared with no BPF but enable reuseport as below > > 112 threads and 10000 connections > Thread Stats Avg Stdev Max +/- Stdev > Latency 680.50ms 943.69ms 10.00s 87.18% > Connect 58.44ms 238.08ms 2.00s 94.58% > Delay 158.84ms 256.28ms 2.00s 90.92% > Req/Sec 244.51 151.00 1.41k 69.67% > Latency Distribution > 50.00% 317.61ms > 75.00% 913.52ms > 90.00% 1.90s > 99.00% 4.30s > 99.90% 6.52s > 99.99% 8.80s > Connect Distribution > 50.00% 1.88ms > 75.00% 2.21ms > 90.00% 55.94ms > 99.00% 1.45s > 99.90% 1.95s > 99.99% 2.00s > Delay Distribution > 50.00% 73.01ms > 75.00% 190.40ms > 90.00% 387.01ms > 99.00% 1.34s > 99.90% 1.86s > 99.99% 1.99s > > > From the above results, there shows almost 20% percent latency > reduction. P99 latency of BPF is 3.71s , but without BPF is 4.3s. Thank you for the results. Given that latency stdev is way higher than the average latency, I don't think the "20% percent latency reduction" observed is statistically significant. Please try running several tests and use ministat(1) to check the results. Also, the latency values look very high, and request rate very low. What's on the server side? 
-- Maxim Dounin http://mdounin.ru/ _______________________________________________ nginx-devel mailing list nginx-devel at nginx.org http://mailman.nginx.org/mailman/listinfo/nginx-devel _______________________________________________ nginx-devel mailing list nginx-devel at nginx.org http://mailman.nginx.org/mailman/listinfo/nginx-devel From mikhail.isachenkov at nginx.com Tue Nov 17 09:09:09 2020 From: mikhail.isachenkov at nginx.com (Mikhail Isachenkov) Date: Tue, 17 Nov 2020 12:09:09 +0300 Subject: [PATCH] Use BPF to distribute packet to different work thread. In-Reply-To: <SN6PR11MB3517ECE17C51097465A4A5B187E20@SN6PR11MB3517.namprd11.prod.outlook.com> References: <SN6PR11MB35172BC6F818934BACB7FA3A87270@SN6PR11MB3517.namprd11.prod.outlook.com> <20200910101132.GA38727@vl.krasnogorsk.ru> <SN6PR11MB351752D836F2B1659E8FF67787240@SN6PR11MB3517.namprd11.prod.outlook.com> <20200913233936.GV18881@mdounin.ru> <SN6PR11MB3517F8A7E457669E49D3483C87230@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB3517E97F77028B2A11C3B9EF87200@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB351734F2EEDC51AB02E12EBF87390@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB3517ECE17C51097465A4A5B187E20@SN6PR11MB3517.namprd11.prod.outlook.com> Message-ID: <ad46ae97-e704-ccdf-b082-260ed0e51dc3@nginx.com> Hi Liu Quao, Looks like you didn't receive my answer for some reason. You can find it in maillist archive: http://mailman.nginx.org/pipermail/nginx-devel/2020-September/013483.html And let me quote it a little: a) please share your test stand/test scripts/nginx configuration b) did you perform any tests with server and client running on the same server? 17.11.2020 03:34, Liu, Qiao ?????: > Hi, what is the result of this patch set now? > Thanks > LQ > > -----Original Message----- > From: Liu, Qiao > Sent: Thursday, September 24, 2020 8:59 AM > To: nginx-devel at nginx.org > Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. 
> > Remove printf > > # HG changeset patch > # User Liu Qiao <qiao.liu at intel.com> > # Date 1599735293 14400 > # Thu Sep 10 06:54:53 2020 -0400 > # Node ID c2eabe9168d0cbefc030807a0808568d86c93e4f > # Parent da5e3f5b16733167142b599b6af3ce9469a07d52 > Use BPF to distribute packet to different work thread. > Use Berkeley Packet Filter to get packet queue_mapping number, and use this queue_mapping number to distribute the packet to different work thread, this will improve CPU utilization and http latency. > Author: Samudrala, Sridhar <sridhar.samudrala at intel.com> > > diff -r da5e3f5b1673 -r c2eabe9168d0 auto/os/linux > --- a/auto/os/linux Wed Sep 02 23:13:36 2020 +0300 > +++ b/auto/os/linux Thu Sep 10 06:54:53 2020 -0400 > @@ -32,6 +32,10 @@ > have=NGX_HAVE_POSIX_FADVISE . auto/nohave fi > > +if [ $version -lt 263680 ]; then > + have=NGX_HAVE_REUSEPORT_CBPF . auto/nohave fi > + > # epoll, EPOLLET version > > ngx_feature="epoll" > diff -r da5e3f5b1673 -r c2eabe9168d0 auto/unix > --- a/auto/unix Wed Sep 02 23:13:36 2020 +0300 > +++ b/auto/unix Thu Sep 10 06:54:53 2020 -0400 > @@ -331,6 +331,17 @@ > ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_REUSEPORT, NULL, 0)" > . auto/feature > > +ngx_feature="SO_REUSEPORT_CBPF" > +ngx_feature_name="NGX_HAVE_REUSEPORT_CBPF" > +ngx_feature_run=no > +ngx_feature_incs="#include <sys/socket.h> > + #include <linux/filter.h> > + #include <error.h>" > +ngx_feature_path= > +ngx_feature_libs= > +ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, NULL, 0)" > +. 
auto/feature > + > > ngx_feature="SO_ACCEPTFILTER" > ngx_feature_name="NGX_HAVE_DEFERRED_ACCEPT" > diff -r da5e3f5b1673 -r c2eabe9168d0 src/core/ngx_connection.c > --- a/src/core/ngx_connection.c Wed Sep 02 23:13:36 2020 +0300 > +++ b/src/core/ngx_connection.c Thu Sep 10 06:54:53 2020 -0400 > @@ -8,7 +8,10 @@ > #include <ngx_config.h> > #include <ngx_core.h> > #include <ngx_event.h> > - > +#if (NGX_HAVE_REUSEPORT_CBPF) > +#include <linux/filter.h> > +#include <error.h> > +#endif > > ngx_os_io_t ngx_io; > > @@ -708,6 +711,35 @@ > return NGX_OK; > } > > +#if(NGX_HAVE_REUSEPORT) > +#if(NGX_HAVE_REUSEPORT_CBPF) > +#ifndef ARRAY_SIZE > +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif > + > +static ngx_int_t attach_bpf(int fd, uint16_t n) { > + struct sock_filter code[] = { > + /* A = skb->queue_mapping */ > + { BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_QUEUE }, > + /* A = A % n */ > + { BPF_ALU | BPF_MOD, 0, 0, n }, > + /* return A */ > + { BPF_RET | BPF_A, 0, 0, 0 }, > + }; > + struct sock_fprog p = { > + .len = ARRAY_SIZE(code), > + .filter = code, > + }; > + > + if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &p, sizeof(p))) > + return NGX_ERROR; > + else > + return NGX_OK; > +} > +#endif > +#endif > + > > void > ngx_configure_listening_sockets(ngx_cycle_t *cycle) @@ -719,6 +751,11 @@ #if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > struct accept_filter_arg af; > #endif > +#if (NGX_HAVE_REUSEPORT) > +#if (NGX_HAVE_REUSEPORT_CBPF) > + ngx_core_conf_t* ccf ; > +#endif > +#endif > > ls = cycle->listening.elts; > for (i = 0; i < cycle->listening.nelts; i++) { @@ -1011,6 +1048,31 @@ > } > > #endif > +#if (NGX_HAVE_REUSEPORT) > +#if (NGX_HAVE_REUSEPORT_CBPF) > + if(ls[i].reuseport) > + { > + ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx,ngx_core_module); > + if(ccf) > + { > + if( NGX_OK == attach_bpf(ls[i].fd, ccf->worker_processes) ) > + { > + ngx_log_error(NGX_LOG_INFO,cycle->log ,ngx_socket_errno,\ > + "bpf prog 
attached to fd:%d\n", ls[i].fd); > + } > + else > + { > + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ > + "failed to set SO_ATTACH_REUSEPORT_CBPF\n"); > + } > + } > + else > + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ > + "can not get config, attach bpf failed\n"); > + > + } > +#endif > +#endif > } > > return; > > -----Original Message----- > From: Liu, Qiao > Sent: Tuesday, September 15, 2020 10:09 AM > To: nginx-devel at nginx.org > Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. > > Below is 5 times test result compare, 112 threads, 10000 connections, 1M object http request. Seems P99 have great improvement, and Max is also reduced > > > > AVG Stdev Max P99 > test 1 1.32s 447.09ms 5.48s 2.82s > BPF test 2 1.39s 513.8ms 9.42s 3.1s > test 3 1.4s 341.38ms 5.63s 2.55s > test 4 1.41s 407.45ms 6.96s 2.77s > test 5 1.29s 644.81ms 9.45s 3.74s > Average 1.362s 470.906ms 7.388s 2.996s > > NonBPF test 1 1.48s 916.88ms 9.44s 5.08s > test 2 1.43s 658.48ms 9.54s 3.92s > test 3 1.41s 650.38ms 8.63s 3.59s > test 4 1.29s 1010ms 10s 5.21s > test 5 1.31s 875.01ms 9.53s 4.39s > Average 1.384s 822.15ms 9.428s 4.438s > > > Thanks > LQ > -----Original Message----- > From: nginx-devel <nginx-devel-bounces at nginx.org> On Behalf Of Liu, Qiao > Sent: Monday, September 14, 2020 9:18 AM > To: nginx-devel at nginx.org > Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. 
> > Hi, Maxim Dounin: > Thanks for your reply, this server is random selected, we just do BPF and no-BPF test, I think the latency based on server configuration, not related with BPF patch, also the NIC of the server is Mellanox, not ADQ capable hardware , we will do more test Thanks LQ > > -----Original Message----- > From: nginx-devel <nginx-devel-bounces at nginx.org> On Behalf Of Maxim Dounin > Sent: Monday, September 14, 2020 7:40 AM > To: nginx-devel at nginx.org > Subject: Re: [PATCH] Use BPF to distribute packet to different work thread. > > Hello! > > On Fri, Sep 11, 2020 at 05:41:47AM +0000, Liu, Qiao wrote: > >> Hi, Vladimir Homutov: >> The below is our WRK test result output with BPF enable >> >> 112 threads and 10000 connections >> Thread Stats Avg Stdev Max +/- Stdev >> Latency 608.23ms 820.71ms 10.00s 87.48% >> Connect 16.52ms 54.53ms 1.99s 94.73% >> Delay 153.13ms 182.17ms 2.00s 90.74% >> Req/Sec 244.79 142.32 1.99k 68.40% >> Latency Distribution >> 50.00% 293.50ms >> 75.00% 778.33ms >> 90.00% 1.61s >> 99.00% 3.71s >> 99.90% 7.03s >> 99.99% 8.94s >> Connect Distribution >> 50.00% 1.93ms >> 75.00% 2.85ms >> 90.00% 55.76ms >> 99.00% 229.19ms >> 99.90% 656.79ms >> 99.99% 1.43s >> Delay Distribution >> 50.00% 110.96ms >> 75.00% 193.67ms >> 90.00% 321.77ms >> 99.00% 959.27ms >> 99.90% 1.57s >> 99.99% 1.91s >> Compared with no BPF but enable reuseport as below >> >> 112 threads and 10000 connections >> Thread Stats Avg Stdev Max +/- Stdev >> Latency 680.50ms 943.69ms 10.00s 87.18% >> Connect 58.44ms 238.08ms 2.00s 94.58% >> Delay 158.84ms 256.28ms 2.00s 90.92% >> Req/Sec 244.51 151.00 1.41k 69.67% >> Latency Distribution >> 50.00% 317.61ms >> 75.00% 913.52ms >> 90.00% 1.90s >> 99.00% 4.30s >> 99.90% 6.52s >> 99.99% 8.80s >> Connect Distribution >> 50.00% 1.88ms >> 75.00% 2.21ms >> 90.00% 55.94ms >> 99.00% 1.45s >> 99.90% 1.95s >> 99.99% 2.00s >> Delay Distribution >> 50.00% 73.01ms >> 75.00% 190.40ms >> 90.00% 387.01ms >> 99.00% 1.34s >> 99.90% 1.86s >> 
99.99% 1.99s >> >> >> From the above results, there shows almost 20% percent latency >> reduction. P99 latency of BPF is 3.71s , but without BPF is 4.3s. > > Thank you for the results. > > Given that latency stdev is way higher than the average latency, I don't think the "20% percent latency reduction" observed is statistically significant. Please try running several tests and use ministat(1) to check the results. > > Also, the latency values look very high, and request rate very low. What's on the server side? > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- Best regards, Mikhail Isachenkov NGINX Professional Services From xeioex at nginx.com Tue Nov 17 13:22:24 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 17 Nov 2020 13:22:24 +0000 Subject: [njs] Stream: linking ngx object to the module missed in d7f6e719af98. Message-ID: <hg.24717ec294b8.1605619344.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/24717ec294b8 branches: changeset: 1566:24717ec294b8 user: Dmitry Volyntsev <xeioex at nginx.com> date: Tue Nov 17 13:16:47 2020 +0000 description: Stream: linking ngx object to the module missed in d7f6e719af98. 
diffstat: nginx/ngx_stream_js_module.c | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diffs (16 lines): diff -r d7f6e719af98 -r 24717ec294b8 nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Fri Oct 02 18:38:12 2020 +0000 +++ b/nginx/ngx_stream_js_module.c Tue Nov 17 13:16:47 2020 +0000 @@ -1457,6 +1457,12 @@ ngx_stream_js_init_main_conf(ngx_conf_t } jmcf->proto = proto; + + rc = ngx_js_core_init(jmcf->vm, cf->log); + if (njs_slow_path(rc != NJS_OK)) { + return NGX_CONF_ERROR; + } + end = start + size; rc = njs_vm_compile(jmcf->vm, &start, end); From agentzh at gmail.com Wed Nov 18 02:44:19 2020 From: agentzh at gmail.com (Yichun Zhang (agentzh)) Date: Tue, 17 Nov 2020 18:44:19 -0800 Subject: [ANN] Test::Nginx 0.29 is released Message-ID: <CAB4Tn6P0G4Og6rGin4WfA9zgh6YxbUUpxVVCFe1-VseF8RfPuw@mail.gmail.com> Hi there, I am happy to announce the new 0.29 release of Test::Nginx: https://openresty.org/en/ann-test-nginx-029.html This version fixes the Test2::Util module dependency problem introduced in the previous 0.28 release. This Perl module provides a test scaffold for automated testing in Nginx C module or OpenResty-based Lua library development and regression testing. This class inherits from Test::Base, thus bringing all its declarative power to the Nginx C module testing practices. All of our OpenResty projects are using this test scaffold for automated regression testing. Enjoy! Best regards, Yichun --- Yichun Zhang is the creator of OpenResty, the founder and CEO of OpenResty Inc. From qiao.liu at intel.com Wed Nov 18 07:16:50 2020 From: qiao.liu at intel.com (Liu, Qiao) Date: Wed, 18 Nov 2020 07:16:50 +0000 Subject: [PATCH] Use BPF to distribute packet to different work thread. 
In-Reply-To: <ad46ae97-e704-ccdf-b082-260ed0e51dc3@nginx.com> References: <SN6PR11MB35172BC6F818934BACB7FA3A87270@SN6PR11MB3517.namprd11.prod.outlook.com> <20200910101132.GA38727@vl.krasnogorsk.ru> <SN6PR11MB351752D836F2B1659E8FF67787240@SN6PR11MB3517.namprd11.prod.outlook.com> <20200913233936.GV18881@mdounin.ru> <SN6PR11MB3517F8A7E457669E49D3483C87230@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB3517E97F77028B2A11C3B9EF87200@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB351734F2EEDC51AB02E12EBF87390@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB3517ECE17C51097465A4A5B187E20@SN6PR11MB3517.namprd11.prod.outlook.com> <ad46ae97-e704-ccdf-b082-260ed0e51dc3@nginx.com> Message-ID: <SN6PR11MB351747A2BBAB398D5D2B6F5087E10@SN6PR11MB3517.namprd11.prod.outlook.com> Hi, Mikhail Isachenkov: Great thanks for reply, I use wrk to do the test, please see below link for wrk script and nginx config file https://gist.github.com/qiaoliu78/75e7710a02c3346d22ddda04cea83b97 I use 2 different E5 8280 servers, each with 2 Mellanox 100GB cards bound and directly connected, one server run Nginx the other server run WRK. I also run the test on same server, but seems can not prove anything. 
Below is the result Run wrk and nginx on same server: 112 threads and 10000 connections Thread Stats Avg Stdev Max +/- Stdev Latency 57.24ms 248.60ms 8.09s 95.49% Connect 269.96ms 450.84ms 1.07s 74.07% Delay 20.80ms 133.16ms 1.99s 99.08% Req/Sec 812.77 749.04 3.90k 76.18% Latency Distribution 50.00% 8.28ms 75.00% 9.28ms 90.00% 19.02ms 99.00% 1.36s 99.90% 2.76s 99.99% 4.63s Connect Distribution 50.00% 346.00us 75.00% 1.00s 90.00% 1.04s 99.00% 1.06s 99.90% 1.07s 99.99% 1.07s Delay Distribution 50.00% 6.60ms 75.00% 7.53ms 90.00% 9.92ms 99.00% 45.82ms 99.90% 1.55s 99.99% 1.82s 2247253 requests in 1.00m, 2.14TB read Socket errors: connect 0, read 376, write 0, pconn 581, nodata 0, timeout 19, connect_timeout 2419, delay_timeout 1178 Requests/sec: 37389.74 Transfer/sec: 36.53GB Run nginx and wrk on two different server: 112 threads and 10000 connections Thread Stats Avg Stdev Max +/- Stdev Latency 1.27s 879.93ms 9.84s 76.66% Connect 8.49ms 16.28ms 99.52ms 90.27% Delay 740.14ms 597.38ms 2.00s 48.97% Req/Sec 73.41 32.15 2.06k 68.31% Latency Distribution 50.00% 1.24s 75.00% 1.67s 90.00% 2.16s 99.00% 4.40s 99.90% 7.74s 99.99% 9.11s Connect Distribution 50.00% 2.71ms 75.00% 4.43ms 90.00% 24.43ms 99.00% 84.09ms 99.90% 99.25ms 99.99% 99.51ms Delay Distribution 50.00% 747.60ms 75.00% 1.29s 90.00% 1.51s 99.00% 1.85s 99.90% 1.98s 99.99% 2.00s 487468 requests in 1.00m, 476.98GB read Socket errors: connect 0, read 0, write 0, pconn 1, nodata 0, timeout 9, conne ct_timeout 0, delay_timeout 6912 Requests/sec: 8110.10 Transfer/sec: 7.94GB Thanks LQ -----Original Message----- From: Mikhail Isachenkov <mikhail.isachenkov at nginx.com> Sent: Tuesday, November 17, 2020 5:09 PM To: nginx-devel at nginx.org; Liu, Qiao <qiao.liu at intel.com> Subject: Re: [PATCH] Use BPF to distribute packet to different work thread. Hi Liu Quao, Looks like you didn't receive my answer for some reason. 
You can find it in maillist archive: http://mailman.nginx.org/pipermail/nginx-devel/2020-September/013483.html And let me quote it a little: a) please share your test stand/test scripts/nginx configuration b) did you perform any tests with server and client running on the same server? 17.11.2020 03:34, Liu, Qiao ?????: > Hi, what is the result of this patch set now? > Thanks > LQ > > -----Original Message----- > From: Liu, Qiao > Sent: Thursday, September 24, 2020 8:59 AM > To: nginx-devel at nginx.org > Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. > > Remove printf > > # HG changeset patch > # User Liu Qiao <qiao.liu at intel.com> > # Date 1599735293 14400 > # Thu Sep 10 06:54:53 2020 -0400 > # Node ID c2eabe9168d0cbefc030807a0808568d86c93e4f > # Parent da5e3f5b16733167142b599b6af3ce9469a07d52 > Use BPF to distribute packet to different work thread. > Use Berkeley Packet Filter to get packet queue_mapping number, and use this queue_mapping number to distribute the packet to different work thread, this will improve CPU utilization and http latency. > Author: Samudrala, Sridhar <sridhar.samudrala at intel.com> > > diff -r da5e3f5b1673 -r c2eabe9168d0 auto/os/linux > --- a/auto/os/linux Wed Sep 02 23:13:36 2020 +0300 > +++ b/auto/os/linux Thu Sep 10 06:54:53 2020 -0400 > @@ -32,6 +32,10 @@ > have=NGX_HAVE_POSIX_FADVISE . auto/nohave fi > > +if [ $version -lt 263680 ]; then > + have=NGX_HAVE_REUSEPORT_CBPF . auto/nohave fi > + > # epoll, EPOLLET version > > ngx_feature="epoll" > diff -r da5e3f5b1673 -r c2eabe9168d0 auto/unix > --- a/auto/unix Wed Sep 02 23:13:36 2020 +0300 > +++ b/auto/unix Thu Sep 10 06:54:53 2020 -0400 > @@ -331,6 +331,17 @@ > ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_REUSEPORT, NULL, 0)" > . 
auto/feature > > +ngx_feature="SO_REUSEPORT_CBPF" > +ngx_feature_name="NGX_HAVE_REUSEPORT_CBPF" > +ngx_feature_run=no > +ngx_feature_incs="#include <sys/socket.h> > + #include <linux/filter.h> > + #include <error.h>" > +ngx_feature_path= > +ngx_feature_libs= > +ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, NULL, 0)" > +. auto/feature > + > > ngx_feature="SO_ACCEPTFILTER" > ngx_feature_name="NGX_HAVE_DEFERRED_ACCEPT" > diff -r da5e3f5b1673 -r c2eabe9168d0 src/core/ngx_connection.c > --- a/src/core/ngx_connection.c Wed Sep 02 23:13:36 2020 +0300 > +++ b/src/core/ngx_connection.c Thu Sep 10 06:54:53 2020 -0400 > @@ -8,7 +8,10 @@ > #include <ngx_config.h> > #include <ngx_core.h> > #include <ngx_event.h> > - > +#if (NGX_HAVE_REUSEPORT_CBPF) > +#include <linux/filter.h> > +#include <error.h> > +#endif > > ngx_os_io_t ngx_io; > > @@ -708,6 +711,35 @@ > return NGX_OK; > } > > +#if(NGX_HAVE_REUSEPORT) > +#if(NGX_HAVE_REUSEPORT_CBPF) > +#ifndef ARRAY_SIZE > +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif > + > +static ngx_int_t attach_bpf(int fd, uint16_t n) { > + struct sock_filter code[] = { > + /* A = skb->queue_mapping */ > + { BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_QUEUE }, > + /* A = A % n */ > + { BPF_ALU | BPF_MOD, 0, 0, n }, > + /* return A */ > + { BPF_RET | BPF_A, 0, 0, 0 }, > + }; > + struct sock_fprog p = { > + .len = ARRAY_SIZE(code), > + .filter = code, > + }; > + > + if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &p, sizeof(p))) > + return NGX_ERROR; > + else > + return NGX_OK; > +} > +#endif > +#endif > + > > void > ngx_configure_listening_sockets(ngx_cycle_t *cycle) @@ -719,6 +751,11 @@ #if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > struct accept_filter_arg af; > #endif > +#if (NGX_HAVE_REUSEPORT) > +#if (NGX_HAVE_REUSEPORT_CBPF) > + ngx_core_conf_t* ccf ; > +#endif > +#endif > > ls = cycle->listening.elts; > for (i = 0; i < cycle->listening.nelts; i++) { @@ -1011,6 +1048,31 @@ > } 
> > #endif > +#if (NGX_HAVE_REUSEPORT) > +#if (NGX_HAVE_REUSEPORT_CBPF) > + if(ls[i].reuseport) > + { > + ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx,ngx_core_module); > + if(ccf) > + { > + if( NGX_OK == attach_bpf(ls[i].fd, ccf->worker_processes) ) > + { > + ngx_log_error(NGX_LOG_INFO,cycle->log ,ngx_socket_errno,\ > + "bpf prog attached to fd:%d\n", ls[i].fd); > + } > + else > + { > + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ > + "failed to set SO_ATTACH_REUSEPORT_CBPF\n"); > + } > + } > + else > + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ > + "can not get config, attach bpf failed\n"); > + > + } > +#endif > +#endif > } > > return; > > -----Original Message----- > From: Liu, Qiao > Sent: Tuesday, September 15, 2020 10:09 AM > To: nginx-devel at nginx.org > Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. > > Below is 5 times test result compare, 112 threads, 10000 connections, > 1M object http request. Seems P99 have great improvement, and Max is > also reduced > > > > AVG Stdev Max P99 > test 1 1.32s 447.09ms 5.48s 2.82s > BPF test 2 1.39s 513.8ms 9.42s 3.1s > test 3 1.4s 341.38ms 5.63s 2.55s > test 4 1.41s 407.45ms 6.96s 2.77s > test 5 1.29s 644.81ms 9.45s 3.74s > Average 1.362s 470.906ms 7.388s 2.996s > > NonBPF test 1 1.48s 916.88ms 9.44s 5.08s > test 2 1.43s 658.48ms 9.54s 3.92s > test 3 1.41s 650.38ms 8.63s 3.59s > test 4 1.29s 1010ms 10s 5.21s > test 5 1.31s 875.01ms 9.53s 4.39s > Average 1.384s 822.15ms 9.428s 4.438s > > > Thanks > LQ > -----Original Message----- > From: nginx-devel <nginx-devel-bounces at nginx.org> On Behalf Of Liu, > Qiao > Sent: Monday, September 14, 2020 9:18 AM > To: nginx-devel at nginx.org > Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. 
> > Hi, Maxim Dounin: > Thanks for your reply, this server is random selected, we just do BPF > and no-BPF test, I think the latency based on server configuration, > not related with BPF patch, also the NIC of the server is Mellanox, > not ADQ capable hardware , we will do more test Thanks LQ > > -----Original Message----- > From: nginx-devel <nginx-devel-bounces at nginx.org> On Behalf Of Maxim > Dounin > Sent: Monday, September 14, 2020 7:40 AM > To: nginx-devel at nginx.org > Subject: Re: [PATCH] Use BPF to distribute packet to different work thread. > > Hello! > > On Fri, Sep 11, 2020 at 05:41:47AM +0000, Liu, Qiao wrote: > >> Hi, Vladimir Homutov: >> The below is our WRK test result output with BPF enable >> >> 112 threads and 10000 connections >> Thread Stats Avg Stdev Max +/- Stdev >> Latency 608.23ms 820.71ms 10.00s 87.48% >> Connect 16.52ms 54.53ms 1.99s 94.73% >> Delay 153.13ms 182.17ms 2.00s 90.74% >> Req/Sec 244.79 142.32 1.99k 68.40% >> Latency Distribution >> 50.00% 293.50ms >> 75.00% 778.33ms >> 90.00% 1.61s >> 99.00% 3.71s >> 99.90% 7.03s >> 99.99% 8.94s >> Connect Distribution >> 50.00% 1.93ms >> 75.00% 2.85ms >> 90.00% 55.76ms >> 99.00% 229.19ms >> 99.90% 656.79ms >> 99.99% 1.43s >> Delay Distribution >> 50.00% 110.96ms >> 75.00% 193.67ms >> 90.00% 321.77ms >> 99.00% 959.27ms >> 99.90% 1.57s >> 99.99% 1.91s >> Compared with no BPF but enable reuseport as below >> >> 112 threads and 10000 connections >> Thread Stats Avg Stdev Max +/- Stdev >> Latency 680.50ms 943.69ms 10.00s 87.18% >> Connect 58.44ms 238.08ms 2.00s 94.58% >> Delay 158.84ms 256.28ms 2.00s 90.92% >> Req/Sec 244.51 151.00 1.41k 69.67% >> Latency Distribution >> 50.00% 317.61ms >> 75.00% 913.52ms >> 90.00% 1.90s >> 99.00% 4.30s >> 99.90% 6.52s >> 99.99% 8.80s >> Connect Distribution >> 50.00% 1.88ms >> 75.00% 2.21ms >> 90.00% 55.94ms >> 99.00% 1.45s >> 99.90% 1.95s >> 99.99% 2.00s >> Delay Distribution >> 50.00% 73.01ms >> 75.00% 190.40ms >> 90.00% 387.01ms >> 99.00% 1.34s >> 99.90% 
1.86s >> 99.99% 1.99s >> >> >> From the above results, there shows almost 20% percent latency >> reduction. P99 latency of BPF is 3.71s , but without BPF is 4.3s. > > Thank you for the results. > > Given that latency stdev is way higher than the average latency, I don't think the "20% percent latency reduction" observed is statistically significant. Please try running several tests and use ministat(1) to check the results. > > Also, the latency values look very high, and request rate very low. What's on the server side? > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- Best regards, Mikhail Isachenkov NGINX Professional Services From xeioex at nginx.com Wed Nov 18 08:32:49 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 18 Nov 2020 08:32:49 +0000 Subject: [njs] Modules: added support for Buffer object where string is expected. Message-ID: <hg.e97f76121196.1605688369.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/e97f76121196 branches: changeset: 1567:e97f76121196 user: Dmitry Volyntsev <xeioex at nginx.com> date: Tue Nov 17 13:22:34 2020 +0000 description: Modules: added support for Buffer object where string is expected. 
diffstat: nginx/ngx_http_js_module.c | 2 +- nginx/ngx_js.c | 2 +- nginx/ngx_stream_js_module.c | 8 +---- src/njs.h | 6 ++++ src/njs_vm.c | 61 +++++++++++++++++++++++++++++++++++++++++++ src/test/njs_externals_test.c | 8 ++-- 6 files changed, 75 insertions(+), 12 deletions(-) diffs (175 lines): diff -r 24717ec294b8 -r e97f76121196 nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Tue Nov 17 13:16:47 2020 +0000 +++ b/nginx/ngx_http_js_module.c Tue Nov 17 13:22:34 2020 +0000 @@ -2371,7 +2371,7 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, arg = njs_arg(args, nargs, 2); if (njs_value_is_string(arg)) { - if (njs_vm_value_to_string(vm, &args_arg, arg) != NJS_OK) { + if (ngx_js_string(vm, arg, &args_arg) != NJS_OK) { njs_vm_error(vm, "failed to convert args"); return NJS_ERROR; } diff -r 24717ec294b8 -r e97f76121196 nginx/ngx_js.c --- a/nginx/ngx_js.c Tue Nov 17 13:16:47 2020 +0000 +++ b/nginx/ngx_js.c Tue Nov 17 13:22:34 2020 +0000 @@ -105,7 +105,7 @@ ngx_int_t ngx_js_string(njs_vm_t *vm, njs_value_t *value, njs_str_t *str) { if (value != NULL && !njs_value_is_null_or_undefined(value)) { - if (njs_vm_value_to_string(vm, str, value) == NJS_ERROR) { + if (njs_vm_value_to_bytes(vm, str, value) == NJS_ERROR) { return NGX_ERROR; } diff -r 24717ec294b8 -r e97f76121196 nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Tue Nov 17 13:16:47 2020 +0000 +++ b/nginx/ngx_stream_js_module.c Tue Nov 17 13:22:34 2020 +0000 @@ -942,9 +942,7 @@ ngx_stream_js_ext_on(njs_vm_t *vm, njs_v return NJS_ERROR; } - if (njs_vm_value_to_string(vm, &name, njs_arg(args, nargs, 1)) - == NJS_ERROR) - { + if (ngx_js_string(vm, njs_arg(args, nargs, 1), &name) == NJS_ERROR) { njs_vm_error(vm, "failed to convert event arg"); return NJS_ERROR; } @@ -991,9 +989,7 @@ ngx_stream_js_ext_off(njs_vm_t *vm, njs_ return NJS_ERROR; } - if (njs_vm_value_to_string(vm, &name, njs_arg(args, nargs, 1)) - == NJS_ERROR) - { + if (ngx_js_string(vm, njs_arg(args, nargs, 1), &name) == NJS_ERROR) { 
njs_vm_error(vm, "failed to convert event arg"); return NJS_ERROR; } diff -r 24717ec294b8 -r e97f76121196 src/njs.h --- a/src/njs.h Tue Nov 17 13:16:47 2020 +0000 +++ b/src/njs.h Tue Nov 17 13:22:34 2020 +0000 @@ -339,6 +339,12 @@ NJS_EXPORT njs_int_t njs_vm_value_buffer const u_char *start, uint32_t size); /* + * Converts a value to bytes. + */ +NJS_EXPORT njs_int_t njs_vm_value_to_bytes(njs_vm_t *vm, njs_str_t *dst, + njs_value_t *src); + +/* * Converts a value to string. */ NJS_EXPORT njs_int_t njs_vm_value_to_string(njs_vm_t *vm, njs_str_t *dst, diff -r 24717ec294b8 -r e97f76121196 src/njs_vm.c --- a/src/njs_vm.c Tue Nov 17 13:16:47 2020 +0000 +++ b/src/njs_vm.c Tue Nov 17 13:22:34 2020 +0000 @@ -1116,6 +1116,67 @@ njs_vm_value_to_string(njs_vm_t *vm, njs njs_int_t +njs_vm_value_to_bytes(njs_vm_t *vm, njs_str_t *dst, njs_value_t *src) +{ + u_char *start; + size_t size; + njs_int_t ret; + njs_value_t value; + njs_typed_array_t *array; + njs_array_buffer_t *buffer; + + if (njs_slow_path(src == NULL)) { + return NJS_ERROR; + } + + ret = NJS_OK; + value = *src; + + switch (value.type) { + case NJS_TYPED_ARRAY: + case NJS_DATA_VIEW: + array = njs_typed_array(&value); + buffer = njs_typed_array_buffer(array); + if (njs_slow_path(njs_is_detached_buffer(buffer))) { + njs_type_error(vm, "detached buffer"); + return NJS_ERROR; + } + + dst->start = &buffer->u.u8[array->offset]; + dst->length = array->byte_length; + break; + + default: + ret = njs_value_to_string(vm, &value, &value); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + size = value.short_string.size; + + if (size != NJS_STRING_LONG) { + start = njs_mp_alloc(vm->mem_pool, size); + if (njs_slow_path(start == NULL)) { + njs_memory_error(vm); + return NJS_ERROR; + } + + memcpy(start, value.short_string.start, size); + + } else { + size = value.long_string.size; + start = value.long_string.data->start; + } + + dst->length = size; + dst->start = start; + } + + return ret; +} + + +njs_int_t 
njs_vm_value_string_copy(njs_vm_t *vm, njs_str_t *retval, njs_value_t *value, uintptr_t *next) { diff -r 24717ec294b8 -r e97f76121196 src/test/njs_externals_test.c --- a/src/test/njs_externals_test.c Tue Nov 17 13:16:47 2020 +0000 +++ b/src/test/njs_externals_test.c Tue Nov 17 13:22:34 2020 +0000 @@ -130,7 +130,7 @@ njs_unit_test_r_uri(njs_vm_t *vm, njs_ob field = (njs_str_t *) (p + njs_vm_prop_magic32(prop)); if (setval != NULL) { - return njs_vm_value_to_string(vm, field, setval); + return njs_vm_value_to_bytes(vm, field, setval); } return njs_vm_value_string_set(vm, retval, field->start, field->length); @@ -358,7 +358,7 @@ njs_unit_test_r_method(njs_vm_t *vm, njs return NJS_ERROR; } - ret = njs_vm_value_to_string(vm, &s, njs_arg(args, nargs, 1)); + ret = njs_vm_value_to_bytes(vm, &s, njs_arg(args, nargs, 1)); if (ret == NJS_OK && s.length == 3 && memcmp(s.start, "YES", 3) == 0) { return njs_vm_value_string_set(vm, njs_vm_retval(vm), r->uri.start, r->uri.length); @@ -388,7 +388,7 @@ njs_unit_test_r_create(njs_vm_t *vm, njs goto memory_error; } - if (njs_vm_value_to_string(vm, &sr->uri, njs_arg(args, nargs, 1)) + if (njs_vm_value_to_bytes(vm, &sr->uri, njs_arg(args, nargs, 1)) != NJS_OK) { return NJS_ERROR; @@ -424,7 +424,7 @@ njs_unit_test_r_bind(njs_vm_t *vm, njs_v return NJS_ERROR; } - if (njs_vm_value_to_string(vm, &name, njs_arg(args, nargs, 1)) != NJS_OK) { + if (njs_vm_value_to_bytes(vm, &name, njs_arg(args, nargs, 1)) != NJS_OK) { return NJS_ERROR; } From xeioex at nginx.com Wed Nov 18 18:35:35 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 18 Nov 2020 18:35:35 +0000 Subject: [njs] HTTP: fixed promise subrequest() with error_page redirect. 
Message-ID: <hg.c947a300b96c.1605724535.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/c947a300b96c branches: changeset: 1568:c947a300b96c user: Dmitry Volyntsev <xeioex at nginx.com> date: Wed Nov 18 18:09:11 2020 +0000 description: HTTP: fixed promise subrequest() with error_page redirect. Previously, promise callbacks for a subrequest were stored in a subrequest context. This is incorrect because a subrequest content may be destroyed (for example when error_page with redirect is enabled for a subrequest location). The fix is to store callbacks in the parent context. The issue was introduced in 6fccbc9f1288 (0.3.8). diffstat: nginx/ngx_http_js_module.c | 83 +++++++++++++++++++++++++++++++++++++++------ 1 files changed, 72 insertions(+), 11 deletions(-) diffs (134 lines): diff -r e97f76121196 -r c947a300b96c nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Tue Nov 17 13:22:34 2020 +0000 +++ b/nginx/ngx_http_js_module.c Wed Nov 18 18:09:11 2020 +0000 @@ -44,12 +44,18 @@ typedef struct { njs_opaque_value_t request; njs_opaque_value_t request_body; ngx_str_t redirect_uri; - njs_opaque_value_t promise_callbacks[2]; + ngx_array_t promise_callbacks; } ngx_http_js_ctx_t; typedef struct { ngx_http_request_t *request; + njs_opaque_value_t callbacks[2]; +} ngx_http_js_cb_t; + + +typedef struct { + ngx_http_request_t *request; njs_vm_event_t vm_event; void *unused; ngx_int_t ident; @@ -2276,19 +2282,52 @@ static njs_int_t ngx_http_js_promise_trampoline(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { + ngx_uint_t i; njs_function_t *callback; + ngx_http_js_cb_t *cb, *cbs; ngx_http_js_ctx_t *ctx; ngx_http_request_t *r; r = njs_vm_external(vm, njs_argument(args, 1)); - ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); + ctx = ngx_http_get_module_ctx(r->parent, ngx_http_js_module); ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "js subrequest promise trampoline ctx: %p", ctx); - - callback = 
njs_value_function(njs_value_arg(&ctx->promise_callbacks[0])); + "js subrequest promise trampoline parent ctx: %p", ctx); + + if (ctx == NULL) { + njs_vm_error(vm, "js subrequest: failed to get the parent context"); + return NJS_ERROR; + } + + cbs = ctx->promise_callbacks.elts; + + if (cbs == NULL) { + goto fail; + } + + cb = NULL; + + for (i = 0; i < ctx->promise_callbacks.nelts; i++) { + if (cbs[i].request == r) { + cb = &cbs[i]; + cb->request = NULL; + break; + } + } + + if (cb == NULL) { + goto fail; + } + + callback = njs_value_function(njs_value_arg(&cb->callbacks[0])); return njs_vm_call(vm, callback, njs_argument(args, 1), 1); + +fail: + + njs_vm_error(vm, "js subrequest: promise callback not found"); + + return NJS_ERROR; } @@ -2298,9 +2337,10 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, { ngx_int_t rc, promise; njs_str_t uri_arg, args_arg, method_name, body_arg; - ngx_uint_t method, methods_max, has_body, detached; + ngx_uint_t i, method, methods_max, has_body, detached; njs_value_t *value, *arg, *options; njs_function_t *callback; + ngx_http_js_cb_t *cb, *cbs; ngx_http_js_ctx_t *ctx; njs_opaque_value_t lvalue; ngx_http_request_t *r, *sr; @@ -2507,15 +2547,36 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, } if (promise) { - ctx = ngx_pcalloc(sr->pool, sizeof(ngx_http_js_ctx_t)); - if (ctx == NULL) { - return NGX_ERROR; + cbs = ctx->promise_callbacks.elts; + + if (cbs == NULL) { + if (ngx_array_init(&ctx->promise_callbacks, r->pool, 4, + sizeof(ngx_http_js_cb_t)) != NGX_OK) + { + goto memory_error; + } } - ngx_http_set_ctx(sr, ctx, ngx_http_js_module); + cb = NULL; + + for (i = 0; i < ctx->promise_callbacks.nelts; i++) { + if (cbs[i].request == NULL) { + cb = &cbs[i]; + break; + } + } + + if (i == ctx->promise_callbacks.nelts) { + cb = ngx_array_push(&ctx->promise_callbacks); + if (cb == NULL) { + goto memory_error; + } + } + + cb->request = sr; return njs_vm_promise_create(vm, njs_vm_retval(vm), - njs_value_arg(&ctx->promise_callbacks)); + 
njs_value_arg(&cb->callbacks)); } njs_value_undefined_set(njs_vm_retval(vm)); From martin.grigorov at gmail.com Thu Nov 19 07:27:32 2020 From: martin.grigorov at gmail.com (Martin Grigorov) Date: Thu, 19 Nov 2020 09:27:32 +0200 Subject: [PATCH] Use BPF to distribute packet to different work thread. In-Reply-To: <SN6PR11MB351747A2BBAB398D5D2B6F5087E10@SN6PR11MB3517.namprd11.prod.outlook.com> References: <SN6PR11MB35172BC6F818934BACB7FA3A87270@SN6PR11MB3517.namprd11.prod.outlook.com> <20200910101132.GA38727@vl.krasnogorsk.ru> <SN6PR11MB351752D836F2B1659E8FF67787240@SN6PR11MB3517.namprd11.prod.outlook.com> <20200913233936.GV18881@mdounin.ru> <SN6PR11MB3517F8A7E457669E49D3483C87230@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB3517E97F77028B2A11C3B9EF87200@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB351734F2EEDC51AB02E12EBF87390@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB3517ECE17C51097465A4A5B187E20@SN6PR11MB3517.namprd11.prod.outlook.com> <ad46ae97-e704-ccdf-b082-260ed0e51dc3@nginx.com> <SN6PR11MB351747A2BBAB398D5D2B6F5087E10@SN6PR11MB3517.namprd11.prod.outlook.com> Message-ID: <CAMomwMqCkVbqW+6h_vLS3Nd6SfTpmhUdgi8iaHuV=Xfq=VJ7Nw@mail.gmail.com> Hi, On Wed, Nov 18, 2020 at 9:17 AM Liu, Qiao <qiao.liu at intel.com> wrote: > Hi, Mikhail Isachenkov: > Great thanks for reply, I use wrk to do the test, please see below link > for wrk script and nginx config file > https://gist.github.com/qiaoliu78/75e7710a02c3346d22ddda04cea83b97 > I use 2 different E5 8280 servers, each with 2 Mellanox 100GB cards bound > and directly connected, one server run Nginx the other server run WRK. I > also run the test on same server, but seems can not prove anything. Below > is the result > Run wrk and nginx on same server: > > 112 threads and 10000 connections > Thread Stats Avg Stdev Max +/- Stdev > Latency 57.24ms 248.60ms 8.09s 95.49% > There is something wrong here. How come the Stdev value is bigger than Avg value ?! 
Does that mean that some responses have been sent before their request came to Nginx, i.e. they have negative latency ?! > Connect 269.96ms 450.84ms 1.07s 74.07% > Delay 20.80ms 133.16ms 1.99s 99.08% > Req/Sec 812.77 749.04 3.90k 76.18% > Latency Distribution > 50.00% 8.28ms > 75.00% 9.28ms > 90.00% 19.02ms > 99.00% 1.36s > 99.90% 2.76s > 99.99% 4.63s > Connect Distribution > 50.00% 346.00us > 75.00% 1.00s > 90.00% 1.04s > 99.00% 1.06s > 99.90% 1.07s > 99.99% 1.07s > Delay Distribution > 50.00% 6.60ms > 75.00% 7.53ms > 90.00% 9.92ms > 99.00% 45.82ms > 99.90% 1.55s > 99.99% 1.82s > 2247253 requests in 1.00m, 2.14TB read > Socket errors: connect 0, read 376, write 0, pconn 581, nodata 0, > timeout 19, connect_timeout 2419, delay_timeout 1178 > Requests/sec: 37389.74 > Transfer/sec: 36.53GB > > > Run nginx and wrk on two different server: > 112 threads and 10000 connections > Thread Stats Avg Stdev Max +/- Stdev > Latency 1.27s 879.93ms 9.84s 76.66% > Connect 8.49ms 16.28ms 99.52ms 90.27% > Delay 740.14ms 597.38ms 2.00s 48.97% > Req/Sec 73.41 32.15 2.06k 68.31% > Latency Distribution > 50.00% 1.24s > 75.00% 1.67s > 90.00% 2.16s > 99.00% 4.40s > 99.90% 7.74s > 99.99% 9.11s > Connect Distribution > 50.00% 2.71ms > 75.00% 4.43ms > 90.00% 24.43ms > 99.00% 84.09ms > 99.90% 99.25ms > 99.99% 99.51ms > Delay Distribution > 50.00% 747.60ms > 75.00% 1.29s > 90.00% 1.51s > 99.00% 1.85s > 99.90% 1.98s > 99.99% 2.00s > 487468 requests in 1.00m, 476.98GB read > Socket errors: connect 0, read 0, write 0, pconn 1, nodata 0, timeout 9, > conne > ct_timeout 0, delay_timeout 6912 > Requests/sec: 8110.10 > Transfer/sec: 7.94GB > > Thanks > LQ > -----Original Message----- > From: Mikhail Isachenkov <mikhail.isachenkov at nginx.com> > Sent: Tuesday, November 17, 2020 5:09 PM > To: nginx-devel at nginx.org; Liu, Qiao <qiao.liu at intel.com> > Subject: Re: [PATCH] Use BPF to distribute packet to different work thread. 
> > Hi Liu Quao, > > Looks like you didn't receive my answer for some reason. You can find it > in maillist archive: > http://mailman.nginx.org/pipermail/nginx-devel/2020-September/013483.html > > And let me quote it a little: > > a) please share your test stand/test scripts/nginx configuration > b) did you perform any tests with server and client running on the same > server? > > 17.11.2020 03:34, Liu, Qiao ?????: > > Hi, what is the result of this patch set now? > > Thanks > > LQ > > > > -----Original Message----- > > From: Liu, Qiao > > Sent: Thursday, September 24, 2020 8:59 AM > > To: nginx-devel at nginx.org > > Subject: RE: [PATCH] Use BPF to distribute packet to different work > thread. > > > > Remove printf > > > > # HG changeset patch > > # User Liu Qiao <qiao.liu at intel.com> > > # Date 1599735293 14400 > > # Thu Sep 10 06:54:53 2020 -0400 > > # Node ID c2eabe9168d0cbefc030807a0808568d86c93e4f > > # Parent da5e3f5b16733167142b599b6af3ce9469a07d52 > > Use BPF to distribute packet to different work thread. > > Use Berkeley Packet Filter to get packet queue_mapping number, and use > this queue_mapping number to distribute the packet to different work > thread, this will improve CPU utilization and http latency. > > Author: Samudrala, Sridhar <sridhar.samudrala at intel.com> > > > > diff -r da5e3f5b1673 -r c2eabe9168d0 auto/os/linux > > --- a/auto/os/linux Wed Sep 02 23:13:36 2020 +0300 > > +++ b/auto/os/linux Thu Sep 10 06:54:53 2020 -0400 > > @@ -32,6 +32,10 @@ > > have=NGX_HAVE_POSIX_FADVISE . auto/nohave fi > > > > +if [ $version -lt 263680 ]; then > > + have=NGX_HAVE_REUSEPORT_CBPF . auto/nohave fi > > + > > # epoll, EPOLLET version > > > > ngx_feature="epoll" > > diff -r da5e3f5b1673 -r c2eabe9168d0 auto/unix > > --- a/auto/unix Wed Sep 02 23:13:36 2020 +0300 > > +++ b/auto/unix Thu Sep 10 06:54:53 2020 -0400 > > @@ -331,6 +331,17 @@ > > ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_REUSEPORT, NULL, 0)" > > . 
auto/feature > > > > +ngx_feature="SO_REUSEPORT_CBPF" > > +ngx_feature_name="NGX_HAVE_REUSEPORT_CBPF" > > +ngx_feature_run=no > > +ngx_feature_incs="#include <sys/socket.h> > > + #include <linux/filter.h> > > + #include <error.h>" > > +ngx_feature_path= > > +ngx_feature_libs= > > +ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, > NULL, 0)" > > +. auto/feature > > + > > > > ngx_feature="SO_ACCEPTFILTER" > > ngx_feature_name="NGX_HAVE_DEFERRED_ACCEPT" > > diff -r da5e3f5b1673 -r c2eabe9168d0 src/core/ngx_connection.c > > --- a/src/core/ngx_connection.c Wed Sep 02 23:13:36 2020 +0300 > > +++ b/src/core/ngx_connection.c Thu Sep 10 06:54:53 2020 -0400 > > @@ -8,7 +8,10 @@ > > #include <ngx_config.h> > > #include <ngx_core.h> > > #include <ngx_event.h> > > - > > +#if (NGX_HAVE_REUSEPORT_CBPF) > > +#include <linux/filter.h> > > +#include <error.h> > > +#endif > > > > ngx_os_io_t ngx_io; > > > > @@ -708,6 +711,35 @@ > > return NGX_OK; > > } > > > > +#if(NGX_HAVE_REUSEPORT) > > +#if(NGX_HAVE_REUSEPORT_CBPF) > > +#ifndef ARRAY_SIZE > > +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif > > + > > +static ngx_int_t attach_bpf(int fd, uint16_t n) { > > + struct sock_filter code[] = { > > + /* A = skb->queue_mapping */ > > + { BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + > SKF_AD_QUEUE }, > > + /* A = A % n */ > > + { BPF_ALU | BPF_MOD, 0, 0, n }, > > + /* return A */ > > + { BPF_RET | BPF_A, 0, 0, 0 }, > > + }; > > + struct sock_fprog p = { > > + .len = ARRAY_SIZE(code), > > + .filter = code, > > + }; > > + > > + if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &p, > sizeof(p))) > > + return NGX_ERROR; > > + else > > + return NGX_OK; > > +} > > +#endif > > +#endif > > + > > > > void > > ngx_configure_listening_sockets(ngx_cycle_t *cycle) @@ -719,6 +751,11 > @@ #if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > > struct accept_filter_arg af; > > #endif > > +#if (NGX_HAVE_REUSEPORT) > > +#if (NGX_HAVE_REUSEPORT_CBPF) > > + 
ngx_core_conf_t* ccf ; > > +#endif > > +#endif > > > > ls = cycle->listening.elts; > > for (i = 0; i < cycle->listening.nelts; i++) { @@ -1011,6 +1048,31 > @@ > > } > > > > #endif > > +#if (NGX_HAVE_REUSEPORT) > > +#if (NGX_HAVE_REUSEPORT_CBPF) > > + if(ls[i].reuseport) > > + { > > + ccf = (ngx_core_conf_t *) > ngx_get_conf(cycle->conf_ctx,ngx_core_module); > > + if(ccf) > > + { > > + if( NGX_OK == attach_bpf(ls[i].fd, ccf->worker_processes) ) > > + { > > + ngx_log_error(NGX_LOG_INFO,cycle->log > ,ngx_socket_errno,\ > > + "bpf prog attached to fd:%d\n", ls[i].fd); > > + } > > + else > > + { > > + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ > > + "failed to set SO_ATTACH_REUSEPORT_CBPF\n"); > > + } > > + } > > + else > > + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ > > + "can not get config, attach bpf failed\n"); > > + > > + } > > +#endif > > +#endif > > } > > > > return; > > > > -----Original Message----- > > From: Liu, Qiao > > Sent: Tuesday, September 15, 2020 10:09 AM > > To: nginx-devel at nginx.org > > Subject: RE: [PATCH] Use BPF to distribute packet to different work > thread. > > > > Below is 5 times test result compare, 112 threads, 10000 connections, > > 1M object http request. 
Seems P99 have great improvement, and Max is > > also reduced > > > > > > > > AVG Stdev Max > P99 > > test 1 1.32s 447.09ms 5.48s 2.82s > > BPF test 2 1.39s 513.8ms 9.42s 3.1s > > test 3 1.4s 341.38ms 5.63s > 2.55s > > test 4 1.41s 407.45ms 6.96s 2.77s > > test 5 1.29s 644.81ms 9.45s 3.74s > > Average 1.362s 470.906ms 7.388s 2.996s > > > > NonBPF test 1 1.48s 916.88ms 9.44s 5.08s > > test 2 1.43s 658.48ms 9.54s > 3.92s > > test 3 1.41s 650.38ms 8.63s > 3.59s > > test 4 1.29s 1010ms 10s > 5.21s > > test 5 1.31s 875.01ms 9.53s > 4.39s > > Average 1.384s 822.15ms 9.428s 4.438s > > > > > > Thanks > > LQ > > -----Original Message----- > > From: nginx-devel <nginx-devel-bounces at nginx.org> On Behalf Of Liu, > > Qiao > > Sent: Monday, September 14, 2020 9:18 AM > > To: nginx-devel at nginx.org > > Subject: RE: [PATCH] Use BPF to distribute packet to different work > thread. > > > > Hi, Maxim Dounin: > > Thanks for your reply, this server is random selected, we just do BPF > > and no-BPF test, I think the latency based on server configuration, > > not related with BPF patch, also the NIC of the server is Mellanox, > > not ADQ capable hardware , we will do more test Thanks LQ > > > > -----Original Message----- > > From: nginx-devel <nginx-devel-bounces at nginx.org> On Behalf Of Maxim > > Dounin > > Sent: Monday, September 14, 2020 7:40 AM > > To: nginx-devel at nginx.org > > Subject: Re: [PATCH] Use BPF to distribute packet to different work > thread. > > > > Hello! 
> > > > On Fri, Sep 11, 2020 at 05:41:47AM +0000, Liu, Qiao wrote: > > > >> Hi, Vladimir Homutov: > >> The below is our WRK test result output with BPF enable > >> > >> 112 threads and 10000 connections > >> Thread Stats Avg Stdev Max +/- Stdev > >> Latency 608.23ms 820.71ms 10.00s 87.48% > >> Connect 16.52ms 54.53ms 1.99s 94.73% > >> Delay 153.13ms 182.17ms 2.00s 90.74% > >> Req/Sec 244.79 142.32 1.99k 68.40% > >> Latency Distribution > >> 50.00% 293.50ms > >> 75.00% 778.33ms > >> 90.00% 1.61s > >> 99.00% 3.71s > >> 99.90% 7.03s > >> 99.99% 8.94s > >> Connect Distribution > >> 50.00% 1.93ms > >> 75.00% 2.85ms > >> 90.00% 55.76ms > >> 99.00% 229.19ms > >> 99.90% 656.79ms > >> 99.99% 1.43s > >> Delay Distribution > >> 50.00% 110.96ms > >> 75.00% 193.67ms > >> 90.00% 321.77ms > >> 99.00% 959.27ms > >> 99.90% 1.57s > >> 99.99% 1.91s > >> Compared with no BPF but enable reuseport as below > >> > >> 112 threads and 10000 connections > >> Thread Stats Avg Stdev Max +/- Stdev > >> Latency 680.50ms 943.69ms 10.00s 87.18% > >> Connect 58.44ms 238.08ms 2.00s 94.58% > >> Delay 158.84ms 256.28ms 2.00s 90.92% > >> Req/Sec 244.51 151.00 1.41k 69.67% > >> Latency Distribution > >> 50.00% 317.61ms > >> 75.00% 913.52ms > >> 90.00% 1.90s > >> 99.00% 4.30s > >> 99.90% 6.52s > >> 99.99% 8.80s > >> Connect Distribution > >> 50.00% 1.88ms > >> 75.00% 2.21ms > >> 90.00% 55.94ms > >> 99.00% 1.45s > >> 99.90% 1.95s > >> 99.99% 2.00s > >> Delay Distribution > >> 50.00% 73.01ms > >> 75.00% 190.40ms > >> 90.00% 387.01ms > >> 99.00% 1.34s > >> 99.90% 1.86s > >> 99.99% 1.99s > >> > >> > >> From the above results, there shows almost 20% percent latency > >> reduction. P99 latency of BPF is 3.71s , but without BPF is 4.3s. > > > > Thank you for the results. > > > > Given that latency stdev is way higher than the average latency, I don't > think the "20% percent latency reduction" observed is statistically > significant. Please try running several tests and use ministat(1) to check > the results. 
> > > > Also, the latency values look very high, and request rate very low. > What's on the server side? > > > > -- > > Maxim Dounin > > http://mdounin.ru/ > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > -- > Best regards, > Mikhail Isachenkov > NGINX Professional Services > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201119/0f4575c6/attachment-0001.htm> From qiao.liu at intel.com Thu Nov 19 08:08:28 2020 From: qiao.liu at intel.com (Liu, Qiao) Date: Thu, 19 Nov 2020 08:08:28 +0000 Subject: [PATCH] Use BPF to distribute packet to different work thread. 
In-Reply-To: <CAMomwMqCkVbqW+6h_vLS3Nd6SfTpmhUdgi8iaHuV=Xfq=VJ7Nw@mail.gmail.com> References: <SN6PR11MB35172BC6F818934BACB7FA3A87270@SN6PR11MB3517.namprd11.prod.outlook.com> <20200910101132.GA38727@vl.krasnogorsk.ru> <SN6PR11MB351752D836F2B1659E8FF67787240@SN6PR11MB3517.namprd11.prod.outlook.com> <20200913233936.GV18881@mdounin.ru> <SN6PR11MB3517F8A7E457669E49D3483C87230@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB3517E97F77028B2A11C3B9EF87200@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB351734F2EEDC51AB02E12EBF87390@SN6PR11MB3517.namprd11.prod.outlook.com> <SN6PR11MB3517ECE17C51097465A4A5B187E20@SN6PR11MB3517.namprd11.prod.outlook.com> <ad46ae97-e704-ccdf-b082-260ed0e51dc3@nginx.com> <SN6PR11MB351747A2BBAB398D5D2B6F5087E10@SN6PR11MB3517.namprd11.prod.outlook.com> <CAMomwMqCkVbqW+6h_vLS3Nd6SfTpmhUdgi8iaHuV=Xfq=VJ7Nw@mail.gmail.com> Message-ID: <SN6PR11MB351744F25D1BFC72A31D2D2687E00@SN6PR11MB3517.namprd11.prod.outlook.com> Hi, Mikhail Isachenkov The server and client on the same machine. You can see from the below detail, the AVG of course do have less value Latency Distribution 50.00% 8.28ms 75.00% 9.28ms 90.00% 19.02ms 99.00% 1.36s 99.90% 2.76s 99.99% 4.63s Thanks LQ From: nginx-devel <nginx-devel-bounces at nginx.org> On Behalf Of Martin Grigorov Sent: Thursday, November 19, 2020 3:28 PM To: nginx-devel at nginx.org Subject: Re: [PATCH] Use BPF to distribute packet to different work thread. Hi, On Wed, Nov 18, 2020 at 9:17 AM Liu, Qiao <qiao.liu at intel.com<mailto:qiao.liu at intel.com>> wrote: Hi, Mikhail Isachenkov: Great thanks for reply, I use wrk to do the test, please see below link for wrk script and nginx config file https://gist.github.com/qiaoliu78/75e7710a02c3346d22ddda04cea83b97 I use 2 different E5 8280 servers, each with 2 Mellanox 100GB cards bound and directly connected, one server run Nginx the other server run WRK. I also run the test on same server, but seems can not prove anything. 
Below is the result Run wrk and nginx on same server: 112 threads and 10000 connections Thread Stats Avg Stdev Max +/- Stdev Latency 57.24ms 248.60ms 8.09s 95.49% There is something wrong here. How come the Stdev value is bigger than Avg value ?! Does that mean that some responses have been sent before their request came to Nginx, i.e. they have negative latency ?! Connect 269.96ms 450.84ms 1.07s 74.07% Delay 20.80ms 133.16ms 1.99s 99.08% Req/Sec 812.77 749.04 3.90k 76.18% Latency Distribution 50.00% 8.28ms 75.00% 9.28ms 90.00% 19.02ms 99.00% 1.36s 99.90% 2.76s 99.99% 4.63s Connect Distribution 50.00% 346.00us 75.00% 1.00s 90.00% 1.04s 99.00% 1.06s 99.90% 1.07s 99.99% 1.07s Delay Distribution 50.00% 6.60ms 75.00% 7.53ms 90.00% 9.92ms 99.00% 45.82ms 99.90% 1.55s 99.99% 1.82s 2247253 requests in 1.00m, 2.14TB read Socket errors: connect 0, read 376, write 0, pconn 581, nodata 0, timeout 19, connect_timeout 2419, delay_timeout 1178 Requests/sec: 37389.74 Transfer/sec: 36.53GB Run nginx and wrk on two different server: 112 threads and 10000 connections Thread Stats Avg Stdev Max +/- Stdev Latency 1.27s 879.93ms 9.84s 76.66% Connect 8.49ms 16.28ms 99.52ms 90.27% Delay 740.14ms 597.38ms 2.00s 48.97% Req/Sec 73.41 32.15 2.06k 68.31% Latency Distribution 50.00% 1.24s 75.00% 1.67s 90.00% 2.16s 99.00% 4.40s 99.90% 7.74s 99.99% 9.11s Connect Distribution 50.00% 2.71ms 75.00% 4.43ms 90.00% 24.43ms 99.00% 84.09ms 99.90% 99.25ms 99.99% 99.51ms Delay Distribution 50.00% 747.60ms 75.00% 1.29s 90.00% 1.51s 99.00% 1.85s 99.90% 1.98s 99.99% 2.00s 487468 requests in 1.00m, 476.98GB read Socket errors: connect 0, read 0, write 0, pconn 1, nodata 0, timeout 9, conne ct_timeout 0, delay_timeout 6912 Requests/sec: 8110.10 Transfer/sec: 7.94GB Thanks LQ -----Original Message----- From: Mikhail Isachenkov <mikhail.isachenkov at nginx.com<mailto:mikhail.isachenkov at nginx.com>> Sent: Tuesday, November 17, 2020 5:09 PM To: nginx-devel at nginx.org<mailto:nginx-devel at nginx.org>; Liu, Qiao 
<qiao.liu at intel.com<mailto:qiao.liu at intel.com>> Subject: Re: [PATCH] Use BPF to distribute packet to different work thread. Hi Liu Quao, Looks like you didn't receive my answer for some reason. You can find it in maillist archive: http://mailman.nginx.org/pipermail/nginx-devel/2020-September/013483.html And let me quote it a little: a) please share your test stand/test scripts/nginx configuration b) did you perform any tests with server and client running on the same server? 17.11.2020 03:34, Liu, Qiao ?????: > Hi, what is the result of this patch set now? > Thanks > LQ > > -----Original Message----- > From: Liu, Qiao > Sent: Thursday, September 24, 2020 8:59 AM > To: nginx-devel at nginx.org<mailto:nginx-devel at nginx.org> > Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. > > Remove printf > > # HG changeset patch > # User Liu Qiao <qiao.liu at intel.com<mailto:qiao.liu at intel.com>> > # Date 1599735293 14400 > # Thu Sep 10 06:54:53 2020 -0400 > # Node ID c2eabe9168d0cbefc030807a0808568d86c93e4f > # Parent da5e3f5b16733167142b599b6af3ce9469a07d52 > Use BPF to distribute packet to different work thread. > Use Berkeley Packet Filter to get packet queue_mapping number, and use this queue_mapping number to distribute the packet to different work thread, this will improve CPU utilization and http latency. > Author: Samudrala, Sridhar <sridhar.samudrala at intel.com<mailto:sridhar.samudrala at intel.com>> > > diff -r da5e3f5b1673 -r c2eabe9168d0 auto/os/linux > --- a/auto/os/linux Wed Sep 02 23:13:36 2020 +0300 > +++ b/auto/os/linux Thu Sep 10 06:54:53 2020 -0400 > @@ -32,6 +32,10 @@ > have=NGX_HAVE_POSIX_FADVISE . auto/nohave fi > > +if [ $version -lt 263680 ]; then > + have=NGX_HAVE_REUSEPORT_CBPF . 
auto/nohave fi > + > # epoll, EPOLLET version > > ngx_feature="epoll" > diff -r da5e3f5b1673 -r c2eabe9168d0 auto/unix > --- a/auto/unix Wed Sep 02 23:13:36 2020 +0300 > +++ b/auto/unix Thu Sep 10 06:54:53 2020 -0400 > @@ -331,6 +331,17 @@ > ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_REUSEPORT, NULL, 0)" > . auto/feature > > +ngx_feature="SO_REUSEPORT_CBPF" > +ngx_feature_name="NGX_HAVE_REUSEPORT_CBPF" > +ngx_feature_run=no > +ngx_feature_incs="#include <sys/socket.h> > + #include <linux/filter.h> > + #include <error.h>" > +ngx_feature_path= > +ngx_feature_libs= > +ngx_feature_test="setsockopt(0, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, NULL, 0)" > +. auto/feature > + > > ngx_feature="SO_ACCEPTFILTER" > ngx_feature_name="NGX_HAVE_DEFERRED_ACCEPT" > diff -r da5e3f5b1673 -r c2eabe9168d0 src/core/ngx_connection.c > --- a/src/core/ngx_connection.c Wed Sep 02 23:13:36 2020 +0300 > +++ b/src/core/ngx_connection.c Thu Sep 10 06:54:53 2020 -0400 > @@ -8,7 +8,10 @@ > #include <ngx_config.h> > #include <ngx_core.h> > #include <ngx_event.h> > - > +#if (NGX_HAVE_REUSEPORT_CBPF) > +#include <linux/filter.h> > +#include <error.h> > +#endif > > ngx_os_io_t ngx_io; > > @@ -708,6 +711,35 @@ > return NGX_OK; > } > > +#if(NGX_HAVE_REUSEPORT) > +#if(NGX_HAVE_REUSEPORT_CBPF) > +#ifndef ARRAY_SIZE > +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif > + > +static ngx_int_t attach_bpf(int fd, uint16_t n) { > + struct sock_filter code[] = { > + /* A = skb->queue_mapping */ > + { BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_QUEUE }, > + /* A = A % n */ > + { BPF_ALU | BPF_MOD, 0, 0, n }, > + /* return A */ > + { BPF_RET | BPF_A, 0, 0, 0 }, > + }; > + struct sock_fprog p = { > + .len = ARRAY_SIZE(code), > + .filter = code, > + }; > + > + if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF, &p, sizeof(p))) > + return NGX_ERROR; > + else > + return NGX_OK; > +} > +#endif > +#endif > + > > void > ngx_configure_listening_sockets(ngx_cycle_t *cycle) @@ -719,6 +751,11 @@ 
#if (NGX_HAVE_DEFERRED_ACCEPT && defined SO_ACCEPTFILTER) > struct accept_filter_arg af; > #endif > +#if (NGX_HAVE_REUSEPORT) > +#if (NGX_HAVE_REUSEPORT_CBPF) > + ngx_core_conf_t* ccf ; > +#endif > +#endif > > ls = cycle->listening.elts; > for (i = 0; i < cycle->listening.nelts; i++) { @@ -1011,6 +1048,31 @@ > } > > #endif > +#if (NGX_HAVE_REUSEPORT) > +#if (NGX_HAVE_REUSEPORT_CBPF) > + if(ls[i].reuseport) > + { > + ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx,ngx_core_module); > + if(ccf) > + { > + if( NGX_OK == attach_bpf(ls[i].fd, ccf->worker_processes) ) > + { > + ngx_log_error(NGX_LOG_INFO,cycle->log ,ngx_socket_errno,\ > + "bpf prog attached to fd:%d\n", ls[i].fd); > + } > + else > + { > + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ > + "failed to set SO_ATTACH_REUSEPORT_CBPF\n"); > + } > + } > + else > + ngx_log_error(NGX_LOG_ERR,cycle->log ,ngx_socket_errno,\ > + "can not get config, attach bpf failed\n"); > + > + } > +#endif > +#endif > } > > return; > > -----Original Message----- > From: Liu, Qiao > Sent: Tuesday, September 15, 2020 10:09 AM > To: nginx-devel at nginx.org<mailto:nginx-devel at nginx.org> > Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. > > Below is 5 times test result compare, 112 threads, 10000 connections, > 1M object http request. 
Seems P99 have great improvement, and Max is > also reduced > > > > AVG Stdev Max P99 > test 1 1.32s 447.09ms 5.48s 2.82s > BPF test 2 1.39s 513.8ms 9.42s 3.1s > test 3 1.4s 341.38ms 5.63s 2.55s > test 4 1.41s 407.45ms 6.96s 2.77s > test 5 1.29s 644.81ms 9.45s 3.74s > Average 1.362s 470.906ms 7.388s 2.996s > > NonBPF test 1 1.48s 916.88ms 9.44s 5.08s > test 2 1.43s 658.48ms 9.54s 3.92s > test 3 1.41s 650.38ms 8.63s 3.59s > test 4 1.29s 1010ms 10s 5.21s > test 5 1.31s 875.01ms 9.53s 4.39s > Average 1.384s 822.15ms 9.428s 4.438s > > > Thanks > LQ > -----Original Message----- > From: nginx-devel <nginx-devel-bounces at nginx.org<mailto:nginx-devel-bounces at nginx.org>> On Behalf Of Liu, > Qiao > Sent: Monday, September 14, 2020 9:18 AM > To: nginx-devel at nginx.org<mailto:nginx-devel at nginx.org> > Subject: RE: [PATCH] Use BPF to distribute packet to different work thread. > > Hi, Maxim Dounin: > Thanks for your reply, this server is random selected, we just do BPF > and no-BPF test, I think the latency based on server configuration, > not related with BPF patch, also the NIC of the server is Mellanox, > not ADQ capable hardware , we will do more test Thanks LQ > > -----Original Message----- > From: nginx-devel <nginx-devel-bounces at nginx.org<mailto:nginx-devel-bounces at nginx.org>> On Behalf Of Maxim > Dounin > Sent: Monday, September 14, 2020 7:40 AM > To: nginx-devel at nginx.org<mailto:nginx-devel at nginx.org> > Subject: Re: [PATCH] Use BPF to distribute packet to different work thread. > > Hello! 
> > On Fri, Sep 11, 2020 at 05:41:47AM +0000, Liu, Qiao wrote: > >> Hi, Vladimir Homutov: >> The below is our WRK test result output with BPF enable >> >> 112 threads and 10000 connections >> Thread Stats Avg Stdev Max +/- Stdev >> Latency 608.23ms 820.71ms 10.00s 87.48% >> Connect 16.52ms 54.53ms 1.99s 94.73% >> Delay 153.13ms 182.17ms 2.00s 90.74% >> Req/Sec 244.79 142.32 1.99k 68.40% >> Latency Distribution >> 50.00% 293.50ms >> 75.00% 778.33ms >> 90.00% 1.61s >> 99.00% 3.71s >> 99.90% 7.03s >> 99.99% 8.94s >> Connect Distribution >> 50.00% 1.93ms >> 75.00% 2.85ms >> 90.00% 55.76ms >> 99.00% 229.19ms >> 99.90% 656.79ms >> 99.99% 1.43s >> Delay Distribution >> 50.00% 110.96ms >> 75.00% 193.67ms >> 90.00% 321.77ms >> 99.00% 959.27ms >> 99.90% 1.57s >> 99.99% 1.91s >> Compared with no BPF but enable reuseport as below >> >> 112 threads and 10000 connections >> Thread Stats Avg Stdev Max +/- Stdev >> Latency 680.50ms 943.69ms 10.00s 87.18% >> Connect 58.44ms 238.08ms 2.00s 94.58% >> Delay 158.84ms 256.28ms 2.00s 90.92% >> Req/Sec 244.51 151.00 1.41k 69.67% >> Latency Distribution >> 50.00% 317.61ms >> 75.00% 913.52ms >> 90.00% 1.90s >> 99.00% 4.30s >> 99.90% 6.52s >> 99.99% 8.80s >> Connect Distribution >> 50.00% 1.88ms >> 75.00% 2.21ms >> 90.00% 55.94ms >> 99.00% 1.45s >> 99.90% 1.95s >> 99.99% 2.00s >> Delay Distribution >> 50.00% 73.01ms >> 75.00% 190.40ms >> 90.00% 387.01ms >> 99.00% 1.34s >> 99.90% 1.86s >> 99.99% 1.99s >> >> >> From the above results, there shows almost 20% percent latency >> reduction. P99 latency of BPF is 3.71s , but without BPF is 4.3s. > > Thank you for the results. > > Given that latency stdev is way higher than the average latency, I don't think the "20% percent latency reduction" observed is statistically significant. Please try running several tests and use ministat(1) to check the results. > > Also, the latency values look very high, and request rate very low. What's on the server side? 
> > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org<mailto:nginx-devel at nginx.org> > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org<mailto:nginx-devel at nginx.org> > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org<mailto:nginx-devel at nginx.org> > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- Best regards, Mikhail Isachenkov NGINX Professional Services _______________________________________________ nginx-devel mailing list nginx-devel at nginx.org<mailto:nginx-devel at nginx.org> http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201119/061708ca/attachment-0001.htm> From iippolitov at nginx.com Thu Nov 19 17:06:36 2020 From: iippolitov at nginx.com (Igor Ippolitov) Date: Thu, 19 Nov 2020 17:06:36 +0000 Subject: [nginx] Core: "-e" command line option. Message-ID: <hg.f18db38a9826.1605805596.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/f18db38a9826 branches: changeset: 7744:f18db38a9826 user: Igor Ippolitov <iippolitov at nginx.com> date: Thu Nov 19 16:59:00 2020 +0000 description: Core: "-e" command line option. When installing or running from a non-root user it is sometimes required to override default, compiled in error log path. There was no way to do this without rebuilding the binary (ticket #147). This patch introduced "-e" command line option which allows one to override compiled in error log path. 
diffstat: auto/configure | 4 ++++ docs/man/nginx.8 | 9 ++++++++- src/core/nginx.c | 41 +++++++++++++++++++++++++++++++++++++---- src/core/ngx_cycle.c | 9 +++++++++ src/core/ngx_cycle.h | 1 + src/core/ngx_log.c | 19 ++++++++----------- src/core/ngx_log.h | 2 +- 7 files changed, 68 insertions(+), 17 deletions(-) diffs (233 lines): diff -r 4b1299b1856a -r f18db38a9826 auto/configure --- a/auto/configure Tue Nov 10 17:13:20 2020 +0300 +++ b/auto/configure Thu Nov 19 16:59:00 2020 +0000 @@ -87,6 +87,10 @@ have=NGX_PID_PATH value="\"$NGX_PID_PATH have=NGX_LOCK_PATH value="\"$NGX_LOCK_PATH\"" . auto/define have=NGX_ERROR_LOG_PATH value="\"$NGX_ERROR_LOG_PATH\"" . auto/define +if [ ".$NGX_ERROR_LOG_PATH" = "." ]; then + have=NGX_ERROR_LOG_STDERR . auto/have +fi + have=NGX_HTTP_LOG_PATH value="\"$NGX_HTTP_LOG_PATH\"" . auto/define have=NGX_HTTP_CLIENT_TEMP_PATH value="\"$NGX_HTTP_CLIENT_TEMP_PATH\"" . auto/define diff -r 4b1299b1856a -r f18db38a9826 docs/man/nginx.8 --- a/docs/man/nginx.8 Tue Nov 10 17:13:20 2020 +0300 +++ b/docs/man/nginx.8 Thu Nov 19 16:59:00 2020 +0000 @@ -25,7 +25,7 @@ .\" SUCH DAMAGE. .\" .\" -.Dd December 5, 2019 +.Dd November 5, 2020 .Dt NGINX 8 .Os .Sh NAME @@ -35,6 +35,7 @@ .Nm .Op Fl ?hqTtVv .Op Fl c Ar file +.Op Fl e Ar file .Op Fl g Ar directives .Op Fl p Ar prefix .Op Fl s Ar signal @@ -54,6 +55,12 @@ Print help. .It Fl c Ar file Use an alternative configuration .Ar file . +.It Fl e Ar file +Use an alternative error log +.Ar file . +Special value +.Cm stderr +indicates that the standard error output should be used. .It Fl g Ar directives Set global configuration directives. 
See diff -r 4b1299b1856a -r f18db38a9826 src/core/nginx.c --- a/src/core/nginx.c Tue Nov 10 17:13:20 2020 +0300 +++ b/src/core/nginx.c Thu Nov 19 16:59:00 2020 +0000 @@ -183,6 +183,7 @@ static ngx_uint_t ngx_show_help; static ngx_uint_t ngx_show_version; static ngx_uint_t ngx_show_configure; static u_char *ngx_prefix; +static u_char *ngx_error_log; static u_char *ngx_conf_file; static u_char *ngx_conf_params; static char *ngx_signal; @@ -230,7 +231,7 @@ main(int argc, char *const *argv) ngx_pid = ngx_getpid(); ngx_parent = ngx_getppid(); - log = ngx_log_init(ngx_prefix); + log = ngx_log_init(ngx_prefix, ngx_error_log); if (log == NULL) { return 1; } @@ -393,9 +394,9 @@ ngx_show_version_info(void) if (ngx_show_help) { ngx_write_stderr( - "Usage: nginx [-?hvVtTq] [-s signal] [-c filename] " - "[-p prefix] [-g directives]" NGX_LINEFEED - NGX_LINEFEED + "Usage: nginx [-?hvVtTq] [-s signal] [-p prefix]" NGX_LINEFEED + " [-e filename] [-c filename] [-g directives]" + NGX_LINEFEED NGX_LINEFEED "Options:" NGX_LINEFEED " -?,-h : this help" NGX_LINEFEED " -v : show version and exit" NGX_LINEFEED @@ -414,6 +415,12 @@ ngx_show_version_info(void) #else " -p prefix : set prefix path (default: NONE)" NGX_LINEFEED #endif + " -e filename : set error log file (default: " +#ifdef NGX_ERROR_LOG_STDERR + "stderr)" NGX_LINEFEED +#else + NGX_ERROR_LOG_PATH ")" NGX_LINEFEED +#endif " -c filename : set configuration file (default: " NGX_CONF_PATH ")" NGX_LINEFEED " -g directives : set global directives out of configuration " @@ -800,6 +807,24 @@ ngx_get_options(int argc, char *const *a ngx_log_stderr(0, "option \"-p\" requires directory name"); return NGX_ERROR; + case 'e': + if (*p) { + ngx_error_log = p; + + } else if (argv[++i]) { + ngx_error_log = (u_char *) argv[i]; + + } else { + ngx_log_stderr(0, "option \"-e\" requires file name"); + return NGX_ERROR; + } + + if (ngx_strcmp(ngx_error_log, "stderr") == 0) { + ngx_error_log = (u_char *) ""; + } + + goto next; + case 'c': if (*p) { 
ngx_conf_file = p; @@ -992,6 +1017,14 @@ ngx_process_options(ngx_cycle_t *cycle) } } + if (ngx_error_log) { + cycle->error_log.len = ngx_strlen(ngx_error_log); + cycle->error_log.data = ngx_error_log; + + } else { + ngx_str_set(&cycle->error_log, NGX_ERROR_LOG_PATH); + } + if (ngx_conf_params) { cycle->conf_param.len = ngx_strlen(ngx_conf_params); cycle->conf_param.data = ngx_conf_params; diff -r 4b1299b1856a -r f18db38a9826 src/core/ngx_cycle.c --- a/src/core/ngx_cycle.c Tue Nov 10 17:13:20 2020 +0300 +++ b/src/core/ngx_cycle.c Thu Nov 19 16:59:00 2020 +0000 @@ -96,6 +96,15 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) return NULL; } + cycle->error_log.len = old_cycle->error_log.len; + cycle->error_log.data = ngx_pnalloc(pool, old_cycle->error_log.len + 1); + if (cycle->error_log.data == NULL) { + ngx_destroy_pool(pool); + return NULL; + } + ngx_cpystrn(cycle->error_log.data, old_cycle->error_log.data, + old_cycle->error_log.len + 1); + cycle->conf_file.len = old_cycle->conf_file.len; cycle->conf_file.data = ngx_pnalloc(pool, old_cycle->conf_file.len + 1); if (cycle->conf_file.data == NULL) { diff -r 4b1299b1856a -r f18db38a9826 src/core/ngx_cycle.h --- a/src/core/ngx_cycle.h Tue Nov 10 17:13:20 2020 +0300 +++ b/src/core/ngx_cycle.h Thu Nov 19 16:59:00 2020 +0000 @@ -80,6 +80,7 @@ struct ngx_cycle_s { ngx_str_t conf_param; ngx_str_t conf_prefix; ngx_str_t prefix; + ngx_str_t error_log; ngx_str_t lock_file; ngx_str_t hostname; }; diff -r 4b1299b1856a -r f18db38a9826 src/core/ngx_log.c --- a/src/core/ngx_log.c Tue Nov 10 17:13:20 2020 +0300 +++ b/src/core/ngx_log.c Thu Nov 19 16:59:00 2020 +0000 @@ -315,7 +315,7 @@ ngx_log_errno(u_char *buf, u_char *last, ngx_log_t * -ngx_log_init(u_char *prefix) +ngx_log_init(u_char *prefix, u_char *error_log) { u_char *p, *name; size_t nlen, plen; @@ -323,13 +323,11 @@ ngx_log_init(u_char *prefix) ngx_log.file = &ngx_log_file; ngx_log.log_level = NGX_LOG_NOTICE; - name = (u_char *) NGX_ERROR_LOG_PATH; + if (error_log == NULL) { + 
error_log = (u_char *) NGX_ERROR_LOG_PATH; + } - /* - * we use ngx_strlen() here since BCC warns about - * condition is always false and unreachable code - */ - + name = error_log; nlen = ngx_strlen(name); if (nlen == 0) { @@ -369,7 +367,7 @@ ngx_log_init(u_char *prefix) *p++ = '/'; } - ngx_cpystrn(p, (u_char *) NGX_ERROR_LOG_PATH, nlen + 1); + ngx_cpystrn(p, error_log, nlen + 1); p = name; } @@ -403,8 +401,7 @@ ngx_log_init(u_char *prefix) ngx_int_t ngx_log_open_default(ngx_cycle_t *cycle) { - ngx_log_t *log; - static ngx_str_t error_log = ngx_string(NGX_ERROR_LOG_PATH); + ngx_log_t *log; if (ngx_log_get_file_log(&cycle->new_log) != NULL) { return NGX_OK; @@ -425,7 +422,7 @@ ngx_log_open_default(ngx_cycle_t *cycle) log->log_level = NGX_LOG_ERR; - log->file = ngx_conf_open_file(cycle, &error_log); + log->file = ngx_conf_open_file(cycle, &cycle->error_log); if (log->file == NULL) { return NGX_ERROR; } diff -r 4b1299b1856a -r f18db38a9826 src/core/ngx_log.h --- a/src/core/ngx_log.h Tue Nov 10 17:13:20 2020 +0300 +++ b/src/core/ngx_log.h Thu Nov 19 16:59:00 2020 +0000 @@ -228,7 +228,7 @@ void ngx_cdecl ngx_log_debug_core(ngx_lo /*********************************/ -ngx_log_t *ngx_log_init(u_char *prefix); +ngx_log_t *ngx_log_init(u_char *prefix, u_char *error_log); void ngx_cdecl ngx_log_abort(ngx_err_t err, const char *fmt, ...); void ngx_cdecl ngx_log_stderr(ngx_err_t err, const char *fmt, ...); u_char *ngx_log_errno(u_char *buf, u_char *last, ngx_err_t err); From pluknet at nginx.com Thu Nov 19 18:57:11 2020 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 19 Nov 2020 18:57:11 +0000 Subject: [nginx] Use .Mt to mark up email addresses. Message-ID: <hg.f57a478aa16d.1605812231.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/f57a478aa16d branches: changeset: 7745:f57a478aa16d user: Sergey Kandaurov <pluknet at nginx.com> date: Thu Nov 19 17:15:22 2020 +0000 description: Use .Mt to mark up email addresses. 
diffstat: docs/man/nginx.8 | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (16 lines): diff -r f18db38a9826 -r f57a478aa16d docs/man/nginx.8 --- a/docs/man/nginx.8 Thu Nov 19 16:59:00 2020 +0000 +++ b/docs/man/nginx.8 Thu Nov 19 17:15:22 2020 +0000 @@ -205,10 +205,10 @@ Development of started in 2002, with the first public release on October 4, 2004. .Sh AUTHORS .An -nosplit -.An Igor Sysoev Aq igor at sysoev.ru . +.An Igor Sysoev Aq Mt igor at sysoev.ru . .Pp This manual page was originally written by -.An Sergey A. Osokin Aq osa at FreeBSD.org.ru +.An Sergey A. Osokin Aq Mt osa at FreeBSD.org.ru as a result of compiling many .Nm documents from all over the world. From p.pautov at f5.com Thu Nov 19 19:17:45 2020 From: p.pautov at f5.com (Pavel Pautov) Date: Thu, 19 Nov 2020 19:17:45 +0000 Subject: [nginx] gRPC: RST_STREAM(NO_ERROR) handling after "trailer only" responses. Message-ID: <hg.88eca63261c3.1605813465.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/88eca63261c3 branches: changeset: 7746:88eca63261c3 user: Pavel Pautov <p.pautov at f5.com> date: Wed Nov 18 18:41:16 2020 -0800 description: gRPC: RST_STREAM(NO_ERROR) handling after "trailer only" responses. Similarly to the problem fixed in 2096b21fcd10 (ticket #1792), when a "trailer only" gRPC response (that is, a response with the END_STREAM flag in the HEADERS frame) was immediately followed by RST_STREAM(NO_ERROR) in the data preread along with the response header, RST_STREAM wasn't properly skipped and caused "upstream rejected request with error 0" errors. Observed with "unknown service" gRPC errors returned by grpc-go. Fix is to set ctx->done if we are going to parse additional data, so the RST_STREAM(NO_ERROR) is properly skipped. Additionally, now ngx_http_grpc_filter() will complain about frames sent for closed stream if there are any. 
diffstat: src/http/modules/ngx_http_grpc_module.c | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (11 lines): diff -r f57a478aa16d -r 88eca63261c3 src/http/modules/ngx_http_grpc_module.c --- a/src/http/modules/ngx_http_grpc_module.c Thu Nov 19 17:15:22 2020 +0000 +++ b/src/http/modules/ngx_http_grpc_module.c Wed Nov 18 18:41:16 2020 -0800 @@ -1969,6 +1969,7 @@ ngx_http_grpc_filter_init(void *data) } u->length = 0; + ctx->done = 1; } else { u->length = 1; From teward at thomas-ward.net Thu Nov 19 19:25:18 2020 From: teward at thomas-ward.net (Thomas Ward) Date: Thu, 19 Nov 2020 14:25:18 -0500 Subject: Wrong list I know, but need forum.nginx.org mail admin contact Message-ID: <a99595f0-0995-64eb-ad90-c016249a0fb7@thomas-ward.net> Subject says it all, can one of you NGINX people contact me offlist with the direct contact for someone who can fix forum.nginx.org mail delivery issues? Getting a number of Rejected Messages because of something that changed on your end. Thomas -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201119/74d443ac/attachment.htm> From vbart at nginx.com Fri Nov 20 09:32:49 2020 From: vbart at nginx.com (Valentin Bartenev) Date: Fri, 20 Nov 2020 09:32:49 +0000 Subject: [njs] Ignoring pcre_study() error. Message-ID: <hg.e51da8c71f26.1605864769.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/e51da8c71f26 branches: changeset: 1569:e51da8c71f26 user: Valentin Bartenev <vbart at nginx.com> date: Fri Nov 20 12:29:30 2020 +0300 description: Ignoring pcre_study() error. It provides optional optimization that shouldn't be fatal for regex compilation. 
diffstat: src/njs_pcre.c | 4 +--- 1 files changed, 1 insertions(+), 3 deletions(-) diffs (15 lines): diff -r c947a300b96c -r e51da8c71f26 src/njs_pcre.c --- a/src/njs_pcre.c Wed Nov 18 18:09:11 2020 +0000 +++ b/src/njs_pcre.c Fri Nov 20 12:29:30 2020 +0300 @@ -94,10 +94,8 @@ njs_regex_compile(njs_regex_t *regex, u_ regex->extra = pcre_study(regex->code, 0, &errstr); if (njs_slow_path(errstr != NULL)) { - njs_alert(ctx->trace, NJS_LEVEL_ERROR, + njs_alert(ctx->trace, NJS_LEVEL_WARN, "pcre_study(\"%s\") failed: %s", pattern, errstr); - - goto done; } err = pcre_fullinfo(regex->code, NULL, PCRE_INFO_CAPTURECOUNT, From thdbsdox12 at gmail.com Tue Nov 24 08:43:30 2020 From: thdbsdox12 at gmail.com (=?iso-8859-1?q?SoYun_Seong?=) Date: Tue, 24 Nov 2020 08:43:30 +0000 Subject: [PATCH 2 of 8] Added variables and defined a new flag for io_uring module In-Reply-To: <1cb2b354e262e10a8e86.1606207409@ssyserver> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> Message-ID: <3bbe4905410b04bdaad4.1606207410@ssyserver> # HG changeset patch # User SoYun Seong <thdbsdox12 at gmail.com> # Date 1606128433 0 # Mon Nov 23 10:47:13 2020 +0000 # Node ID 3bbe4905410b04bdaad4c956f6fdb7d0612ddf5b # Parent 1cb2b354e262e10a8e8606e238ca0fa279f70709 Added variables and defined a new flag for io_uring module. For io_uring event module, added member variables to ngx_event_s structure. ?uring_res? is used to pass the result of IO to a read or write event. ?uring_pending? means the total requested IO count of the event and ?uring_rq_size? means the total IO size of the event. Both variables is used to check completion of the request. ?uring_iov? is used to do asynchronous vector IO. ?uring_splice_pipe? is used to process asynchronous sendfile. Also added an io_uring event flag and defined requested IO type for io_uring. 
diff -r 1cb2b354e262 -r 3bbe4905410b src/event/ngx_event.h --- a/src/event/ngx_event.h Mon Nov 23 10:08:50 2020 +0000 +++ b/src/event/ngx_event.h Mon Nov 23 10:47:13 2020 +0000 @@ -107,6 +107,14 @@ ngx_event_ovlp_t ovlp; #endif +#if (NGX_HAVE_URING) + int uring_res; + ngx_uint_t uring_pending; + ssize_t uring_rq_size; + struct iovec uring_iov[NGX_IOVS_PREALLOCATE]; + int uring_splice_pipe[2]; +#endif + ngx_uint_t index; ngx_log_t *log; @@ -271,6 +279,11 @@ */ #define NGX_USE_VNODE_EVENT 0x00002000 +/* + * The event filter is io_uring. + */ +#define NGX_USE_URING_EVENT 0x00004000 + /* * The event filter is deleted just before the closing file. @@ -390,6 +403,19 @@ #define NGX_IOCP_CONNECT 2 #endif +#if (NGX_HAVE_URING) +#define NGX_URING_ACCEPT 0 +#define NGX_URING_READ 1 +#define NGX_URING_READV 2 +#define NGX_URING_SEND 3 +#define NGX_URING_WRITEV 4 +#define NGX_URING_SPLICE_TO_PIPE 5 +#define NGX_URING_SPLICE_FROM_PIPE 6 +#define NGX_URING_READFILE 7 +#define NGX_URING_TIMEOUT 1 +#define NGX_USE_URING_SPLICE 0 +#endif + #if (NGX_TEST_BUILD_EPOLL) #define NGX_EXCLUSIVE_EVENT 0 -------------- next part -------------- A non-text attachment was scrubbed... 
Name: nginx-2.patch Type: text/x-patch Size: 2148 bytes Desc: not available URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201124/202a76fe/attachment.bin> From thdbsdox12 at gmail.com Tue Nov 24 08:43:31 2020 From: thdbsdox12 at gmail.com (=?iso-8859-1?q?SoYun_Seong?=) Date: Tue, 24 Nov 2020 08:43:31 +0000 Subject: [PATCH 3 of 8] Make ngx_linux_sendfile() to global function In-Reply-To: <1cb2b354e262e10a8e86.1606207409@ssyserver> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> Message-ID: <ce34097321cf3cb7f9de.1606207411@ssyserver> # HG changeset patch # User SoYun Seong <thdbsdox12 at gmail.com> # Date 1606128555 0 # Mon Nov 23 10:49:15 2020 +0000 # Node ID ce34097321cf3cb7f9de55075bd1788577bb1ad8 # Parent 3bbe4905410b04bdaad4c956f6fdb7d0612ddf5b Make ngx_linux_sendfile() to global function diff -r 3bbe4905410b -r ce34097321cf src/os/unix/ngx_linux.h --- a/src/os/unix/ngx_linux.h Mon Nov 23 10:47:13 2020 +0000 +++ b/src/os/unix/ngx_linux.h Mon Nov 23 10:49:15 2020 +0000 @@ -12,5 +12,8 @@ ngx_chain_t *ngx_linux_sendfile_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit); +ssize_t ngx_linux_sendfile(ngx_connection_t *c, ngx_buf_t *file, + size_t size); + #endif /* _NGX_LINUX_H_INCLUDED_ */ diff -r 3bbe4905410b -r ce34097321cf src/os/unix/ngx_linux_sendfile_chain.c --- a/src/os/unix/ngx_linux_sendfile_chain.c Mon Nov 23 10:47:13 2020 +0000 +++ b/src/os/unix/ngx_linux_sendfile_chain.c Mon Nov 23 10:49:15 2020 +0000 @@ -10,9 +10,6 @@ #include <ngx_event.h> -static ssize_t ngx_linux_sendfile(ngx_connection_t *c, ngx_buf_t *file, - size_t size); - #if (NGX_THREADS) #include <ngx_thread_pool.h> @@ -226,7 +223,7 @@ } -static ssize_t +ssize_t ngx_linux_sendfile(ngx_connection_t *c, ngx_buf_t *file, size_t size) { #if (NGX_HAVE_SENDFILE64) -------------- next part -------------- A non-text attachment was scrubbed... 
Name: nginx-3.patch Type: text/x-patch Size: 1266 bytes Desc: not available URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201124/5637d0a1/attachment.bin> From thdbsdox12 at gmail.com Tue Nov 24 08:43:33 2020 From: thdbsdox12 at gmail.com (=?iso-8859-1?q?SoYun_Seong?=) Date: Tue, 24 Nov 2020 08:43:33 +0000 Subject: [PATCH 5 of 8] Added to process asynchronous accept In-Reply-To: <1cb2b354e262e10a8e86.1606207409@ssyserver> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> Message-ID: <bdf6d23007d53d9b76a7.1606207413@ssyserver> # HG changeset patch # User SoYun Seong <thdbsdox12 at gmail.com> # Date 1606128989 0 # Mon Nov 23 10:56:29 2020 +0000 # Node ID bdf6d23007d53d9b76a7666c8db7ece864e31038 # Parent d7f0f0b78a24724c362cd0306dc6bbed82c66e62 Added to process asynchronous accept use a result of a requested accept to ngx_uring_module, instead of calling accept() or accept4() system call. diff -r d7f0f0b78a24 -r bdf6d23007d5 src/event/ngx_event_accept.c --- a/src/event/ngx_event_accept.c Mon Nov 23 10:52:02 2020 +0000 +++ b/src/event/ngx_event_accept.c Mon Nov 23 10:56:29 2020 +0000 @@ -55,15 +55,22 @@ do { socklen = sizeof(ngx_sockaddr_t); + if(ngx_event_flags & NGX_USE_URING_EVENT) + { + s = ev->uring_res; + sa.sockaddr = *(lc->sockaddr); + socklen = lc->socklen; + } else { #if (NGX_HAVE_ACCEPT4) - if (use_accept4) { - s = accept4(lc->fd, &sa.sockaddr, &socklen, SOCK_NONBLOCK); - } else { + if (use_accept4) { + s = accept4(lc->fd, &sa.sockaddr, &socklen, SOCK_NONBLOCK); + } else { + s = accept(lc->fd, &sa.sockaddr, &socklen); + } +#else s = accept(lc->fd, &sa.sockaddr, &socklen); +#endif } -#else - s = accept(lc->fd, &sa.sockaddr, &socklen); -#endif if (s == (ngx_socket_t) -1) { err = ngx_socket_errno; -------------- next part -------------- A non-text attachment was scrubbed... 
Name: nginx-5.patch Type: text/x-patch Size: 1416 bytes Desc: not available URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201124/35f72cd8/attachment-0001.bin> From thdbsdox12 at gmail.com Tue Nov 24 08:43:34 2020 From: thdbsdox12 at gmail.com (=?iso-8859-1?q?SoYun_Seong?=) Date: Tue, 24 Nov 2020 08:43:34 +0000 Subject: [PATCH 6 of 8] Disabled freeing recv buffer when asynchronous recv IO is in running In-Reply-To: <1cb2b354e262e10a8e86.1606207409@ssyserver> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> Message-ID: <57e46378828172e06b32.1606207414@ssyserver> # HG changeset patch # User SoYun Seong <thdbsdox12 at gmail.com> # Date 1606129149 0 # Mon Nov 23 10:59:09 2020 +0000 # Node ID 57e46378828172e06b32b11d4c059ff0a7213d0d # Parent bdf6d23007d53d9b76a7666c8db7ece864e31038 Disabled freeing recv buffer when asynchronous recv IO is in running. diff -r bdf6d23007d5 -r 57e463788281 src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Mon Nov 23 10:56:29 2020 +0000 +++ b/src/http/ngx_http_request.c Mon Nov 23 10:59:09 2020 +0000 @@ -444,8 +444,10 @@ * We are trying to not hold c->buffer's memory for an idle connection. */ - if (ngx_pfree(c->pool, b->start) == NGX_OK) { - b->start = NULL; + if(!(ngx_event_flags & NGX_USE_URING_EVENT)) { + if (ngx_pfree(c->pool, b->start) == NGX_OK) { + b->start = NULL; + } } return; @@ -3317,13 +3319,15 @@ * c->buffer's memory for a keepalive connection. */ - if (ngx_pfree(c->pool, b->start) == NGX_OK) { - - /* - * the special note that c->buffer's memory was freed - */ - - b->pos = NULL; + if(!(ngx_event_flags & NGX_USE_URING_EVENT)) { + if (ngx_pfree(c->pool, b->start) == NGX_OK) { + + /* + * the special note that c->buffer's memory was freed + */ + + b->pos = NULL; + } } return; -------------- next part -------------- A non-text attachment was scrubbed... 
Name: nginx-6.patch Type: text/x-patch Size: 1476 bytes Desc: not available URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201124/a2b6e20b/attachment-0001.bin> From thdbsdox12 at gmail.com Tue Nov 24 08:43:35 2020 From: thdbsdox12 at gmail.com (=?iso-8859-1?q?SoYun_Seong?=) Date: Tue, 24 Nov 2020 08:43:35 +0000 Subject: [PATCH 7 of 8] Added creation and close of pipes for sendfile of ngx_uring_module In-Reply-To: <1cb2b354e262e10a8e86.1606207409@ssyserver> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> Message-ID: <09dfe4a92414513c6bd3.1606207415@ssyserver> # HG changeset patch # User SoYun Seong <thdbsdox12 at gmail.com> # Date 1606129296 0 # Mon Nov 23 11:01:36 2020 +0000 # Node ID 09dfe4a92414513c6bd3c18d871e8a76ed19c3d7 # Parent 57e46378828172e06b32b11d4c059ff0a7213d0d Added creation and close of pipes for sendfile of ngx_uring_module. Creates pipes for asynchronous sendfile of ngx_uring module in ngx_get_connection() and destroy them in ngx_free_connection(). However this code is disabled for the performance issue. diff -r 57e463788281 -r 09dfe4a92414 src/core/ngx_connection.c --- a/src/core/ngx_connection.c Mon Nov 23 10:59:09 2020 +0000 +++ b/src/core/ngx_connection.c Mon Nov 23 11:01:36 2020 +0000 @@ -1152,6 +1152,14 @@ wev->write = 1; +#if (NGX_USE_URING_SPLICE) + if(ngx_event_flags & NGX_USE_URING_EVENT) { + if(pipe(c->write->uring_splice_pipe) < 0){ + return NULL; + } + } +#endif + return c; } @@ -1166,6 +1174,13 @@ if (ngx_cycle->files && ngx_cycle->files[c->fd] == c) { ngx_cycle->files[c->fd] = NULL; } + +#if (NGX_USE_URING_SPLICE) + if(ngx_event_flags & NGX_USE_URING_EVENT) { + close(c->write->uring_splice_pipe[0]); + close(c->write->uring_splice_pipe[1]); + } +#endif } -------------- next part -------------- A non-text attachment was scrubbed... 
Name: nginx-7.patch Type: text/x-patch Size: 1246 bytes Desc: not available URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201124/a732c47d/attachment-0001.bin> From thdbsdox12 at gmail.com Tue Nov 24 08:43:32 2020 From: thdbsdox12 at gmail.com (=?iso-8859-1?q?SoYun_Seong?=) Date: Tue, 24 Nov 2020 08:43:32 +0000 Subject: [PATCH 4 of 8] Added io_uring event module In-Reply-To: <1cb2b354e262e10a8e86.1606207409@ssyserver> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> Message-ID: <d7f0f0b78a24724c362c.1606207412@ssyserver> # HG changeset patch # User SoYun Seong <thdbsdox12 at gmail.com> # Date 1606128722 0 # Mon Nov 23 10:52:02 2020 +0000 # Node ID d7f0f0b78a24724c362cd0306dc6bbed82c66e62 # Parent ce34097321cf3cb7f9de55075bd1788577bb1ad8 Added io_uring event module. Implemented nginx io_uring module that processes event loop with io_uring API. It requires Linux kernel 5.7 or higher with IORING_FEAT_FAST_POLL. And it supports only tcp protocol yet. To move data between a file and a socket without copying between kernel address space and user address space, I implemented asynchronous sendfile of io_uring module using two IORING_OP_SPLICE requests and pipes. However, performance of the splice requests is getting poor after accepting a client. I'm finding the reason of this problem. Because of this problem, I implemented sendfile of ngx_uring_module with a read request and a send request. This makes copy between kernel space and user space. This sendfile implementation has a high overhead when sending a big size of file. To alleviate this, when sending a file over page size(4KB), ngx_uring_module uses a Linux sendfile() system call instead. 
diff -r ce34097321cf -r d7f0f0b78a24 src/event/modules/ngx_uring_module.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/event/modules/ngx_uring_module.c Mon Nov 23 10:52:02 2020 +0000 @@ -0,0 +1,1191 @@ + +#include <ngx_config.h> +#include <ngx_core.h> +#include <ngx_event.h> + +#include "liburing.h" + + +typedef struct { + ngx_uint_t entries; + size_t sendfile_bound; +} ngx_uring_conf_t; + + +typedef struct { + ngx_connection_t *conn; + ngx_uint_t ev; + void *buf; +} ngx_uring_info_t; + + +static ngx_int_t ngx_uring_init(ngx_cycle_t *cycle, ngx_msec_t timer); +static void ngx_uring_done(ngx_cycle_t *cycle); +static ngx_int_t ngx_uring_add_event(ngx_event_t *ev, ngx_int_t event, + ngx_uint_t flags); +static ngx_int_t ngx_uring_add_connection(ngx_connection_t *c); +static ngx_int_t ngx_uring_del_connection(ngx_connection_t *c, + ngx_uint_t flags); +static ngx_int_t ngx_uring_process_events(ngx_cycle_t *cycle, ngx_msec_t timer, + ngx_uint_t flags); + +static void *ngx_uring_create_conf(ngx_cycle_t *cycle); +static char *ngx_uring_init_conf(ngx_cycle_t *cycle, void *conf); + + +ngx_int_t ngx_uring_accept(ngx_connection_t *c); +ssize_t ngx_uring_recv(ngx_connection_t *c, u_char *buf, size_t size); +ssize_t ngx_uring_readv_chain(ngx_connection_t *c, ngx_chain_t *chain, off_t limit); +ssize_t ngx_uring_send(ngx_connection_t *c, u_char *buf, size_t size); +ngx_chain_t * ngx_uring_writev_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit); +ngx_chain_t *ngx_uring_sendfile_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit); +static ssize_t ngx_uring_writev(ngx_connection_t *c, int nelts, int start_el); +#if (NGX_USE_URING_SPLICE) +static ssize_t ngx_uring_splice_sendfile(ngx_connection_t *c, ngx_buf_t *file, size_t size); +#else +static ssize_t ngx_uring_read_sendfile(ngx_connection_t *c, ngx_buf_t *file, size_t size); +#endif + + + +static struct io_uring ring; +static size_t linux_sendfile_bound; + +static ngx_str_t uring_name = ngx_string("io_uring"); 
+ +static ngx_command_t ngx_uring_commands[] = { + + { ngx_string("uring_entries"), + NGX_EVENT_CONF|NGX_CONF_TAKE1, + ngx_conf_set_num_slot, + 0, + offsetof(ngx_uring_conf_t, entries), + NULL }, + + { ngx_string("linux_sendfile_bound"), + NGX_EVENT_CONF|NGX_CONF_TAKE1, + ngx_conf_set_num_slot, + 0, + offsetof(ngx_uring_conf_t, sendfile_bound), + NULL }, + + ngx_null_command +}; + + +static ngx_event_module_t ngx_uring_module_ctx = { + &uring_name, + ngx_uring_create_conf, /* create configuration */ + ngx_uring_init_conf, /* init configuration */ + + { + ngx_uring_add_event, /* add an event */ + NULL, /* delete an event */ + ngx_uring_add_event, /* enable an event */ + NULL, /* disable an event */ + ngx_uring_add_connection, /* add an connection */ + ngx_uring_del_connection, /* delete an connection */ + NULL, /* trigger a notify */ + ngx_uring_process_events, /* process the events */ + ngx_uring_init, /* init the events */ + ngx_uring_done, /* done the events */ + } +}; + +ngx_module_t ngx_uring_module = { + NGX_MODULE_V1, + &ngx_uring_module_ctx, /* module context */ + ngx_uring_commands, /* module directives */ + NGX_EVENT_MODULE, /* module type */ + NULL, /* init master */ + NULL, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + +ngx_os_io_t ngx_uring_io = { + ngx_uring_recv, + ngx_uring_readv_chain, + NULL, /* udp recv */ + ngx_uring_send, + NULL, /* udp send */ + NULL, /* udp sendmsg chain */ +#if (1) + ngx_uring_sendfile_chain, + NGX_IO_SENDFILE +#else + ngx_uring_writev_chain, + 0 +#endif +}; + + +static ngx_int_t +ngx_uring_init(ngx_cycle_t *cycle, ngx_msec_t timer) +{ + ngx_uring_conf_t *urcf; + struct io_uring_params params; + + urcf = ngx_event_get_conf(cycle->conf_ctx, ngx_uring_module); + + if(ring.ring_fd == 0){ + ngx_memzero(¶ms, sizeof(params)); + if (io_uring_queue_init_params(urcf->entries, &ring, ¶ms) < 0) { + 
ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno, + "io_uring_queue_init_params() failed"); + return NGX_ERROR; + } + + if(!(params.features & IORING_FEAT_FAST_POLL)){ + ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno, + "IORING_FEAT_FAST_POLL is not available"); + return NGX_ERROR; + } + } + + linux_sendfile_bound = urcf->sendfile_bound; + + ngx_io = ngx_uring_io; + + ngx_event_actions = ngx_uring_module_ctx.actions; + +#if (NGX_HAVE_CLEAR_EVENT) + ngx_event_flags = NGX_USE_CLEAR_EVENT +#else + ngx_event_flags = NGX_USE_LEVEL_EVENT +#endif + |NGX_USE_GREEDY_EVENT + |NGX_USE_URING_EVENT; + + return NGX_OK; +} + + + +static void +ngx_uring_done(ngx_cycle_t *cycle) +{ + io_uring_queue_exit(&ring); + ngx_memset(&ring, 0, sizeof(ring)); +} + + +static ngx_int_t +ngx_uring_add_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags) +{ + ngx_connection_t *c; + + c = ev->data; + + if(event == NGX_READ_EVENT && c->read->accept){ + if(ngx_uring_accept(c) == NGX_ERROR) + return NGX_ERROR; + } + + ev->active = 1; + ev->ready = 1; + + return NGX_OK; +} + + +static ngx_int_t +ngx_uring_add_connection(ngx_connection_t *c) +{ + if(c->read->accept){ + if(ngx_uring_accept(c) == NGX_ERROR) + return NGX_ERROR; + } + + c->read->active = 1; + c->write->active = 1; + c->read->ready = 1; + c->write->ready = 1; + + return NGX_OK; +} + + +static ngx_int_t +ngx_uring_del_connection(ngx_connection_t *c, ngx_uint_t flags) +{ + c->read->active = 0; + c->write->active = 0; + + return NGX_OK; +} + + +static ngx_int_t +ngx_uring_process_events(ngx_cycle_t *cycle, ngx_msec_t timer, ngx_uint_t flags) +{ + unsigned head, count; + ngx_event_t *rev, *wev; + ngx_connection_t *c; + ngx_uring_info_t *ui; + struct io_uring_cqe *cqe; + struct io_uring_sqe *sqe; + struct __kernel_timespec ts; + + + /* NGX_TIMER_INFINITE == INFTIM */ + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, + "io_uring timer: %M", timer); + + if(timer != NGX_TIMER_INFINITE) { + if(timer >= 1000){ + ts.tv_sec = timer / 
1000; + ts.tv_nsec = 0; + } else{ + ts.tv_sec = 0; + ts.tv_nsec = timer * 1000000; + } + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL) { + ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + + io_uring_prep_timeout(sqe, &ts, 1, 0); + io_uring_sqe_set_data(sqe, (void*)NGX_URING_TIMEOUT); + } + + io_uring_submit_and_wait(&ring, 1); + + if (flags & NGX_UPDATE_TIME || ngx_event_timer_alarm) { + ngx_time_update(); + } + + count = 0; + + io_uring_for_each_cqe(&ring, head, cqe) { + ++count; + + if(cqe->user_data == NGX_URING_TIMEOUT) { + if(count > 1) continue; + + io_uring_cq_advance(&ring, count); + + if(timer != NGX_TIMER_INFINITE){ + return NGX_OK; + } + + ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, + "invalid timeout event"); + return NGX_ERROR; + } + + ui = (ngx_uring_info_t*)cqe->user_data; + + c = ui->conn; + + if (c->fd == -1) { + /* + * the stale event from a file descriptor + * that was just closed in this iteration + */ + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, + "io_uring: stale event %p", c); + continue; + } + + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, cycle->log, 0, + "io_uring: fd:%d rq:%d d:%p", + c->fd, ui->ev, cqe->user_data); + + rev = c->read; + wev = c->write; + + switch (ui->ev) + { + case NGX_URING_ACCEPT:{ + ngx_pfree(c->pool, ui); + if(cqe->res != -11) { + rev->uring_res = cqe->res; + + rev->ready = 1; + rev->complete = 1; + rev->available = -1; + + if (flags & NGX_POST_EVENTS) { + ngx_post_event(rev, &ngx_posted_accept_events); + + } else { + rev->handler(rev); + } + } + + rev->ready = 0; + rev->complete = 0; + rev->available = 1; + + if(ngx_uring_accept(c) == NGX_ERROR) + return NGX_ERROR; + break; + } + case NGX_URING_READ: + case NGX_URING_READV:{ + ngx_pfree(c->pool, ui); + rev->uring_pending -= 1; + rev->uring_res += cqe->res; + + if(rev->uring_pending == 0){ + rev->complete = 1; + rev->ready = 1; + rev->available = -1; + + if (flags & NGX_POST_EVENTS) { + ngx_post_event(rev, 
&ngx_posted_events); + + } else { + rev->handler(rev); + } + } + break; + } + case NGX_URING_SEND: + case NGX_URING_WRITEV: + case NGX_URING_SPLICE_FROM_PIPE:{ + if(ui->buf) ngx_pfree(c->pool, ui->buf); + ngx_pfree(c->pool, ui); + wev->uring_pending -= 1; + wev->uring_res += cqe->res; + + if(wev->uring_pending == 0) { + wev->complete = 1; + wev->ready = 1; + wev->available = -1; + + if (flags & NGX_POST_EVENTS) { + ngx_post_event(wev, &ngx_posted_events); + + } else { + wev->handler(wev); + } + } + break; + } + case NGX_URING_READFILE: + case NGX_URING_SPLICE_TO_PIPE:{ + ngx_pfree(c->pool, ui); + break; + } + default: + ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, + "io_uring invalid event type"); + return NGX_ERROR; + } + + } + + io_uring_cq_advance(&ring, count); + + if (count == 0) { + ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, + "io_uring_submit_and_wait() returned no events without timeout"); + return NGX_ERROR; + } + + return NGX_OK; +} + + +static void * +ngx_uring_create_conf(ngx_cycle_t *cycle) +{ + ngx_uring_conf_t *urcf; + + urcf = ngx_palloc(cycle->pool, sizeof(ngx_uring_conf_t)); + if (urcf == NULL) { + return NULL; + } + + urcf->entries = NGX_CONF_UNSET; + urcf->sendfile_bound = NGX_CONF_UNSET; + + return urcf; +} + + +static char * +ngx_uring_init_conf(ngx_cycle_t *cycle, void *conf) +{ + ngx_uring_conf_t *urcf = conf; + + ngx_conf_init_uint_value(urcf->entries, 32768); + ngx_conf_init_uint_value(urcf->sendfile_bound, 41984); + + return NGX_CONF_OK; +} + + +ngx_int_t +ngx_uring_accept(ngx_connection_t *c) +{ + struct io_uring_sqe *sqe; + ngx_uring_info_t *ui; + + if(c->pool == NULL){ + c->pool = ngx_create_pool(c->listening->pool_size, c->log); + if (c->pool == NULL) { + return NGX_ERROR; + } + } + + if(c->sockaddr == NULL){ + c->sockaddr = ngx_palloc(c->pool, sizeof(ngx_sockaddr_t)); + if (c->sockaddr == NULL) { + return NGX_ERROR; + } + c->socklen = sizeof(ngx_sockaddr_t); + } + + ui = ngx_palloc(c->pool, sizeof(ngx_uring_info_t)); + if(ui == 
NULL){ + return NGX_ERROR; + } + ui->conn = c; + ui->ev = NGX_URING_ACCEPT; + ui->buf = NULL; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "io_uring prep event: fd:%d op:%d ", + c->fd, ui->ev); + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL){ + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + + io_uring_prep_accept(sqe, c->fd, c->sockaddr, &c->socklen, 0); + io_uring_sqe_set_data(sqe, ui); + + return NGX_OK; +} + + +ssize_t +ngx_uring_recv(ngx_connection_t *c, u_char *buf, size_t size) +{ + ssize_t n; + ngx_event_t *rev; + ngx_uring_info_t *ui; + struct io_uring_sqe *sqe; + + rev = c->read; + + if(!rev->complete && rev->uring_pending) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, "second uring_recv post"); + return NGX_AGAIN; + } + + if(rev->complete) { + n = rev->uring_res; + rev->uring_res = 0; + rev->available = 0; + rev->uring_pending = 0; + rev->complete = 0; + + if(n == 0){ + rev->ready = 0; + rev->eof = 1; + return 0; + } + if(n < 0){ + ngx_connection_error(c, 0, "uring_recv() failed"); + rev->error = 1; + return NGX_ERROR; + } + + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, + "uring_recv: fd:%d %ul of %z", + c->fd, n, size); + + return n; + } + + ui = ngx_palloc(c->pool, sizeof(ngx_uring_info_t)); + if(ui == NULL){ + return NGX_ERROR; + } + ui->conn = c; + ui->ev = NGX_URING_READ; + ui->buf = NULL; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "io_uring prep event: fd:%d op:%d ", + c->fd, ui->ev); + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL){ + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + + io_uring_prep_recv(sqe, c->fd, buf, size, 0); + io_uring_sqe_set_data(sqe, ui); + + rev->complete = 0; + rev->ready = 0; + rev->uring_pending = 1; + + return NGX_AGAIN; +} + +ssize_t +ngx_uring_readv_chain(ngx_connection_t *c, ngx_chain_t *chain, off_t limit) +{ + u_char *prev; + ssize_t n, size; + ngx_array_t vec; + ngx_event_t *rev; + 
struct iovec *iov; + ngx_uring_info_t *ui; + struct io_uring_sqe *sqe; + + rev = c->read; + + if(!rev->complete && rev->uring_pending) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, "second uring_readv_chain post"); + return NGX_AGAIN; + } + + if(rev->complete) { + n = rev->uring_res; + rev->uring_res = 0; + rev->available = 0; + rev->uring_pending = 0; + rev->complete = 0; + + if(n == 0){ + rev->ready = 0; + rev->eof = 1; + return 0; + } + if(n < 0){ + ngx_connection_error(c, 0, "uring_readv() failed"); + rev->error = 1; + return NGX_ERROR; + } + + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, + "uring_readv: fd:%d %ul of %z", + c->fd, n, size); + + return n; + } + + prev = NULL; + iov = NULL; + size = 0; + + vec.elts = rev->uring_iov; + vec.nelts = 0; + vec.size = sizeof(struct iovec); + vec.nalloc = NGX_IOVS_PREALLOCATE; + vec.pool = c->pool; + + /* coalesce the neighbouring bufs */ + + while (chain) { + n = chain->buf->end - chain->buf->last; + + if (limit) { + if (size >= limit) { + break; + } + + if (size + n > limit) { + n = (ssize_t) (limit - size); + } + } + + if (prev == chain->buf->last) { + iov->iov_len += n; + + } else { + if (vec.nelts >= IOV_MAX) { + break; + } + + iov = ngx_array_push(&vec); + if (iov == NULL) { + return NGX_ERROR; + } + + iov->iov_base = (void *) chain->buf->last; + iov->iov_len = n; + } + + size += n; + prev = chain->buf->end; + chain = chain->next; + } + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "readv: %ui, last:%uz", vec.nelts, iov->iov_len); + + ui = ngx_palloc(c->pool, sizeof(ngx_uring_info_t)); + if(ui == NULL){ + return NGX_ERROR; + } + ui->conn = c; + ui->ev = NGX_URING_READV; + ui->buf = NULL; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "io_uring prep event: fd:%d op:%d ", + c->fd, ui->ev); + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL){ + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + + io_uring_prep_readv(sqe, c->fd, (struct iovec *) vec.elts, 
vec.nelts, 0); + io_uring_sqe_set_data(sqe, ui); + + rev->complete = 0; + rev->ready = 0; + rev->uring_pending = 1; + + return NGX_AGAIN; +} + +#define NGX_SENDFILE_MAXSIZE 2147483647L + +ssize_t +ngx_uring_send(ngx_connection_t *c, u_char *buf, size_t size) +{ + ssize_t n; + ngx_event_t *wev; + ngx_uring_info_t *ui; + struct io_uring_sqe *sqe; + + wev = c->write; + + if(wev->uring_pending && !wev->complete){ + return NGX_AGAIN; + } + + if(wev->complete){ + n = wev->uring_res; + wev->complete = 0; + wev->uring_pending = 0; + wev->uring_res = 0; + wev->ready = 1; + + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, + "send: fd:%d %z of %uz", c->fd, n, size); + + if (n == 0) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, "send() returned zero"); + wev->ready = 0; + return n; + } + + if (n > 0) { + if (n < (ssize_t) size) { + wev->ready = 0; + } + + c->sent += n; + + return n; + } + + + wev->error = 1; + (void) ngx_connection_error(c, 0, "send() failed"); + return NGX_ERROR; + + } + + ui = ngx_palloc(c->pool, sizeof(ngx_uring_info_t)); + if(ui == NULL){ + return NGX_ERROR; + } + ui->conn = c; + ui->ev = NGX_URING_SEND; + ui->buf = NULL; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "io_uring prep event: fd:%d op:%d ", + c->fd, ui->ev); + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL){ + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + + io_uring_prep_send(sqe, c->fd, buf, size, 0); + io_uring_sqe_set_data(sqe, ui); + + wev->uring_rq_size = size; + wev->uring_pending = 1; + wev->complete = 0; + wev->ready = 0; + + return NGX_AGAIN; +} + +static ssize_t +ngx_uring_writev(ngx_connection_t *c, int nelts, int start_el) +{ + ngx_uring_info_t *ui; + struct io_uring_sqe *sqe; + + ui = ngx_palloc(c->pool, sizeof(ngx_uring_info_t)); + if(ui == NULL){ + return NGX_ERROR; + } + ui->conn = c; + ui->ev = NGX_URING_WRITEV; + ui->buf = NULL; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "io_uring prep event: fd:%d op:%d 
", + c->fd, ui->ev); + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL){ + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + + io_uring_prep_writev(sqe, c->fd, &c->write->uring_iov[start_el], nelts - start_el , 0); + io_uring_sqe_set_data(sqe, ui); + + return NGX_AGAIN; +} + + +ngx_chain_t * +ngx_uring_writev_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit) +{ + u_char *prev; + ssize_t n, sent, size; + off_t send; + ngx_chain_t *cl; + ngx_event_t *wev; + int nelts, start_el; + + wev = c->write; + + if(wev->uring_pending && !wev->complete){ + return in; + } + + if(wev->complete){ + sent = wev->uring_res; + + wev->complete = 0; + wev->uring_pending = 0; + wev->uring_res = 0; + wev->ready = 1; + + if(sent != wev->uring_rq_size){ + ngx_connection_error(c, 0, "uring_writev_chain failed"); + return NGX_CHAIN_ERROR; + } + + c->sent += sent; + in = ngx_chain_update_sent(in, sent); + + return in; + } + + send = 0; + nelts = 0; + prev = NULL; + start_el = 0; + + + /* the maximum limit size is the maximum size_t value - the page size */ + + if (limit == 0 || limit > (off_t) (NGX_MAX_SIZE_T_VALUE - ngx_pagesize)) { + limit = NGX_MAX_SIZE_T_VALUE - ngx_pagesize; + } + + for (cl = in; + cl && nelts < NGX_IOVS_PREALLOCATE && send < limit; + ) + { + + if (ngx_buf_special(cl->buf)) { + cl = cl->next; + continue; + } + + if (cl && cl->buf->in_file) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "file buf in writev " + "t:%d r:%d f:%d %p %p-%p %p %O-%O", + cl->buf->temporary, + cl->buf->recycled, + cl->buf->in_file, + cl->buf->start, + cl->buf->pos, + cl->buf->last, + cl->buf->file, + cl->buf->file_pos, + cl->buf->file_last); + ngx_debug_point(); + + return NGX_CHAIN_ERROR; + } + + + size = cl->buf->last - cl->buf->pos; + if (send + size > limit) { + size = (u_long) (limit - send); + } + + if (prev == cl->buf->pos) { + wev->uring_iov[nelts - 1].iov_len += cl->buf->last - cl->buf->pos; + + } else { + ++nelts; + if (nelts >= 
NGX_IOVS_PREALLOCATE) { + wev->error = 1; + return NGX_CHAIN_ERROR; + } + wev->uring_iov[nelts - 1].iov_base = (void *) cl->buf->pos; + wev->uring_iov[nelts - 1].iov_len = cl->buf->last - cl->buf->pos; + } + prev = cl->buf->last; + send += size; + cl = cl->next; + } + + if(nelts - start_el > 0){ + n = ngx_uring_writev(c, nelts, start_el); + + if (n == NGX_ERROR) { + return NGX_CHAIN_ERROR; + } + + sent = 0; + } + + wev->uring_rq_size = send; + wev->uring_pending = 1; + wev->complete = 0; + wev->ready = 0; + + return in; +} + +#if (NGX_USE_URING_SPLICE) +static ssize_t +ngx_uring_splice_sendfile(ngx_connection_t *c, ngx_buf_t *file, size_t size) +{ + ngx_uring_info_t *ui; + struct io_uring_sqe *sqe; +#if (NGX_HAVE_SENDFILE64) + off_t offset; +#else + int32_t offset; +#endif +#if (NGX_HAVE_SENDFILE64) + offset = file->file_pos; +#else + offset = (int32_t) file->file_pos; +#endif + + ui = ngx_palloc(c->pool, sizeof(ngx_uring_info_t)); + if(ui == NULL){ + return NGX_ERROR; + } + ui->conn = c; + ui->ev = NGX_URING_SPLICE_TO_PIPE; + ui->buf = NULL; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "io_uring prep event: fd:%d op:%d ", + c->fd, ui->ev); + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL){ + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + io_uring_prep_splice(sqe, file->file->fd, offset, + c->write->uring_splice_pipe[1], -1, size, SPLICE_F_MOVE | SPLICE_F_MORE); + sqe->flags = IOSQE_IO_LINK; + io_uring_sqe_set_data(sqe, ui); + + + ui = ngx_palloc(c->pool, sizeof(ngx_uring_info_t)); + if(ui == NULL){ + return NGX_ERROR; + } + ui->conn = c; + ui->ev = NGX_URING_SPLICE_FROM_PIPE; + ui->buf = NULL; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "io_uring prep event: fd:%d op:%d ", + c->fd, ui->ev); + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL){ + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + io_uring_prep_splice(sqe, 
c->write->uring_splice_pipe[0], -1, + c->fd, -1, size, SPLICE_F_MOVE | SPLICE_F_MORE); + io_uring_sqe_set_data(sqe, ui); + + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "uring_splice_sendfile: @%O %uz", file->file_pos, size); + + return NGX_AGAIN; +} +#else +static ssize_t +ngx_uring_read_sendfile(ngx_connection_t *c, ngx_buf_t *file, size_t size) +{ + ngx_uring_info_t *ui; + struct io_uring_sqe *sqe; + void *buf; +#if (NGX_HAVE_SENDFILE64) + off_t offset; +#else + int32_t offset; +#endif +#if (NGX_HAVE_SENDFILE64) + offset = file->file_pos; +#else + offset = (int32_t) file->file_pos; +#endif + + buf = ngx_palloc(c->pool, size); + if(buf == NULL){ + return NGX_ERROR; + } + + ui = ngx_palloc(c->pool, sizeof(ngx_uring_info_t)); + if(ui == NULL){ + return NGX_ERROR; + } + ui->conn = c; + ui->ev = NGX_URING_READFILE; + ui->buf = NULL; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "io_uring prep event: fd:%d op:%d ", + c->fd, ui->ev); + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL){ + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + + io_uring_prep_read(sqe, file->file->fd, buf, size, offset ); + sqe->flags = IOSQE_IO_LINK; + io_uring_sqe_set_data(sqe, ui); + + + ui = ngx_palloc(c->pool, sizeof(ngx_uring_info_t)); + if(ui == NULL){ + return NGX_ERROR; + } + ui->conn = c; + ui->ev = NGX_URING_SEND; + ui->buf = buf; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "io_uring prep event: fd:%d op:%d ", + c->fd, ui->ev); + + sqe = io_uring_get_sqe(&ring); + if(sqe == NULL){ + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "io_uring_get_sqe() failed"); + return NGX_ERROR; + } + io_uring_prep_send(sqe, c->fd, buf, size, 0); + io_uring_sqe_set_data(sqe, ui); + + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "uring_splice_sendfile: @%O %uz", file->file_pos, size); + + return NGX_AGAIN; +} +#endif + + +ngx_chain_t * +ngx_uring_sendfile_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit) +{ + u_char 
*prev; + off_t send; + size_t file_size; + ngx_buf_t *file; + ngx_event_t *wev; + ngx_chain_t *cl; + ssize_t n, sent, size; + int nelts, start_el, pending; + + wev = c->write; + + if(wev->uring_pending && !wev->complete){ + return in; + } + + if(wev->complete){ + sent = wev->uring_res; + + wev->complete = 0; + wev->uring_pending = 0; + wev->uring_res = 0; + wev->ready = 1; + + if(sent != wev->uring_rq_size){ + ngx_connection_error(c, 0, "uring_writev_chain failed"); + return NGX_CHAIN_ERROR; + } + + c->sent += sent; + in = ngx_chain_update_sent(in, sent); + + return in; + } + sent = 0; + send = 0; + nelts = 0; + prev = NULL; + start_el = 0; + pending = 0; + + if (limit == 0 || limit > (off_t) (NGX_SENDFILE_MAXSIZE - ngx_pagesize)) { + limit = NGX_SENDFILE_MAXSIZE - ngx_pagesize; + } + + for (cl = in; + cl && nelts < NGX_IOVS_PREALLOCATE && send < limit; + ) + { + if (ngx_buf_special(cl->buf)) { + cl = cl->next; + continue; + } + + if (cl->buf->in_file) { + if(nelts > 0){ + ++pending; + n = ngx_uring_writev(c, nelts, start_el); + + if (n == NGX_ERROR) { + return NGX_CHAIN_ERROR; + } + + start_el += nelts; + prev = NULL; + } + + file = cl->buf; + + /* coalesce the neighbouring file bufs */ + + file_size = (size_t) ngx_chain_coalesce_file(&cl, limit - send); + + send += file_size; + + if (file_size == 0) { + ngx_debug_point(); + return NGX_CHAIN_ERROR; + } + + if(file_size >= linux_sendfile_bound) { + io_uring_submit(&ring); + + n = ngx_linux_sendfile(c, file, file_size); + + if (n == NGX_ERROR) { + return NGX_CHAIN_ERROR; + } + + if(n != NGX_AGAIN){ + sent += n; + continue; + } + } + +#if (NGX_USE_URING_SPLICE) + n = ngx_uring_splice_sendfile(c, file, file_size); +#else + n = ngx_uring_read_sendfile(c, file, file_size); +#endif + + if (n == NGX_ERROR) { + wev->error = 1; + return NGX_CHAIN_ERROR; + } + + ++pending; + continue; + } + + size = cl->buf->last - cl->buf->pos; + if (send + size > limit) { + size = (u_long) (limit - send); + } + + if (prev == cl->buf->pos) 
{ + wev->uring_iov[nelts - 1].iov_len += cl->buf->last - cl->buf->pos; + + } else { + ++nelts; + if (nelts >= NGX_IOVS_PREALLOCATE) { + wev->error = 1; + return NGX_CHAIN_ERROR; + } + wev->uring_iov[nelts - 1].iov_base = (void *) cl->buf->pos; + wev->uring_iov[nelts - 1].iov_len = cl->buf->last - cl->buf->pos; + } + + prev = cl->buf->last; + send += size; + cl = cl->next; + } + + if(nelts - start_el > 0){ + ++pending; + n = ngx_uring_writev(c, nelts, start_el); + + if (n == NGX_ERROR) { + return NGX_CHAIN_ERROR; + } + } + + wev->uring_rq_size = send; + wev->uring_pending = pending; + wev->complete = 0; + wev->ready = 0; + wev->uring_res = sent; + + return in; +} diff -r ce34097321cf -r d7f0f0b78a24 src/event/ngx_event.c --- a/src/event/ngx_event.c Mon Nov 23 10:49:15 2020 +0000 +++ b/src/event/ngx_event.c Mon Nov 23 10:52:02 2020 +0000 @@ -17,6 +17,7 @@ extern ngx_module_t ngx_eventport_module; extern ngx_module_t ngx_devpoll_module; extern ngx_module_t ngx_epoll_module; +extern ngx_module_t ngx_uring_module; extern ngx_module_t ngx_select_module; -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-4.patch Type: text/x-patch Size: 33164 bytes Desc: not available URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201124/d6ce6f36/attachment-0001.bin> From thdbsdox12 at gmail.com Tue Nov 24 08:43:36 2020 From: thdbsdox12 at gmail.com (=?iso-8859-1?q?SoYun_Seong?=) Date: Tue, 24 Nov 2020 08:43:36 +0000 Subject: [PATCH 8 of 8] new io_uring event module In-Reply-To: <1cb2b354e262e10a8e86.1606207409@ssyserver> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> Message-ID: <d7420a5777b63c8a8cfb.1606207416@ssyserver> # HG changeset patch # User SoYun Seong <thdbsdox12 at gmail.com> # Date 1606129415 0 # Mon Nov 23 11:03:35 2020 +0000 # Node ID d7420a5777b63c8a8cfb7e98a522893490995510 # Parent 09dfe4a92414513c6bd3c18d871e8a76ed19c3d7 new io_uring event module. 
I implemented ngx_uring_module using Linux io_uring API to improve performance of Nginx for Linux by minimizing system calls. There are performance improvements in both request/sec and average latency. The result is located at https://github.com/dachshu/nginx. However, there are some places that uses local variable buffer(stack) to recv and send data. To do asynchronous IO, recv and send buffers should be located at safe memory(like heap, data). Therefore it is needed to make these codes to use allocated memory from memory pool when using asynchronous IO. Also I am working on improve performance of Nginx for Windows using Registered IO and IOCP. diff -r 09dfe4a92414 -r d7420a5777b6 auto/os/linux --- a/auto/os/linux Mon Nov 23 11:01:36 2020 +0000 +++ b/auto/os/linux Mon Nov 23 11:03:35 2020 +0000 @@ -89,6 +89,30 @@ fi +# io_uring + +ngx_feature="uring" +ngx_feature_name="NGX_HAVE_URING" +ngx_feature_run=yes +ngx_feature_incs="#include <liburing.h>" +ngx_feature_path="-I src/liburing/src/include/" +ngx_feature_libs="-L src/liburing/src/ -luring" +ngx_feature_test="struct io_uring ring; + struct io_uring_params params; + if (io_uring_queue_init_params(32768, &ring, ¶ms) < 0) return 1; + if (!(params.features & IORING_FEAT_FAST_POLL)) return 1;" +. auto/feature + +if [ $ngx_found = yes ]; then + have=NGX_HAVE_CLEAR_EVENT . 
auto/have + CORE_SRCS="$CORE_SRCS $URING_SRCS" + CORE_INCS="$CORE_INCS $ngx_feature_path" + CORE_LIBS="$CORE_LIBS $ngx_feature_libs" + EVENT_MODULES="$EVENT_MODULES $URING_MODULE" + EVENT_FOUND=YES +fi + + # O_PATH and AT_EMPTY_PATH were introduced in 2.6.39, glibc 2.14 ngx_feature="O_PATH" diff -r 09dfe4a92414 -r d7420a5777b6 auto/sources --- a/auto/sources Mon Nov 23 11:01:36 2020 +0000 +++ b/auto/sources Mon Nov 23 11:03:35 2020 +0000 @@ -120,6 +120,9 @@ EPOLL_MODULE=ngx_epoll_module EPOLL_SRCS=src/event/modules/ngx_epoll_module.c +URING_MODULE=ngx_uring_module +URING_SRCS=src/event/modules/ngx_uring_module.c + IOCP_MODULE=ngx_iocp_module IOCP_SRCS=src/event/modules/ngx_iocp_module.c -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-8.patch Type: text/x-patch Size: 2392 bytes Desc: not available URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201124/39a3e007/attachment.bin> From thdbsdox12 at gmail.com Tue Nov 24 08:48:35 2020 From: thdbsdox12 at gmail.com (=?iso-8859-1?q?SoYun_Seong?=) Date: Tue, 24 Nov 2020 08:48:35 +0000 Subject: [PATCH] [PATCH 1 of 8] Cloned liburing library Message-ID: <1cb2b354e262e10a8e86.1606207715@ssyserver> The liburing repository is located at https://github.com/axboe/liburing. -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx.patch Type: text/x-patch Size: 689629 bytes Desc: not available URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201124/5c8174b3/attachment-0001.bin> From mat999 at gmail.com Tue Nov 24 09:09:29 2020 From: mat999 at gmail.com (Mathew Heard) Date: Tue, 24 Nov 2020 20:09:29 +1100 Subject: [PATCH 8 of 8] new io_uring event module In-Reply-To: <d7420a5777b63c8a8cfb.1606207416@ssyserver> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> <d7420a5777b63c8a8cfb.1606207416@ssyserver> Message-ID: <CAE5sJtQ8QF6SJMJXxVawJGO4e=H2i2SVYzBMNsuDFOnPK5F_Ow@mail.gmail.com> SoYun, Interesting patchset. 
Have you by chance also tested proxy_pass / fastcgi_pass performance? I'd be interested to know if the significant performance improvement was due to filesystem interaction or socket. Regards, Mathew On Tue, 24 Nov 2020 at 19:43, SoYun Seong <thdbsdox12 at gmail.com> wrote: > # HG changeset patch > # User SoYun Seong <thdbsdox12 at gmail.com> > # Date 1606129415 0 > # Mon Nov 23 11:03:35 2020 +0000 > # Node ID d7420a5777b63c8a8cfb7e98a522893490995510 > # Parent 09dfe4a92414513c6bd3c18d871e8a76ed19c3d7 > new io_uring event module. > > I implemented ngx_uring_module using Linux io_uring API to improve > performance of Nginx for Linux by minimizing system calls. There are > performance improvements in both request/sec and average latency. The > result is located at https://github.com/dachshu/nginx. > > However, there are some places that uses local variable buffer(stack) to > recv and send data. To do asynchronous IO, recv and send buffers should be > located at safe memory(like heap, data). Therefore it is needed to make > these codes to use allocated memory from memory pool when using > asynchronous IO. > > Also I am working on improve performance of Nginx for Windows using > Registered IO and IOCP. > > diff -r 09dfe4a92414 -r d7420a5777b6 auto/os/linux > --- a/auto/os/linux Mon Nov 23 11:01:36 2020 +0000 > +++ b/auto/os/linux Mon Nov 23 11:03:35 2020 +0000 > @@ -89,6 +89,30 @@ > fi > > > +# io_uring > + > +ngx_feature="uring" > +ngx_feature_name="NGX_HAVE_URING" > +ngx_feature_run=yes > +ngx_feature_incs="#include <liburing.h>" > +ngx_feature_path="-I src/liburing/src/include/" > +ngx_feature_libs="-L src/liburing/src/ -luring" > +ngx_feature_test="struct io_uring ring; > + struct io_uring_params params; > + if (io_uring_queue_init_params(32768, &ring, ¶ms) < > 0) return 1; > + if (!(params.features & IORING_FEAT_FAST_POLL)) return > 1;" > +. auto/feature > + > +if [ $ngx_found = yes ]; then > + have=NGX_HAVE_CLEAR_EVENT . 
auto/have > + CORE_SRCS="$CORE_SRCS $URING_SRCS" > + CORE_INCS="$CORE_INCS $ngx_feature_path" > + CORE_LIBS="$CORE_LIBS $ngx_feature_libs" > + EVENT_MODULES="$EVENT_MODULES $URING_MODULE" > + EVENT_FOUND=YES > +fi > + > + > # O_PATH and AT_EMPTY_PATH were introduced in 2.6.39, glibc 2.14 > > ngx_feature="O_PATH" > diff -r 09dfe4a92414 -r d7420a5777b6 auto/sources > --- a/auto/sources Mon Nov 23 11:01:36 2020 +0000 > +++ b/auto/sources Mon Nov 23 11:03:35 2020 +0000 > @@ -120,6 +120,9 @@ > EPOLL_MODULE=ngx_epoll_module > EPOLL_SRCS=src/event/modules/ngx_epoll_module.c > > +URING_MODULE=ngx_uring_module > +URING_SRCS=src/event/modules/ngx_uring_module.c > + > IOCP_MODULE=ngx_iocp_module > IOCP_SRCS=src/event/modules/ngx_iocp_module.c > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201124/299937d2/attachment.htm> From mdounin at mdounin.ru Tue Nov 24 15:11:43 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Nov 2020 15:11:43 +0000 Subject: [nginx] nginx-1.19.5-RELEASE Message-ID: <hg.8e5b068f761c.1606230703.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/8e5b068f761c branches: changeset: 7747:8e5b068f761c user: Maxim Dounin <mdounin at mdounin.ru> date: Tue Nov 24 18:06:34 2020 +0300 description: nginx-1.19.5-RELEASE diffstat: docs/xml/nginx/changes.xml | 56 ++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 56 insertions(+), 0 deletions(-) diffs (66 lines): diff -r 88eca63261c3 -r 8e5b068f761c docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml Wed Nov 18 18:41:16 2020 -0800 +++ b/docs/xml/nginx/changes.xml Tue Nov 24 18:06:34 2020 +0300 @@ -5,6 +5,62 @@ <change_log title="nginx"> +<changes ver="1.19.5" date="2020-11-24"> + +<change type="feature"> +<para 
lang="ru"> +ключ -e. +</para> +<para lang="en"> +the -e switch. +</para> +</change> + +<change type="feature"> +<para lang="ru"> +при сборке дополнительных модулей +теперь можно указывать одни и те же исходные файлы в разных модулях. +</para> +<para lang="en"> +the same source files can now be specified in different modules +while building addon modules. +</para> +</change> + +<change type="bugfix"> +<para lang="ru"> +SSL shutdown не работал +при закрытии соединения с ожиданием дополнительных данных (lingering close). +</para> +<para lang="en"> +SSL shutdown did not work +when lingering close was used. +</para> +</change> + +<change type="bugfix"> +<para lang="ru"> +при работе с gRPC-бэкендами +могли возникать ошибки "upstream sent frame for closed stream". +</para> +<para lang="en"> +"upstream sent frame for closed stream" errors might occur +when working with gRPC backends. +</para> +</change> + +<change type="bugfix"> +<para lang="ru"> +во внутреннем API для обработки тела запроса. +</para> +<para lang="en"> +in request body filters internal API. 
+</para> +</change> + +</changes> + + <changes ver="1.19.4" date="2020-10-27"> <change type="feature"> From mdounin at mdounin.ru Tue Nov 24 15:11:46 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Nov 2020 15:11:46 +0000 Subject: [nginx] release-1.19.5 tag Message-ID: <hg.66a441bf669b.1606230706.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/66a441bf669b branches: changeset: 7748:66a441bf669b user: Maxim Dounin <mdounin at mdounin.ru> date: Tue Nov 24 18:06:34 2020 +0300 description: release-1.19.5 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r 8e5b068f761c -r 66a441bf669b .hgtags --- a/.hgtags Tue Nov 24 18:06:34 2020 +0300 +++ b/.hgtags Tue Nov 24 18:06:34 2020 +0300 @@ -454,3 +454,4 @@ 062920e2f3bf871ef7a3d8496edec1b3065faf80 a7b46539f507e6c64efa0efda69ad60b6f4ffbce release-1.19.2 3cbc2602325f0ac08917a4397d76f5155c34b7b1 release-1.19.3 dc0cc425fa63a80315f6efb68697cadb6626cdf2 release-1.19.4 +8e5b068f761cd512d10c9671fbde0b568c1fd08b release-1.19.5 From xeioex at nginx.com Wed Nov 25 11:09:21 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 25 Nov 2020 11:09:21 +0000 Subject: [njs] Stream: improved vm events handling. Message-ID: <hg.0684301385d9.1606302561.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/0684301385d9 branches: changeset: 1570:0684301385d9 user: Dmitry Volyntsev <xeioex at nginx.com> date: Wed Nov 25 10:47:25 2020 +0000 description: Stream: improved vm events handling. 
diffstat: nginx/ngx_stream_js_module.c | 54 ++++++++++++++++++++++--------------------- 1 files changed, 28 insertions(+), 26 deletions(-) diffs (126 lines): diff -r e51da8c71f26 -r 0684301385d9 nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Fri Nov 20 12:29:30 2020 +0300 +++ b/nginx/ngx_stream_js_module.c Wed Nov 25 10:47:25 2020 +0000 @@ -48,8 +48,10 @@ typedef struct { ngx_chain_t *busy; ngx_stream_session_t *session; ngx_int_t status; - njs_vm_event_t upload_event; - njs_vm_event_t download_event; +#define NGX_JS_EVENT_UPLOAD 0 +#define NGX_JS_EVENT_DOWNLOAD 1 +#define NGX_JS_EVENT_MAX 2 + njs_vm_event_t events[2]; unsigned from_upstream:1; unsigned filter:1; unsigned in_progress:1; @@ -73,6 +75,7 @@ static ngx_int_t ngx_stream_js_body_filt static ngx_int_t ngx_stream_js_variable(ngx_stream_session_t *s, ngx_stream_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_stream_js_init_vm(ngx_stream_session_t *s); +static void ngx_stream_js_drop_events(ngx_stream_js_ctx_t *ctx); static void ngx_stream_js_cleanup_ctx(void *data); static void ngx_stream_js_cleanup_vm(void *data); static njs_int_t ngx_stream_js_buffer_arg(ngx_stream_session_t *s, @@ -441,7 +444,7 @@ ngx_stream_js_phase_handler(ngx_stream_s } } - if (ctx->upload_event != NULL) { + if (ctx->events[NGX_JS_EVENT_UPLOAD] != NULL) { ret = ngx_stream_js_buffer_arg(s, njs_value_arg(&ctx->args[1])); if (ret != NJS_OK) { goto exception; @@ -452,7 +455,7 @@ ngx_stream_js_phase_handler(ngx_stream_s goto exception; } - njs_vm_post_event(ctx->vm, ctx->upload_event, + njs_vm_post_event(ctx->vm, ctx->events[NGX_JS_EVENT_UPLOAD], njs_value_arg(&ctx->args[1]), 2); rc = njs_vm_run(ctx->vm); @@ -463,7 +466,7 @@ ngx_stream_js_phase_handler(ngx_stream_s if (njs_vm_pending(ctx->vm)) { ctx->in_progress = 1; - rc = ctx->upload_event ? NGX_AGAIN : NGX_DONE; + rc = ctx->events[NGX_JS_EVENT_UPLOAD] ? 
NGX_AGAIN : NGX_DONE; } else { ctx->in_progress = 0; @@ -487,7 +490,8 @@ exception: #define ngx_stream_event(from_upstream) \ - (from_upstream ? ctx->download_event : ctx->upload_event) + (from_upstream ? ctx->events[NGX_JS_EVENT_DOWNLOAD] \ + : ctx->events[NGX_JS_EVENT_UPLOAD]) static ngx_int_t @@ -720,19 +724,25 @@ ngx_stream_js_init_vm(ngx_stream_session static void +ngx_stream_js_drop_events(ngx_stream_js_ctx_t *ctx) +{ + ngx_uint_t i; + + for (i = 0; i < NGX_JS_EVENT_MAX; i++) { + if (ctx->events[i] != NULL) { + njs_vm_del_event(ctx->vm, ctx->events[i]); + ctx->events[i] = NULL; + } + } +} + + +static void ngx_stream_js_cleanup_ctx(void *data) { ngx_stream_js_ctx_t *ctx = data; - if (ctx->upload_event != NULL) { - njs_vm_del_event(ctx->vm, ctx->upload_event); - ctx->upload_event = NULL; - } - - if (ctx->download_event != NULL) { - njs_vm_del_event(ctx->vm, ctx->download_event); - ctx->download_event = NULL; - } + ngx_stream_js_drop_events(ctx); if (njs_vm_pending(ctx->vm)) { ngx_log_error(NGX_LOG_ERR, ctx->log, 0, "pending events"); @@ -840,10 +850,10 @@ ngx_stream_js_event(ngx_stream_session_t } if (i == 0) { - return &ctx->upload_event; + return &ctx->events[NGX_JS_EVENT_UPLOAD]; } - return &ctx->download_event; + return &ctx->events[NGX_JS_EVENT_DOWNLOAD]; } @@ -911,15 +921,7 @@ ngx_stream_js_ext_done(njs_vm_t *vm, njs ctx->status = status; - if (ctx->upload_event != NULL) { - njs_vm_del_event(ctx->vm, ctx->upload_event); - ctx->upload_event = NULL; - } - - if (ctx->download_event != NULL) { - njs_vm_del_event(ctx->vm, ctx->download_event); - ctx->download_event = NULL; - } + ngx_stream_js_drop_events(ctx); njs_value_undefined_set(njs_vm_retval(vm)); From xeioex at nginx.com Wed Nov 25 11:09:23 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 25 Nov 2020 11:09:23 +0000 Subject: [njs] Modules: introduced Buffer alternatives for object properties. 
Message-ID: <hg.434f20c29f4c.1606302563.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/434f20c29f4c branches: changeset: 1571:434f20c29f4c user: Dmitry Volyntsev <xeioex at nginx.com> date: Wed Nov 25 10:47:47 2020 +0000 description: Modules: introduced Buffer alternatives for object properties. Buffer variant returns the property bytes as is, whereas the string version may convert bytes invalid in UTF-8 encoding into replacement character. HTTP new request object properties: r.reqBody (r.requestBody), r.resBody (r.responseBody), r.vars (r.variables). Stream new stream object properties: s.vars (s.variables). new events: The events' callbacks are identical to the string counterparts, except the data argument: upstream (upload), downstream (download). diffstat: nginx/ngx_http_js_module.c | 71 +++++++++++++++++++++++-- nginx/ngx_js.h | 10 +++ nginx/ngx_stream_js_module.c | 117 ++++++++++++++++++++++++++++++++---------- src/njs.h | 2 + src/njs_extern.c | 1 + src/njs_value.c | 8 ++ src/njs_value.h | 1 + 7 files changed, 175 insertions(+), 35 deletions(-) diffs (517 lines): diff -r 0684301385d9 -r 434f20c29f4c nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Wed Nov 25 10:47:25 2020 +0000 +++ b/nginx/ngx_http_js_module.c Wed Nov 25 10:47:47 2020 +0000 @@ -43,6 +43,7 @@ typedef struct { ngx_int_t status; njs_opaque_value_t request; njs_opaque_value_t request_body; + njs_opaque_value_t response_body; ngx_str_t redirect_uri; ngx_array_t promise_callbacks; } ngx_http_js_ctx_t; @@ -319,9 +320,19 @@ static njs_external_t ngx_http_js_ext_r { .flags = NJS_EXTERN_PROPERTY, .name.string = njs_str("requestBody"), + .u.property = { + .handler = ngx_http_js_ext_get_request_body, + .magic32 = NGX_JS_STRING, + } + }, + + { + .flags = NJS_EXTERN_PROPERTY, + .name.string = njs_str("reqBody"), .enumerable = 1, .u.property = { .handler = ngx_http_js_ext_get_request_body, + .magic32 = NGX_JS_BUFFER, } }, @@ -336,9 +347,19 @@ static njs_external_t 
ngx_http_js_ext_r { .flags = NJS_EXTERN_PROPERTY, .name.string = njs_str("responseBody"), + .u.property = { + .handler = ngx_http_js_ext_get_response_body, + .magic32 = NGX_JS_STRING, + } + }, + + { + .flags = NJS_EXTERN_PROPERTY, + .name.string = njs_str("resBody"), .enumerable = 1, .u.property = { .handler = ngx_http_js_ext_get_response_body, + .magic32 = NGX_JS_BUFFER, } }, @@ -379,6 +400,17 @@ static njs_external_t ngx_http_js_ext_r .u.object = { .writable = 1, .prop_handler = ngx_http_js_ext_variables, + .magic32 = NGX_JS_STRING, + } + }, + + { + .flags = NJS_EXTERN_OBJECT, + .name.string = njs_str("vars"), + .u.object = { + .writable = 1, + .prop_handler = ngx_http_js_ext_variables, + .magic32 = NGX_JS_BUFFER, } }, @@ -1891,8 +1923,12 @@ ngx_http_js_ext_get_request_body(njs_vm_ request_body = (njs_value_t *) &ctx->request_body; if (!njs_value_is_null(request_body)) { - njs_value_assign(retval, request_body); - return NJS_OK; + if ((njs_vm_prop_magic32(prop) == NGX_JS_BUFFER) + == (uint32_t) njs_value_is_buffer(request_body)) + { + njs_value_assign(retval, request_body); + return NJS_OK; + } } if (r->request_body == NULL || r->request_body->bufs == NULL) { @@ -1939,8 +1975,7 @@ ngx_http_js_ext_get_request_body(njs_vm_ done: - ret = njs_vm_value_string_set(vm, request_body, body, len); - + ret = ngx_js_prop(vm, njs_vm_prop_magic32(prop), request_body, body, len); if (ret != NJS_OK) { return NJS_ERROR; } @@ -2220,7 +2255,8 @@ ngx_http_js_ext_variables(njs_vm_t *vm, return NJS_DECLINED; } - return njs_vm_value_string_set(vm, retval, vv->data, vv->len); + return ngx_js_prop(vm, njs_vm_prop_magic32(prop), retval, vv->data, + vv->len); } cmcf = ngx_http_get_module_main_conf(r, ngx_http_core_module); @@ -2743,7 +2779,10 @@ ngx_http_js_ext_get_response_body(njs_vm { size_t len; u_char *p; + njs_int_t ret; ngx_buf_t *b; + njs_value_t *response_body; + ngx_http_js_ctx_t *ctx; ngx_http_request_t *r; r = njs_vm_external(vm, value); @@ -2752,6 +2791,18 @@ 
ngx_http_js_ext_get_response_body(njs_vm return NJS_DECLINED; } + ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); + response_body = (njs_value_t *) &ctx->response_body; + + if (!njs_value_is_null(response_body)) { + if ((njs_vm_prop_magic32(prop) == NGX_JS_BUFFER) + == (uint32_t) njs_value_is_buffer(response_body)) + { + njs_value_assign(retval, response_body); + return NJS_OK; + } + } + b = r->out ? r->out->buf : NULL; if (b == NULL) { @@ -2761,8 +2812,9 @@ ngx_http_js_ext_get_response_body(njs_vm len = b->last - b->pos; - p = njs_vm_value_string_alloc(vm, retval, len); + p = ngx_pnalloc(r->pool, len); if (p == NULL) { + njs_vm_memory_error(vm); return NJS_ERROR; } @@ -2770,6 +2822,13 @@ ngx_http_js_ext_get_response_body(njs_vm ngx_memcpy(p, b->pos, len); } + ret = ngx_js_prop(vm, njs_vm_prop_magic32(prop), response_body, p, len); + if (ret != NJS_OK) { + return NJS_ERROR; + } + + njs_value_assign(retval, response_body); + return NJS_OK; } diff -r 0684301385d9 -r 434f20c29f4c nginx/ngx_js.h --- a/nginx/ngx_js.h Wed Nov 25 10:47:25 2020 +0000 +++ b/nginx/ngx_js.h Wed Nov 25 10:47:47 2020 +0000 @@ -15,10 +15,20 @@ #include <njs.h> +#define NGX_JS_UNSET 0 +#define NGX_JS_STRING 1 +#define NGX_JS_BUFFER 2 + + #define ngx_external_connection(vm, ext) \ (*((ngx_connection_t **) ((u_char *) ext + njs_vm_meta(vm, 0)))) +#define ngx_js_prop(vm, type, value, start, len) \ + ((type == NGX_JS_STRING) ? 
njs_vm_value_string_set(vm, value, start, len) \ + : njs_vm_value_buffer_set(vm, value, start, len)) + + ngx_int_t ngx_js_call(njs_vm_t *vm, ngx_str_t *s, njs_opaque_value_t *value, ngx_log_t *log); diff -r 0684301385d9 -r 434f20c29f4c nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Wed Nov 25 10:47:25 2020 +0000 +++ b/nginx/ngx_stream_js_module.c Wed Nov 25 10:47:47 2020 +0000 @@ -39,6 +39,12 @@ typedef struct { typedef struct { + njs_vm_event_t ev; + ngx_uint_t data_type; +} ngx_stream_js_ev_t; + + +typedef struct { njs_vm_t *vm; ngx_log_t *log; njs_opaque_value_t args[3]; @@ -51,7 +57,7 @@ typedef struct { #define NGX_JS_EVENT_UPLOAD 0 #define NGX_JS_EVENT_DOWNLOAD 1 #define NGX_JS_EVENT_MAX 2 - njs_vm_event_t events[2]; + ngx_stream_js_ev_t events[2]; unsigned from_upstream:1; unsigned filter:1; unsigned in_progress:1; @@ -79,7 +85,7 @@ static void ngx_stream_js_drop_events(ng static void ngx_stream_js_cleanup_ctx(void *data); static void ngx_stream_js_cleanup_vm(void *data); static njs_int_t ngx_stream_js_buffer_arg(ngx_stream_session_t *s, - njs_value_t *buffer); + njs_value_t *buffer, ngx_uint_t data_type); static njs_int_t ngx_stream_js_flags_arg(ngx_stream_session_t *s, njs_value_t *flags); static njs_vm_event_t *ngx_stream_js_event(ngx_stream_session_t *s, @@ -233,6 +239,17 @@ static njs_external_t ngx_stream_js_ext .u.object = { .writable = 1, .prop_handler = ngx_stream_js_ext_variables, + .magic32 = NGX_JS_STRING, + } + }, + + { + .flags = NJS_EXTERN_OBJECT, + .name.string = njs_str("vars"), + .u.object = { + .writable = 1, + .prop_handler = ngx_stream_js_ext_variables, + .magic32 = NGX_JS_BUFFER, } }, @@ -412,6 +429,7 @@ ngx_stream_js_phase_handler(ngx_stream_s njs_int_t ret; ngx_int_t rc; ngx_connection_t *c; + ngx_stream_js_ev_t *event; ngx_stream_js_ctx_t *ctx; if (name->len == 0) { @@ -444,8 +462,11 @@ ngx_stream_js_phase_handler(ngx_stream_s } } - if (ctx->events[NGX_JS_EVENT_UPLOAD] != NULL) { - ret = ngx_stream_js_buffer_arg(s, 
njs_value_arg(&ctx->args[1])); + event = &ctx->events[NGX_JS_EVENT_UPLOAD]; + + if (event->ev != NULL) { + ret = ngx_stream_js_buffer_arg(s, njs_value_arg(&ctx->args[1]), + event->data_type); if (ret != NJS_OK) { goto exception; } @@ -455,8 +476,7 @@ ngx_stream_js_phase_handler(ngx_stream_s goto exception; } - njs_vm_post_event(ctx->vm, ctx->events[NGX_JS_EVENT_UPLOAD], - njs_value_arg(&ctx->args[1]), 2); + njs_vm_post_event(ctx->vm, event->ev, njs_value_arg(&ctx->args[1]), 2); rc = njs_vm_run(ctx->vm); if (rc == NJS_ERROR) { @@ -466,7 +486,7 @@ ngx_stream_js_phase_handler(ngx_stream_s if (njs_vm_pending(ctx->vm)) { ctx->in_progress = 1; - rc = ctx->events[NGX_JS_EVENT_UPLOAD] ? NGX_AGAIN : NGX_DONE; + rc = ctx->events[NGX_JS_EVENT_UPLOAD].ev ? NGX_AGAIN : NGX_DONE; } else { ctx->in_progress = 0; @@ -490,8 +510,8 @@ exception: #define ngx_stream_event(from_upstream) \ - (from_upstream ? ctx->events[NGX_JS_EVENT_DOWNLOAD] \ - : ctx->events[NGX_JS_EVENT_UPLOAD]) + (from_upstream ? &ctx->events[NGX_JS_EVENT_DOWNLOAD] \ + : &ctx->events[NGX_JS_EVENT_UPLOAD]) static ngx_int_t @@ -503,6 +523,7 @@ ngx_stream_js_body_filter(ngx_stream_ses ngx_int_t rc; ngx_chain_t *out, *cl; ngx_connection_t *c; + ngx_stream_js_ev_t *event; ngx_stream_js_ctx_t *ctx; ngx_stream_js_srv_conf_t *jscf; @@ -543,8 +564,11 @@ ngx_stream_js_body_filter(ngx_stream_ses while (in) { ctx->buf = in->buf; - if (ngx_stream_event(from_upstream) != NULL) { - ret = ngx_stream_js_buffer_arg(s, njs_value_arg(&ctx->args[1])); + event = ngx_stream_event(from_upstream); + + if (event->ev != NULL) { + ret = ngx_stream_js_buffer_arg(s, njs_value_arg(&ctx->args[1]), + event->data_type); if (ret != NJS_OK) { goto exception; } @@ -554,7 +578,7 @@ ngx_stream_js_body_filter(ngx_stream_ses goto exception; } - njs_vm_post_event(ctx->vm, ngx_stream_event(from_upstream), + njs_vm_post_event(ctx->vm, event->ev, njs_value_arg(&ctx->args[1]), 2); rc = njs_vm_run(ctx->vm); @@ -729,9 +753,9 @@ 
ngx_stream_js_drop_events(ngx_stream_js_ ngx_uint_t i; for (i = 0; i < NGX_JS_EVENT_MAX; i++) { - if (ctx->events[i] != NULL) { - njs_vm_del_event(ctx->vm, ctx->events[i]); - ctx->events[i] = NULL; + if (ctx->events[i].ev != NULL) { + njs_vm_del_event(ctx->vm, ctx->events[i].ev); + ctx->events[i].ev = NULL; } } } @@ -762,7 +786,8 @@ ngx_stream_js_cleanup_vm(void *data) static njs_int_t -ngx_stream_js_buffer_arg(ngx_stream_session_t *s, njs_value_t *buffer) +ngx_stream_js_buffer_arg(ngx_stream_session_t *s, njs_value_t *buffer, + ngx_uint_t data_type) { size_t len; u_char *p; @@ -777,8 +802,9 @@ ngx_stream_js_buffer_arg(ngx_stream_sess len = b ? b->last - b->pos : 0; - p = njs_vm_value_string_alloc(ctx->vm, buffer, len); + p = ngx_pnalloc(c->pool, len); if (p == NULL) { + njs_vm_memory_error(ctx->vm); return NJS_ERROR; } @@ -786,11 +812,10 @@ ngx_stream_js_buffer_arg(ngx_stream_sess ngx_memcpy(p, b->pos, len); } - return NJS_OK; + return ngx_js_prop(ctx->vm, data_type, buffer, p, len); } - static njs_int_t ngx_stream_js_flags_arg(ngx_stream_session_t *s, njs_value_t *flags) { @@ -821,12 +846,37 @@ ngx_stream_js_flags_arg(ngx_stream_sessi static njs_vm_event_t * ngx_stream_js_event(ngx_stream_session_t *s, njs_str_t *event) { - ngx_uint_t i, n; + ngx_uint_t i, n, type; ngx_stream_js_ctx_t *ctx; - static const njs_str_t events[] = { - njs_str("upload"), - njs_str("download") + static const struct { + ngx_str_t name; + ngx_uint_t data_type; + ngx_uint_t id; + } events[] = { + { + ngx_string("upload"), + NGX_JS_STRING, + NGX_JS_EVENT_UPLOAD, + }, + + { + ngx_string("download"), + NGX_JS_STRING, + NGX_JS_EVENT_DOWNLOAD, + }, + + { + ngx_string("upstream"), + NGX_JS_BUFFER, + NGX_JS_EVENT_UPLOAD, + }, + + { + ngx_string("downstream"), + NGX_JS_BUFFER, + NGX_JS_EVENT_DOWNLOAD, + }, }; ctx = ngx_stream_get_module_ctx(s, ngx_stream_js_module); @@ -835,8 +885,9 @@ ngx_stream_js_event(ngx_stream_session_t n = sizeof(events) / sizeof(events[0]); while (i < n) { - if 
(event->length == events[i].length - && ngx_memcmp(event->start, events[i].start, event->length) == 0) + if (event->length == events[i].name.len + && ngx_memcmp(event->start, events[i].name.data, event->length) + == 0) { break; } @@ -849,11 +900,18 @@ ngx_stream_js_event(ngx_stream_session_t return NULL; } - if (i == 0) { - return &ctx->events[NGX_JS_EVENT_UPLOAD]; + ctx->events[events[i].id].data_type = events[i].data_type; + + for (n = 0; n < NGX_JS_EVENT_MAX; n++) { + type = ctx->events[n].data_type; + if (type != NGX_JS_UNSET && type != events[i].data_type) { + njs_vm_error(ctx->vm, "mixing string and buffer events" + " is not allowed"); + return NULL; + } } - return &ctx->events[NGX_JS_EVENT_DOWNLOAD]; + return &ctx->events[events[i].id].ev; } @@ -1131,7 +1189,8 @@ ngx_stream_js_ext_variables(njs_vm_t *vm return NJS_DECLINED; } - return njs_vm_value_string_set(vm, retval, vv->data, vv->len); + return ngx_js_prop(vm, njs_vm_prop_magic32(prop), retval, vv->data, + vv->len); } cmcf = ngx_stream_get_module_main_conf(s, ngx_stream_core_module); diff -r 0684301385d9 -r 434f20c29f4c src/njs.h --- a/src/njs.h Wed Nov 25 10:47:25 2020 +0000 +++ b/src/njs.h Wed Nov 25 10:47:47 2020 +0000 @@ -144,6 +144,7 @@ struct njs_external_s { unsigned configurable; unsigned enumerable; njs_prop_handler_t prop_handler; + uint32_t magic32; njs_exotic_keys_t keys; } object; } u; @@ -389,6 +390,7 @@ NJS_EXPORT njs_int_t njs_value_is_string NJS_EXPORT njs_int_t njs_value_is_object(const njs_value_t *value); NJS_EXPORT njs_int_t njs_value_is_array(const njs_value_t *value); NJS_EXPORT njs_int_t njs_value_is_function(const njs_value_t *value); +NJS_EXPORT njs_int_t njs_value_is_buffer(const njs_value_t *value); NJS_EXPORT njs_int_t njs_vm_object_alloc(njs_vm_t *vm, njs_value_t *retval, ...); diff -r 0684301385d9 -r 434f20c29f4c src/njs_extern.c --- a/src/njs_extern.c Wed Nov 25 10:47:25 2020 +0000 +++ b/src/njs_extern.c Wed Nov 25 10:47:47 2020 +0000 @@ -132,6 +132,7 @@ 
njs_external_add(njs_vm_t *vm, njs_arr_t next->configurable = external->u.object.configurable; next->enumerable = external->u.object.enumerable; next->prop_handler = external->u.object.prop_handler; + next->magic32 = external->u.object.magic32; next->keys = external->u.object.keys; break; diff -r 0684301385d9 -r 434f20c29f4c src/njs_value.c --- a/src/njs_value.c Wed Nov 25 10:47:25 2020 +0000 +++ b/src/njs_value.c Wed Nov 25 10:47:47 2020 +0000 @@ -492,6 +492,13 @@ njs_value_is_function(const njs_value_t } +njs_int_t +njs_value_is_buffer(const njs_value_t *value) +{ + return njs_is_typed_array(value); +} + + /* * ES5.1, 8.12.1: [[GetOwnProperty]], [[GetProperty]]. * The njs_property_query() returns values @@ -932,6 +939,7 @@ njs_external_property_query(njs_vm_t *vm * njs_set_null(&prop->setter); */ + prop->value.data.magic32 = slots->magic32; prop->name = pq->key; pq->lhq.value = prop; diff -r 0684301385d9 -r 434f20c29f4c src/njs_value.h --- a/src/njs_value.h Wed Nov 25 10:47:25 2020 +0000 +++ b/src/njs_value.h Wed Nov 25 10:47:47 2020 +0000 @@ -189,6 +189,7 @@ union njs_value_s { typedef struct { /* Get, also Set if writable, also Delete if configurable. */ njs_prop_handler_t prop_handler; + uint32_t magic32; unsigned writable:1; unsigned configurable:1; unsigned enumerable:1; From thdbsdox12 at gmail.com Wed Nov 25 12:43:43 2020 From: thdbsdox12 at gmail.com (=?UTF-8?B?7ISx7IaM7Jyk?=) Date: Wed, 25 Nov 2020 21:43:43 +0900 Subject: [PATCH 8 of 8] new io_uring event module In-Reply-To: <CANrn0A9rfBSwvT=EsCNmkOb5kXeabgk=wJi8LRKfcCDU9VGy_A@mail.gmail.com> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> <d7420a5777b63c8a8cfb.1606207416@ssyserver> <CAE5sJtQ8QF6SJMJXxVawJGO4e=H2i2SVYzBMNsuDFOnPK5F_Ow@mail.gmail.com> <CANrn0A9rfBSwvT=EsCNmkOb5kXeabgk=wJi8LRKfcCDU9VGy_A@mail.gmail.com> Message-ID: <CAERYJrYUQR9wLhYpzW4pXkpKCkBWEwwNPpuA0-FLMH9P5WikxA@mail.gmail.com> Mathew Thank you for your feedback. I did not implement file management of Nginx with io_uring. 
So I think the performance improvement is mostly due to socket IO. I will try to implement asynchronous file interaction with io_uring later. Also I will test proxy_pass / fastcgi_pass benchmarks and get back to you soon. Thank you. SoYun On Tue, Nov 24, 2020 at 6:09 PM Mathew Heard <mat999 at gmail.com> wrote: > SoYun, > > Interesting patchset. Have you by chance also tested proxy_pass / > fastcgi_pass performance? > > I'd be interested to know if the significant performance improvement was > due to filesystem interaction or socket. > > Regards, > Mathew > > > On Tue, 24 Nov 2020 at 19:43, SoYun Seong <thdbsdox12 at gmail.com> wrote: > >> # HG changeset patch >> # User SoYun Seong <thdbsdox12 at gmail.com> >> # Date 1606129415 0 >> # Mon Nov 23 11:03:35 2020 +0000 >> # Node ID d7420a5777b63c8a8cfb7e98a522893490995510 >> # Parent 09dfe4a92414513c6bd3c18d871e8a76ed19c3d7 >> new io_uring event module. >> >> I implemented ngx_uring_module using Linux io_uring API to improve >> performance of Nginx for Linux by minimizing system calls. There are >> performance improvements in both request/sec and average latency. The >> result is located at https://github.com/dachshu/nginx. >> >> However, there are some places that uses local variable buffer(stack) to >> recv and send data. To do asynchronous IO, recv and send buffers should be >> located at safe memory(like heap, data). Therefore it is needed to make >> these codes to use allocated memory from memory pool when using >> asynchronous IO. >> >> Also I am working on improve performance of Nginx for Windows using >> Registered IO and IOCP. 
>> >> diff -r 09dfe4a92414 -r d7420a5777b6 auto/os/linux >> --- a/auto/os/linux Mon Nov 23 11:01:36 2020 +0000 >> +++ b/auto/os/linux Mon Nov 23 11:03:35 2020 +0000 >> @@ -89,6 +89,30 @@ >> fi >> >> >> +# io_uring >> + >> +ngx_feature="uring" >> +ngx_feature_name="NGX_HAVE_URING" >> +ngx_feature_run=yes >> +ngx_feature_incs="#include <liburing.h>" >> +ngx_feature_path="-I src/liburing/src/include/" >> +ngx_feature_libs="-L src/liburing/src/ -luring" >> +ngx_feature_test="struct io_uring ring; >> + struct io_uring_params params; >> + if (io_uring_queue_init_params(32768, &ring, ¶ms) >> < 0) return 1; >> + if (!(params.features & IORING_FEAT_FAST_POLL)) return >> 1;" >> +. auto/feature >> + >> +if [ $ngx_found = yes ]; then >> + have=NGX_HAVE_CLEAR_EVENT . auto/have >> + CORE_SRCS="$CORE_SRCS $URING_SRCS" >> + CORE_INCS="$CORE_INCS $ngx_feature_path" >> + CORE_LIBS="$CORE_LIBS $ngx_feature_libs" >> + EVENT_MODULES="$EVENT_MODULES $URING_MODULE" >> + EVENT_FOUND=YES >> +fi >> + >> + >> # O_PATH and AT_EMPTY_PATH were introduced in 2.6.39, glibc 2.14 >> >> ngx_feature="O_PATH" >> diff -r 09dfe4a92414 -r d7420a5777b6 auto/sources >> --- a/auto/sources Mon Nov 23 11:01:36 2020 +0000 >> +++ b/auto/sources Mon Nov 23 11:03:35 2020 +0000 >> @@ -120,6 +120,9 @@ >> EPOLL_MODULE=ngx_epoll_module >> EPOLL_SRCS=src/event/modules/ngx_epoll_module.c >> >> +URING_MODULE=ngx_uring_module >> +URING_SRCS=src/event/modules/ngx_uring_module.c >> + >> IOCP_MODULE=ngx_iocp_module >> IOCP_SRCS=src/event/modules/ngx_iocp_module.c >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201125/a951c29c/attachment.htm> From pavan45 at gmail.com Wed Nov 25 14:52:47 2020 From: pavan45 at gmail.com (Pavan P) Date: Wed, 25 Nov 2020 20:22:47 +0530 Subject: Help - Nginx Azure Auth Message-ID: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> Hi, I have configured nginx to authenticate with azure AD for login. When I access the site abc.example.com it redirects to Azure for authentication and redirects me back once the authentication is complete. How ever when I try to access the site with https abc.example.com it does not redirect for authentication. Is there anyway I can get both http and https to redirect for azure auth. Regards, Pavan -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201125/a3b06ddf/attachment.htm> From harishkumarivaturi at gmail.com Wed Nov 25 23:34:25 2020 From: harishkumarivaturi at gmail.com (HARISH KUMAR Ivaturi) Date: Thu, 26 Nov 2020 00:34:25 +0100 Subject: Help - Nginx Azure Auth In-Reply-To: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> Message-ID: <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> I am not sure if you have configured nginx with https_module. Once try that. And also add proper headers in the nginx.conf like Listen 443 ssl; Certificates location BR Harish Kumar On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: > Hi, > I have configured nginx to authenticate with azure AD for login. > > When I access the site abc.example.com it redirects to Azure for > authentication and redirects me back once the authentication is complete. > > How ever when I try to access the site with https abc.example.com it does > not redirect for authentication. 
> > Is there anyway I can get both http and https to redirect for azure auth. > > Regards, > Pavan > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/0ea46ffe/attachment.htm> From pavan45 at gmail.com Thu Nov 26 04:27:19 2020 From: pavan45 at gmail.com (Pavan P) Date: Thu, 26 Nov 2020 09:57:19 +0530 Subject: Help - Nginx Azure Auth In-Reply-To: <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> Message-ID: <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> Hi Harish, Below is the config of my nginx. Https module is configured fine. Please let me know if I have missed anything. server { server_name ci1.altlifelab.com; location / { proxy_set_header Host $host:$server_port; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; # Fix the "It appears that your reverse proxy set up is broken" error. 
proxy_pass http://127.0.0.1:9080; proxy_read_timeout 90; proxy_redirect http://127.0.0.1:9080 http://www.ci1.altlifelab.com; # Required for new HTTP-based CLI proxy_http_version 1.1; proxy_request_buffering off; # workaround for https://issues.jenkins-ci1.org/browse/JENKINS-45651 add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; } listen 443 ssl; # managed by Certbot ssl_certificate /etc/letsencrypt/live/ci1.altlifelab.com/fullchain.pem; # managed by Certbot ssl_certificate_key /etc/letsencrypt/live/ci1.altlifelab.com/privkey.pem; # managed by Certbot include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot } server { if ($host = ci1.altlifelab.com) { # return 301 https://$host$request_uri; return 301 https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd ; } # managed by Certbot listen 80; server_name ci1.altlifelab.com; return 301 https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd; } On Thu, Nov 26, 2020 at 5:04 AM HARISH KUMAR Ivaturi < harishkumarivaturi at gmail.com> wrote: > I am not sure if you have configured nginx with https_module. Once try > that. And also add proper headers in the nginx.conf like > > Listen 443 ssl; > Certificates location > > BR > Harish Kumar > > On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: > >> Hi, >> I have configured nginx to authenticate with azure AD for login. >> >> When I access the site abc.example.com it redirects to Azure for >> authentication and redirects me back once the authentication is complete. >> >> How ever when I try to access the site with https abc.example.com it >> does not redirect for authentication. >> >> Is there anyway I can get both http and https to redirect for azure auth. 
>> >> Regards, >> Pavan >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/3bdfdb1d/attachment-0001.htm> From harishkumarivaturi at gmail.com Thu Nov 26 05:13:16 2020 From: harishkumarivaturi at gmail.com (HARISH KUMAR Ivaturi) Date: Thu, 26 Nov 2020 06:13:16 +0100 Subject: Help - Nginx Azure Auth In-Reply-To: <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> Message-ID: <CAGmfrBwVF0LUCpxi_KDf2GuZMb-moqLH7pZRSmFzf_GGqgHrXg@mail.gmail.com> 1) once type nginx -V and send rhe output. 2) certificate - certificate.cert Certificate_key - certificate.key Once recheck the certs section and make sure that you have generated with certificates with openssl properly. BR Harish Kumar On Thu 26 Nov, 2020, 5:27 AM Pavan P, <pavan45 at gmail.com> wrote: > Hi Harish, > Below is the config of my nginx. Https module is configured fine. Please > let me know if I have missed anything. > > server { > server_name ci1.altlifelab.com; > > location / { > proxy_set_header Host $host:$server_port; > proxy_set_header X-Real-IP $remote_addr; > proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; > proxy_set_header X-Forwarded-Proto $scheme; > > > # Fix the "It appears that your reverse proxy set up is broken" > error. 
> proxy_pass http://127.0.0.1:9080; > proxy_read_timeout 90; > > proxy_redirect http://127.0.0.1:9080 > http://www.ci1.altlifelab.com; > > # Required for new HTTP-based CLI > proxy_http_version 1.1; > proxy_request_buffering off; > # workaround for https://issues.jenkins-ci1.org/browse/JENKINS-45651 > add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; > } > > listen 443 ssl; # managed by Certbot > ssl_certificate /etc/letsencrypt/live/ci1.altlifelab.com/fullchain.pem; > # managed by Certbot > ssl_certificate_key /etc/letsencrypt/live/ > ci1.altlifelab.com/privkey.pem; # managed by Certbot > include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot > ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot > > > } > > server { > if ($host = ci1.altlifelab.com) { > # return 301 https://$host$request_uri; > return 301 > https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd > ; > } # managed by Certbot > > > listen 80; > server_name ci1.altlifelab.com; > return 301 > https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd; > > } > > On Thu, Nov 26, 2020 at 5:04 AM HARISH KUMAR Ivaturi < > harishkumarivaturi at gmail.com> wrote: > >> I am not sure if you have configured nginx with https_module. Once try >> that. And also add proper headers in the nginx.conf like >> >> Listen 443 ssl; >> Certificates location >> >> BR >> Harish Kumar >> >> On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: >> >>> Hi, >>> I have configured nginx to authenticate with azure AD for login. >>> >>> When I access the site abc.example.com it redirects to Azure for >>> authentication and redirects me back once the authentication is complete. >>> >>> How ever when I try to access the site with https abc.example.com it >>> does not redirect for authentication. 
>>> >>> Is there anyway I can get both http and https to redirect for azure auth. >>> >>> Regards, >>> Pavan >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/f596e2ec/attachment.htm> From pavan45 at gmail.com Thu Nov 26 05:16:52 2020 From: pavan45 at gmail.com (Pavan P) Date: Thu, 26 Nov 2020 10:46:52 +0530 Subject: Help - Nginx Azure Auth In-Reply-To: <CAGmfrBwVF0LUCpxi_KDf2GuZMb-moqLH7pZRSmFzf_GGqgHrXg@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> <CAGmfrBwVF0LUCpxi_KDf2GuZMb-moqLH7pZRSmFzf_GGqgHrXg@mail.gmail.com> Message-ID: <CAHsjVgb96LonZFwDmki5j4N7PSYAM0NprwsvxRdgzMXWi=BVeA@mail.gmail.com> Yes Harish, Certificate is working fine. 
root at ip-172-31-33-18:~# nginx -V nginx version: nginx/1.10.3 (Ubuntu) built with OpenSSL 1.0.2g 1 Mar 2016 TLS SNI support enabled configure arguments: --with-cc-opt='-g -O2 -fPIE -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2' --with-ld-opt='-Wl,-Bsymbolic-functions -fPIE -pie -Wl,-z,relro -Wl,-z,now' --prefix=/usr/share/nginx --conf-path=/etc/nginx/nginx.conf --http-log-path=/var/log/nginx/access.log --error-log-path=/var/log/nginx/error.log --lock-path=/var/lock/nginx.lock --pid-path=/run/nginx.pid --http-client-body-temp-path=/var/lib/nginx/body --http-fastcgi-temp-path=/var/lib/nginx/fastcgi --http-proxy-temp-path=/var/lib/nginx/proxy --http-scgi-temp-path=/var/lib/nginx/scgi --http-uwsgi-temp-path=/var/lib/nginx/uwsgi --with-debug --with-pcre-jit --with-ipv6 --with-http_ssl_module --with-http_stub_status_module --with-http_realip_module --with-http_auth_request_module --with-http_addition_module --with-http_dav_module --with-http_geoip_module --with-http_gunzip_module --with-http_gzip_static_module --with-http_image_filter_module --with-http_v2_module --with-http_sub_module --with-http_xslt_module --with-stream --with-stream_ssl_module --with-mail --with-mail_ssl_module --with-threads (base) root at ip-172-31-33-18:~# On Thu, Nov 26, 2020 at 10:43 AM HARISH KUMAR Ivaturi < harishkumarivaturi at gmail.com> wrote: > 1) once type nginx -V and send rhe output. > > 2) certificate - certificate.cert > Certificate_key - certificate.key > > Once recheck the certs section and make sure that you have generated with > certificates with openssl properly. > > BR > Harish Kumar > > On Thu 26 Nov, 2020, 5:27 AM Pavan P, <pavan45 at gmail.com> wrote: > >> Hi Harish, >> Below is the config of my nginx. Https module is configured fine. Please >> let me know if I have missed anything. 
>> >> server { >> server_name ci1.altlifelab.com; >> >> location / { >> proxy_set_header Host $host:$server_port; >> proxy_set_header X-Real-IP $remote_addr; >> proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; >> proxy_set_header X-Forwarded-Proto $scheme; >> >> >> # Fix the "It appears that your reverse proxy set up is broken" >> error. >> proxy_pass http://127.0.0.1:9080; >> proxy_read_timeout 90; >> >> proxy_redirect http://127.0.0.1:9080 >> http://www.ci1.altlifelab.com; >> >> # Required for new HTTP-based CLI >> proxy_http_version 1.1; >> proxy_request_buffering off; >> # workaround for >> https://issues.jenkins-ci1.org/browse/JENKINS-45651 >> add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; >> } >> >> listen 443 ssl; # managed by Certbot >> ssl_certificate /etc/letsencrypt/live/ >> ci1.altlifelab.com/fullchain.pem; # managed by Certbot >> ssl_certificate_key /etc/letsencrypt/live/ >> ci1.altlifelab.com/privkey.pem; # managed by Certbot >> include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot >> ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot >> >> >> } >> >> server { >> if ($host = ci1.altlifelab.com) { >> # return 301 https://$host$request_uri; >> return 301 >> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd >> ; >> } # managed by Certbot >> >> >> listen 80; >> server_name ci1.altlifelab.com; >> return 301 >> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd; >> >> } >> >> On Thu, Nov 26, 2020 at 5:04 AM HARISH KUMAR Ivaturi < >> harishkumarivaturi at gmail.com> wrote: >> >>> I am not sure if you have configured nginx with https_module. Once try >>> that. 
And also add proper headers in the nginx.conf like >>> >>> Listen 443 ssl; >>> Certificates location >>> >>> BR >>> Harish Kumar >>> >>> On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: >>> >>>> Hi, >>>> I have configured nginx to authenticate with azure AD for login. >>>> >>>> When I access the site abc.example.com it redirects to Azure for >>>> authentication and redirects me back once the authentication is complete. >>>> >>>> How ever when I try to access the site with https abc.example.com it >>>> does not redirect for authentication. >>>> >>>> Is there anyway I can get both http and https to redirect for azure >>>> auth. >>>> >>>> Regards, >>>> Pavan >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... 
URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/93f31791/attachment-0001.htm> From harishkumarivaturi at gmail.com Thu Nov 26 05:42:20 2020 From: harishkumarivaturi at gmail.com (HARISH KUMAR Ivaturi) Date: Thu, 26 Nov 2020 06:42:20 +0100 Subject: Help - Nginx Azure Auth In-Reply-To: <CAHsjVgb96LonZFwDmki5j4N7PSYAM0NprwsvxRdgzMXWi=BVeA@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> <CAGmfrBwVF0LUCpxi_KDf2GuZMb-moqLH7pZRSmFzf_GGqgHrXg@mail.gmail.com> <CAHsjVgb96LonZFwDmki5j4N7PSYAM0NprwsvxRdgzMXWi=BVeA@mail.gmail.com> Message-ID: <CAGmfrByTGUXRzTPYN0=Emq=hVZta+nQCQ8-Evy1_4Bu+H-ZBsA@mail.gmail.com> Once try this. https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-subrequest-authentication/ And configure again with auth proxy module On Thu 26 Nov, 2020, 6:17 AM Pavan P, <pavan45 at gmail.com> wrote: > Yes Harish, Certificate is working fine. 
> > root at ip-172-31-33-18:~# nginx -V > nginx version: nginx/1.10.3 (Ubuntu) > built with OpenSSL 1.0.2g 1 Mar 2016 > TLS SNI support enabled > configure arguments: --with-cc-opt='-g -O2 -fPIE -fstack-protector-strong > -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2' > --with-ld-opt='-Wl,-Bsymbolic-functions -fPIE -pie -Wl,-z,relro -Wl,-z,now' > --prefix=/usr/share/nginx --conf-path=/etc/nginx/nginx.conf > --http-log-path=/var/log/nginx/access.log > --error-log-path=/var/log/nginx/error.log --lock-path=/var/lock/nginx.lock > --pid-path=/run/nginx.pid --http-client-body-temp-path=/var/lib/nginx/body > --http-fastcgi-temp-path=/var/lib/nginx/fastcgi > --http-proxy-temp-path=/var/lib/nginx/proxy > --http-scgi-temp-path=/var/lib/nginx/scgi > --http-uwsgi-temp-path=/var/lib/nginx/uwsgi --with-debug --with-pcre-jit > --with-ipv6 --with-http_ssl_module --with-http_stub_status_module > --with-http_realip_module --with-http_auth_request_module > --with-http_addition_module --with-http_dav_module --with-http_geoip_module > --with-http_gunzip_module --with-http_gzip_static_module > --with-http_image_filter_module --with-http_v2_module > --with-http_sub_module --with-http_xslt_module --with-stream > --with-stream_ssl_module --with-mail --with-mail_ssl_module --with-threads > (base) root at ip-172-31-33-18:~# > > On Thu, Nov 26, 2020 at 10:43 AM HARISH KUMAR Ivaturi < > harishkumarivaturi at gmail.com> wrote: > >> 1) once type nginx -V and send rhe output. >> >> 2) certificate - certificate.cert >> Certificate_key - certificate.key >> >> Once recheck the certs section and make sure that you have generated with >> certificates with openssl properly. >> >> BR >> Harish Kumar >> >> On Thu 26 Nov, 2020, 5:27 AM Pavan P, <pavan45 at gmail.com> wrote: >> >>> Hi Harish, >>> Below is the config of my nginx. Https module is configured fine. Please >>> let me know if I have missed anything. 
>>> >>> server { >>> server_name ci1.altlifelab.com; >>> >>> location / { >>> proxy_set_header Host $host:$server_port; >>> proxy_set_header X-Real-IP $remote_addr; >>> proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; >>> proxy_set_header X-Forwarded-Proto $scheme; >>> >>> >>> # Fix the "It appears that your reverse proxy set up is broken" >>> error. >>> proxy_pass http://127.0.0.1:9080; >>> proxy_read_timeout 90; >>> >>> proxy_redirect http://127.0.0.1:9080 >>> http://www.ci1.altlifelab.com; >>> >>> # Required for new HTTP-based CLI >>> proxy_http_version 1.1; >>> proxy_request_buffering off; >>> # workaround for >>> https://issues.jenkins-ci1.org/browse/JENKINS-45651 >>> add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; >>> } >>> >>> listen 443 ssl; # managed by Certbot >>> ssl_certificate /etc/letsencrypt/live/ >>> ci1.altlifelab.com/fullchain.pem; # managed by Certbot >>> ssl_certificate_key /etc/letsencrypt/live/ >>> ci1.altlifelab.com/privkey.pem; # managed by Certbot >>> include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot >>> ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot >>> >>> >>> } >>> >>> server { >>> if ($host = ci1.altlifelab.com) { >>> # return 301 https://$host$request_uri; >>> return 301 >>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd >>> ; >>> } # managed by Certbot >>> >>> >>> listen 80; >>> server_name ci1.altlifelab.com; >>> return 301 >>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd; >>> >>> } >>> >>> On Thu, Nov 26, 2020 at 5:04 AM HARISH KUMAR Ivaturi < >>> harishkumarivaturi at gmail.com> wrote: >>> >>>> I am not sure if you have configured nginx with https_module. Once try >>>> that. 
And also add proper headers in the nginx.conf like >>>> >>>> Listen 443 ssl; >>>> Certificates location >>>> >>>> BR >>>> Harish Kumar >>>> >>>> On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: >>>> >>>>> Hi, >>>>> I have configured nginx to authenticate with azure AD for login. >>>>> >>>>> When I access the site abc.example.com it redirects to Azure for >>>>> authentication and redirects me back once the authentication is complete. >>>>> >>>>> How ever when I try to access the site with https abc.example.com it >>>>> does not redirect for authentication. >>>>> >>>>> Is there anyway I can get both http and https to redirect for azure >>>>> auth. >>>>> >>>>> Regards, >>>>> Pavan >>>>> >>>>> _______________________________________________ >>>>> nginx-devel mailing list >>>>> nginx-devel at nginx.org >>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... 
URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/57b6ddee/attachment.htm> From harishkumarivaturi at gmail.com Thu Nov 26 05:49:05 2020 From: harishkumarivaturi at gmail.com (HARISH KUMAR Ivaturi) Date: Thu, 26 Nov 2020 06:49:05 +0100 Subject: Help - Nginx Azure Auth In-Reply-To: <CAGmfrByTGUXRzTPYN0=Emq=hVZta+nQCQ8-Evy1_4Bu+H-ZBsA@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> <CAGmfrBwVF0LUCpxi_KDf2GuZMb-moqLH7pZRSmFzf_GGqgHrXg@mail.gmail.com> <CAHsjVgb96LonZFwDmki5j4N7PSYAM0NprwsvxRdgzMXWi=BVeA@mail.gmail.com> <CAGmfrByTGUXRzTPYN0=Emq=hVZta+nQCQ8-Evy1_4Bu+H-ZBsA@mail.gmail.com> Message-ID: <CAGmfrBzM+gi7QcFv3XuCMBLDw7aguc3tC1WET=P7qcZGC5Frpg@mail.gmail.com> Tlsv1.2 On Thu 26 Nov, 2020, 6:42 AM HARISH KUMAR Ivaturi, < harishkumarivaturi at gmail.com> wrote: > Once try this. > > > https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-subrequest-authentication/ > > And configure again with auth proxy module > > On Thu 26 Nov, 2020, 6:17 AM Pavan P, <pavan45 at gmail.com> wrote: > >> Yes Harish, Certificate is working fine. 
>> >> root at ip-172-31-33-18:~# nginx -V >> nginx version: nginx/1.10.3 (Ubuntu) >> built with OpenSSL 1.0.2g 1 Mar 2016 >> TLS SNI support enabled >> configure arguments: --with-cc-opt='-g -O2 -fPIE -fstack-protector-strong >> -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2' >> --with-ld-opt='-Wl,-Bsymbolic-functions -fPIE -pie -Wl,-z,relro -Wl,-z,now' >> --prefix=/usr/share/nginx --conf-path=/etc/nginx/nginx.conf >> --http-log-path=/var/log/nginx/access.log >> --error-log-path=/var/log/nginx/error.log --lock-path=/var/lock/nginx.lock >> --pid-path=/run/nginx.pid --http-client-body-temp-path=/var/lib/nginx/body >> --http-fastcgi-temp-path=/var/lib/nginx/fastcgi >> --http-proxy-temp-path=/var/lib/nginx/proxy >> --http-scgi-temp-path=/var/lib/nginx/scgi >> --http-uwsgi-temp-path=/var/lib/nginx/uwsgi --with-debug --with-pcre-jit >> --with-ipv6 --with-http_ssl_module --with-http_stub_status_module >> --with-http_realip_module --with-http_auth_request_module >> --with-http_addition_module --with-http_dav_module --with-http_geoip_module >> --with-http_gunzip_module --with-http_gzip_static_module >> --with-http_image_filter_module --with-http_v2_module >> --with-http_sub_module --with-http_xslt_module --with-stream >> --with-stream_ssl_module --with-mail --with-mail_ssl_module --with-threads >> (base) root at ip-172-31-33-18:~# >> >> On Thu, Nov 26, 2020 at 10:43 AM HARISH KUMAR Ivaturi < >> harishkumarivaturi at gmail.com> wrote: >> >>> 1) once type nginx -V and send rhe output. >>> >>> 2) certificate - certificate.cert >>> Certificate_key - certificate.key >>> >>> Once recheck the certs section and make sure that you have generated >>> with certificates with openssl properly. >>> >>> BR >>> Harish Kumar >>> >>> On Thu 26 Nov, 2020, 5:27 AM Pavan P, <pavan45 at gmail.com> wrote: >>> >>>> Hi Harish, >>>> Below is the config of my nginx. Https module is configured fine. >>>> Please let me know if I have missed anything. 
>>>> >>>> server { >>>> server_name ci1.altlifelab.com; >>>> >>>> location / { >>>> proxy_set_header Host $host:$server_port; >>>> proxy_set_header X-Real-IP $remote_addr; >>>> proxy_set_header X-Forwarded-For >>>> $proxy_add_x_forwarded_for; >>>> proxy_set_header X-Forwarded-Proto $scheme; >>>> >>>> >>>> # Fix the "It appears that your reverse proxy set up is broken" >>>> error. >>>> proxy_pass http://127.0.0.1:9080; >>>> proxy_read_timeout 90; >>>> >>>> proxy_redirect http://127.0.0.1:9080 >>>> http://www.ci1.altlifelab.com; >>>> >>>> # Required for new HTTP-based CLI >>>> proxy_http_version 1.1; >>>> proxy_request_buffering off; >>>> # workaround for >>>> https://issues.jenkins-ci1.org/browse/JENKINS-45651 >>>> add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; >>>> } >>>> >>>> listen 443 ssl; # managed by Certbot >>>> ssl_certificate /etc/letsencrypt/live/ >>>> ci1.altlifelab.com/fullchain.pem; # managed by Certbot >>>> ssl_certificate_key /etc/letsencrypt/live/ >>>> ci1.altlifelab.com/privkey.pem; # managed by Certbot >>>> include /etc/letsencrypt/options-ssl-nginx.conf; # managed by >>>> Certbot >>>> ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot >>>> >>>> >>>> } >>>> >>>> server { >>>> if ($host = ci1.altlifelab.com) { >>>> # return 301 https://$host$request_uri; >>>> return 301 >>>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd >>>> ; >>>> } # managed by Certbot >>>> >>>> >>>> listen 80; >>>> server_name ci1.altlifelab.com; >>>> return 301 >>>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd; >>>> >>>> } >>>> >>>> On Thu, Nov 26, 2020 at 5:04 AM HARISH KUMAR Ivaturi < >>>> harishkumarivaturi at gmail.com> wrote: >>>> >>>>> I am not sure if you have configured nginx with https_module. Once try >>>>> that. 
And also add proper headers in the nginx.conf like >>>>> >>>>> Listen 443 ssl; >>>>> Certificates location >>>>> >>>>> BR >>>>> Harish Kumar >>>>> >>>>> On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: >>>>> >>>>>> Hi, >>>>>> I have configured nginx to authenticate with azure AD for login. >>>>>> >>>>>> When I access the site abc.example.com it redirects to Azure for >>>>>> authentication and redirects me back once the authentication is complete. >>>>>> >>>>>> How ever when I try to access the site with https abc.example.com it >>>>>> does not redirect for authentication. >>>>>> >>>>>> Is there anyway I can get both http and https to redirect for azure >>>>>> auth. >>>>>> >>>>>> Regards, >>>>>> Pavan >>>>>> >>>>>> _______________________________________________ >>>>>> nginx-devel mailing list >>>>>> nginx-devel at nginx.org >>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>> >>>>> _______________________________________________ >>>>> nginx-devel mailing list >>>>> nginx-devel at nginx.org >>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/b629b41a/attachment-0001.htm> -------------- next part -------------- A non-text attachment was scrubbed... 
Name: IMG_20201126_064826.jpg Type: image/jpeg Size: 66314 bytes Desc: not available URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/b629b41a/attachment-0001.jpg> From pavan45 at gmail.com Thu Nov 26 05:54:51 2020 From: pavan45 at gmail.com (Pavan P) Date: Thu, 26 Nov 2020 11:24:51 +0530 Subject: Help - Nginx Azure Auth In-Reply-To: <CAGmfrByTGUXRzTPYN0=Emq=hVZta+nQCQ8-Evy1_4Bu+H-ZBsA@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> <CAGmfrBwVF0LUCpxi_KDf2GuZMb-moqLH7pZRSmFzf_GGqgHrXg@mail.gmail.com> <CAHsjVgb96LonZFwDmki5j4N7PSYAM0NprwsvxRdgzMXWi=BVeA@mail.gmail.com> <CAGmfrByTGUXRzTPYN0=Emq=hVZta+nQCQ8-Evy1_4Bu+H-ZBsA@mail.gmail.com> Message-ID: <CAHsjVgbWuEfmqkRnYDeQtaJvUSM+0FV5GomkuOwo0yuOvw3ONQ@mail.gmail.com> HI Harish, But the issue I'm facing is different, when I try http://ci1.altlifelab.com it works fine, when I use https://ci1.altlifelab.com the url does not redirect to auth. On Thu, Nov 26, 2020 at 11:12 AM HARISH KUMAR Ivaturi < harishkumarivaturi at gmail.com> wrote: > Once try this. > > > https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-subrequest-authentication/ > > And configure again with auth proxy module > > On Thu 26 Nov, 2020, 6:17 AM Pavan P, <pavan45 at gmail.com> wrote: > >> Yes Harish, Certificate is working fine. 
>> >> root at ip-172-31-33-18:~# nginx -V >> nginx version: nginx/1.10.3 (Ubuntu) >> built with OpenSSL 1.0.2g 1 Mar 2016 >> TLS SNI support enabled >> configure arguments: --with-cc-opt='-g -O2 -fPIE -fstack-protector-strong >> -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2' >> --with-ld-opt='-Wl,-Bsymbolic-functions -fPIE -pie -Wl,-z,relro -Wl,-z,now' >> --prefix=/usr/share/nginx --conf-path=/etc/nginx/nginx.conf >> --http-log-path=/var/log/nginx/access.log >> --error-log-path=/var/log/nginx/error.log --lock-path=/var/lock/nginx.lock >> --pid-path=/run/nginx.pid --http-client-body-temp-path=/var/lib/nginx/body >> --http-fastcgi-temp-path=/var/lib/nginx/fastcgi >> --http-proxy-temp-path=/var/lib/nginx/proxy >> --http-scgi-temp-path=/var/lib/nginx/scgi >> --http-uwsgi-temp-path=/var/lib/nginx/uwsgi --with-debug --with-pcre-jit >> --with-ipv6 --with-http_ssl_module --with-http_stub_status_module >> --with-http_realip_module --with-http_auth_request_module >> --with-http_addition_module --with-http_dav_module --with-http_geoip_module >> --with-http_gunzip_module --with-http_gzip_static_module >> --with-http_image_filter_module --with-http_v2_module >> --with-http_sub_module --with-http_xslt_module --with-stream >> --with-stream_ssl_module --with-mail --with-mail_ssl_module --with-threads >> (base) root at ip-172-31-33-18:~# >> >> On Thu, Nov 26, 2020 at 10:43 AM HARISH KUMAR Ivaturi < >> harishkumarivaturi at gmail.com> wrote: >> >>> 1) once type nginx -V and send rhe output. >>> >>> 2) certificate - certificate.cert >>> Certificate_key - certificate.key >>> >>> Once recheck the certs section and make sure that you have generated >>> with certificates with openssl properly. >>> >>> BR >>> Harish Kumar >>> >>> On Thu 26 Nov, 2020, 5:27 AM Pavan P, <pavan45 at gmail.com> wrote: >>> >>>> Hi Harish, >>>> Below is the config of my nginx. Https module is configured fine. >>>> Please let me know if I have missed anything. 
>>>> >>>> server { >>>> server_name ci1.altlifelab.com; >>>> >>>> location / { >>>> proxy_set_header Host $host:$server_port; >>>> proxy_set_header X-Real-IP $remote_addr; >>>> proxy_set_header X-Forwarded-For >>>> $proxy_add_x_forwarded_for; >>>> proxy_set_header X-Forwarded-Proto $scheme; >>>> >>>> >>>> # Fix the "It appears that your reverse proxy set up is broken" >>>> error. >>>> proxy_pass http://127.0.0.1:9080; >>>> proxy_read_timeout 90; >>>> >>>> proxy_redirect http://127.0.0.1:9080 >>>> http://www.ci1.altlifelab.com; >>>> >>>> # Required for new HTTP-based CLI >>>> proxy_http_version 1.1; >>>> proxy_request_buffering off; >>>> # workaround for >>>> https://issues.jenkins-ci1.org/browse/JENKINS-45651 >>>> add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; >>>> } >>>> >>>> listen 443 ssl; # managed by Certbot >>>> ssl_certificate /etc/letsencrypt/live/ >>>> ci1.altlifelab.com/fullchain.pem; # managed by Certbot >>>> ssl_certificate_key /etc/letsencrypt/live/ >>>> ci1.altlifelab.com/privkey.pem; # managed by Certbot >>>> include /etc/letsencrypt/options-ssl-nginx.conf; # managed by >>>> Certbot >>>> ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot >>>> >>>> >>>> } >>>> >>>> server { >>>> if ($host = ci1.altlifelab.com) { >>>> # return 301 https://$host$request_uri; >>>> return 301 >>>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd >>>> ; >>>> } # managed by Certbot >>>> >>>> >>>> listen 80; >>>> server_name ci1.altlifelab.com; >>>> return 301 >>>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd; >>>> >>>> } >>>> >>>> On Thu, Nov 26, 2020 at 5:04 AM HARISH KUMAR Ivaturi < >>>> harishkumarivaturi at gmail.com> wrote: >>>> >>>>> I am not sure if you have configured nginx with https_module. Once try >>>>> that. 
And also add proper headers in the nginx.conf like >>>>> >>>>> Listen 443 ssl; >>>>> Certificates location >>>>> >>>>> BR >>>>> Harish Kumar >>>>> >>>>> On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: >>>>> >>>>>> Hi, >>>>>> I have configured nginx to authenticate with azure AD for login. >>>>>> >>>>>> When I access the site abc.example.com it redirects to Azure for >>>>>> authentication and redirects me back once the authentication is complete. >>>>>> >>>>>> How ever when I try to access the site with https abc.example.com it >>>>>> does not redirect for authentication. >>>>>> >>>>>> Is there anyway I can get both http and https to redirect for azure >>>>>> auth. >>>>>> >>>>>> Regards, >>>>>> Pavan >>>>>> >>>>>> _______________________________________________ >>>>>> nginx-devel mailing list >>>>>> nginx-devel at nginx.org >>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>> >>>>> _______________________________________________ >>>>> nginx-devel mailing list >>>>> nginx-devel at nginx.org >>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... 
URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/8551f666/attachment.htm> From pavan45 at gmail.com Thu Nov 26 06:08:48 2020 From: pavan45 at gmail.com (Pavan P) Date: Thu, 26 Nov 2020 11:38:48 +0530 Subject: Help - Nginx Azure Auth In-Reply-To: <CAHsjVgbWuEfmqkRnYDeQtaJvUSM+0FV5GomkuOwo0yuOvw3ONQ@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> <CAGmfrBwVF0LUCpxi_KDf2GuZMb-moqLH7pZRSmFzf_GGqgHrXg@mail.gmail.com> <CAHsjVgb96LonZFwDmki5j4N7PSYAM0NprwsvxRdgzMXWi=BVeA@mail.gmail.com> <CAGmfrByTGUXRzTPYN0=Emq=hVZta+nQCQ8-Evy1_4Bu+H-ZBsA@mail.gmail.com> <CAHsjVgbWuEfmqkRnYDeQtaJvUSM+0FV5GomkuOwo0yuOvw3ONQ@mail.gmail.com> Message-ID: <CAHsjVgYagjDmv4mdu6z2xxPbo0raawaz_gqpMcHapkHQSmNz+Q@mail.gmail.com> Still the same problem, enabled ssl_protocols TLSv1.3; Is there any issue with my configuration? With the below configuration, http://ci1.altlifelab.com redirects to the authentication page, but https does not, it will directly go to the application without authentication. server { server_name ci1.altlifelab.com; location / { proxy_set_header Host $host:$server_port; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; # Fix the "It appears that your reverse proxy set up is broken" error. 
proxy_pass http://127.0.0.1:9080; proxy_read_timeout 90; proxy_redirect http://127.0.0.1:9080 http://www.ci1.altlifelab.com; # Required for new HTTP-based CLI proxy_http_version 1.1; proxy_request_buffering off; # workaround for https://issues.jenkins-ci1.org/browse/JENKINS-45651 add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; } listen 443 ssl; # managed by Certbot ssl_certificate /etc/letsencrypt/live/ci1.altlifelab.com/fullchain.pem; # managed by Certbot ssl_certificate_key /etc/letsencrypt/live/ci1.altlifelab.com/privkey.pem; # managed by Certbot include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot ssl_protocols TLSv1.3; } server { if ($host = ci1.altlifelab.com) { # return 301 https://$host$request_uri; return 301 https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd ; } # managed by Certbot listen 80; server_name ci1.altlifelab.com; return 404; # managed by Certbot } On Thu, Nov 26, 2020 at 11:24 AM Pavan P <pavan45 at gmail.com> wrote: > HI Harish, > But the issue I'm facing is different, when I try > http://ci1.altlifelab.com it works fine, when I use > https://ci1.altlifelab.com the url does not redirect to auth. > > On Thu, Nov 26, 2020 at 11:12 AM HARISH KUMAR Ivaturi < > harishkumarivaturi at gmail.com> wrote: > >> Once try this. >> >> >> https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-subrequest-authentication/ >> >> And configure again with auth proxy module >> >> On Thu 26 Nov, 2020, 6:17 AM Pavan P, <pavan45 at gmail.com> wrote: >> >>> Yes Harish, Certificate is working fine. 
>>> >>> root at ip-172-31-33-18:~# nginx -V >>> nginx version: nginx/1.10.3 (Ubuntu) >>> built with OpenSSL 1.0.2g 1 Mar 2016 >>> TLS SNI support enabled >>> configure arguments: --with-cc-opt='-g -O2 -fPIE >>> -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time >>> -D_FORTIFY_SOURCE=2' --with-ld-opt='-Wl,-Bsymbolic-functions -fPIE -pie >>> -Wl,-z,relro -Wl,-z,now' --prefix=/usr/share/nginx >>> --conf-path=/etc/nginx/nginx.conf --http-log-path=/var/log/nginx/access.log >>> --error-log-path=/var/log/nginx/error.log --lock-path=/var/lock/nginx.lock >>> --pid-path=/run/nginx.pid --http-client-body-temp-path=/var/lib/nginx/body >>> --http-fastcgi-temp-path=/var/lib/nginx/fastcgi >>> --http-proxy-temp-path=/var/lib/nginx/proxy >>> --http-scgi-temp-path=/var/lib/nginx/scgi >>> --http-uwsgi-temp-path=/var/lib/nginx/uwsgi --with-debug --with-pcre-jit >>> --with-ipv6 --with-http_ssl_module --with-http_stub_status_module >>> --with-http_realip_module --with-http_auth_request_module >>> --with-http_addition_module --with-http_dav_module --with-http_geoip_module >>> --with-http_gunzip_module --with-http_gzip_static_module >>> --with-http_image_filter_module --with-http_v2_module >>> --with-http_sub_module --with-http_xslt_module --with-stream >>> --with-stream_ssl_module --with-mail --with-mail_ssl_module --with-threads >>> (base) root at ip-172-31-33-18:~# >>> >>> On Thu, Nov 26, 2020 at 10:43 AM HARISH KUMAR Ivaturi < >>> harishkumarivaturi at gmail.com> wrote: >>> >>>> 1) once type nginx -V and send rhe output. >>>> >>>> 2) certificate - certificate.cert >>>> Certificate_key - certificate.key >>>> >>>> Once recheck the certs section and make sure that you have generated >>>> with certificates with openssl properly. >>>> >>>> BR >>>> Harish Kumar >>>> >>>> On Thu 26 Nov, 2020, 5:27 AM Pavan P, <pavan45 at gmail.com> wrote: >>>> >>>>> Hi Harish, >>>>> Below is the config of my nginx. Https module is configured fine. 
>>>>> Please let me know if I have missed anything. >>>>> >>>>> server { >>>>> server_name ci1.altlifelab.com; >>>>> >>>>> location / { >>>>> proxy_set_header Host $host:$server_port; >>>>> proxy_set_header X-Real-IP $remote_addr; >>>>> proxy_set_header X-Forwarded-For >>>>> $proxy_add_x_forwarded_for; >>>>> proxy_set_header X-Forwarded-Proto $scheme; >>>>> >>>>> >>>>> # Fix the "It appears that your reverse proxy set up is broken" >>>>> error. >>>>> proxy_pass http://127.0.0.1:9080; >>>>> proxy_read_timeout 90; >>>>> >>>>> proxy_redirect http://127.0.0.1:9080 >>>>> http://www.ci1.altlifelab.com; >>>>> >>>>> # Required for new HTTP-based CLI >>>>> proxy_http_version 1.1; >>>>> proxy_request_buffering off; >>>>> # workaround for >>>>> https://issues.jenkins-ci1.org/browse/JENKINS-45651 >>>>> add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; >>>>> } >>>>> >>>>> listen 443 ssl; # managed by Certbot >>>>> ssl_certificate /etc/letsencrypt/live/ >>>>> ci1.altlifelab.com/fullchain.pem; # managed by Certbot >>>>> ssl_certificate_key /etc/letsencrypt/live/ >>>>> ci1.altlifelab.com/privkey.pem; # managed by Certbot >>>>> include /etc/letsencrypt/options-ssl-nginx.conf; # managed by >>>>> Certbot >>>>> ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot >>>>> >>>>> >>>>> } >>>>> >>>>> server { >>>>> if ($host = ci1.altlifelab.com) { >>>>> # return 301 https://$host$request_uri; >>>>> return 301 >>>>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd >>>>> ; >>>>> } # managed by Certbot >>>>> >>>>> >>>>> listen 80; >>>>> server_name ci1.altlifelab.com; >>>>> return 301 >>>>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd; >>>>> >>>>> } >>>>> >>>>> On Thu, Nov 26, 2020 at 5:04 AM HARISH KUMAR Ivaturi < >>>>> harishkumarivaturi at gmail.com> wrote: >>>>> >>>>>> I am not sure if you have configured nginx 
with https_module. Once >>>>>> try that. And also add proper headers in the nginx.conf like >>>>>> >>>>>> Listen 443 ssl; >>>>>> Certificates location >>>>>> >>>>>> BR >>>>>> Harish Kumar >>>>>> >>>>>> On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: >>>>>> >>>>>>> Hi, >>>>>>> I have configured nginx to authenticate with azure AD for login. >>>>>>> >>>>>>> When I access the site abc.example.com it redirects to Azure for >>>>>>> authentication and redirects me back once the authentication is complete. >>>>>>> >>>>>>> How ever when I try to access the site with https abc.example.com >>>>>>> it does not redirect for authentication. >>>>>>> >>>>>>> Is there anyway I can get both http and https to redirect for azure >>>>>>> auth. >>>>>>> >>>>>>> Regards, >>>>>>> Pavan >>>>>>> >>>>>>> _______________________________________________ >>>>>>> nginx-devel mailing list >>>>>>> nginx-devel at nginx.org >>>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>>> >>>>>> _______________________________________________ >>>>>> nginx-devel mailing list >>>>>> nginx-devel at nginx.org >>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>> >>>>> _______________________________________________ >>>>> nginx-devel mailing list >>>>> nginx-devel at nginx.org >>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/759eb0d1/attachment-0001.htm> From harishkumarivaturi at gmail.com Thu Nov 26 06:17:05 2020 From: harishkumarivaturi at gmail.com (HARISH KUMAR Ivaturi) Date: Thu, 26 Nov 2020 07:17:05 +0100 Subject: Help - Nginx Azure Auth In-Reply-To: <CAHsjVgYagjDmv4mdu6z2xxPbo0raawaz_gqpMcHapkHQSmNz+Q@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> <CAGmfrBwVF0LUCpxi_KDf2GuZMb-moqLH7pZRSmFzf_GGqgHrXg@mail.gmail.com> <CAHsjVgb96LonZFwDmki5j4N7PSYAM0NprwsvxRdgzMXWi=BVeA@mail.gmail.com> <CAGmfrByTGUXRzTPYN0=Emq=hVZta+nQCQ8-Evy1_4Bu+H-ZBsA@mail.gmail.com> <CAHsjVgbWuEfmqkRnYDeQtaJvUSM+0FV5GomkuOwo0yuOvw3ONQ@mail.gmail.com> <CAHsjVgYagjDmv4mdu6z2xxPbo0raawaz_gqpMcHapkHQSmNz+Q@mail.gmail.com> Message-ID: <CAGmfrBwod=5B1o59XZnUmCNTWk45THVJRqg0WH45+xeEbg=cFg@mail.gmail.com> Try with TLSv1.2 TLSv1.3 is for http3. On Thu 26 Nov, 2020, 7:09 AM Pavan P, <pavan45 at gmail.com> wrote: > Still the same problem, enabled ssl_protocols TLSv1.3; > > Is there any issue with my configuration? With the below configuration, > http://ci1.altlifelab.com redirects to the authentication page, but https > does not, it will directly go to the application without authentication. > > server { > server_name ci1.altlifelab.com; > > location / { > proxy_set_header Host $host:$server_port; > proxy_set_header X-Real-IP $remote_addr; > proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; > proxy_set_header X-Forwarded-Proto $scheme; > > > # Fix the "It appears that your reverse proxy set up is broken" > error. 
> proxy_pass http://127.0.0.1:9080; > proxy_read_timeout 90; > > proxy_redirect http://127.0.0.1:9080 > http://www.ci1.altlifelab.com; > > # Required for new HTTP-based CLI > proxy_http_version 1.1; > proxy_request_buffering off; > # workaround for https://issues.jenkins-ci1.org/browse/JENKINS-45651 > add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; > } > > listen 443 ssl; # managed by Certbot > ssl_certificate /etc/letsencrypt/live/ci1.altlifelab.com/fullchain.pem; > # managed by Certbot > ssl_certificate_key /etc/letsencrypt/live/ > ci1.altlifelab.com/privkey.pem; # managed by Certbot > include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot > ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot > ssl_protocols TLSv1.3; > } > > server { > if ($host = ci1.altlifelab.com) { > # return 301 https://$host$request_uri; > return 301 > https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd > ; > } # managed by Certbot > > > listen 80; > server_name ci1.altlifelab.com; > return 404; # managed by Certbot > } > > On Thu, Nov 26, 2020 at 11:24 AM Pavan P <pavan45 at gmail.com> wrote: > >> HI Harish, >> But the issue I'm facing is different, when I try >> http://ci1.altlifelab.com it works fine, when I use >> https://ci1.altlifelab.com the url does not redirect to auth. >> >> On Thu, Nov 26, 2020 at 11:12 AM HARISH KUMAR Ivaturi < >> harishkumarivaturi at gmail.com> wrote: >> >>> Once try this. >>> >>> >>> https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-subrequest-authentication/ >>> >>> And configure again with auth proxy module >>> >>> On Thu 26 Nov, 2020, 6:17 AM Pavan P, <pavan45 at gmail.com> wrote: >>> >>>> Yes Harish, Certificate is working fine. 
>>>> >>>> root at ip-172-31-33-18:~# nginx -V >>>> nginx version: nginx/1.10.3 (Ubuntu) >>>> built with OpenSSL 1.0.2g 1 Mar 2016 >>>> TLS SNI support enabled >>>> configure arguments: --with-cc-opt='-g -O2 -fPIE >>>> -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time >>>> -D_FORTIFY_SOURCE=2' --with-ld-opt='-Wl,-Bsymbolic-functions -fPIE -pie >>>> -Wl,-z,relro -Wl,-z,now' --prefix=/usr/share/nginx >>>> --conf-path=/etc/nginx/nginx.conf --http-log-path=/var/log/nginx/access.log >>>> --error-log-path=/var/log/nginx/error.log --lock-path=/var/lock/nginx.lock >>>> --pid-path=/run/nginx.pid --http-client-body-temp-path=/var/lib/nginx/body >>>> --http-fastcgi-temp-path=/var/lib/nginx/fastcgi >>>> --http-proxy-temp-path=/var/lib/nginx/proxy >>>> --http-scgi-temp-path=/var/lib/nginx/scgi >>>> --http-uwsgi-temp-path=/var/lib/nginx/uwsgi --with-debug --with-pcre-jit >>>> --with-ipv6 --with-http_ssl_module --with-http_stub_status_module >>>> --with-http_realip_module --with-http_auth_request_module >>>> --with-http_addition_module --with-http_dav_module --with-http_geoip_module >>>> --with-http_gunzip_module --with-http_gzip_static_module >>>> --with-http_image_filter_module --with-http_v2_module >>>> --with-http_sub_module --with-http_xslt_module --with-stream >>>> --with-stream_ssl_module --with-mail --with-mail_ssl_module --with-threads >>>> (base) root at ip-172-31-33-18:~# >>>> >>>> On Thu, Nov 26, 2020 at 10:43 AM HARISH KUMAR Ivaturi < >>>> harishkumarivaturi at gmail.com> wrote: >>>> >>>>> 1) once type nginx -V and send rhe output. >>>>> >>>>> 2) certificate - certificate.cert >>>>> Certificate_key - certificate.key >>>>> >>>>> Once recheck the certs section and make sure that you have generated >>>>> with certificates with openssl properly. >>>>> >>>>> BR >>>>> Harish Kumar >>>>> >>>>> On Thu 26 Nov, 2020, 5:27 AM Pavan P, <pavan45 at gmail.com> wrote: >>>>> >>>>>> Hi Harish, >>>>>> Below is the config of my nginx. Https module is configured fine. 
>>>>>> Please let me know if I have missed anything. >>>>>> >>>>>> server { >>>>>> server_name ci1.altlifelab.com; >>>>>> >>>>>> location / { >>>>>> proxy_set_header Host $host:$server_port; >>>>>> proxy_set_header X-Real-IP $remote_addr; >>>>>> proxy_set_header X-Forwarded-For >>>>>> $proxy_add_x_forwarded_for; >>>>>> proxy_set_header X-Forwarded-Proto $scheme; >>>>>> >>>>>> >>>>>> # Fix the "It appears that your reverse proxy set up is broken" >>>>>> error. >>>>>> proxy_pass http://127.0.0.1:9080; >>>>>> proxy_read_timeout 90; >>>>>> >>>>>> proxy_redirect http://127.0.0.1:9080 >>>>>> http://www.ci1.altlifelab.com; >>>>>> >>>>>> # Required for new HTTP-based CLI >>>>>> proxy_http_version 1.1; >>>>>> proxy_request_buffering off; >>>>>> # workaround for >>>>>> https://issues.jenkins-ci1.org/browse/JENKINS-45651 >>>>>> add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; >>>>>> } >>>>>> >>>>>> listen 443 ssl; # managed by Certbot >>>>>> ssl_certificate /etc/letsencrypt/live/ >>>>>> ci1.altlifelab.com/fullchain.pem; # managed by Certbot >>>>>> ssl_certificate_key /etc/letsencrypt/live/ >>>>>> ci1.altlifelab.com/privkey.pem; # managed by Certbot >>>>>> include /etc/letsencrypt/options-ssl-nginx.conf; # managed by >>>>>> Certbot >>>>>> ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by >>>>>> Certbot >>>>>> >>>>>> >>>>>> } >>>>>> >>>>>> server { >>>>>> if ($host = ci1.altlifelab.com) { >>>>>> # return 301 https://$host$request_uri; >>>>>> return 301 >>>>>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd >>>>>> ; >>>>>> } # managed by Certbot >>>>>> >>>>>> >>>>>> listen 80; >>>>>> server_name ci1.altlifelab.com; >>>>>> return 301 >>>>>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd; >>>>>> >>>>>> } >>>>>> >>>>>> On Thu, Nov 26, 2020 at 5:04 AM HARISH KUMAR Ivaturi < >>>>>> harishkumarivaturi at 
gmail.com> wrote: >>>>>> >>>>>>> I am not sure if you have configured nginx with https_module. Once >>>>>>> try that. And also add proper headers in the nginx.conf like >>>>>>> >>>>>>> Listen 443 ssl; >>>>>>> Certificates location >>>>>>> >>>>>>> BR >>>>>>> Harish Kumar >>>>>>> >>>>>>> On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: >>>>>>> >>>>>>>> Hi, >>>>>>>> I have configured nginx to authenticate with azure AD for login. >>>>>>>> >>>>>>>> When I access the site abc.example.com it redirects to Azure for >>>>>>>> authentication and redirects me back once the authentication is complete. >>>>>>>> >>>>>>>> How ever when I try to access the site with https abc.example.com >>>>>>>> it does not redirect for authentication. >>>>>>>> >>>>>>>> Is there anyway I can get both http and https to redirect for azure >>>>>>>> auth. >>>>>>>> >>>>>>>> Regards, >>>>>>>> Pavan >>>>>>>> >>>>>>>> _______________________________________________ >>>>>>>> nginx-devel mailing list >>>>>>>> nginx-devel at nginx.org >>>>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>>>> >>>>>>> _______________________________________________ >>>>>>> nginx-devel mailing list >>>>>>> nginx-devel at nginx.org >>>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>>> >>>>>> _______________________________________________ >>>>>> nginx-devel mailing list >>>>>> nginx-devel at nginx.org >>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>> >>>>> _______________________________________________ >>>>> nginx-devel mailing list >>>>> nginx-devel at nginx.org >>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel 
>> >> _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/34ca5868/attachment-0001.htm> From pavan45 at gmail.com Thu Nov 26 06:21:10 2020 From: pavan45 at gmail.com (Pavan P) Date: Thu, 26 Nov 2020 11:51:10 +0530 Subject: Help - Nginx Azure Auth In-Reply-To: <CAGmfrBwod=5B1o59XZnUmCNTWk45THVJRqg0WH45+xeEbg=cFg@mail.gmail.com> References: <CAHsjVgZRdd7sNsbAw0BSHL2ZmpfQtstPKZwaCak6LfugMYBgrA@mail.gmail.com> <CAGmfrBw5NCW=vd84V+ATWuueBa5pjBQLpKBQ=nz7LUkBKhjcBg@mail.gmail.com> <CAHsjVga-Uiwoh3qTY9tzhtQoCQ3BkwNUUtmfT9Rb1RwMCP=Uvw@mail.gmail.com> <CAGmfrBwVF0LUCpxi_KDf2GuZMb-moqLH7pZRSmFzf_GGqgHrXg@mail.gmail.com> <CAHsjVgb96LonZFwDmki5j4N7PSYAM0NprwsvxRdgzMXWi=BVeA@mail.gmail.com> <CAGmfrByTGUXRzTPYN0=Emq=hVZta+nQCQ8-Evy1_4Bu+H-ZBsA@mail.gmail.com> <CAHsjVgbWuEfmqkRnYDeQtaJvUSM+0FV5GomkuOwo0yuOvw3ONQ@mail.gmail.com> <CAHsjVgYagjDmv4mdu6z2xxPbo0raawaz_gqpMcHapkHQSmNz+Q@mail.gmail.com> <CAGmfrBwod=5B1o59XZnUmCNTWk45THVJRqg0WH45+xeEbg=cFg@mail.gmail.com> Message-ID: <CAHsjVgaUWWmQwy6=vy1pUn4g=BeemypA=_mmNgWsjbxXk9X8Jw@mail.gmail.com> Still the same issue, https not redirecting to azure for authentication. Only http redirects. On Thu, Nov 26, 2020 at 11:47 AM HARISH KUMAR Ivaturi < harishkumarivaturi at gmail.com> wrote: > Try with TLSv1.2 > > TLSv1.3 is for http3. > > On Thu 26 Nov, 2020, 7:09 AM Pavan P, <pavan45 at gmail.com> wrote: > >> Still the same problem, enabled ssl_protocols TLSv1.3; >> >> Is there any issue with my configuration? With the below configuration, >> http://ci1.altlifelab.com redirects to the authentication page, but >> https does not, it will directly go to the application without >> authentication. 
>> >> server { >> server_name ci1.altlifelab.com; >> >> location / { >> proxy_set_header Host $host:$server_port; >> proxy_set_header X-Real-IP $remote_addr; >> proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; >> proxy_set_header X-Forwarded-Proto $scheme; >> >> >> # Fix the "It appears that your reverse proxy set up is broken" >> error. >> proxy_pass http://127.0.0.1:9080; >> proxy_read_timeout 90; >> >> proxy_redirect http://127.0.0.1:9080 >> http://www.ci1.altlifelab.com; >> >> # Required for new HTTP-based CLI >> proxy_http_version 1.1; >> proxy_request_buffering off; >> # workaround for >> https://issues.jenkins-ci1.org/browse/JENKINS-45651 >> add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; >> } >> >> listen 443 ssl; # managed by Certbot >> ssl_certificate /etc/letsencrypt/live/ >> ci1.altlifelab.com/fullchain.pem; # managed by Certbot >> ssl_certificate_key /etc/letsencrypt/live/ >> ci1.altlifelab.com/privkey.pem; # managed by Certbot >> include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot >> ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot >> ssl_protocols TLSv1.3; >> } >> >> server { >> if ($host = ci1.altlifelab.com) { >> # return 301 https://$host$request_uri; >> return 301 >> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd >> ; >> } # managed by Certbot >> >> >> listen 80; >> server_name ci1.altlifelab.com; >> return 404; # managed by Certbot >> } >> >> On Thu, Nov 26, 2020 at 11:24 AM Pavan P <pavan45 at gmail.com> wrote: >> >>> HI Harish, >>> But the issue I'm facing is different, when I try >>> http://ci1.altlifelab.com it works fine, when I use >>> https://ci1.altlifelab.com the url does not redirect to auth. >>> >>> On Thu, Nov 26, 2020 at 11:12 AM HARISH KUMAR Ivaturi < >>> harishkumarivaturi at gmail.com> wrote: >>> >>>> Once try this. 
>>>> >>>> >>>> https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-subrequest-authentication/ >>>> >>>> And configure again with auth proxy module >>>> >>>> On Thu 26 Nov, 2020, 6:17 AM Pavan P, <pavan45 at gmail.com> wrote: >>>> >>>>> Yes Harish, Certificate is working fine. >>>>> >>>>> root at ip-172-31-33-18:~# nginx -V >>>>> nginx version: nginx/1.10.3 (Ubuntu) >>>>> built with OpenSSL 1.0.2g 1 Mar 2016 >>>>> TLS SNI support enabled >>>>> configure arguments: --with-cc-opt='-g -O2 -fPIE >>>>> -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time >>>>> -D_FORTIFY_SOURCE=2' --with-ld-opt='-Wl,-Bsymbolic-functions -fPIE -pie >>>>> -Wl,-z,relro -Wl,-z,now' --prefix=/usr/share/nginx >>>>> --conf-path=/etc/nginx/nginx.conf --http-log-path=/var/log/nginx/access.log >>>>> --error-log-path=/var/log/nginx/error.log --lock-path=/var/lock/nginx.lock >>>>> --pid-path=/run/nginx.pid --http-client-body-temp-path=/var/lib/nginx/body >>>>> --http-fastcgi-temp-path=/var/lib/nginx/fastcgi >>>>> --http-proxy-temp-path=/var/lib/nginx/proxy >>>>> --http-scgi-temp-path=/var/lib/nginx/scgi >>>>> --http-uwsgi-temp-path=/var/lib/nginx/uwsgi --with-debug --with-pcre-jit >>>>> --with-ipv6 --with-http_ssl_module --with-http_stub_status_module >>>>> --with-http_realip_module --with-http_auth_request_module >>>>> --with-http_addition_module --with-http_dav_module --with-http_geoip_module >>>>> --with-http_gunzip_module --with-http_gzip_static_module >>>>> --with-http_image_filter_module --with-http_v2_module >>>>> --with-http_sub_module --with-http_xslt_module --with-stream >>>>> --with-stream_ssl_module --with-mail --with-mail_ssl_module --with-threads >>>>> (base) root at ip-172-31-33-18:~# >>>>> >>>>> On Thu, Nov 26, 2020 at 10:43 AM HARISH KUMAR Ivaturi < >>>>> harishkumarivaturi at gmail.com> wrote: >>>>> >>>>>> 1) once type nginx -V and send rhe output. 
>>>>>> >>>>>> 2) certificate - certificate.cert >>>>>> Certificate_key - certificate.key >>>>>> >>>>>> Once recheck the certs section and make sure that you have generated >>>>>> with certificates with openssl properly. >>>>>> >>>>>> BR >>>>>> Harish Kumar >>>>>> >>>>>> On Thu 26 Nov, 2020, 5:27 AM Pavan P, <pavan45 at gmail.com> wrote: >>>>>> >>>>>>> Hi Harish, >>>>>>> Below is the config of my nginx. Https module is configured fine. >>>>>>> Please let me know if I have missed anything. >>>>>>> >>>>>>> server { >>>>>>> server_name ci1.altlifelab.com; >>>>>>> >>>>>>> location / { >>>>>>> proxy_set_header Host $host:$server_port; >>>>>>> proxy_set_header X-Real-IP $remote_addr; >>>>>>> proxy_set_header X-Forwarded-For >>>>>>> $proxy_add_x_forwarded_for; >>>>>>> proxy_set_header X-Forwarded-Proto $scheme; >>>>>>> >>>>>>> >>>>>>> # Fix the "It appears that your reverse proxy set up is >>>>>>> broken" error. >>>>>>> proxy_pass http://127.0.0.1:9080; >>>>>>> proxy_read_timeout 90; >>>>>>> >>>>>>> proxy_redirect http://127.0.0.1:9080 >>>>>>> http://www.ci1.altlifelab.com; >>>>>>> >>>>>>> # Required for new HTTP-based CLI >>>>>>> proxy_http_version 1.1; >>>>>>> proxy_request_buffering off; >>>>>>> # workaround for >>>>>>> https://issues.jenkins-ci1.org/browse/JENKINS-45651 >>>>>>> add_header 'X-SSH-Endpoint' 'ci1.altlifelab.com:50022' always; >>>>>>> } >>>>>>> >>>>>>> listen 443 ssl; # managed by Certbot >>>>>>> ssl_certificate /etc/letsencrypt/live/ >>>>>>> ci1.altlifelab.com/fullchain.pem; # managed by Certbot >>>>>>> ssl_certificate_key /etc/letsencrypt/live/ >>>>>>> ci1.altlifelab.com/privkey.pem; # managed by Certbot >>>>>>> include /etc/letsencrypt/options-ssl-nginx.conf; # managed by >>>>>>> Certbot >>>>>>> ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by >>>>>>> Certbot >>>>>>> >>>>>>> >>>>>>> } >>>>>>> >>>>>>> server { >>>>>>> if ($host = ci1.altlifelab.com) { >>>>>>> # return 301 https://$host$request_uri; >>>>>>> return 301 >>>>>>> 
https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd >>>>>>> ; >>>>>>> } # managed by Certbot >>>>>>> >>>>>>> >>>>>>> listen 80; >>>>>>> server_name ci1.altlifelab.com; >>>>>>> return 301 >>>>>>> https://myapps.microsoft.com/signin/ci2/a825dd26-fed2-4423-ae69-6a7d457b4b44?tenantId=eb9970cc-4803-4f6a-9ad2-e9b46042c5fd; >>>>>>> >>>>>>> } >>>>>>> >>>>>>> On Thu, Nov 26, 2020 at 5:04 AM HARISH KUMAR Ivaturi < >>>>>>> harishkumarivaturi at gmail.com> wrote: >>>>>>> >>>>>>>> I am not sure if you have configured nginx with https_module. Once >>>>>>>> try that. And also add proper headers in the nginx.conf like >>>>>>>> >>>>>>>> Listen 443 ssl; >>>>>>>> Certificates location >>>>>>>> >>>>>>>> BR >>>>>>>> Harish Kumar >>>>>>>> >>>>>>>> On Wed 25 Nov, 2020, 3:53 PM Pavan P, <pavan45 at gmail.com> wrote: >>>>>>>> >>>>>>>>> Hi, >>>>>>>>> I have configured nginx to authenticate with azure AD for login. >>>>>>>>> >>>>>>>>> When I access the site abc.example.com it redirects to Azure for >>>>>>>>> authentication and redirects me back once the authentication is complete. >>>>>>>>> >>>>>>>>> How ever when I try to access the site with https abc.example.com >>>>>>>>> it does not redirect for authentication. >>>>>>>>> >>>>>>>>> Is there anyway I can get both http and https to redirect for >>>>>>>>> azure auth. 
>>>>>>>>> >>>>>>>>> Regards, >>>>>>>>> Pavan >>>>>>>>> >>>>>>>>> _______________________________________________ >>>>>>>>> nginx-devel mailing list >>>>>>>>> nginx-devel at nginx.org >>>>>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>>>>> >>>>>>>> _______________________________________________ >>>>>>>> nginx-devel mailing list >>>>>>>> nginx-devel at nginx.org >>>>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>>>> >>>>>>> _______________________________________________ >>>>>>> nginx-devel mailing list >>>>>>> nginx-devel at nginx.org >>>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>>> >>>>>> _______________________________________________ >>>>>> nginx-devel mailing list >>>>>> nginx-devel at nginx.org >>>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>> >>>>> _______________________________________________ >>>>> nginx-devel mailing list >>>>> nginx-devel at nginx.org >>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://mailman.nginx.org/pipermail/nginx-devel/attachments/20201126/196bb1a0/attachment-0001.htm> From xeioex at nginx.com Thu Nov 26 11:12:36 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 26 Nov 2020 11:12:36 +0000 Subject: [njs] Types: extending data types for methods with NjsStringLike args. 
Message-ID: <hg.5bd78c74777a.1606389156.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/5bd78c74777a branches: changeset: 1572:5bd78c74777a user: Dmitry Volyntsev <xeioex at nginx.com> date: Thu Nov 26 11:10:59 2020 +0000 description: Types: extending data types for methods with NjsStringLike args. diffstat: test/ts/test.ts | 5 ++++- ts/ngx_http_js_module.d.ts | 20 ++++++++++---------- ts/ngx_stream_js_module.d.ts | 8 ++++---- ts/njs_core.d.ts | 1 + ts/njs_modules/crypto.d.ts | 6 +++--- ts/njs_modules/fs.d.ts | 8 ++++---- 6 files changed, 26 insertions(+), 22 deletions(-) diffs (207 lines): diff -r 434f20c29f4c -r 5bd78c74777a test/ts/test.ts --- a/test/ts/test.ts Wed Nov 25 10:47:47 2020 +0000 +++ b/test/ts/test.ts Thu Nov 26 11:10:59 2020 +0000 @@ -49,6 +49,7 @@ function http_module(r: NginxHTTPRequest // r.log r.log(bs); + r.log(Buffer.from("abc")); r.log(r.headersOut['Connection'] ?? ''); // r.variables @@ -61,7 +62,7 @@ function http_module(r: NginxHTTPRequest r.subrequest('/p/sub2', {method:'POST'}).then(reply => r.return(reply.status)); vod = r.subrequest('/p/sub3', reply => r.return(reply.status)); vod = r.subrequest('/p/sub4', {method:'POST'}, reply => r.return(reply.status)); - vod = r.subrequest('/p/sub5', {detached:true}); + vod = r.subrequest(Buffer.from('/p/sub5'), {detached:true}); // Warning: vod = r.subrequest('/p/sub9', {detached:true}, reply => r.return(reply.status)); r.subrequest('/p/sub6', 'a=1&b=2').then(reply => r.return(reply.status, JSON.stringify(JSON.parse(reply.responseBody ?? 
'')))); @@ -73,6 +74,8 @@ function fs_module() { s = fs.readFileSync('/path', 'utf8'); s = fs.readFileSync(Buffer.from('/path'), {encoding:'hex'}); + + fs.writeFileSync('/path', Buffer.from('abc')); } function qs_module(str: NjsByteString) { diff -r 434f20c29f4c -r 5bd78c74777a ts/ngx_http_js_module.d.ts --- a/ts/ngx_http_js_module.d.ts Wed Nov 25 10:47:47 2020 +0000 +++ b/ts/ngx_http_js_module.d.ts Thu Nov 26 11:10:59 2020 +0000 @@ -263,7 +263,7 @@ interface NginxHTTPRequest { * Writes a string to the error log on the error level of logging. * @param message Message to log. */ - error(message: NjsStringLike): void; + error(message: NjsStringOrBuffer): void; /** * Finishes sending a response to the client. */ @@ -286,12 +286,12 @@ interface NginxHTTPRequest { * The actual redirect happens after the handler execution is completed. * @param uri Location to redirect to. */ - internalRedirect(uri: NjsStringLike): void; + internalRedirect(uri: NjsStringOrBuffer): void; /** * Writes a string to the error log on the info level of logging. * @param message Message to log. */ - log(message: NjsStringLike): void; + log(message: NjsStringOrBuffer): void; /** * HTTP method. */ @@ -323,11 +323,11 @@ interface NginxHTTPRequest { * @param status Respose status code. * @param body Respose body. */ - return(status: number, body?: NjsStringLike): void; + return(status: number, body?: NjsStringOrBuffer): void; /** * Sends the HTTP headers to the client. */ - send(part: NjsStringLike): void; + send(part: NjsStringOrBuffer): void; /** * Sends the HTTP headers to the client. */ @@ -346,11 +346,11 @@ interface NginxHTTPRequest { * @param options Subrequest options. * @param callback Completion callback. 
*/ - subrequest(uri: NjsStringLike, options: NginxSubrequestOptions & { detached: true }): void; - subrequest(uri: NjsStringLike, options?: NginxSubrequestOptions | string): Promise<NginxHTTPRequest>; - subrequest(uri: NjsStringLike, options: NginxSubrequestOptions & { detached?: false } | string, + subrequest(uri: NjsStringOrBuffer, options: NginxSubrequestOptions & { detached: true }): void; + subrequest(uri: NjsStringOrBuffer, options?: NginxSubrequestOptions | string): Promise<NginxHTTPRequest>; + subrequest(uri: NjsStringOrBuffer, options: NginxSubrequestOptions & { detached?: false } | string, callback:(reply:NginxHTTPRequest) => void): void; - subrequest(uri: NjsStringLike, callback:(reply:NginxHTTPRequest) => void): void; + subrequest(uri: NjsStringOrBuffer, callback:(reply:NginxHTTPRequest) => void): void; /** * Current URI in request, normalized. */ @@ -363,5 +363,5 @@ interface NginxHTTPRequest { * Writes a string to the error log on the warn level of logging. * @param message Message to log. */ - warn(message: NjsStringLike): void; + warn(message: NjsStringOrBuffer): void; } diff -r 434f20c29f4c -r 5bd78c74777a ts/ngx_stream_js_module.d.ts --- a/ts/ngx_stream_js_module.d.ts Wed Nov 25 10:47:47 2020 +0000 +++ b/ts/ngx_stream_js_module.d.ts Thu Nov 26 11:10:59 2020 +0000 @@ -110,12 +110,12 @@ interface NginxStreamRequest { * Writes a string to the error log on the error level of logging. * @param message Message to log. */ - error(message: NjsStringLike): void; + error(message: NjsStringOrBuffer): void; /** * Writes a string to the error log on the info level of logging. * @param message Message to log. */ - log(message: NjsStringLike): void; + log(message: NjsStringOrBuffer): void; /** * Unregisters the callback set by on() method. */ @@ -135,7 +135,7 @@ interface NginxStreamRequest { * @param options Object used to override nginx buffer flags derived from * an incoming data chunk buffer. 
*/ - send(data: NjsStringLike, options?: NginxStreamSendOptions): void; + send(data: NjsStringOrBuffer, options?: NginxStreamSendOptions): void; /** * nginx variables object. */ @@ -144,5 +144,5 @@ interface NginxStreamRequest { * Writes a string to the error log on the warn level of logging. * @param message Message to log. */ - warn(message: NjsStringLike): void; + warn(message: NjsStringOrBuffer): void; } diff -r 434f20c29f4c -r 5bd78c74777a ts/njs_core.d.ts --- a/ts/njs_core.d.ts Wed Nov 25 10:47:47 2020 +0000 +++ b/ts/njs_core.d.ts Thu Nov 26 11:10:59 2020 +0000 @@ -584,6 +584,7 @@ declare class Buffer extends Uint8Array writeFloatLE(value: number, offset?: number): number; } +type NjsStringOrBuffer = NjsStringLike | Buffer | DataView | TypedArray; // Global objects diff -r 434f20c29f4c -r 5bd78c74777a ts/njs_modules/crypto.d.ts --- a/ts/njs_modules/crypto.d.ts Wed Nov 25 10:47:47 2020 +0000 +++ b/ts/njs_modules/crypto.d.ts Thu Nov 26 11:10:59 2020 +0000 @@ -10,7 +10,7 @@ declare module "crypto" { /** * Updates the hash content with the given `data` and returns self. */ - update(data: NjsStringLike | Buffer | DataView | TypedArray): Hash; + update(data: NjsStringOrBuffer): Hash; /** * Calculates the digest of all of the data passed using `hash.update()`. @@ -31,7 +31,7 @@ declare module "crypto" { /** * Updates the HMAC content with the given `data` and returns self. */ - update(data: NjsStringLike | Buffer | DataView | TypedArray): Hmac; + update(data: NjsStringOrBuffer): Hmac; /** * Calculates the HMAC digest of all of the data passed using `hmac.update()`. @@ -65,7 +65,7 @@ declare module "crypto" { * @param key The secret key. * @returns An `HMAC` object. 
*/ - createHmac(algorithm: Algorithm, key: NjsStringLike): Hmac; + createHmac(algorithm: Algorithm, key: NjsStringOrBuffer): Hmac; } const crypto: Crypto; diff -r 434f20c29f4c -r 5bd78c74777a ts/njs_modules/fs.d.ts --- a/ts/njs_modules/fs.d.ts Wed Nov 25 10:47:47 2020 +0000 +++ b/ts/njs_modules/fs.d.ts Thu Nov 26 11:10:59 2020 +0000 @@ -124,7 +124,7 @@ declare module "fs" { * If `mode` is not supplied, the default of `0o666` is used. * If `flag` is not supplied, the default of `'a'` is used. */ - appendFile(path: PathLike, data: NjsStringLike | Buffer, options?: WriteFileOptions): Promise<void>; + appendFile(path: PathLike, data: NjsStringOrBuffer, options?: WriteFileOptions): Promise<void>; /** * Asynchronously creates a directory at the specified `path`. @@ -219,7 +219,7 @@ declare module "fs" { * If `mode` is not supplied, the default of `0o666` is used. * If `flag` is not supplied, the default of `'w'` is used. */ - writeFile(path: PathLike, data: NjsStringLike | Buffer, options?: WriteFileOptions): Promise<void>; + writeFile(path: PathLike, data: NjsStringOrBuffer, options?: WriteFileOptions): Promise<void>; } interface NjsFS { @@ -264,7 +264,7 @@ declare module "fs" { * If `mode` is not supplied, the default of `0o666` is used. * If `flag` is not supplied, the default of `'a'` is used. */ - appendFileSync(path: PathLike, data: NjsStringLike | Buffer, options?: WriteFileOptions): void; + appendFileSync(path: PathLike, data: NjsStringOrBuffer, options?: WriteFileOptions): void; /** * Synchronously creates a directory at the specified `path`. @@ -373,7 +373,7 @@ declare module "fs" { * If `mode` is not supplied, the default of `0o666` is used. * If `flag` is not supplied, the default of `'w'` is used. 
*/ - writeFileSync(path: PathLike, data: NjsStringLike | Buffer, options?: WriteFileOptions): void; + writeFileSync(path: PathLike, data: NjsStringOrBuffer, options?: WriteFileOptions): void; } const fs: NjsFS; From xeioex at nginx.com Thu Nov 26 11:12:38 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 26 Nov 2020 11:12:38 +0000 Subject: [njs] Types: added description for "ngx" object. Message-ID: <hg.42dfbf020c68.1606389158.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/42dfbf020c68 branches: changeset: 1573:42dfbf020c68 user: Dmitry Volyntsev <xeioex at nginx.com> date: Thu Nov 26 11:11:01 2020 +0000 description: Types: added description for "ngx" object. diffstat: test/ts/test.ts | 9 +++++++-- ts/ngx_core.d.ts | 14 ++++++++++++++ ts/ngx_http_js_module.d.ts | 1 + ts/ngx_stream_js_module.d.ts | 1 + 4 files changed, 23 insertions(+), 2 deletions(-) diffs (62 lines): diff -r 5bd78c74777a -r 42dfbf020c68 test/ts/test.ts --- a/test/ts/test.ts Thu Nov 26 11:10:59 2020 +0000 +++ b/test/ts/test.ts Thu Nov 26 11:11:01 2020 +0000 @@ -66,7 +66,6 @@ function http_module(r: NginxHTTPRequest // Warning: vod = r.subrequest('/p/sub9', {detached:true}, reply => r.return(reply.status)); r.subrequest('/p/sub6', 'a=1&b=2').then(reply => r.return(reply.status, JSON.stringify(JSON.parse(reply.responseBody ?? 
'')))); - } function fs_module() { @@ -107,7 +106,13 @@ function buffer(b: Buffer) { b.equals(b); } -function builtins() { +function njs_object() { njs.dump('asdf'); njs.version != process.argv[1]; } + +function ngx_object() { + ngx.log(ngx.INFO, 'asdf'); + ngx.log(ngx.WARN, Buffer.from('asdf')); + ngx.log(ngx.ERR, 'asdf'); +} diff -r 5bd78c74777a -r 42dfbf020c68 ts/ngx_core.d.ts --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/ts/ngx_core.d.ts Thu Nov 26 11:11:01 2020 +0000 @@ -0,0 +1,14 @@ +interface NgxObject { + readonly INFO: number; + readonly WARN: number; + readonly ERR: number; + /** + * Writes a string to the error log with the specified level + * of logging. + * @param level Log level (ngx.INFO, ngx.WARN, ngx.ERR). + * @param message Message to log. + */ + log(level: number, message: NjsStringOrBuffer): void; +} + +declare const ngx: NgxObject; diff -r 5bd78c74777a -r 42dfbf020c68 ts/ngx_http_js_module.d.ts --- a/ts/ngx_http_js_module.d.ts Thu Nov 26 11:10:59 2020 +0000 +++ b/ts/ngx_http_js_module.d.ts Thu Nov 26 11:11:01 2020 +0000 @@ -1,4 +1,5 @@ /// <reference path="index.d.ts" /> +/// <reference path="ngx_core.d.ts" /> interface NginxHTTPArgs { readonly [prop: string]: NjsByteString; diff -r 5bd78c74777a -r 42dfbf020c68 ts/ngx_stream_js_module.d.ts --- a/ts/ngx_stream_js_module.d.ts Thu Nov 26 11:10:59 2020 +0000 +++ b/ts/ngx_stream_js_module.d.ts Thu Nov 26 11:11:01 2020 +0000 @@ -1,4 +1,5 @@ /// <reference path="index.d.ts" /> +/// <reference path="ngx_core.d.ts" /> interface NginxStreamVariables { readonly 'binary_remote_addr'?: NjsByteString; From xeioex at nginx.com Thu Nov 26 11:38:28 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 26 Nov 2020 11:38:28 +0000 Subject: [njs] Modules: renaming vars to rawVariables to better reflect purpose. 
Message-ID: <hg.a141a29417dc.1606390708.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/a141a29417dc branches: changeset: 1574:a141a29417dc user: Dmitry Volyntsev <xeioex at nginx.com> date: Thu Nov 26 11:36:03 2020 +0000 description: Modules: renaming vars to rawVariables to better reflect purpose. In 434f20c29f4c, obj.vars was introduced. obj.vars is almost identical to obj.variables except a value of the Buffer type is returned. Since most nginx variables are valid strings, it is preferable to leave both variants. To avoid confusion rawVariables name is used for Buffer variables. diffstat: nginx/ngx_http_js_module.c | 2 +- nginx/ngx_stream_js_module.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diffs (24 lines): diff -r 42dfbf020c68 -r a141a29417dc nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Thu Nov 26 11:11:01 2020 +0000 +++ b/nginx/ngx_http_js_module.c Thu Nov 26 11:36:03 2020 +0000 @@ -406,7 +406,7 @@ static njs_external_t ngx_http_js_ext_r { .flags = NJS_EXTERN_OBJECT, - .name.string = njs_str("vars"), + .name.string = njs_str("rawVariables"), .u.object = { .writable = 1, .prop_handler = ngx_http_js_ext_variables, diff -r 42dfbf020c68 -r a141a29417dc nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Thu Nov 26 11:11:01 2020 +0000 +++ b/nginx/ngx_stream_js_module.c Thu Nov 26 11:36:03 2020 +0000 @@ -245,7 +245,7 @@ static njs_external_t ngx_stream_js_ext { .flags = NJS_EXTERN_OBJECT, - .name.string = njs_str("vars"), + .name.string = njs_str("rawVariables"), .u.object = { .writable = 1, .prop_handler = ngx_stream_js_ext_variables, From alexander.borisov at nginx.com Thu Nov 26 18:44:03 2020 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Thu, 26 Nov 2020 18:44:03 +0000 Subject: [njs] Modules: fixed promise events handling. 
Message-ID: <hg.fac7e5dc8009.1606416243.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/fac7e5dc8009 branches: changeset: 1575:fac7e5dc8009 user: Alexander Borisov <alexander.borisov at nginx.com> date: Thu Nov 26 21:43:17 2020 +0300 description: Modules: fixed promise events handling. Previously, promise chain might not be invoked at all in some cases. Specifically, this happened in HTTP module if promise chain did not start with a r.subrequest() invocation. The fix is to always process all pending promise events after the main module function. This closes #359 issue on GitHub. diffstat: nginx/ngx_js.c | 6 +----- src/njs_vm.c | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diffs (28 lines): diff -r a141a29417dc -r fac7e5dc8009 nginx/ngx_js.c --- a/nginx/ngx_js.c Thu Nov 26 11:36:03 2020 +0000 +++ b/nginx/ngx_js.c Thu Nov 26 21:43:17 2020 +0300 @@ -79,11 +79,7 @@ ngx_js_call(njs_vm_t *vm, ngx_str_t *fna return NGX_ERROR; } - if (njs_vm_pending(vm)) { - return NGX_AGAIN; - } - - return NGX_OK; + return njs_vm_run(vm); } diff -r a141a29417dc -r fac7e5dc8009 src/njs_vm.c --- a/src/njs_vm.c Thu Nov 26 11:36:03 2020 +0000 +++ b/src/njs_vm.c Thu Nov 26 21:43:17 2020 +0300 @@ -578,7 +578,7 @@ njs_vm_handle_events(njs_vm_t *vm) } while (!njs_queue_is_empty(promise_events)); - return njs_posted_events(vm) ? NJS_AGAIN : NJS_OK; + return njs_vm_pending(vm) ? NJS_AGAIN : NJS_OK; } From jan.prachar at gmail.com Thu Nov 26 20:11:39 2020 From: jan.prachar at gmail.com (Jan =?UTF-8?Q?Pracha=C5=99?=) Date: Thu, 26 Nov 2020 21:11:39 +0100 Subject: =?UTF-8?Q?=5BPATCH=5D=C2=A0Core=3A_fixed_inconsistent_state_of_subrequest?= =?UTF-8?Q?=27s_headers=5Fin_list?= Message-ID: <cf07ab7f9af733b0ac75091b13a923dc2b36c755.camel@gmail.com> # HG changeset patch # User Jan Pracha? 
<jan.prachar at gmail.com> # Date 1606420825 -3600 # Thu Nov 26 21:00:25 2020 +0100 # Node ID cf3d537ec6706f8713a757df256f2cfccb8f9b01 # Parent e35b529b03781e64912e0d8a72bd0f957dc08cd2 Core: fixed inconsistent state of subrequest's headers_in list When copying structure ngx_list_t, a reference to the last part need to be updated, if list contains only one part. This fixes an issue, when adding a header to the subrequest's headers_in list has no effect. diff -r e35b529b0378 -r cf3d537ec670 src/core/ngx_list.h --- a/src/core/ngx_list.h Mon Sep 21 19:49:49 2020 +0200 +++ b/src/core/ngx_list.h Thu Nov 26 21:00:25 2020 +0100 @@ -51,6 +51,15 @@ return NGX_OK; } +static ngx_inline void +ngx_list_copy(ngx_list_t *target, ngx_list_t *src) +{ + *target = *src; + if (target->part.next == NULL) { + target->last = &target->part; + } +} + /* * diff -r e35b529b0378 -r cf3d537ec670 src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c Mon Sep 21 19:49:49 2020 +0200 +++ b/src/http/ngx_http_core_module.c Thu Nov 26 21:00:25 2020 +0100 @@ -2364,6 +2364,7 @@ sr->pool = r->pool; sr->headers_in = r->headers_in; + ngx_list_copy(&sr->headers_in.headers, &r->headers_in.headers); ngx_http_clear_content_length(sr); ngx_http_clear_accept_ranges(sr); From ru at nginx.com Thu Nov 26 21:08:41 2020 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 26 Nov 2020 21:08:41 +0000 Subject: [nginx] Version bump. Message-ID: <hg.ac09a57ec50d.1606424921.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/ac09a57ec50d branches: changeset: 7749:ac09a57ec50d user: Ruslan Ermilov <ru at nginx.com> date: Thu Nov 26 23:46:59 2020 +0300 description: Version bump. 
diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r 66a441bf669b -r ac09a57ec50d src/core/nginx.h --- a/src/core/nginx.h Tue Nov 24 18:06:34 2020 +0300 +++ b/src/core/nginx.h Thu Nov 26 23:46:59 2020 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1019005 -#define NGINX_VERSION "1.19.5" +#define nginx_version 1019006 +#define NGINX_VERSION "1.19.6" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From ru at nginx.com Thu Nov 26 21:08:44 2020 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 26 Nov 2020 21:08:44 +0000 Subject: [nginx] Upstream: excluded down servers from the next_upstream tries. Message-ID: <hg.90cc7194e993.1606424924.6026610855610030274@dev.nginx> details: https://hg.nginx.org/nginx/rev/90cc7194e993 branches: changeset: 7750:90cc7194e993 user: Ruslan Ermilov <ru at nginx.com> date: Fri Nov 27 00:01:20 2020 +0300 description: Upstream: excluded down servers from the next_upstream tries. Previously, the number of next_upstream tries included servers marked as "down", resulting in "no live upstreams" with the code 502 instead of the code derived from an attempt to connect to the last tried "up" server (ticket #2096). diffstat: src/http/ngx_http_upstream_round_robin.c | 20 +++++++++++++++++--- src/http/ngx_http_upstream_round_robin.h | 1 + src/stream/ngx_stream_upstream_round_robin.c | 20 +++++++++++++++++--- src/stream/ngx_stream_upstream_round_robin.h | 1 + 4 files changed, 36 insertions(+), 6 deletions(-) diffs (208 lines): diff -r ac09a57ec50d -r 90cc7194e993 src/http/ngx_http_upstream_round_robin.c --- a/src/http/ngx_http_upstream_round_robin.c Thu Nov 26 23:46:59 2020 +0300 +++ b/src/http/ngx_http_upstream_round_robin.c Fri Nov 27 00:01:20 2020 +0300 @@ -10,8 +10,8 @@ #include <ngx_http.h> -#define ngx_http_upstream_tries(p) ((p)->number \ - + ((p)->next ? (p)->next->number : 0)) +#define ngx_http_upstream_tries(p) ((p)->tries \ + + ((p)->next ? 
(p)->next->tries : 0)) static ngx_http_upstream_rr_peer_t *ngx_http_upstream_get_peer( @@ -32,7 +32,7 @@ ngx_http_upstream_init_round_robin(ngx_c ngx_http_upstream_srv_conf_t *us) { ngx_url_t u; - ngx_uint_t i, j, n, w; + ngx_uint_t i, j, n, w, t; ngx_http_upstream_server_t *server; ngx_http_upstream_rr_peer_t *peer, **peerp; ngx_http_upstream_rr_peers_t *peers, *backup; @@ -44,6 +44,7 @@ ngx_http_upstream_init_round_robin(ngx_c n = 0; w = 0; + t = 0; for (i = 0; i < us->servers->nelts; i++) { if (server[i].backup) { @@ -52,6 +53,10 @@ ngx_http_upstream_init_round_robin(ngx_c n += server[i].naddrs; w += server[i].naddrs * server[i].weight; + + if (!server[i].down) { + t += server[i].naddrs; + } } if (n == 0) { @@ -75,6 +80,7 @@ ngx_http_upstream_init_round_robin(ngx_c peers->number = n; peers->weighted = (w != n); peers->total_weight = w; + peers->tries = t; peers->name = &us->host; n = 0; @@ -110,6 +116,7 @@ ngx_http_upstream_init_round_robin(ngx_c n = 0; w = 0; + t = 0; for (i = 0; i < us->servers->nelts; i++) { if (!server[i].backup) { @@ -118,6 +125,10 @@ ngx_http_upstream_init_round_robin(ngx_c n += server[i].naddrs; w += server[i].naddrs * server[i].weight; + + if (!server[i].down) { + t += server[i].naddrs; + } } if (n == 0) { @@ -139,6 +150,7 @@ ngx_http_upstream_init_round_robin(ngx_c backup->number = n; backup->weighted = (w != n); backup->total_weight = w; + backup->tries = t; backup->name = &us->host; n = 0; @@ -214,6 +226,7 @@ ngx_http_upstream_init_round_robin(ngx_c peers->number = n; peers->weighted = 0; peers->total_weight = n; + peers->tries = n; peers->name = &us->host; peerp = &peers->peer; @@ -332,6 +345,7 @@ ngx_http_upstream_create_round_robin_pee peers->single = (ur->naddrs == 1); peers->number = ur->naddrs; + peers->tries = ur->naddrs; peers->name = &ur->host; if (ur->sockaddr) { diff -r ac09a57ec50d -r 90cc7194e993 src/http/ngx_http_upstream_round_robin.h --- a/src/http/ngx_http_upstream_round_robin.h Thu Nov 26 23:46:59 2020 +0300 +++ 
b/src/http/ngx_http_upstream_round_robin.h Fri Nov 27 00:01:20 2020 +0300 @@ -68,6 +68,7 @@ struct ngx_http_upstream_rr_peers_s { #endif ngx_uint_t total_weight; + ngx_uint_t tries; unsigned single:1; unsigned weighted:1; diff -r ac09a57ec50d -r 90cc7194e993 src/stream/ngx_stream_upstream_round_robin.c --- a/src/stream/ngx_stream_upstream_round_robin.c Thu Nov 26 23:46:59 2020 +0300 +++ b/src/stream/ngx_stream_upstream_round_robin.c Fri Nov 27 00:01:20 2020 +0300 @@ -10,8 +10,8 @@ #include <ngx_stream.h> -#define ngx_stream_upstream_tries(p) ((p)->number \ - + ((p)->next ? (p)->next->number : 0)) +#define ngx_stream_upstream_tries(p) ((p)->tries \ + + ((p)->next ? (p)->next->tries : 0)) static ngx_stream_upstream_rr_peer_t *ngx_stream_upstream_get_peer( @@ -38,7 +38,7 @@ ngx_stream_upstream_init_round_robin(ngx ngx_stream_upstream_srv_conf_t *us) { ngx_url_t u; - ngx_uint_t i, j, n, w; + ngx_uint_t i, j, n, w, t; ngx_stream_upstream_server_t *server; ngx_stream_upstream_rr_peer_t *peer, **peerp; ngx_stream_upstream_rr_peers_t *peers, *backup; @@ -50,6 +50,7 @@ ngx_stream_upstream_init_round_robin(ngx n = 0; w = 0; + t = 0; for (i = 0; i < us->servers->nelts; i++) { if (server[i].backup) { @@ -58,6 +59,10 @@ ngx_stream_upstream_init_round_robin(ngx n += server[i].naddrs; w += server[i].naddrs * server[i].weight; + + if (!server[i].down) { + t += server[i].naddrs; + } } if (n == 0) { @@ -81,6 +86,7 @@ ngx_stream_upstream_init_round_robin(ngx peers->number = n; peers->weighted = (w != n); peers->total_weight = w; + peers->tries = t; peers->name = &us->host; n = 0; @@ -116,6 +122,7 @@ ngx_stream_upstream_init_round_robin(ngx n = 0; w = 0; + t = 0; for (i = 0; i < us->servers->nelts; i++) { if (!server[i].backup) { @@ -124,6 +131,10 @@ ngx_stream_upstream_init_round_robin(ngx n += server[i].naddrs; w += server[i].naddrs * server[i].weight; + + if (!server[i].down) { + t += server[i].naddrs; + } } if (n == 0) { @@ -145,6 +156,7 @@ ngx_stream_upstream_init_round_robin(ngx 
backup->number = n; backup->weighted = (w != n); backup->total_weight = w; + backup->tries = t; backup->name = &us->host; n = 0; @@ -220,6 +232,7 @@ ngx_stream_upstream_init_round_robin(ngx peers->number = n; peers->weighted = 0; peers->total_weight = n; + peers->tries = n; peers->name = &us->host; peerp = &peers->peer; @@ -342,6 +355,7 @@ ngx_stream_upstream_create_round_robin_p peers->single = (ur->naddrs == 1); peers->number = ur->naddrs; + peers->tries = ur->naddrs; peers->name = &ur->host; if (ur->sockaddr) { diff -r ac09a57ec50d -r 90cc7194e993 src/stream/ngx_stream_upstream_round_robin.h --- a/src/stream/ngx_stream_upstream_round_robin.h Thu Nov 26 23:46:59 2020 +0300 +++ b/src/stream/ngx_stream_upstream_round_robin.h Fri Nov 27 00:01:20 2020 +0300 @@ -66,6 +66,7 @@ struct ngx_stream_upstream_rr_peers_s { #endif ngx_uint_t total_weight; + ngx_uint_t tries; unsigned single:1; unsigned weighted:1; From xeioex at nginx.com Fri Nov 27 13:13:20 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Fri, 27 Nov 2020 13:13:20 +0000 Subject: [njs] HTTP: renaming reqBody, resBody to requestBuffer and responseBuffer. Message-ID: <hg.a8cb5f47bfea.1606482800.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/a8cb5f47bfea branches: changeset: 1576:a8cb5f47bfea user: Dmitry Volyntsev <xeioex at nginx.com> date: Fri Nov 27 12:28:44 2020 +0000 description: HTTP: renaming reqBody,resBody to requestBuffer and responseBuffer. In 434f20c29f4c, r.reqBody and r.resBody were introduced. Since quite often request body and response body are valid strings, it is preferable to leave both variants to avoid potential conversions. To make distinction clearer, requestText and responseText aliases to requestBody and responseBody were also added. 
diffstat: nginx/ngx_http_js_module.c | 22 ++++++++++++++++++++-- 1 files changed, 20 insertions(+), 2 deletions(-) diffs (45 lines): diff -r fac7e5dc8009 -r a8cb5f47bfea nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Thu Nov 26 21:43:17 2020 +0300 +++ b/nginx/ngx_http_js_module.c Fri Nov 27 12:28:44 2020 +0000 @@ -328,10 +328,19 @@ static njs_external_t ngx_http_js_ext_r { .flags = NJS_EXTERN_PROPERTY, - .name.string = njs_str("reqBody"), + .name.string = njs_str("requestText"), .enumerable = 1, .u.property = { .handler = ngx_http_js_ext_get_request_body, + .magic32 = NGX_JS_STRING, + } + }, + + { + .flags = NJS_EXTERN_PROPERTY, + .name.string = njs_str("requestBuffer"), + .u.property = { + .handler = ngx_http_js_ext_get_request_body, .magic32 = NGX_JS_BUFFER, } }, @@ -355,10 +364,19 @@ static njs_external_t ngx_http_js_ext_r { .flags = NJS_EXTERN_PROPERTY, - .name.string = njs_str("resBody"), + .name.string = njs_str("responseText"), .enumerable = 1, .u.property = { .handler = ngx_http_js_ext_get_response_body, + .magic32 = NGX_JS_STRING, + } + }, + + { + .flags = NJS_EXTERN_PROPERTY, + .name.string = njs_str("responseBuffer"), + .u.property = { + .handler = ngx_http_js_ext_get_response_body, .magic32 = NGX_JS_BUFFER, } }, From mdounin at mdounin.ru Fri Nov 27 17:43:12 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 27 Nov 2020 20:43:12 +0300 Subject: =?UTF-8?Q?Re=3A_=5BPATCH=5D=C2=A0Core=3A_fixed_inconsistent_state_of_subre?= =?UTF-8?Q?quest=27s_headers=5Fin_list?= In-Reply-To: <cf07ab7f9af733b0ac75091b13a923dc2b36c755.camel@gmail.com> References: <cf07ab7f9af733b0ac75091b13a923dc2b36c755.camel@gmail.com> Message-ID: <20201127174312.GB1147@mdounin.ru> Hello! On Thu, Nov 26, 2020 at 09:11:39PM +0100, Jan Pracha? wrote: > # HG changeset patch > # User Jan Pracha? 
<jan.prachar at gmail.com> > # Date 1606420825 -3600 > # Thu Nov 26 21:00:25 2020 +0100 > # Node ID cf3d537ec6706f8713a757df256f2cfccb8f9b01 > # Parent e35b529b03781e64912e0d8a72bd0f957dc08cd2 > Core: fixed inconsistent state of subrequest's headers_in list > > When copying structure ngx_list_t, a reference to the last part need to be > updated, if list contains only one part. > > This fixes an issue, when adding a header to the subrequest's headers_in list > has no effect. Thank you for your patch. Modules are not expected to modify r->headers_in. Instead, consider using appropriate directives to set headers in upstream requests, notably proxy_set_header. [...] -- Maxim Dounin http://mdounin.ru/ From jan.prachar at gmail.com Fri Nov 27 18:09:52 2020 From: jan.prachar at gmail.com (Jan =?UTF-8?Q?Pracha=C5=99?=) Date: Fri, 27 Nov 2020 19:09:52 +0100 Subject: =?UTF-8?Q?Re=3A_=5BPATCH=5D=C2=A0Core=3A_fixed_inconsistent_state_of_subre?= =?UTF-8?Q?quest=27s_headers=5Fin_list?= In-Reply-To: <20201127174312.GB1147@mdounin.ru> References: <cf07ab7f9af733b0ac75091b13a923dc2b36c755.camel@gmail.com> <20201127174312.GB1147@mdounin.ru> Message-ID: <0ca36f5e06ab70bf3c831b776e759af2d56def9c.camel@gmail.com> On P?, 2020-11-27 at 20:43 +0300, Maxim Dounin wrote: > Hello! > > On Thu, Nov 26, 2020 at 09:11:39PM +0100, Jan Pracha? wrote: > > > # HG changeset patch > > # User Jan Pracha? <jan.prachar at gmail.com> > > # Date 1606420825 -3600 > > # Thu Nov 26 21:00:25 2020 +0100 > > # Node ID cf3d537ec6706f8713a757df256f2cfccb8f9b01 > > # Parent e35b529b03781e64912e0d8a72bd0f957dc08cd2 > > Core: fixed inconsistent state of subrequest's headers_in list > > > > When copying structure ngx_list_t, a reference to the last part need to be > > updated, if list contains only one part. > > > > This fixes an issue, when adding a header to the subrequest's headers_in list > > has no effect. > > Thank you for your patch. Hello Maxim, thank you for your reply. 
> Modules are not expected to modify r->headers_in. Instead, > consider using appropriate directives to set headers in upstream > requests, notably proxy_set_header. Okay, but there are modules that modifies it, notable openresty lua module, because there is no other way to modify headers that are sent to upstream on per-request basis. Best, Jan From goldstein.w.n at gmail.com Fri Nov 27 18:22:46 2020 From: goldstein.w.n at gmail.com (goldstein.w.n at gmail.com) Date: Fri, 27 Nov 2020 13:22:46 -0500 Subject: [PATCH] Replaced loop with __builtin_ctzl for detecting first 0 in bitnamp Message-ID: <7ec2fc7b29d6614df281.1606501366@noah> # HG changeset patch # User Noah Goldstein <goldstein.w.n at gmail.com> # Date 1606497081 18000 # Fri Nov 27 12:11:21 2020 -0500 # Node ID 7ec2fc7b29d6614df28152dd4a895e6139138890 # Parent 90cc7194e993f8d722347e9f46a00f65dffc3935 Replaced loop with __builtin_ctzl for detecting first 0 in bitnamp No particular pressing reason for this change other than the performance benefit it yields. 
Converts a loop that could take 63 iterations (and had a branch) to 5 instructions diff -r 90cc7194e993 -r 7ec2fc7b29d6 src/core/ngx_slab.c --- a/src/core/ngx_slab.c Fri Nov 27 00:01:20 2020 +0300 +++ b/src/core/ngx_slab.c Fri Nov 27 12:11:21 2020 -0500 @@ -235,36 +235,33 @@ if (bitmap[n] != NGX_SLAB_BUSY) { - for (m = 1, i = 0; m; m <<= 1, i++) { - if (bitmap[n] & m) { - continue; + m = ~bitmap[n]; + i = __builtin_ctzl(m); + + bitmap[n] = ~(m & (m - 1)); + + i = (n * 8 * sizeof(uintptr_t) + i) << shift; + + p = (uintptr_t) bitmap + i; + + pool->stats[slot].used++; + + if (bitmap[n] == NGX_SLAB_BUSY) { + for (n = n + 1; n < map; n++) { + if (bitmap[n] != NGX_SLAB_BUSY) { + goto done; + } } - bitmap[n] |= m; - - i = (n * 8 * sizeof(uintptr_t) + i) << shift; - - p = (uintptr_t) bitmap + i; - - pool->stats[slot].used++; + prev = ngx_slab_page_prev(page); + prev->next = page->next; + page->next->prev = page->prev; - if (bitmap[n] == NGX_SLAB_BUSY) { - for (n = n + 1; n < map; n++) { - if (bitmap[n] != NGX_SLAB_BUSY) { - goto done; - } - } + page->next = NULL; + page->prev = NGX_SLAB_SMALL; + } - prev = ngx_slab_page_prev(page); - prev->next = page->next; - page->next->prev = page->prev; - - page->next = NULL; - page->prev = NGX_SLAB_SMALL; - } - - goto done; - } + goto done; } } From mdounin at mdounin.ru Fri Nov 27 20:16:39 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 27 Nov 2020 23:16:39 +0300 Subject: [PATCH] Replaced loop with __builtin_ctzl for detecting first 0 in bitnamp In-Reply-To: <7ec2fc7b29d6614df281.1606501366@noah> References: <7ec2fc7b29d6614df281.1606501366@noah> Message-ID: <20201127201639.GC1147@mdounin.ru> Hello! 
On Fri, Nov 27, 2020 at 01:22:46PM -0500, goldstein.w.n at gmail.com wrote: > # HG changeset patch > # User Noah Goldstein <goldstein.w.n at gmail.com> > # Date 1606497081 18000 > # Fri Nov 27 12:11:21 2020 -0500 > # Node ID 7ec2fc7b29d6614df28152dd4a895e6139138890 > # Parent 90cc7194e993f8d722347e9f46a00f65dffc3935 > Replaced loop with __builtin_ctzl for detecting first 0 in bitnamp > No particular pressing reason for this change other than the performance benefit it yields. Converts a loop that could take 63 iterations (and had a branch) to 5 instructions > > diff -r 90cc7194e993 -r 7ec2fc7b29d6 src/core/ngx_slab.c > --- a/src/core/ngx_slab.c Fri Nov 27 00:01:20 2020 +0300 > +++ b/src/core/ngx_slab.c Fri Nov 27 12:11:21 2020 -0500 > @@ -235,36 +235,33 @@ > > if (bitmap[n] != NGX_SLAB_BUSY) { > > - for (m = 1, i = 0; m; m <<= 1, i++) { > - if (bitmap[n] & m) { > - continue; > + m = ~bitmap[n]; > + i = __builtin_ctzl(m); Thank you for the patch. Unfortunately, __builtin_ctzl() is not portable and hence the patch cannot be committed for obvious reasons. Further, even if __builtin_ctzl() is available, there no guarantees that it can be used on an uintptr_t variable, as uintptr_t can be larger than long (notably, 64bit Windows uses LLP64 data model). Also note that there are other similar loops in the various places of the code, and changing just one would certainly confuse readers. Note well that slab allocations are expected to be rare, and this loop is not expected to be performance-critical. Most performance impact on slab allocations are expected to be from shared mutex locking. If you have practical reasons to assume this code needs to be optimized, please share the details. If it indeed needs to, we can consider making a portable solution. 
-- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Fri Nov 27 20:39:44 2020 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 27 Nov 2020 23:39:44 +0300 Subject: =?UTF-8?Q?Re=3A_=5BPATCH=5D=C2=A0Core=3A_fixed_inconsistent_state_of_subre?= =?UTF-8?Q?quest=27s_headers=5Fin_list?= In-Reply-To: <0ca36f5e06ab70bf3c831b776e759af2d56def9c.camel@gmail.com> References: <cf07ab7f9af733b0ac75091b13a923dc2b36c755.camel@gmail.com> <20201127174312.GB1147@mdounin.ru> <0ca36f5e06ab70bf3c831b776e759af2d56def9c.camel@gmail.com> Message-ID: <20201127203944.GD1147@mdounin.ru> Hello! On Fri, Nov 27, 2020 at 07:09:52PM +0100, Jan Pracha? wrote: > On P?, 2020-11-27 at 20:43 +0300, Maxim Dounin wrote: > > Hello! > > > > On Thu, Nov 26, 2020 at 09:11:39PM +0100, Jan Pracha? wrote: > > > > > # HG changeset patch > > > # User Jan Pracha? <jan.prachar at gmail.com> > > > # Date 1606420825 -3600 > > > # Thu Nov 26 21:00:25 2020 +0100 > > > # Node ID cf3d537ec6706f8713a757df256f2cfccb8f9b01 > > > # Parent e35b529b03781e64912e0d8a72bd0f957dc08cd2 > > > Core: fixed inconsistent state of subrequest's headers_in list > > > > > > When copying structure ngx_list_t, a reference to the last part need to be > > > updated, if list contains only one part. > > > > > > This fixes an issue, when adding a header to the subrequest's headers_in list > > > has no effect. > > > > Thank you for your patch. > > Hello Maxim, > > thank you for your reply. > > > Modules are not expected to modify r->headers_in. Instead, > > consider using appropriate directives to set headers in upstream > > requests, notably proxy_set_header. > > Okay, but there are modules that modifies it, notable openresty lua module, because there > is no other way to modify headers that are sent to upstream on per-request basis. Certainly there is a way to modify headers that are sent to upstream o per-request basis. As previouly said, proxy_set_header is a way to go. 
The $proxy_add_x_forwarded_for variable is a good example how to do it on a per-request basis. As for the lua module, I told the very same thing to the author more than once. It's clearly his choice to do things not allowed in nginx. -- Maxim Dounin http://mdounin.ru/ From vl at nginx.com Mon Nov 30 14:39:13 2020 From: vl at nginx.com (Vladimir Homutov) Date: Mon, 30 Nov 2020 17:39:13 +0300 Subject: [PATCH 8 of 8] new io_uring event module In-Reply-To: <d7420a5777b63c8a8cfb.1606207416@ssyserver> References: <1cb2b354e262e10a8e86.1606207409@ssyserver> <d7420a5777b63c8a8cfb.1606207416@ssyserver> Message-ID: <X8UEEcem0fobRLq7@vl.krasnogorsk.ru> First, thank you for sharing the patchset! We are always looking at new features that appear in kernels and may be useful in nginx. There are a lof of shiny features, but it is a long way for them to mature and be adopted in nginx. Currently we are not considering adding such functionality. The io_uring interface looks like promising candidate to support AIO nginx functionality in linux. You may want to start looking at nginx.org/r/aio directive and related functionality. The task is quite complex (at some degree due to poor interfaces available), but we hope it has an elegant solution. Note also we prefer to use system calls directly, without introducing dependencies to such things like liburing (and for sure, the method of integration definitely is not cloning its copy in nginx). You may also want to consider building your modules externally and minimizing changes to nginx core. While patching nginx often seen as a simple and quick solution, we would appretiate attempts to integrate external code using some generic approach/interface. From xeioex at nginx.com Mon Nov 30 18:13:38 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 30 Nov 2020 18:13:38 +0000 Subject: [njs] Types: added description for Buffer properties. 
Message-ID: <hg.5a6d8e16591b.1606760018.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/5a6d8e16591b branches: changeset: 1577:5a6d8e16591b user: Dmitry Volyntsev <xeioex at nginx.com> date: Fri Nov 27 13:17:53 2020 +0000 description: Types: added description for Buffer properties. diffstat: test/ts/test.ts | 20 ++++++++++++- ts/ngx_http_js_module.d.ts | 61 +++++++++++++++++++++++++++++++++++++++++++- ts/ngx_stream_js_module.d.ts | 34 ++++++++++++++++++++++-- 3 files changed, 109 insertions(+), 6 deletions(-) diffs (198 lines): diff -r a8cb5f47bfea -r 5a6d8e16591b test/ts/test.ts --- a/test/ts/test.ts Fri Nov 27 12:28:44 2020 +0000 +++ b/test/ts/test.ts Fri Nov 27 13:17:53 2020 +0000 @@ -44,8 +44,6 @@ function http_module(r: NginxHTTPRequest r.headersOut['Set-Cookie'] = ['aaa', 'bbb']; r.headersOut['Foo'] = ['aaa', 'bbb']; - r.subrequest('/uri', reply => r.return(200, reply.headersOut["Location"] ?? '')); - // r.log r.log(bs); @@ -57,7 +55,11 @@ function http_module(r: NginxHTTPRequest r.variables.a == 'a'; r.variables.cookie_a = 'b'; + // r.rawVariables + r.rawVariables.a?.equals(Buffer.from([1])); + // r.subrequest + r.subrequest('/uri', reply => r.return(200, reply.headersOut["Location"] ?? '')); r.subrequest('/p/sub1').then(reply => r.return(reply.status)); r.subrequest('/p/sub2', {method:'POST'}).then(reply => r.return(reply.status)); vod = r.subrequest('/p/sub3', reply => r.return(reply.status)); @@ -66,6 +68,20 @@ function http_module(r: NginxHTTPRequest // Warning: vod = r.subrequest('/p/sub9', {detached:true}, reply => r.return(reply.status)); r.subrequest('/p/sub6', 'a=1&b=2').then(reply => r.return(reply.status, JSON.stringify(JSON.parse(reply.responseBody ?? 
'')))); + + // r.requestText + r.requestText == 'a'; + r.requestText?.startsWith('a'); + + // r.requestBuffer + r.requestBuffer?.equals(Buffer.from([1])); + + // r.responseText + r.responseText == 'a'; + r.responseText?.startsWith('a'); + + // r.responseBuffer + r.responseBuffer?.equals(Buffer.from([1])); } function fs_module() { diff -r a8cb5f47bfea -r 5a6d8e16591b ts/ngx_http_js_module.d.ts --- a/ts/ngx_http_js_module.d.ts Fri Nov 27 12:28:44 2020 +0000 +++ b/ts/ngx_http_js_module.d.ts Fri Nov 27 13:17:53 2020 +0000 @@ -233,6 +233,13 @@ interface NginxVariables { [prop: string]: NjsStringLike | undefined; } +/** + * @since 0.5.0 + */ +type NginxRawVariables = { + [K in keyof NginxVariables]: Buffer | undefined; +}; + interface NginxSubrequestOptions { /** * Arguments string, by default an empty string is used. @@ -310,11 +317,52 @@ interface NginxHTTPRequest { * To ensure that the client request body is in memory, its size should be * limited by client_max_body_size, and a sufficient buffer size should be set * using client_body_buffer_size. The property is available only in the js_content directive. + * + * @since 0.5.0 + */ + readonly requestBuffer?: Buffer; + /** + * The same as `requestBuffer`, but returns a string. + * + * **Warning:** It may convert bytes invalid in UTF-8 encoding into the replacement character. + * + * @see requestBuffer + * @since 0.5.0 + */ + readonly requestText?: NjsByteString; + /** + * The same as `requestBuffer`, but returns a string. + * + * **Warning:** It may convert bytes invalid in UTF-8 encoding into the replacement character. + * + * @see requestBuffer + * @see requestText + * @deprecated Use `requestText` or `requestBuffer` instead. */ readonly requestBody?: NjsByteString; /** * Subrequest response body. The size of response body is limited by * the subrequest_output_buffer_size directive. + * + * @since 0.5.0 + */ + readonly responseBuffer?: Buffer; + /** + * The same as `responseBuffer`, but returns a string. 
+ * + * **Warning:** It may convert bytes invalid in UTF-8 encoding into the replacement character. + * + * @see responseBuffer + */ + readonly responseText?: NjsByteString; + /** + * The same as `responseBuffer`, but returns a string. + * + * **Warning:** It may convert bytes invalid in UTF-8 encoding into the replacement character. + * + * @see responseBuffer + * @see responseText + * @deprecated Use `responseText` or `responseBuffer` instead. */ readonly responseBody?: NjsByteString; /** @@ -357,7 +405,18 @@ interface NginxHTTPRequest { */ readonly uri: NjsByteString; /** - * nginx variables object. + * nginx variables as Buffers. + * + * @since 0.5.0 + * @see variables + */ + readonly rawVariables: NginxRawVariables; + /** + * nginx variables as strings. + * + * **Warning:** Bytes invalid in UTF-8 encoding may be converted into the replacement character. + * + * @see rawVariables */ readonly variables: NginxVariables; /** diff -r a8cb5f47bfea -r 5a6d8e16591b ts/ngx_stream_js_module.d.ts --- a/ts/ngx_stream_js_module.d.ts Fri Nov 27 12:28:44 2020 +0000 +++ b/ts/ngx_stream_js_module.d.ts Fri Nov 27 13:17:53 2020 +0000 @@ -70,6 +70,13 @@ interface NginxStreamVariables { [prop: string]: NjsByteString | undefined; } +/** + * @since 0.5.0 + */ +type NginxStreamRawVariables = { + [K in keyof NginxStreamVariables]: Buffer | undefined; +}; + interface NginxStreamCallbackFlags { /** * True if data is a last buffer. @@ -119,13 +126,23 @@ interface NginxStreamRequest { log(message: NjsStringOrBuffer): void; /** * Unregisters the callback set by on() method. + * @param event Event type to unregister. */ - off(event: "upload" | "download"): void; + off(event: "upload" | "download" | "upstream" | "downstream"): void; /** * Registers a callback for the specified event. + * @param event Event type to register. The callback data value type + * depends on the event type. For "upload" | "download" the data type is string. + * For "upstream" | "downstream" the data type is Buffer. 
+ * String and buffer events cannot be mixed for a single session. + * + * **Warning:** For string data type bytes invalid in UTF-8 encoding may be + * converted into the replacement character. */ on(event: "upload" | "download", - callback:(data:NjsByteString, flags: NginxStreamCallbackFlags) => void): void; + callback: (data: NjsByteString, flags: NginxStreamCallbackFlags) => void): void; + on(event: "upstream" | "downstream", + callback: (data: Buffer, flags: NginxStreamCallbackFlags) => void): void; /** * Client address. */ @@ -138,7 +155,18 @@ interface NginxStreamRequest { */ send(data: NjsStringOrBuffer, options?: NginxStreamSendOptions): void; /** - * nginx variables object. + * nginx variables as Buffers. + * + * @since 0.5.0 + * @see variables + */ + readonly rawVariables: NginxStreamRawVariables; + /** + * nginx variables as strings. + * + * **Warning:** Bytes invalid in UTF-8 encoding may be converted into the replacement character. + * + * @see rawVariables */ readonly variables: NginxStreamVariables; /** From xeioex at nginx.com Mon Nov 30 18:13:40 2020 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 30 Nov 2020 18:13:40 +0000 Subject: [njs] Types: added definitions for timer methods. Message-ID: <hg.5e29ce36383e.1606760020.5965299922797593991@dev.nginx> details: https://hg.nginx.org/njs/rev/5e29ce36383e branches: changeset: 1578:5e29ce36383e user: Jakub Jirutka <jakub at jirutka.cz> date: Wed Nov 25 00:12:04 2020 +0100 description: Types: added definitions for timer methods. 
diffstat: test/ts/test.ts | 14 ++++++++++++++ ts/njs_core.d.ts | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 0 deletions(-) diffs (73 lines): diff -r 5a6d8e16591b -r 5e29ce36383e test/ts/test.ts --- a/test/ts/test.ts Fri Nov 27 13:17:53 2020 +0000 +++ b/test/ts/test.ts Wed Nov 25 00:12:04 2020 +0100 @@ -122,6 +122,20 @@ function buffer(b: Buffer) { b.equals(b); } +function timers() { + var handle:TimerHandle; + + handle = setTimeout(() => {}); + handle = setTimeout(() => {}, 100); + handle = setTimeout((a:string, b:number) => {}, 100, 'foo', 42); + + handle = setImmediate(() => {}); + handle = setImmediate((a:string, b:number) => {}, 'foo', 42); + + clearTimeout(handle); + // Warning: clearTimeout(123); +} + function njs_object() { njs.dump('asdf'); njs.version != process.argv[1]; diff -r 5a6d8e16591b -r 5e29ce36383e ts/njs_core.d.ts --- a/ts/njs_core.d.ts Fri Nov 27 13:17:53 2020 +0000 +++ b/ts/njs_core.d.ts Wed Nov 25 00:12:04 2020 +0100 @@ -607,3 +607,45 @@ interface NjsProcess { } declare const process: NjsProcess; + +/** + * A value returned by `setTimeout()` and `setImmediate()` functions. It's an positive integer now, + * but this may be changed in future, so it should be treated as an opaque value. + */ +type TimerHandle = number & { readonly '': unique symbol }; + +/** + * Schedules the "immediate" execution of the given function after I/O events' callbacks. + * + * @param callback The function to call. + * @param args Optional arguments to pass to the `callback` function. + * @returns A value which identifies the timer created by the call. + * + * @throws {TypeError} if `callback` is not a function. + * @throws {InternalError} if timers are not supported by host environment. + */ +declare function setImmediate<TArgs extends any[]>(callback: (...args: TArgs) => void, ...args: TArgs): TimerHandle; + +/** + * Schedules a timer which executes the given function after the specified delay. 
+ * + * @param callback The function to call when the timer elapses. + * @param delay The number of milliseconds to wait before calling the `callback`. Defaults to `0`, + * meaning execute "immediately", or more accurately, the next event cycle. + * @param args Optional arguments to pass to the `callback` function. + * @returns A value which identifies the timer created by the call; it can be passed to + * `clearTimeout()` to cancel the timeout. + * + * @throws {TypeError} if `callback` is not a function. + * @throws {InternalError} if timers are not supported by host environment. + */ +declare function setTimeout<TArgs extends any[]>(callback: (...args: TArgs) => void, delay?: number, ...args: TArgs): TimerHandle; + +/** + * Cancels a timer previously established by calling `setTimeout()`. + * + * Note: Passing an invalid handle silently does nothing; no exception is thrown. + * + * @param handle A value returned by `setTimeout()`. + */ +declare function clearTimeout(handle?: TimerHandle): void;