From alexander.borisov at nginx.com Mon Nov 1 15:34:02 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Mon, 01 Nov 2021 15:34:02 +0000 Subject: [njs] Fixed heap-use-after-free in await frame. Message-ID: details: https://hg.nginx.org/njs/rev/360384498060 branches: changeset: 1732:360384498060 user: Alexander Borisov date: Mon Nov 01 18:32:48 2021 +0300 description: Fixed heap-use-after-free in await frame. The bug was introduced in 92d10cd761e2 (0.7.0). diffstat: src/njs_async.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diffs (12 lines): diff -r d2e23f936214 -r 360384498060 src/njs_async.c --- a/src/njs_async.c Fri Oct 29 13:57:26 2021 +0000 +++ b/src/njs_async.c Mon Nov 01 18:32:48 2021 +0300 @@ -72,6 +72,8 @@ njs_await_fulfilled(njs_vm_t *vm, njs_va } async = ctx->await; + async->previous = vm->top_frame; + function = async->function; cur_local = vm->levels[NJS_LEVEL_LOCAL]; From xeioex at nginx.com Mon Nov 1 15:56:11 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 01 Nov 2021 15:56:11 +0000 Subject: [njs] Tests: allowing to define own test function per test suite. Message-ID: details: https://hg.nginx.org/njs/rev/8dd6868a8748 branches: changeset: 1733:8dd6868a8748 user: Dmitry Volyntsev date: Mon Nov 01 15:53:30 2021 +0000 description: Tests: allowing to define own test function per test suite. 
diffstat: test/webcrypto/aes.js | 8 +++++--- test/webcrypto/aes_decoding.js | 8 +++++--- test/webcrypto/derive.js | 8 +++++--- test/webcrypto/digest.js | 8 +++++--- test/webcrypto/rsa.js | 8 +++++--- test/webcrypto/rsa_decoding.js | 8 +++++--- test/webcrypto/sign.js | 14 +++++++++++--- test/webcrypto/verify.js | 14 +++++++++++--- 8 files changed, 52 insertions(+), 24 deletions(-) diffs (342 lines): diff -r 360384498060 -r 8dd6868a8748 test/webcrypto/aes.js --- a/test/webcrypto/aes.js Mon Nov 01 18:32:48 2021 +0300 +++ b/test/webcrypto/aes.js Mon Nov 01 15:53:30 2021 +0000 @@ -2,7 +2,7 @@ if (typeof crypto == 'undefined') { crypto = require('crypto').webcrypto; } -async function run(tlist, T, prepare_args) { +async function run(tlist) { function validate(t, r, i) { if (r.status == "fulfilled" && !t[i].exception) { return r.value === "SUCCESS"; @@ -22,7 +22,7 @@ async function run(tlist, T, prepare_arg for (let k = 0; k < tlist.length; k++) { let ts = tlist[k]; - let results = await Promise.allSettled(ts.tests.map(t => T(prepare_args(t, ts.opts)))); + let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); let r = results.map((r, i) => validate(ts.tests, r, i)); console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); @@ -77,6 +77,8 @@ async function test(params) { let aes_tsuite = { name: "AES encoding/decoding", + T: test, + prepare_args: p, opts: { iv: "44556677445566774455667744556677", key: "00112233001122330011223300112233", @@ -120,4 +122,4 @@ let aes_tsuite = { { name: "AES-CBC", data: "aabbccdd".repeat(5), iv: "ffffffffffffffffffffffffffffffff" }, ]}; -run([aes_tsuite], test, p); +run([aes_tsuite]); diff -r 360384498060 -r 8dd6868a8748 test/webcrypto/aes_decoding.js --- a/test/webcrypto/aes_decoding.js Mon Nov 01 18:32:48 2021 +0300 +++ b/test/webcrypto/aes_decoding.js Mon Nov 01 15:53:30 2021 +0000 @@ -4,7 +4,7 @@ if (typeof crypto == 'undefined') { crypto = require('crypto').webcrypto; } -async function run(tlist, T, prepare_args) { +async function run(tlist) { function validate(t, r, i) { if (r.status == "fulfilled" && !t[i].exception) { return r.value === "SUCCESS"; @@ -24,7 +24,7 @@ async function run(tlist, T, prepare_arg for (let k = 0; k < tlist.length; k++) { let ts = tlist[k]; - let results = await Promise.allSettled(ts.tests.map(t => T(prepare_args(t, ts.opts)))); + let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); let r = results.map((r, i) => validate(ts.tests, r, i)); console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); @@ -79,6 +79,8 @@ async function test(params) { let aes_tsuite = { name: "AES decoding", + T: test, + prepare_args: p, opts: { key: "00112233001122330011223300112233", iv: "44556677445566774455667744556677", @@ -113,4 +115,4 @@ let aes_tsuite = { expected: "AES-CBC-256-SECRET-TEXT" }, ]}; -run([aes_tsuite], test, p); +run([aes_tsuite]); diff -r 360384498060 -r 8dd6868a8748 test/webcrypto/derive.js --- a/test/webcrypto/derive.js Mon Nov 01 18:32:48 2021 +0300 +++ b/test/webcrypto/derive.js Mon Nov 01 15:53:30 2021 +0000 @@ -2,7 +2,7 @@ if (typeof crypto == 'undefined') { crypto = require('crypto').webcrypto; } -async function run(tlist, T, prepare_args) { +async function run(tlist) { function validate(t, r, i) { if (r.status == "fulfilled" && !t[i].exception) { return r.value === "SUCCESS"; @@ -26,7 +26,7 @@ async function run(tlist, T, prepare_arg for (let k = 0; k < tlist.length; k++) { let ts = tlist[k]; - let results = await Promise.allSettled(ts.tests.map(t => T(prepare_args(t, ts.opts)))); + let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); let r = results.map((r, i) => validate(ts.tests, r, i)); console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); @@ -96,6 +96,8 @@ async function test(params) { let derive_tsuite = { name: "derive", + T: test, + prepare_args: p, opts: { text: "secReT", pass: "passW0rd", @@ -146,4 +148,4 @@ let derive_tsuite = { expected: "e089c7491711306c69e077aa19fae6bfd2d4a6d240b0d37317d50472d7291a3e" }, ]}; -run([derive_tsuite], test, p); +run([derive_tsuite]); diff -r 360384498060 -r 8dd6868a8748 test/webcrypto/digest.js --- a/test/webcrypto/digest.js Mon Nov 01 18:32:48 2021 +0300 +++ b/test/webcrypto/digest.js Mon Nov 01 15:53:30 2021 +0000 @@ -2,7 +2,7 @@ if (typeof crypto == 'undefined') { crypto = require('crypto').webcrypto; } -async function run(tlist, T, prepare_args) { +async function run(tlist) { function validate(t, r, i) { if (r.status == "fulfilled" && !t[i].exception) { return r.value === "SUCCESS"; @@ -22,7 +22,7 @@ async function run(tlist, T, prepare_arg for (let k = 0; k < tlist.length; k++) { let ts = tlist[k]; - let results = await Promise.allSettled(ts.tests.map(t => T(prepare_args(t, ts.opts)))); + let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); let r = results.map((r, i) => validate(ts.tests, r, i)); console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); @@ -54,6 +54,8 @@ async function test(params) { let digest_tsuite = { name: "SHA digest", + T: test, + prepare_args: p, opts: { }, tests: [ @@ -85,4 +87,4 @@ let digest_tsuite = { expected: "cdea58919606ea9ae078f7595b192b84446f2189" }, ]}; -run([digest_tsuite], test, p); +run([digest_tsuite]); diff -r 360384498060 -r 8dd6868a8748 test/webcrypto/rsa.js --- a/test/webcrypto/rsa.js Mon Nov 01 18:32:48 2021 +0300 +++ b/test/webcrypto/rsa.js Mon Nov 01 15:53:30 2021 +0000 @@ -4,7 +4,7 @@ if (typeof crypto == 'undefined') { crypto = require('crypto').webcrypto; } -async function run(tlist, T, prepare_args) { +async function run(tlist) { function validate(t, r, i) { if (r.status == "fulfilled" && !t[i].exception) { return r.value === "SUCCESS"; @@ -24,7 +24,7 @@ async function run(tlist, T, prepare_arg for (let k = 0; k < tlist.length; k++) { let ts = tlist[k]; - let results = await Promise.allSettled(ts.tests.map(t => T(prepare_args(t, ts.opts)))); + let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); let r = results.map((r, i) => validate(ts.tests, r, i)); console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); @@ -79,6 +79,8 @@ async function test(params) { let rsa_tsuite = { name: "RSA-OAEP encoding/decoding", + T: test, + prepare_args: p, opts: { spki: "rsa.spki", spki_hash: "SHA-256", @@ -103,4 +105,4 @@ let rsa_tsuite = { { data: "aabbcc", spki: "rsa2.spki", exception: "Error: EVP_PKEY_decrypt() failed" }, ]}; -run([rsa_tsuite], test, p); +run([rsa_tsuite]); diff -r 360384498060 -r 8dd6868a8748 test/webcrypto/rsa_decoding.js --- a/test/webcrypto/rsa_decoding.js Mon Nov 01 18:32:48 2021 +0300 +++ b/test/webcrypto/rsa_decoding.js Mon Nov 01 15:53:30 2021 +0000 @@ -4,7 +4,7 @@ if (typeof crypto == 'undefined') { crypto = require('crypto').webcrypto; } -async function run(tlist, T, prepare_args) { +async function run(tlist) { function validate(t, r, i) { if (r.status == "fulfilled" && !t[i].exception) { return r.value === "SUCCESS"; @@ -24,7 +24,7 @@ async function run(tlist, T, prepare_arg for (let k = 0; k < tlist.length; k++) { let ts = tlist[k]; - let results = await Promise.allSettled(ts.tests.map(t => T(prepare_args(t, ts.opts)))); + let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); let r = results.map((r, i) => validate(ts.tests, r, i)); console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); @@ -70,6 +70,8 @@ async function test(params) { let rsa_tsuite = { name: "RSA-OAEP decoding", + T: test, + prepare_args: (v) => v, opts: { }, tests: [ @@ -78,4 +80,4 @@ let rsa_tsuite = { { pem: "rsa.pkcs8.broken", src: "text.base64.rsa-oaep.enc", exception: "Error: d2i_PKCS8_PRIV_KEY_INFO_bio() failed" }, ]}; -run([rsa_tsuite], test, (v) => v); +run([rsa_tsuite]); diff -r 360384498060 -r 8dd6868a8748 test/webcrypto/sign.js --- a/test/webcrypto/sign.js Mon Nov 01 18:32:48 2021 +0300 +++ b/test/webcrypto/sign.js Mon Nov 01 15:53:30 2021 +0000 @@ -3,7 +3,7 @@ if (typeof crypto == 'undefined') { crypto = require('crypto').webcrypto; } -async function run(tlist, T, prepare_args) { +async function run(tlist) { function validate(t, r, i) { if (r.status == "fulfilled" && !t[i].exception) { return r.value === "SUCCESS"; @@ -23,7 +23,7 @@ async function run(tlist, T, prepare_arg for (let k = 0; k < tlist.length; k++) { let ts = tlist[k]; - let results = await Promise.allSettled(ts.tests.map(t => T(prepare_args(t, ts.opts)))); + let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); let r = results.map((r, i) => validate(ts.tests, r, i)); console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); @@ -160,6 +160,8 @@ async function test(params) { let hmac_tsuite = { name: "HMAC sign", + T: test, + prepare_args: p, opts: { text: "TExt-T0-SiGN", sign_key: { key: "secretKEY", fmt: "raw" }, @@ -193,6 +195,8 @@ let hmac_tsuite = { let rsassa_pkcs1_v1_5_tsuite = { name: "RSASSA-PKCS1-v1_5 sign", + T: test, + prepare_args: p, opts: { text: "TExt-T0-SiGN", sign_key: { key: "rsa.pkcs8", fmt: "pkcs8" }, @@ -221,6 +225,8 @@ let rsassa_pkcs1_v1_5_tsuite = { let rsa_pss_tsuite = { name: "RSA-PSS sign", + T: test, + prepare_args: p, opts: { text: "TExt-T0-SiGN", sign_key: { key: "rsa.pkcs8", fmt: "pkcs8" }, @@ -250,6 +256,8 @@ let rsa_pss_tsuite = { let ecdsa_tsuite = { name: "ECDSA sign", + T: test, + prepare_args: p, opts: { text: "TExt-T0-SiGN", sign_key: { key: "ec.pkcs8", fmt: "pkcs8" }, @@ -279,4 +287,4 @@ run([ rsassa_pkcs1_v1_5_tsuite, rsa_pss_tsuite, ecdsa_tsuite -], test, p); +]); diff -r 360384498060 -r 8dd6868a8748 test/webcrypto/verify.js --- a/test/webcrypto/verify.js Mon Nov 01 18:32:48 2021 +0300 +++ b/test/webcrypto/verify.js Mon Nov 01 15:53:30 2021 +0000 @@ -4,7 +4,7 @@ if (typeof crypto == 'undefined') { crypto = require('crypto').webcrypto; } -async function run(tlist, T, prepare_args) { +async function run(tlist) { function validate(t, r, i) { if (r.status == "fulfilled" && !t[i].exception) { return r.value === "SUCCESS"; @@ -24,7 +24,7 @@ async function run(tlist, T, prepare_arg for (let k = 0; k < tlist.length; k++) { let ts = tlist[k]; - let results = await Promise.allSettled(ts.tests.map(t => T(prepare_args(t, ts.opts)))); + let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); let r = results.map((r, i) => validate(ts.tests, r, i)); console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); @@ -108,6 +108,8 @@ async function test(params) { let hmac_tsuite = { name: "HMAC verify", + T: test, + prepare_args: p, opts: { text: "SigneD-TExt", key: { fmt: "raw", file: "aabbcc" }, @@ -130,6 +132,8 @@ let hmac_tsuite = { let rsassa_pkcs1_v1_5_tsuite = { name: "RSASSA-PKCS1-v1_5 verify", + T: test, + prepare_args: p, opts: { text: "SigneD-TExt", key: { fmt: "spki", file: "rsa.spki" }, @@ -152,6 +156,8 @@ let rsassa_pkcs1_v1_5_tsuite = { let rsa_pss_tsuite = { name: "RSA-PSS verify", + T: test, + prepare_args: p, opts: { text: "SigneD-TExt", key: { fmt: "spki", file: "rsa.spki" }, @@ -179,6 +185,8 @@ let rsa_pss_tsuite = { let ecdsa_tsuite = { name: "ECDSA verify", + T: test, + prepare_args: p, opts: { text: "SigneD-TExt", key: { fmt: "spki", file: "ec.spki" }, @@ -204,4 +212,4 @@ run([ rsassa_pkcs1_v1_5_tsuite, rsa_pss_tsuite, ecdsa_tsuite, -], test, p); +]); From xeioex at nginx.com Tue Nov 2 12:40:52 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 02 Nov 2021 12:40:52 +0000 Subject: [njs] Tests: refactored "fs" module tests. Message-ID: details: https://hg.nginx.org/njs/rev/685adce92af1 branches: changeset: 1734:685adce92af1 user: Dmitry Volyntsev date: Tue Nov 02 12:35:34 2021 +0000 description: Tests: refactored "fs" module tests. 
diffstat: test/fs/methods.js | 371 +++++++++++++++++++++++++++++++++++++++++++ test/js/fs_appendFile.js | 61 ------- test/js/fs_appendFileSync.js | 59 ------ test/js/fs_readFile.js | 40 ---- test/js/fs_readFileSync.js | 43 ---- test/js/fs_writeFile.js | 57 ------ test/js/fs_writeFileSync.js | 54 ------ test/njs_expect_test.exp | 200 +--------------------- 8 files changed, 387 insertions(+), 498 deletions(-) diffs (937 lines): diff -r 8dd6868a8748 -r 685adce92af1 test/fs/methods.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/fs/methods.js Tue Nov 02 12:35:34 2021 +0000 @@ -0,0 +1,371 @@ +var fs = require('fs'); + +async function run(tlist) { + function validate(t, r, i) { + if (r.status == "fulfilled") { + return r.value === "SUCCESS"; + } + + if (r.status == "rejected" && t[i].exception) { + if (process.argv[2] === '--match-exception-text') { + /* is not compatible with node.js format */ + return r.reason.toString().startsWith(t[i].exception); + } + + return true; + } + + if (r.status == "rejected" && t[i].optional) { + return r.reason.toString().startsWith("Error: No such file or directory"); + } + + return false; + } + + for (let k = 0; k < tlist.length; k++) { + let ts = tlist[k]; + let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); + let r = results.map((r, i) => validate(ts.tests, r, i)); + + console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); + + r.forEach((v, i) => { + if (!v) { + console.log(`FAILED ${i}: ${JSON.stringify(ts.tests[i])}\n with reason: ${results[i].reason}`); + } + }) + } +} + +function p(args, default_opts) { + let params = Object.assign({}, default_opts, args); + + let fname = params.args[0]; + + if (fname[0] == '@') { + let gen = `build/test/fs_test_${Math.round(Math.random() * 1000000)}`; + params.args = params.args.map(v => v); + params.args[0] = gen + fname.slice(1); + } + + return params; +} + +function promisify(f) { + return function (...args) { + return new Promise((resolve, reject) => { + function callback(err, result) { + if (err) { + return reject(err); + } else { + resolve(result); + } + } + + args.push(callback); + f.apply(this, args); + }); + }; +} + +async function method(name, params) { + let data = null; + + switch (params.type) { + case "sync": + try { + data = fs[name + "Sync"].apply(null, params.args); + + } catch (e) { + if (!params.stringify) { + throw e; + } + + data = Buffer.from(JSON.stringify(e)); + } + + break; + + case "callback": + data = await promisify(fs[name]).apply(null, params.args) + .catch(e => { + if (!params.stringify) { + throw e; + } + + return Buffer.from(JSON.stringify(e)); + }); + + break; + + case "promise": + data = await fs.promises[name].apply(null, params.args) + .catch(e => { + if (!params.stringify) { + throw e; + } + + return Buffer.from(JSON.stringify(e)); + }); + + break; + } + + return data; +} + +async function read_test(params) { + let data = await method("readFile", params); + + if (params.slice) { + data = data.slice.apply(data, params.slice); + } + + let success = true; + if (data instanceof Buffer) { + if (data.compare(params.expected) != 0) { + success = false; + } + + } else if (data != params.expected) { + success = false; + } + + if (!success) { + throw Error(`readFile unexpected data`); + } + + return 'SUCCESS'; +} + +let read_tests = [ + { args: ["test/fs/utf8"], expected: Buffer.from("??Z?") 
}, + { args: [Buffer.from("@test/fs/utf8").slice(1)], expected: Buffer.from("??Z?") }, + { args: ["test/fs/utf8", "utf8"], expected: "??Z?" }, + { args: ["test/fs/utf8", {encoding: "utf8", flags:"r+"}], expected: "??Z?" }, + { args: ["test/fs/nonexistent"], stringify: true, + expected: Buffer.from('{"errno":2,"code":"ENOENT","path":"test/fs/nonexistent","syscall":"open"}'), + exception: "Error: No such file or directory" }, + { args: ["test/fs/non_utf8", "utf8"], expected: "??" }, + { args: ["test/fs/non_utf8", {encoding: "hex"}], expected: "8080" }, + { args: ["test/fs/non_utf8", "base64"], expected: "gIA=" }, + { args: ["test/fs/ascii", "utf8"], expected: "x".repeat(600) }, + { args: ["test/fs/ascii", { encoding:"utf8", flags: "r+"}], expected: "x".repeat(600) }, + + { args: [Buffer.from([0x80, 0x80])], exception: "Error: No such file or directory" }, + { args: ['x'.repeat(8192)], exception: "TypeError: \"path\" is too long" }, + + { args: ["/proc/version"], slice:[0,5], expected: Buffer.from("Linux"), optional: true }, + { args: ["/proc/cpuinfo"], slice:[0,9], expected: Buffer.from("processor"), optional: true }, +]; + +let readFile_tsuite = { + name: "fs readFile", + T: read_test, + prepare_args: p, + opts: { type: "callback" }, + tests: read_tests, +}; + +let readFileSync_tsuite = { + name: "fs readFileSync", + T: read_test, + prepare_args: p, + opts: { type: "sync" }, + tests: read_tests, +}; + +let readFileP_tsuite = { + name: "fsp readFile", + T: read_test, + prepare_args: p, + opts: { type: "promise" }, + tests: read_tests, +}; + +async function write_test(params) { + let fname = params.args[0]; + + try { fs.unlinkSync(fname); } catch (e) {} + + let data = await method("writeFile", params); + + if (!data) { + data = fs.readFileSync(fname); + } + + try { fs.unlinkSync(fname); } catch (e) {} + + if (data.compare(params.expected) != 0) { + throw Error(`writeFile unexpected data`); + } + + return 'SUCCESS'; +} + +let write_tests = [ + { args: ["@", 
Buffer.from(Buffer.alloc(4).fill(65).buffer, 1)], + expected: Buffer.from("AAA") }, + { args: ["@", Buffer.from("XYZ"), "utf8"], expected: Buffer.from("XYZ") }, + { args: ["@", Buffer.from("XYZ"), {encoding: "utf8", mode: 0o666}], + expected: Buffer.from("XYZ") }, + { args: ["@", new DataView(Buffer.alloc(3).fill(66).buffer)], + expected: Buffer.from("BBB") }, + { args: ["@", new Uint8Array(Buffer.from("ABCD"))], + expected: Buffer.from("ABCD")}, + { args: ["@", "XYZ"], expected: Buffer.from("XYZ")}, + { args: ["@", "78797a", "hex"], expected: Buffer.from("xyz") }, + { args: ["@", "eHl6", "base64"], expected: Buffer.from("xyz") }, + { args: ["@", "eHl6", {encoding: "base64url"}], expected: Buffer.from("xyz"), + optional: true }, + { args: ["@", Symbol("XYZ")], exception: "TypeError: Cannot convert a Symbol value to a string"}, + { args: ["/invalid_path", "XYZ"], stringify: true, + expected: Buffer.from('{"errno":13,"code":"EACCES","path":"/invalid_path","syscall":"open"}'), + exception: "Error: No such file or directory" }, +]; + +let writeFile_tsuite = { + name: "fs writeFile", + T: write_test, + prepare_args: p, + opts: { type: "callback" }, + tests: write_tests, +}; + +let writeFileSync_tsuite = { + name: "fs writeFileSync", + T: write_test, + prepare_args: p, + opts: { type: "sync" }, + tests: write_tests, +}; + +let writeFileP_tsuite = { + name: "fsp writeFile", + T: write_test, + prepare_args: p, + opts: { type: "promise" }, + tests: write_tests, +}; + +async function append_test(params) { + let fname = params.args[0]; + + try { fs.unlinkSync(fname); } catch (e) {} + + let data = await method("appendFile", params); + data = await method("appendFile", params); + + if (!data) { + data = fs.readFileSync(fname); + } + + try { fs.unlinkSync(fname); } catch (e) {} + + if (data.compare(params.expected) != 0) { + throw Error(`appendFile unexpected data`); + } + + return 'SUCCESS'; +} + +let append_tests = [ + { args: ["@", Buffer.from(Buffer.alloc(4).fill(65).buffer, 
1)], + expected: Buffer.from("AAAAAA") }, + { args: ["@", Buffer.from("XYZ"), "utf8"], expected: Buffer.from("XYZXYZ") }, + { args: ["@", Buffer.from("XYZ"), {encoding: "utf8", mode: 0o666}], + expected: Buffer.from("XYZXYZ") }, + { args: ["@", new DataView(Buffer.alloc(3).fill(66).buffer)], + expected: Buffer.from("BBBBBB") }, + { args: ["@", new Uint8Array(Buffer.from("ABCD"))], + expected: Buffer.from("ABCDABCD")}, + { args: ["@", "XYZ"], expected: Buffer.from("XYZXYZ")}, + { args: ["@", "78797a", "hex"], expected: Buffer.from("xyzxyz") }, + { args: ["@", "eHl6", "base64"], expected: Buffer.from("xyzxyz") }, + { args: ["@", "eHl6", {encoding: "base64url"}], expected: Buffer.from("xyzxyz"), + optional: true }, + { args: ["@", Symbol("XYZ")], exception: "TypeError: Cannot convert a Symbol value to a string"}, + { args: ["/invalid_path", "XYZ"], stringify: true, + expected: Buffer.from('{"errno":13,"code":"EACCES","path":"/invalid_path","syscall":"open"}'), + exception: "Error: No such file or directory" }, +]; + +let appendFile_tsuite = { + name: "fs appendFile", + T: append_test, + prepare_args: p, + opts: { type: "callback" }, + tests: append_tests, +}; + +let appendFileSync_tsuite = { + name: "fs appendFileSync", + T: append_test, + prepare_args: p, + opts: { type: "sync" }, + tests: append_tests, +}; + +let appendFileP_tsuite = { + name: "fsp appendFile", + T: append_test, + prepare_args: p, + opts: { type: "promise" }, + tests: append_tests, +}; + +async function realpath_test(params) { + let data = await method("realpath", params); + + if (!params.check(data)) { + throw Error(`realpath failed check`); + } + + return 'SUCCESS'; +} + +let realpath_tests = [ + { args: ["./build/test/.."], + check: (data) => data.endsWith("build") }, + { args: ["./build/test/", {encoding:'buffer'}], + check: (data) => data instanceof Buffer }, +]; + +let realpath_tsuite = { + name: "fs realpath", + T: realpath_test, + prepare_args: p, + opts: { type: "callback" }, + tests: 
realpath_tests, +}; + +let realpathSync_tsuite = { + name: "fs realpathSync", + T: realpath_test, + prepare_args: p, + opts: { type: "sync" }, + tests: realpath_tests, +}; + +let realpathP_tsuite = { + name: "fsp realpath", + T: realpath_test, + prepare_args: p, + opts: { type: "promise" }, + tests: realpath_tests, +}; + +run([ + readFile_tsuite, + readFileSync_tsuite, + readFileP_tsuite, + writeFile_tsuite, + writeFileSync_tsuite, + writeFileP_tsuite, + appendFile_tsuite, + appendFileSync_tsuite, + appendFileP_tsuite, + realpath_tsuite, + realpathSync_tsuite, + realpathP_tsuite, +]); diff -r 8dd6868a8748 -r 685adce92af1 test/js/fs_appendFile.js --- a/test/js/fs_appendFile.js Mon Nov 01 15:53:30 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,61 +0,0 @@ -var fs = require('fs'); -var fname = './build/test/fs_appendFile'; - -var argv = process.argv.slice(2); - -var data = (() => { - var value = argv[0]; - var type = argv[1]; - var offset = argv[2] ? parseInt(argv[2]) : 0; - - switch (type) { - case 'Buffer': - return Buffer.from(Buffer.from(value).buffer, offset); - case 'DataView': - return new DataView(Buffer.from(value).buffer, offset); - case 'Object': - return {toString(){return value}}; - case 'String': - return String(value); - case 'Symbol': - return Symbol(value); - case 'Uint8Array': - return new Uint8Array(Buffer.from(value).buffer, offset); - default: - throw new Error(`Unknown data type:${type}`); - } -})(); - -var options = (() => { - var encoding = argv[2]; - var mode = argv[3] ? 
parseInt(argv[3].slice(2), 8) : 0; - - if (encoding && mode) { - return {encoding, mode}; - - } else if (encoding) { - return encoding; - } - - return undefined; -})(); - -try { fs.unlinkSync(fname); } catch (e) {} - -function done(e) { - if (e) {throw e}; - var data = fs.readFileSync(fname); - console.log(String(data)); -} - -function append(cb) { - if (options) { - var path = Buffer.from(`@${fname}`).slice(1); - fs.appendFile(path, data, options, cb); - - } else { - fs.appendFile(fname, data, cb); - } -} - -append((e) => {if (e) {throw e}; append(done);}) diff -r 8dd6868a8748 -r 685adce92af1 test/js/fs_appendFileSync.js --- a/test/js/fs_appendFileSync.js Mon Nov 01 15:53:30 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,59 +0,0 @@ -var fs = require('fs'); -var fname = './build/test/fs_appendFileSync'; - -var argv = process.argv.slice(2); - -var data = (() => { - var value = argv[0]; - var type = argv[1]; - var offset = argv[2] ? parseInt(argv[2]) : 0; - - switch (type) { - case 'Buffer': - return Buffer.from(Buffer.from(value).buffer, offset); - case 'DataView': - return new DataView(Buffer.from(value).buffer, offset); - case 'Object': - return {toString(){return value}}; - case 'String': - return String(value); - case 'Symbol': - return Symbol(value); - case 'Uint8Array': - return new Uint8Array(Buffer.from(value).buffer, offset); - default: - throw new Error(`Unknown data type:${type}`); - } -})(); - -var options = (() => { - var encoding = argv[2]; - var mode = argv[3] ? 
parseInt(argv[3].slice(2), 8) : 0; - - if (encoding && mode) { - return {encoding, mode}; - - } else if (encoding) { - return encoding; - } - - return undefined; -})(); - -function append() { - if (options) { - var path = Buffer.from(`@${fname}`).slice(1); - fs.appendFileSync(path, data, options); - - } else { - fs.appendFileSync(fname, data); - } -} - -try { fs.unlinkSync(fname); } catch (e) {} - -append(); -append(); - -var ret = fs.readFileSync(fname); -console.log(String(ret)); diff -r 8dd6868a8748 -r 685adce92af1 test/js/fs_readFile.js --- a/test/js/fs_readFile.js Mon Nov 01 15:53:30 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,40 +0,0 @@ -var fs = require('fs'); - -var argv = process.argv.slice(2); -var fname = argv[0]; - -var options = (() => { - var encoding = argv[1]; - var flags = argv[2]; - - if (encoding && flags) { - return {encoding, flags}; - - } else if (encoding) { - return encoding; - } - - return undefined; -})(); - -function type(v) { - if (v instanceof Buffer) { - return 'Buffer'; - } - - return typeof v; -} - -function done(e, data) { - if (e) {console.log(JSON.stringify(e))}; - console.log(String(data), type(data), data.length); -} - -if (options) { - var path = Buffer.from(`@${fname}`).slice(1); - fs.readFile(path, options, done); - -} else { - fs.readFile(fname, done); -} - diff -r 8dd6868a8748 -r 685adce92af1 test/js/fs_readFileSync.js --- a/test/js/fs_readFileSync.js Mon Nov 01 15:53:30 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,43 +0,0 @@ -var fs = require('fs'); - -var argv = process.argv.slice(2); -var fname = argv[0]; - -var options = (() => { - var encoding = argv[1]; - var flags = argv[2]; - - if (encoding && flags) { - return {encoding, flags}; - - } else if (encoding) { - return encoding; - } - - return undefined; -})(); - -function type(v) { - if (v instanceof Buffer) { - return 'Buffer'; - } - - return typeof v; -} - -var data; - -try { - if (options) { - var path = 
Buffer.from(`@${fname}`).slice(1); - data = fs.readFileSync(path, options); - - } else { - data = fs.readFileSync(fname); - } - -} catch (e) { - console.log(JSON.stringify(e)); -} - -console.log(String(data), type(data), data.length); diff -r 8dd6868a8748 -r 685adce92af1 test/js/fs_writeFile.js --- a/test/js/fs_writeFile.js Mon Nov 01 15:53:30 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,57 +0,0 @@ -var fs = require('fs'); -var fname = './build/test/fs_writeFile'; - -var argv = process.argv.slice(2); - -var data = (() => { - var value = argv[0]; - var type = argv[1]; - var offset = argv[2] ? parseInt(argv[2]) : 0; - - switch (type) { - case 'Buffer': - return Buffer.from(Buffer.from(value).buffer, offset); - case 'DataView': - return new DataView(Buffer.from(value).buffer, offset); - case 'Object': - return {toString(){return value}}; - case 'String': - return String(value); - case 'Symbol': - return Symbol(value); - case 'Uint8Array': - return new Uint8Array(Buffer.from(value).buffer, offset); - default: - throw new Error(`Unknown data type:${type}`); - } -})(); - -var options = (() => { - var encoding = argv[2]; - var mode = argv[3] ? 
parseInt(argv[3].slice(2), 8) : 0; - - if (encoding && mode) { - return {encoding, mode}; - - } else if (encoding) { - return encoding; - } - - return undefined; -})(); - -try { fs.unlinkSync(fname); } catch (e) {} - -function cb(e) { - if (e) {throw e}; - var data = fs.readFileSync(fname); - console.log(String(data)); -} - -if (options) { - var path = Buffer.from(`@${fname}`).slice(1); - fs.writeFile(path, data, options, cb); - -} else { - fs.writeFile(fname, data, cb); -} diff -r 8dd6868a8748 -r 685adce92af1 test/js/fs_writeFileSync.js --- a/test/js/fs_writeFileSync.js Mon Nov 01 15:53:30 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,54 +0,0 @@ -var fs = require('fs'); -var fname = './build/test/fs_writeFileSync'; - -var argv = process.argv.slice(2); - -var data = (() => { - var value = argv[0]; - var type = argv[1]; - var offset = argv[2] ? parseInt(argv[2]) : 0; - - switch (type) { - case 'Buffer': - return Buffer.from(Buffer.from(value).buffer, offset); - case 'DataView': - return new DataView(Buffer.from(value).buffer, offset); - case 'Object': - return {toString(){return value}}; - case 'String': - return String(value); - case 'Symbol': - return Symbol(value); - case 'Uint8Array': - return new Uint8Array(Buffer.from(value).buffer, offset); - default: - throw new Error(`Unknown data type:${type}`); - } -})(); - -var options = (() => { - var encoding = argv[2]; - var mode = argv[3] ? 
parseInt(argv[3].slice(2), 8) : 0; - - if (encoding && mode) { - return {encoding, mode}; - - } else if (encoding) { - return encoding; - } - - return undefined; -})(); - -try { fs.unlinkSync(fname); } catch (e) {} - -if (options) { - var path = Buffer.from(`@${fname}`).slice(1); - fs.writeFileSync(path, data, options); - -} else { - fs.writeFileSync(fname, data); -} - -var ret = fs.readFileSync(fname); -console.log(String(ret)); diff -r 8dd6868a8748 -r 685adce92af1 test/njs_expect_test.exp --- a/test/njs_expect_test.exp Mon Nov 01 15:53:30 2021 +0000 +++ b/test/njs_expect_test.exp Tue Nov 02 12:35:34 2021 +0000 @@ -459,189 +459,6 @@ njs_test { "queue.toString()\r\n'0,1,2,3,4,5'"} } -# require('fs').readFile() - -njs_run {"./test/js/fs_readFile.js" "test/fs/utf8"} "??Z? Buffer 7" -njs_run {"./test/js/fs_readFile.js" "test/fs/utf8" "utf8"} "??Z? string 4" -njs_run {"./test/js/fs_readFile.js" "test/fs/utf8" "utf8" "r+"} "??Z? string 4" -njs_run {"./test/js/fs_readFile.js" "test/fs/nonexistent"} \ - "{\"errno\":2,\"code\":\"ENOENT\",\"path\":\"test/fs/nonexistent\",\"syscall\":\"open\"}" -njs_run {"./test/js/fs_readFile.js" "test/fs/non_utf8" "utf8"} "?? 
string 2" -njs_run {"./test/js/fs_readFile.js" "test/fs/non_utf8" "hex"} "8080 string 4" -njs_run {"./test/js/fs_readFile.js" "test/fs/non_utf8" "base64"} "gIA= string 4" - -njs_test { - {"var fs = require('fs')\r\n" - "undefined\r\n>> "} - {"fs.readFile('test/fs/ascii', 'utf8', function (e, data) {console.log(data[599], data[600])})\r\n" - "undefined\r\nx undefined\r\n>> "} - {"fs.readFile('test/fs/ascii', {encoding:'utf8',flag:'r+'}, function (e, data) {console.log(data[599], data[600])})\r\n" - "undefined\r\nx undefined\r\n>> "} - {"fs.readFile(Buffer.from([0x80,0x80]), function(e) {console.log(e.path.codePointAt())})\r\n" - "undefined\r\n65533"} -} - -njs_test { - {"var fs = require('fs')\r\n" - "undefined\r\n>> "} - {"fs.readFile('/proc/version', (e, data) => {console.log(e || data.slice(0,5) == 'Linux')})\r\n" - "undefined\r\ntrue\r\n>> "} - {"fs.readFile('/proc/cpuinfo', (e, data) => {console.log(e || data.slice(0,9) == 'processor')})\r\n" - "undefined\r\ntrue\r\n>> "} -} - -# require('fs').readFileSync() - -njs_run {"./test/js/fs_readFileSync.js" "test/fs/utf8"} "??Z? Buffer 7" -njs_run {"./test/js/fs_readFileSync.js" "test/fs/utf8" "utf8"} "??Z? string 4" -njs_run {"./test/js/fs_readFileSync.js" "test/fs/utf8" "utf8" "r+"} "??Z? string 4" -njs_run {"./test/js/fs_readFileSync.js" "test/fs/nonexistent"} \ - "{\"errno\":2,\"code\":\"ENOENT\",\"path\":\"test/fs/nonexistent\",\"syscall\":\"open\"}" -njs_run {"./test/js/fs_readFileSync.js" "test/fs/non_utf8" "utf8"} "?? 
string 2" -njs_run {"./test/js/fs_readFileSync.js" "test/fs/non_utf8" "hex"} "8080 string 4" -njs_run {"./test/js/fs_readFileSync.js" "test/fs/non_utf8" "base64"} "gIA= string 4" - -njs_test { - {"var fs = require('fs')\r\n" - "undefined\r\n>> "} - {"fs.readFileSync('test/fs/non_utf8', 'utf8').charCodeAt(1)\r\n" - "65533"} - {"fs.readFile('x'.repeat(8192))\r\n" - "TypeError: \"path\" is too long >= 4096"} -} - -njs_test { - {"var fs = require('fs'), file\r\n" - "undefined\r\n>> "} - {"try { file = fs.readFileSync('/proc/version')} catch (e) {}\r\n" - "undefined"} - {"(!file || file.slice(0,5) == 'Linux')\r\n" - "true"} - {"try { file = fs.readFileSync('/proc/cpuinfo')} catch (e) {}\r\n" - "undefined"} - {"(!file || file.slice(0,9) == 'processor')\r\n" - "true"} -} - -# require('fs').writeFile() - -njs_run {"./test/js/fs_writeFile.js" "ABCD" "Buffer" "1"} "BCD" -njs_run {"./test/js/fs_writeFile.js" "ABC" "DataView"} "ABC" -njs_run {"./test/js/fs_writeFile.js" "414243" "Object" "hex"} "ABC" -njs_run {"./test/js/fs_writeFile.js" "ABC" "String"} "ABC" -njs_run {"./test/js/fs_writeFile.js" "ABC" "Symbol"} "TypeError: Cannot convert a Symbol value to a string*" -njs_run {"./test/js/fs_writeFile.js" "ABC" "Uint8Array"} "ABC" - -njs_run {"./test/js/fs_writeFile.js" "ABC" "String" "utf8"} "ABC" -njs_run {"./test/js/fs_writeFile.js" "ABC" "String" "utf8" "0o666"} "ABC" -njs_run {"./test/js/fs_writeFile.js" "ABC" "String" "utf8" "0o222"} "Error: Permission denied*" -njs_run {"./test/js/fs_writeFile.js" "414243" "String" "hex"} "ABC" -njs_run {"./test/js/fs_writeFile.js" "QUJD" "String" "base64"} "ABC" -njs_run {"./test/js/fs_writeFile.js" "QUJD" "String" "base64url"} "ABC" - -njs_test { - {"var fs = require('fs')\r\n" - "undefined\r\n>> "} - {"fs.writeFile('/invalid_path', 'ABC', function (e) { console.log(JSON.stringify(e))})\r\n" - "undefined\r\n{\"errno\":13,\"code\":\"EACCES\",\"path\":\"/invalid_path\",\"syscall\":\"open\"}\r\n>> "} - 
{"fs.writeFile(Buffer.from('/invalid_path'), 'ABC', function (e) { console.log(typeof e.path)})\r\n" - "undefined\r\nstring\r\n>> "} -} - -# require('fs').writeFileSync() - -njs_run {"./test/js/fs_writeFileSync.js" "ABCD" "Buffer" "1"} "BCD" -njs_run {"./test/js/fs_writeFileSync.js" "ABC" "DataView"} "ABC" -njs_run {"./test/js/fs_writeFileSync.js" "414243" "Object" "hex"} "ABC" -njs_run {"./test/js/fs_writeFileSync.js" "ABC" "String"} "ABC" -njs_run {"./test/js/fs_writeFileSync.js" "ABC" "Symbol"} "TypeError: Cannot convert a Symbol value to a string*" -njs_run {"./test/js/fs_writeFileSync.js" "ABC" "Uint8Array"} "ABC" - -njs_run {"./test/js/fs_writeFileSync.js" "ABC" "String" "utf8"} "ABC" -njs_run {"./test/js/fs_writeFileSync.js" "ABC" "String" "utf8" "0o666"} "ABC" -njs_run {"./test/js/fs_writeFileSync.js" "ABC" "String" "utf8" "0o222"} "Error: Permission denied*" -njs_run {"./test/js/fs_writeFileSync.js" "78797a" "String" "hex"} "xyz" -njs_run {"./test/js/fs_writeFileSync.js" "eHl6" "String" "base64"} "xyz" -njs_run {"./test/js/fs_writeFileSync.js" "eHl6" "String" "base64url"} "xyz" - -njs_test { - {"var fs = require('fs'), fn = './build/test/file2';\r\n" - "undefined\r\n>> "} - {"fs.writeFileSync(fn, 'ABC')\r\n" - "undefined\r\n>> "} - {"fs.writeFileSync(fn, 'ABC')\r\n" - "undefined\r\n>> "} - {"fs.readFileSync(fn, 'utf8')\r\n" - "'ABC'\r\n>> "} -} - -# require('fs').appendFile() - -njs_run {"./test/js/fs_appendFile.js" "ABCD" "Buffer" "1"} "BCDBCD" -njs_run {"./test/js/fs_appendFile.js" "ABC" "DataView"} "ABCABC" -njs_run {"./test/js/fs_appendFile.js" "414243" "Object" "hex"} "ABCABC" -njs_run {"./test/js/fs_appendFile.js" "ABC" "String"} "ABCABC" -njs_run {"./test/js/fs_appendFile.js" "ABC" "Symbol"} "TypeError: Cannot convert a Symbol value to a string*" -njs_run {"./test/js/fs_appendFile.js" "ABC" "Uint8Array"} "ABCABC" - -njs_run {"./test/js/fs_appendFile.js" "ABC" "String" "utf8"} "ABC" -njs_run {"./test/js/fs_appendFile.js" "ABC" "String" "utf8" 
"0o666"} "ABC" -njs_run {"./test/js/fs_appendFile.js" "ABC" "String" "utf8" "0o222"} "Error: Permission denied*" -njs_run {"./test/js/fs_appendFile.js" "414243" "String" "hex"} "ABC" -njs_run {"./test/js/fs_appendFile.js" "QUJD" "String" "base64"} "ABC" -njs_run {"./test/js/fs_appendFile.js" "QUJD" "String" "base64url"} "ABC" - -# require('fs').appendFileSync() - -njs_run {"./test/js/fs_appendFileSync.js" "ABCD" "Buffer" "1"} "BCDBCD" -njs_run {"./test/js/fs_appendFileSync.js" "ABC" "DataView"} "ABCABC" -njs_run {"./test/js/fs_appendFileSync.js" "414243" "Object" "hex"} "ABCABC" -njs_run {"./test/js/fs_appendFileSync.js" "ABC" "String"} "ABCABC" -njs_run {"./test/js/fs_appendFileSync.js" "ABC" "Symbol"} "TypeError: Cannot convert a Symbol value to a string*" -njs_run {"./test/js/fs_appendFileSync.js" "ABC" "Uint8Array"} "ABCABC" - -njs_run {"./test/js/fs_appendFileSync.js" "ABC" "String" "utf8"} "ABC" -njs_run {"./test/js/fs_appendFileSync.js" "ABC" "String" "utf8" "0o666"} "ABC" -njs_run {"./test/js/fs_appendFileSync.js" "ABC" "String" "utf8" "0o222"} "Error: Permission denied*" -njs_run {"./test/js/fs_appendFileSync.js" "414243" "String" "hex"} "ABC" -njs_run {"./test/js/fs_appendFileSync.js" "QUJD" "String" "base64"} "ABC" -njs_run {"./test/js/fs_appendFileSync.js" "QUJD" "String" "base64url"} "ABC" - -# require('fs').renameSync() - -njs_test { - {"var fs = require('fs'), mktemp = ()=> `/tmp/njs_${Math.round(Math.random() * 1000000)}`\r\n" - "undefined\r\n>> "} - {"var fn1 = mktemp(), fn2 = mktemp();\r\n" - "undefined\r\n>> "} - {"fs.writeFileSync(fn1, 'ABC')\r\n" - "undefined\r\n>> "} - {"fs.renameSync(fn1, fn2)\r\n" - "undefined\r\n>> "} - {"String(fs.readFileSync(fn2))\r\n" - "'ABC'\r\n>> "} -} - -njs_test { - {"var fs = require('fs'), fn = './build/test/file2'\r\n" - "undefined\r\n>> "} - {"fs.writeFileSync(fn, 'ABC')\r\n" - "undefined\r\n>> "} - {"fs.renameSync(fn, 'test/fs/')\r\n" - "Error: Not a directory*"} -} - -# require('fs').realpathSync() - 
-njs_test { - {"var fs = require('fs')\r\n" - "undefined\r\n>> "} - {"fs.realpathSync('./build/test/..').endsWith('build')\r\n" - "true\r\n>> "} - {"fs.realpathSync('./build/test/..', {encoding:'buffer'}) instanceof Buffer\r\n" - "true\r\n>> "} -} - njs_run {"-c" "setTimeout(() => {console.log('A'.repeat(1024))}, 0); ref"} \ "^Thrown: ReferenceError: \"ref\" is not defined @@ -832,7 +649,6 @@ njs_test { njs_run {"-v"} "\\d+\.\\d+\.\\d+" - # Promise njs_run {"./test/js/promise_set_timeout.js"} \ @@ -1003,6 +819,22 @@ PatchedPromise.constructor PatchedPromise.constructor PatchedPromise async done" +# fs + +njs_run {"./test/fs/methods.js" "--match-exception-text"} \ +"fs readFile SUCCESS +fs readFileSync SUCCESS +fsp readFile SUCCESS +fs writeFile SUCCESS +fs writeFileSync SUCCESS +fsp writeFile SUCCESS +fs appendFile SUCCESS +fs appendFileSync SUCCESS +fsp appendFile SUCCESS +fs realpath SUCCESS +fs realpathSync SUCCESS +fsp realpath SUCCESS" + njs_run {"./test/js/fs_promises_001.js"} \ "init ok true short circut ok true From xeioex at nginx.com Tue Nov 2 12:40:55 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 02 Nov 2021 12:40:55 +0000 Subject: [njs] Getting rid of special types for primitive objects. Message-ID: details: https://hg.nginx.org/njs/rev/adb4ac09b86d branches: changeset: 1735:adb4ac09b86d user: Dmitry Volyntsev date: Tue Nov 02 12:37:00 2021 +0000 description: Getting rid of special types for primitive objects. The following types were removed: NJS_OBJECT_BOOLEAN, NJS_OBJECT_NUMBER, NJS_OBJECT_SYMBOL, NJS_OBJECT_STRING. Instead a generic NJS_OBJECT_VALUE type is used for objects with custom slots. 
diffstat: src/njs_array.c | 6 +- src/njs_boolean.c | 14 +- src/njs_builtin.c | 2 +- src/njs_iterator.c | 40 ++-- src/njs_json.c | 319 ++++++++++++++++++++++++-------------------- src/njs_number.c | 22 +- src/njs_object.c | 247 +++++++++++++++++---------------- src/njs_object.h | 6 +- src/njs_string.c | 35 ++-- src/njs_symbol.c | 2 +- src/njs_value.c | 33 +-- src/njs_value.h | 46 +++-- src/njs_value_conversion.h | 4 +- src/njs_vmcode.c | 4 - src/test/njs_unit_test.c | 7 +- 15 files changed, 411 insertions(+), 376 deletions(-) diffs (truncated from 1356 to 1000 lines): diff -r 685adce92af1 -r adb4ac09b86d src/njs_array.c --- a/src/njs_array.c Tue Nov 02 12:35:34 2021 +0000 +++ b/src/njs_array.c Tue Nov 02 12:37:00 2021 +0000 @@ -775,9 +775,9 @@ njs_array_prototype_slice_copy(njs_vm_t length--; } while (length != 0); - } else if (njs_is_string(this) || this->type == NJS_OBJECT_STRING) { - - if (this->type == NJS_OBJECT_STRING) { + } else if (njs_is_string(this) || njs_is_object_string(this)) { + + if (njs_is_object_string(this)) { this = njs_object_value(this); } diff -r 685adce92af1 -r adb4ac09b86d src/njs_boolean.c --- a/src/njs_boolean.c Tue Nov 02 12:35:34 2021 +0000 +++ b/src/njs_boolean.c Tue Nov 02 12:37:00 2021 +0000 @@ -12,8 +12,8 @@ static njs_int_t njs_boolean_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { - njs_object_t *object; - const njs_value_t *value; + const njs_value_t *value; + njs_object_value_t *object; if (nargs == 1) { value = &njs_value_false; @@ -23,12 +23,12 @@ njs_boolean_constructor(njs_vm_t *vm, nj } if (vm->top_frame->ctor) { - object = njs_object_value_alloc(vm, value, value->type); + object = njs_object_value_alloc(vm, NJS_OBJ_TYPE_BOOLEAN, 0, value); if (njs_slow_path(object == NULL)) { return NJS_ERROR; } - njs_set_type_object(&vm->retval, object, NJS_OBJECT_BOOLEAN); + njs_set_object_value(&vm->retval, object); } else { vm->retval = *value; @@ -78,7 +78,7 @@ 
njs_boolean_prototype_value_of(njs_vm_t if (value->type != NJS_BOOLEAN) { - if (value->type == NJS_OBJECT_BOOLEAN) { + if (njs_is_object_boolean(value)) { value = njs_object_value(value); } else { @@ -104,7 +104,7 @@ njs_boolean_prototype_to_string(njs_vm_t if (value->type != NJS_BOOLEAN) { - if (value->type == NJS_OBJECT_BOOLEAN) { + if (njs_is_object_boolean(value)) { value = njs_object_value(value); } else { @@ -168,6 +168,6 @@ const njs_object_type_init_t njs_boolea .prototype_props = &njs_boolean_prototype_init, .prototype_value = { .object_value = { .value = njs_value(NJS_BOOLEAN, 0, 0.0), - .object = { .type = NJS_OBJECT_BOOLEAN } } + .object = { .type = NJS_OBJECT_VALUE } } }, }; diff -r 685adce92af1 -r adb4ac09b86d src/njs_builtin.c --- a/src/njs_builtin.c Tue Nov 02 12:35:34 2021 +0000 +++ b/src/njs_builtin.c Tue Nov 02 12:37:00 2021 +0000 @@ -333,7 +333,7 @@ njs_builtin_objects_create(njs_vm_t *vm) string_object = &shared->string_object; njs_lvlhsh_init(&string_object->hash); string_object->shared_hash = shared->string_instance_hash; - string_object->type = NJS_OBJECT_STRING; + string_object->type = NJS_OBJECT_VALUE; string_object->shared = 1; string_object->extensible = 0; diff -r 685adce92af1 -r adb4ac09b86d src/njs_iterator.c --- a/src/njs_iterator.c Tue Nov 02 12:35:34 2021 +0000 +++ b/src/njs_iterator.c Tue Nov 02 12:37:00 2021 +0000 @@ -311,14 +311,14 @@ njs_int_t njs_object_iterate(njs_vm_t *vm, njs_iterator_args_t *args, njs_iterator_handler_t handler) { - double idx; - int64_t length, i, from, to; - njs_int_t ret; - njs_array_t *array, *keys; - njs_value_t *value, *entry, prop, character, string_obj; - njs_object_t *object; - const u_char *p, *end, *pos; - njs_string_prop_t string_prop; + double idx; + int64_t length, i, from, to; + njs_int_t ret; + njs_array_t *array, *keys; + njs_value_t *value, *entry, prop, character, string_obj; + const u_char *p, *end, *pos; + njs_string_prop_t string_prop; + njs_object_value_t *object; value = 
args->value; from = args->from; @@ -366,12 +366,12 @@ njs_object_iterate(njs_vm_t *vm, njs_ite if (njs_is_string(value) || njs_is_object_string(value)) { if (njs_is_string(value)) { - object = njs_object_value_alloc(vm, value, NJS_STRING); + object = njs_object_value_alloc(vm, NJS_OBJ_TYPE_STRING, 0, value); if (njs_slow_path(object == NULL)) { return NJS_ERROR; } - njs_set_type_object(&string_obj, object, NJS_OBJECT_STRING); + njs_set_object_value(&string_obj, object); args->value = &string_obj; } @@ -473,14 +473,14 @@ njs_int_t njs_object_iterate_reverse(njs_vm_t *vm, njs_iterator_args_t *args, njs_iterator_handler_t handler) { - double idx; - int64_t i, from, to, length; - njs_int_t ret; - njs_array_t *array, *keys; - njs_value_t *entry, *value, prop, character, string_obj; - njs_object_t *object; - const u_char *p, *end, *pos; - njs_string_prop_t string_prop; + double idx; + int64_t i, from, to, length; + njs_int_t ret; + njs_array_t *array, *keys; + njs_value_t *entry, *value, prop, character, string_obj; + const u_char *p, *end, *pos; + njs_string_prop_t string_prop; + njs_object_value_t *object; value = args->value; from = args->from; @@ -530,12 +530,12 @@ njs_object_iterate_reverse(njs_vm_t *vm, if (njs_is_string(value) || njs_is_object_string(value)) { if (njs_is_string(value)) { - object = njs_object_value_alloc(vm, value, NJS_STRING); + object = njs_object_value_alloc(vm, NJS_OBJ_TYPE_STRING, 0, value); if (njs_slow_path(object == NULL)) { return NJS_ERROR; } - njs_set_type_object(&string_obj, object, NJS_OBJECT_STRING); + njs_set_object_value(&string_obj, object); args->value = &string_obj; } diff -r 685adce92af1 -r adb4ac09b86d src/njs_json.c --- a/src/njs_json.c Tue Nov 02 12:35:34 2021 +0000 +++ b/src/njs_json.c Tue Nov 02 12:37:00 2021 +0000 @@ -220,15 +220,22 @@ njs_json_stringify(njs_vm_t *vm, njs_val space = njs_arg(args, nargs, 3); + if (njs_is_object(space)) { + if (njs_is_object_number(space)) { + ret = njs_value_to_numeric(vm, space, space); 
+ if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + } else if (njs_is_object_string(space)) { + ret = njs_value_to_string(vm, space, space); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + } + } + switch (space->type) { - case NJS_OBJECT_STRING: - ret = njs_value_to_string(vm, space, space); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - /* Fall through. */ - case NJS_STRING: length = njs_string_prop(&prop, space); @@ -250,14 +257,6 @@ njs_json_stringify(njs_vm_t *vm, njs_val break; - case NJS_OBJECT_NUMBER: - ret = njs_value_to_numeric(vm, space, space); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - /* Fall through. */ - case NJS_NUMBER: i64 = njs_min(njs_number_to_integer(njs_number(space)), 10); @@ -1138,10 +1137,27 @@ njs_json_pop_stringify_state(njs_json_st njs_inline njs_bool_t njs_json_is_object(const njs_value_t *value) { - return (((value)->type == NJS_OBJECT) - || ((value)->type == NJS_ARRAY) - || ((value)->type == NJS_OBJECT_SYMBOL) - || ((value)->type >= NJS_REGEXP)); + if (!njs_is_object(value)) { + return 0; + } + + if (njs_is_function(value)) { + return 0; + } + + if (njs_is_object_value(value)) { + switch (njs_object_value(value)->type) { + case NJS_BOOLEAN: + case NJS_NUMBER: + case NJS_STRING: + return 0; + + default: + break; + } + } + + return 1; } @@ -1487,74 +1503,82 @@ static njs_int_t njs_json_stringify_array(njs_vm_t *vm, njs_json_stringify_t *stringify) { njs_int_t ret; - uint32_t i, n, k, properties_length, array_length; - njs_value_t *value, num_value; - njs_array_t *properties, *array; - - properties_length = 1; - array = njs_array(&stringify->replacer); - array_length = array->length; - - for (i = 0; i < array_length; i++) { - if (njs_is_valid(&array->start[i])) { - properties_length++; - } + int64_t i, k, length; + njs_value_t *value, *item; + njs_array_t *properties; + + ret = njs_object_length(vm, &stringify->replacer, &length); + if (njs_slow_path(ret != NJS_OK)) { + return ret; } - 
properties = njs_array_alloc(vm, 1, properties_length, NJS_ARRAY_SPARE); + properties = njs_array_alloc(vm, 1, 0, NJS_ARRAY_SPARE); if (njs_slow_path(properties == NULL)) { return NJS_ERROR; } - n = 0; - properties->start[n++] = njs_string_empty; - - for (i = 0; i < array_length; i++) { - value = &array->start[i]; - - if (!njs_is_valid(&array->start[i])) { - continue; + item = njs_array_push(vm, properties); + njs_value_assign(item, &njs_string_empty); + + for (i = 0; i < length; i++) { + ret = njs_value_property_i64(vm, &stringify->replacer, i, + &stringify->retval); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; } + value = &stringify->retval; + switch (value->type) { + case NJS_STRING: + break; + case NJS_NUMBER: - ret = njs_number_to_string(vm, &num_value, value); + ret = njs_number_to_string(vm, value, value); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } - value = &num_value; break; - case NJS_OBJECT_NUMBER: - case NJS_OBJECT_STRING: - ret = njs_value_to_string(vm, value, value); - if (njs_slow_path(ret != NJS_OK)) { - return NJS_ERROR; + case NJS_OBJECT_VALUE: + switch (njs_object_value(value)->type) { + case NJS_NUMBER: + case NJS_STRING: + ret = njs_value_to_string(vm, value, value); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + break; + + default: + continue; } - /* Fall through. 
*/ - - case NJS_STRING: break; default: continue; } - for (k = 0; k < n; k ++) { + for (k = 0; k < properties->length; k++) { if (njs_values_strict_equal(value, &properties->start[k]) == 1) { break; } } - if (k == n) { - properties->start[n++] = *value; + if (k == properties->length) { + item = njs_array_push(vm, properties); + if (njs_slow_path(item == NULL)) { + return NJS_ERROR; + } + + njs_value_assign(item, value); } } - properties->length = n; - stringify->replacer.data.u.array = properties; + njs_set_array(&stringify->replacer, properties); return NJS_OK; } @@ -1565,35 +1589,42 @@ njs_json_append_value(njs_vm_t *vm, njs_ { njs_int_t ret; + if (njs_is_object_value(value)) { + switch (njs_object_value(value)->type) { + case NJS_NUMBER: + ret = njs_value_to_numeric(vm, value, value); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + break; + + case NJS_BOOLEAN: + njs_value_assign(value, njs_object_value(value)); + break; + + case NJS_STRING: + ret = njs_value_to_string(vm, value, value); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + break; + + default: + break; + } + } + switch (value->type) { - case NJS_OBJECT_STRING: - ret = njs_value_to_string(vm, value, value); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - /* Fall through. */ - case NJS_STRING: njs_json_append_string(chain, value, '\"'); break; - case NJS_OBJECT_NUMBER: - ret = njs_value_to_numeric(vm, value, value); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - /* Fall through. */ - case NJS_NUMBER: njs_json_append_number(chain, value); break; - case NJS_OBJECT_BOOLEAN: - value = njs_object_value(value); - /* Fall through. 
*/ - case NJS_BOOLEAN: if (njs_is_true(value)) { njs_chb_append_literal(chain, "true"); @@ -1829,12 +1860,22 @@ njs_dump_terminal(njs_json_stringify_t * njs_int_t (*to_string)(njs_vm_t *, njs_value_t *, const njs_value_t *); switch (value->type) { - case NJS_OBJECT_STRING: - value = njs_object_value(value); - - njs_chb_append_literal(chain, "[String: "); - njs_json_append_string(chain, value, '\''); - njs_chb_append_literal(chain, "]"); + case NJS_NULL: + njs_chb_append_literal(chain, "null"); + break; + + case NJS_UNDEFINED: + njs_chb_append_literal(chain, "undefined"); + break; + + case NJS_BOOLEAN: + if (njs_is_true(value)) { + njs_chb_append_literal(chain, "true"); + + } else { + njs_chb_append_literal(chain, "false"); + } + break; case NJS_STRING: @@ -1849,19 +1890,6 @@ njs_dump_terminal(njs_json_stringify_t * break; - case NJS_OBJECT_SYMBOL: - value = njs_object_value(value); - - ret = njs_symbol_descriptive_string(stringify->vm, &str_val, value); - if (njs_slow_path(ret != NJS_OK)) { - return NJS_ERROR; - } - - njs_string_get(&str_val, &str); - njs_chb_sprintf(chain, 16 + str.length, "[Symbol: %V]", &str); - - break; - case NJS_SYMBOL: ret = njs_symbol_descriptive_string(stringify->vm, &str_val, value); if (njs_slow_path(ret != NJS_OK)) { @@ -1873,59 +1901,61 @@ njs_dump_terminal(njs_json_stringify_t * break; - case NJS_OBJECT_NUMBER: + case NJS_INVALID: + njs_chb_append_literal(chain, ""); + break; + + case NJS_OBJECT_VALUE: value = njs_object_value(value); - if (njs_slow_path(njs_number(value) == 0.0 - && signbit(njs_number(value)))) - { - - njs_chb_append_literal(chain, "[Number: -0]"); + switch (value->type) { + case NJS_BOOLEAN: + if (njs_is_true(value)) { + njs_chb_append_literal(chain, "[Boolean: true]"); + + } else { + njs_chb_append_literal(chain, "[Boolean: false]"); + } + + break; + + case NJS_NUMBER: + if (njs_slow_path(njs_number(value) == 0.0 + && signbit(njs_number(value)))) + { + + njs_chb_append_literal(chain, "[Number: -0]"); + break; + } + 
+ ret = njs_number_to_string(stringify->vm, &str_val, value); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + njs_string_get(&str_val, &str); + njs_chb_sprintf(chain, 16 + str.length, "[Number: %V]", &str); + break; + + case NJS_SYMBOL: + ret = njs_symbol_descriptive_string(stringify->vm, &str_val, value); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + njs_string_get(&str_val, &str); + njs_chb_sprintf(chain, 16 + str.length, "[Symbol: %V]", &str); + + break; + + case NJS_STRING: + default: + njs_chb_append_literal(chain, "[String: "); + njs_json_append_string(chain, value, '\''); + njs_chb_append_literal(chain, "]"); break; } - ret = njs_number_to_string(stringify->vm, &str_val, value); - if (njs_slow_path(ret != NJS_OK)) { - return NJS_ERROR; - } - - njs_string_get(&str_val, &str); - njs_chb_sprintf(chain, 16 + str.length, "[Number: %V]", &str); - - break; - - case NJS_OBJECT_BOOLEAN: - value = njs_object_value(value); - - if (njs_is_true(value)) { - njs_chb_append_literal(chain, "[Boolean: true]"); - - } else { - njs_chb_append_literal(chain, "[Boolean: false]"); - } - - break; - - case NJS_BOOLEAN: - if (njs_is_true(value)) { - njs_chb_append_literal(chain, "true"); - - } else { - njs_chb_append_literal(chain, "false"); - } - - break; - - case NJS_UNDEFINED: - njs_chb_append_literal(chain, "undefined"); - break; - - case NJS_NULL: - njs_chb_append_literal(chain, "null"); - break; - - case NJS_INVALID: - njs_chb_append_literal(chain, ""); break; case NJS_FUNCTION: @@ -2031,7 +2061,8 @@ njs_dump_is_recursive(const njs_value_t { return (value->type == NJS_OBJECT && !njs_object(value)->error_data) || (value->type == NJS_ARRAY) - || (value->type >= NJS_OBJECT_SPECIAL_MAX); + || (value->type >= NJS_OBJECT_SPECIAL_MAX + && !njs_is_object_primitive(value)); } diff -r 685adce92af1 -r adb4ac09b86d src/njs_number.c --- a/src/njs_number.c Tue Nov 02 12:35:34 2021 +0000 +++ b/src/njs_number.c Tue Nov 02 12:37:00 2021 +0000 @@ -311,9 
+311,9 @@ static njs_int_t njs_number_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { - njs_int_t ret; - njs_value_t *value; - njs_object_t *object; + njs_int_t ret; + njs_value_t *value; + njs_object_value_t *object; if (nargs == 1) { value = njs_value_arg(&njs_value_zero); @@ -330,12 +330,12 @@ njs_number_constructor(njs_vm_t *vm, njs } if (vm->top_frame->ctor) { - object = njs_object_value_alloc(vm, value, NJS_NUMBER); + object = njs_object_value_alloc(vm, NJS_OBJ_TYPE_NUMBER, 0, value); if (njs_slow_path(object == NULL)) { return NJS_ERROR; } - njs_set_type_object(&vm->retval, object, NJS_OBJECT_NUMBER); + njs_set_object_value(&vm->retval, object); } else { njs_set_number(&vm->retval, njs_number(value)); @@ -572,7 +572,7 @@ njs_number_prototype_value_of(njs_vm_t * if (value->type != NJS_NUMBER) { - if (value->type == NJS_OBJECT_NUMBER) { + if (njs_is_object_number(value)) { value = njs_object_value(value); } else { @@ -601,7 +601,7 @@ njs_number_prototype_to_string(njs_vm_t if (value->type != NJS_NUMBER) { - if (value->type == NJS_OBJECT_NUMBER) { + if (njs_is_object_number(value)) { value = njs_object_value(value); } else { @@ -650,7 +650,7 @@ njs_number_prototype_to_fixed(njs_vm_t * value = &args[0]; if (value->type != NJS_NUMBER) { - if (value->type == NJS_OBJECT_NUMBER) { + if (njs_is_object_number(value)) { value = njs_object_value(value); } else { @@ -748,7 +748,7 @@ njs_number_prototype_to_precision(njs_vm value = &args[0]; if (value->type != NJS_NUMBER) { - if (value->type == NJS_OBJECT_NUMBER) { + if (njs_is_object_number(value)) { value = njs_object_value(value); } else { @@ -798,7 +798,7 @@ njs_number_prototype_to_exponential(njs_ value = &args[0]; if (value->type != NJS_NUMBER) { - if (value->type == NJS_OBJECT_NUMBER) { + if (njs_is_object_number(value)) { value = njs_object_value(value); } else { @@ -1187,6 +1187,6 @@ const njs_object_type_init_t njs_number .prototype_props = &njs_number_prototype_init, 
.prototype_value = { .object_value = { .value = njs_value(NJS_NUMBER, 0, 0.0), - .object = { .type = NJS_OBJECT_NUMBER } } + .object = { .type = NJS_OBJECT_VALUE } } }, }; diff -r 685adce92af1 -r adb4ac09b86d src/njs_object.c --- a/src/njs_object.c Tue Nov 02 12:35:34 2021 +0000 +++ b/src/njs_object.c Tue Nov 02 12:37:00 2021 +0000 @@ -92,42 +92,41 @@ njs_object_value_copy(njs_vm_t *vm, njs_ } -njs_object_t * -njs_object_value_alloc(njs_vm_t *vm, const njs_value_t *value, njs_uint_t type) +njs_object_value_t * +njs_object_value_alloc(njs_vm_t *vm, njs_uint_t prototype_index, size_t extra, + const njs_value_t *value) { - njs_uint_t index; njs_object_value_t *ov; - ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t)); - - if (njs_fast_path(ov != NULL)) { - njs_lvlhsh_init(&ov->object.hash); - - if (type == NJS_STRING) { - ov->object.shared_hash = vm->shared->string_instance_hash; - - } else { - njs_lvlhsh_init(&ov->object.shared_hash); - } - - ov->object.type = njs_object_value_type(type); - ov->object.shared = 0; - ov->object.extensible = 1; - ov->object.error_data = 0; - ov->object.fast_array = 0; - - index = njs_primitive_prototype_index(type); - ov->object.__proto__ = &vm->prototypes[index].object; - ov->object.slots = NULL; - + ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t) + extra); + if (njs_slow_path(ov == NULL)) { + njs_memory_error(vm); + return NULL; + } + + njs_lvlhsh_init(&ov->object.hash); + + if (prototype_index == NJS_OBJ_TYPE_STRING) { + ov->object.shared_hash = vm->shared->string_instance_hash; + + } else { + njs_lvlhsh_init(&ov->object.shared_hash); + } + + ov->object.type = NJS_OBJECT_VALUE; + ov->object.shared = 0; + ov->object.extensible = 1; + ov->object.error_data = 0; + ov->object.fast_array = 0; + + ov->object.__proto__ = &vm->prototypes[prototype_index].object; + ov->object.slots = NULL; + + if (value != NULL) { ov->value = *value; - - return &ov->object; } - njs_memory_error(vm); - - return NULL; + return ov; } @@ 
-219,46 +218,45 @@ static njs_int_t njs_object_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { - njs_uint_t type; - njs_value_t *value; - njs_object_t *object; + njs_uint_t type, index; + njs_value_t *value; + njs_object_t *object; + njs_object_value_t *obj_val; value = njs_arg(args, nargs, 1); type = value->type; if (njs_is_null_or_undefined(value)) { - object = njs_object_alloc(vm); if (njs_slow_path(object == NULL)) { return NJS_ERROR; } - type = NJS_OBJECT; - - } else { - - if (njs_is_object(value)) { - object = njs_object(value); - - } else if (njs_is_primitive(value)) { - - /* value->type is the same as prototype offset. */ - object = njs_object_value_alloc(vm, value, type); - if (njs_slow_path(object == NULL)) { - return NJS_ERROR; - } - - type = njs_object_value_type(type); - - } else { - njs_type_error(vm, "unexpected constructor argument:%s", - njs_type_string(type)); - + njs_set_object(&vm->retval, object); + + return NJS_OK; + } + + if (njs_is_primitive(value)) { + index = njs_primitive_prototype_index(type); + obj_val = njs_object_value_alloc(vm, index, 0, value); + if (njs_slow_path(obj_val == NULL)) { return NJS_ERROR; } + + njs_set_object_value(&vm->retval, obj_val); + + return NJS_OK; } - njs_set_type_object(&vm->retval, object, type); + if (njs_slow_path(!njs_is_object(value))) { + njs_type_error(vm, "unexpected constructor argument:%s", + njs_type_string(type)); + + return NJS_ERROR; + } + + njs_value_assign(&vm->retval, value); return NJS_OK; } @@ -450,11 +448,16 @@ njs_object_enumerate_value(njs_vm_t *vm, items, kind); break; - case NJS_OBJECT_STRING: + case NJS_OBJECT_VALUE: obj_val = (njs_object_value_t *) object; - ret = njs_object_enumerate_string(vm, &obj_val->value, items, kind); - break; + if (njs_is_string(&obj_val->value)) { + ret = njs_object_enumerate_string(vm, &obj_val->value, items, + kind); + break; + } + + /* Fall through. 
*/ default: goto object; @@ -497,11 +500,16 @@ njs_object_own_enumerate_value(njs_vm_t items, kind); break; - case NJS_OBJECT_STRING: + case NJS_OBJECT_VALUE: obj_val = (njs_object_value_t *) object; - ret = njs_object_enumerate_string(vm, &obj_val->value, items, kind); - break; + if (njs_is_string(&obj_val->value)) { + ret = njs_object_enumerate_string(vm, &obj_val->value, items, + kind); + break; + } + + /* Fall through. */ default: goto object; @@ -1462,7 +1470,7 @@ static njs_int_t njs_object_get_prototype_of(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { - uint32_t index, type; + uint32_t index; njs_value_t *value; value = njs_arg(args, nargs, 1); @@ -1474,10 +1482,14 @@ njs_object_get_prototype_of(njs_vm_t *vm if (!njs_is_null_or_undefined(value)) { index = njs_primitive_prototype_index(value->type); - type = njs_is_symbol(value) ? NJS_OBJECT - : njs_object_value_type(value->type); - - njs_set_type_object(&vm->retval, &vm->prototypes[index].object, type); + + if (njs_is_symbol(value)) { + njs_set_object(&vm->retval, &vm->prototypes[index].object); + + } else { + njs_set_object_value(&vm->retval, + &vm->prototypes[index].object_value); + } return NJS_OK; } @@ -2300,14 +2312,8 @@ static const njs_value_t njs_object_boo njs_long_string("[object Boolean]"); static const njs_value_t njs_object_number_string = njs_long_string("[object Number]"); -static const njs_value_t njs_object_symbol_string = - njs_long_string("[object Symbol]"); static const njs_value_t njs_object_string_string = njs_long_string("[object String]"); -static const njs_value_t njs_object_data_string = - njs_string("[object Data]"); -static const njs_value_t njs_object_exernal_string = - njs_long_string("[object External]"); static const njs_value_t njs_object_object_string = njs_long_string("[object Object]"); static const njs_value_t njs_object_array_string = @@ -2329,67 +2335,68 @@ njs_object_prototype_to_string(njs_vm_t { u_char *p; njs_int_t ret; - njs_value_t tag, 
*value; + njs_value_t tag, *this; njs_string_prop_t string; const njs_value_t *name; - static const njs_value_t *class_name[NJS_VALUE_TYPE_MAX] = { - /* Primitives. */ - &njs_object_null_string, - &njs_object_undefined_string, - &njs_object_boolean_string, - &njs_object_number_string, - &njs_object_symbol_string, - &njs_object_string_string, - - &njs_object_data_string, - &njs_object_exernal_string, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - - /* Objects. */ - &njs_object_object_string, - &njs_object_array_string, - &njs_object_boolean_string, - &njs_object_number_string, - &njs_object_symbol_string, - &njs_object_string_string, - &njs_object_function_string, - &njs_object_regexp_string, - &njs_object_date_string, - &njs_object_object_string, - &njs_object_object_string, - &njs_object_object_string, - &njs_object_object_string, - }; - - value = njs_argument(args, 0); - name = class_name[value->type]; - - if (njs_is_null_or_undefined(value)) { - vm->retval = *name; + this = njs_argument(args, 0); + + if (njs_is_null_or_undefined(this)) { + vm->retval = njs_is_null(this) ? 
njs_object_null_string + : njs_object_undefined_string; return NJS_OK; } - if (njs_is_error(value)) { - name = &njs_object_error_string; + ret = njs_value_to_object(vm, this); + if (njs_slow_path(ret != NJS_OK)) { + return ret; } - if (njs_is_object(value) - && njs_lvlhsh_eq(&njs_object(value)->shared_hash, + name = &njs_object_object_string; + + if (njs_is_array(this)) { + name = &njs_object_array_string; + + } else if (njs_is_object(this) + && njs_lvlhsh_eq(&njs_object(this)->shared_hash, &vm->shared->arguments_object_instance_hash)) { name = &njs_object_arguments_string; + + } else if (njs_is_function(this)) { + name = &njs_object_function_string; + + } else if (njs_is_error(this)) { + name = &njs_object_error_string; + + } else if (njs_is_object_value(this)) { + + switch (njs_object_value(this)->type) { + case NJS_BOOLEAN: + name = &njs_object_boolean_string; + break; + From xeioex at nginx.com Tue Nov 2 12:40:57 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 02 Nov 2021 12:40:57 +0000 Subject: [njs] Simplified object dumping. Message-ID: details: https://hg.nginx.org/njs/rev/3257b9892274 branches: changeset: 1736:3257b9892274 user: Dmitry Volyntsev date: Tue Nov 02 12:37:19 2021 +0000 description: Simplified object dumping. 
diffstat: src/njs_json.c | 514 +++++++++++++++++++++----------------------------------- 1 files changed, 191 insertions(+), 323 deletions(-) diffs (638 lines): diff -r adb4ac09b86d -r 3257b9892274 src/njs_json.c --- a/src/njs_json.c Tue Nov 02 12:37:00 2021 +0000 +++ b/src/njs_json.c Tue Nov 02 12:37:19 2021 +0000 @@ -22,11 +22,7 @@ typedef struct { uint8_t written; /* 1 bit */ uint8_t array; /* 1 bit */ - - enum { - NJS_JSON_OBJECT, - NJS_JSON_ARRAY, - } type:8; + uint8_t fast_array; /* 1 bit */ int64_t index; int64_t length; @@ -1070,41 +1066,37 @@ njs_json_push_stringify_state(njs_vm_t * state = &stringify->states[stringify->depth++]; state->value = *value; + state->array = njs_is_array(value); + state->fast_array = njs_is_fast_array(value); state->index = 0; state->written = 0; state->keys = NULL; state->key = NULL; - if (njs_is_fast_array(value)) { + if (state->fast_array) { state->length = njs_array_len(value); - state->type = NJS_JSON_ARRAY; - state->array = 1; + } + + if (njs_is_array(&stringify->replacer)) { + state->keys = njs_array(&stringify->replacer); + + } else if (state->array && !state->fast_array) { + state->keys = njs_array_keys(vm, value, 0); + if (njs_slow_path(state->keys == NULL)) { + return NULL; + } + + ret = njs_object_length(vm, &state->value, &state->length); + if (njs_slow_path(ret == NJS_ERROR)) { + return NULL; + } } else { - state->type = NJS_JSON_OBJECT; - state->array = njs_is_array(value); - - if (njs_is_array(&stringify->replacer)) { - state->keys = njs_array(&stringify->replacer); - - } else if (njs_is_array(value)) { - state->keys = njs_array_keys(vm, value, 0); - if (njs_slow_path(state->keys == NULL)) { - return NULL; - } - - ret = njs_object_length(vm, &state->value, &state->length); - if (njs_slow_path(ret == NJS_ERROR)) { - return NULL; - } - - } else { - state->keys = njs_value_own_enumerate(vm, value, NJS_ENUM_KEYS, - stringify->keys_type, 0); - - if (njs_slow_path(state->keys == NULL)) { - return NULL; - } + state->keys 
= njs_value_own_enumerate(vm, value, NJS_ENUM_KEYS, + stringify->keys_type, 0); + + if (njs_slow_path(state->keys == NULL)) { + return NULL; } } @@ -1176,6 +1168,15 @@ njs_json_stringify_indent(njs_json_strin } } + +njs_inline njs_bool_t +njs_json_stringify_done(njs_json_state_t *state, njs_bool_t array) +{ + return array ? state->index >= state->length + : state->index >= state->keys->length; +} + + static njs_int_t njs_json_stringify_iterator(njs_vm_t *vm, njs_json_stringify_t *stringify, njs_value_t *object) @@ -1201,158 +1202,90 @@ njs_json_stringify_iterator(njs_vm_t *vm njs_chb_init(&chain, vm->mem_pool); for ( ;; ) { - switch (state->type) { - case NJS_JSON_OBJECT: - if (state->index == 0) { - njs_chb_append(&chain, state->array ? "[" : "{", 1); - njs_json_stringify_indent(stringify, &chain, 0); - } - - if ((state->array && state->index >= state->length) - || (!state->array && state->index >= state->keys->length)) - { - njs_json_stringify_indent(stringify, &chain, -1); - njs_chb_append(&chain, state->array ? "]" : "}", 1); - - state = njs_json_pop_stringify_state(stringify); - if (state == NULL) { - goto done; - } - - break; + if (state->index == 0) { + njs_chb_append(&chain, state->array ? "[" : "{", 1); + njs_json_stringify_indent(stringify, &chain, 0); + } + + if (njs_json_stringify_done(state, state->array)) { + njs_json_stringify_indent(stringify, &chain, -1); + njs_chb_append(&chain, state->array ? 
"]" : "}", 1); + + state = njs_json_pop_stringify_state(stringify); + if (state == NULL) { + goto done; } - value = &stringify->retval; - - if (state->array) { - njs_set_number(&index, state->index++); - key = &index; - - } else { - key = &state->keys->start[state->index++]; - } - - ret = njs_value_property(vm, &state->value, key, value); - if (njs_slow_path(ret == NJS_ERROR)) { - return ret; - } - - if (state->array && ret == NJS_DECLINED) { - njs_set_null(value); - } - - ret = njs_json_stringify_to_json(stringify, state, key, value); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - ret = njs_json_stringify_replacer(stringify, state, key, value); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - if (njs_is_undefined(value) + continue; + } + + value = &stringify->retval; + + if (state->array) { + njs_set_number(&index, state->index); + key = &index; + + } else { + key = &state->keys->start[state->index]; + } + + ret = njs_value_property(vm, &state->value, key, value); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + if (state->array && ret == NJS_DECLINED) { + njs_set_null(value); + } + + ret = njs_json_stringify_to_json(stringify, state, key, value); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + ret = njs_json_stringify_replacer(stringify, state, key, value); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + state->index++; + + if (!state->array + && (njs_is_undefined(value) || njs_is_symbol(value) || njs_is_function(value) - || !njs_is_valid(value)) - { - break; - } - - if (state->written) { - njs_chb_append_literal(&chain,","); - njs_json_stringify_indent(stringify, &chain, 0); - } - - state->written = 1; - - if (!state->array) { - njs_json_append_string(&chain, key, '\"'); - njs_chb_append_literal(&chain,":"); - if (stringify->space.length != 0) { - njs_chb_append_literal(&chain," "); - } - } - - if (njs_json_is_object(value)) { - state = njs_json_push_stringify_state(vm, stringify, value); - if 
(njs_slow_path(state == NULL)) { - return NJS_ERROR; - } - - break; - } - - ret = njs_json_append_value(vm, &chain, value); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - break; - - case NJS_JSON_ARRAY: - if (state->index == 0) { - njs_chb_append_literal(&chain,"["); - njs_json_stringify_indent(stringify, &chain, 0); + || !njs_is_valid(value))) + { + continue; + } + + if (state->written) { + njs_chb_append_literal(&chain,","); + njs_json_stringify_indent(stringify, &chain, 0); + } + + state->written = 1; + + if (!state->array) { + njs_json_append_string(&chain, key, '\"'); + njs_chb_append_literal(&chain,":"); + if (stringify->space.length != 0) { + njs_chb_append_literal(&chain," "); } - - if (state->index >= state->length) { - njs_json_stringify_indent(stringify, &chain, -1); - njs_chb_append_literal(&chain,"]"); - - state = njs_json_pop_stringify_state(stringify); - if (state == NULL) { - goto done; - } - - break; - } - - if (state->written) { - njs_chb_append_literal(&chain,","); - njs_json_stringify_indent(stringify, &chain, 0); - } - - if (njs_is_fast_array(&state->value)) { - value = njs_array_start(&state->value); - stringify->retval = value[state->index++]; - - } else { - ret = njs_value_property_i64(vm, &state->value, state->index++, - &stringify->retval); - if (njs_slow_path(ret == NJS_ERROR)) { - return ret; - } + } + + if (njs_json_is_object(value)) { + state = njs_json_push_stringify_state(vm, stringify, value); + if (njs_slow_path(state == NULL)) { + return NJS_ERROR; } - value = &stringify->retval; - - ret = njs_json_stringify_to_json(stringify, state, NULL, value); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - ret = njs_json_stringify_replacer(stringify, state, NULL, value); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - if (njs_json_is_object(value)) { - state = njs_json_push_stringify_state(vm, stringify, value); - if (state == NULL) { - return NJS_ERROR; - } - - break; - } - - state->written = 1; - ret = 
njs_json_append_value(vm, &chain, value); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - - break; + continue; + } + + ret = njs_json_append_value(vm, &chain, value); + if (njs_slow_path(ret != NJS_OK)) { + return ret; } } @@ -1439,28 +1372,17 @@ njs_json_stringify_to_json(njs_json_stri } to_json = njs_object_to_json_function(stringify->vm, value); - if (to_json == NULL) { return NJS_OK; } arguments[0] = *value; - switch (state->type) { - case NJS_JSON_OBJECT: - if (key != NULL) { - arguments[1] = *key; - - } else { - njs_string_short_set(&arguments[1], 0, 0); - } - - break; - - case NJS_JSON_ARRAY: - njs_uint32_to_string(&arguments[1], state->index - 1); - - break; + if (!state->array) { + arguments[1] = *key; + + } else { + njs_uint32_to_string(&arguments[1], state->index); } return njs_function_apply(stringify->vm, to_json, arguments, 2, @@ -1479,19 +1401,13 @@ njs_json_stringify_replacer(njs_json_str } arguments[0] = state->value; - - switch (state->type) { - case NJS_JSON_OBJECT: + arguments[2] = *value; + + if (!state->array) { arguments[1] = *key; - arguments[2] = *value; - - break; - - case NJS_JSON_ARRAY: - njs_uint32_to_string(&arguments[1], state->index - 1); - arguments[2] = *value; - - break; + + } else { + njs_uint32_to_string(&arguments[1], state->index); } return njs_function_apply(stringify->vm, njs_function(&stringify->replacer), @@ -2086,14 +2002,24 @@ njs_dump_visited(njs_vm_t *vm, njs_json_ njs_inline njs_bool_t njs_dump_empty(njs_json_stringify_t *stringify, njs_json_state_t *state, - njs_chb_t *chain, double key, double prev, njs_bool_t sep_position) + njs_chb_t *chain, njs_bool_t sep_position) { + double key, prev; int64_t diff; - if (!state->array || isnan(prev)) { + if (!state->array || state->fast_array) { return 0; } + if (sep_position) { + key = njs_key_to_index(state->key); + prev = (state->index > 1) ? njs_key_to_index(&state->key[-1]) : -1; + + } else { + key = state->length; + prev = (state->index > 0) ? 
njs_key_to_index(state->key) : -1; + } + if (isnan(key)) { key = state->length; } @@ -2168,39 +2094,47 @@ njs_vm_value_dump(njs_vm_t *vm, njs_str_ } for ( ;; ) { - switch (state->type) { - case NJS_JSON_OBJECT: - if (state->index == 0) { - ret = njs_object_string_tag(vm, &state->value, &tag); - if (njs_slow_path(ret == NJS_ERROR)) { - return ret; - } - - if (ret == NJS_OK) { - (void) njs_string_prop(&string, &tag); - njs_chb_append(&chain, string.start, string.size); - njs_chb_append_literal(&chain, " "); - } - - njs_chb_append(&chain, state->array ? "[" : "{", 1); + if (state->index == 0) { + ret = njs_object_string_tag(vm, &state->value, &tag); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + if (ret == NJS_OK) { + (void) njs_string_prop(&string, &tag); + njs_chb_append(&chain, string.start, string.size); + njs_chb_append_literal(&chain, " "); + } + + njs_chb_append(&chain, state->array ? "[" : "{", 1); + njs_json_stringify_indent(stringify, &chain, 1); + } + + if (njs_json_stringify_done(state, state->fast_array)) { + njs_dump_empty(stringify, state, &chain, 0); + + njs_json_stringify_indent(stringify, &chain, 0); + njs_chb_append(&chain, state->array ? "]" : "}", 1); + + state = njs_json_pop_stringify_state(stringify); + if (state == NULL) { + goto done; + } + + continue; + } + + if (state->fast_array) { + if (state->written) { + njs_chb_append_literal(&chain, ","); njs_json_stringify_indent(stringify, &chain, 1); } - if (state->index >= state->keys->length) { - njs_dump_empty(stringify, state, &chain, state->length, - (state->index > 0) ? njs_key_to_index(state->key) : -1, 0); - - njs_json_stringify_indent(stringify, &chain, 0); - njs_chb_append(&chain, state->array ? 
"]" : "}", 1); - - state = njs_json_pop_stringify_state(stringify); - if (state == NULL) { - goto done; - } - - break; - } - + state->written = 1; + + val = &njs_array_start(&state->value)[state->index++]; + + } else { njs_property_query_init(&pq, NJS_PROPERTY_QUERY_GET, 0); key = &state->keys->start[state->index++]; @@ -2209,7 +2143,7 @@ njs_vm_value_dump(njs_vm_t *vm, njs_str_ ret = njs_property_query(vm, &pq, &state->value, key); if (njs_slow_path(ret != NJS_OK)) { if (ret == NJS_DECLINED) { - break; + continue; } goto exception; @@ -2218,7 +2152,7 @@ njs_vm_value_dump(njs_vm_t *vm, njs_str_ prop = pq.lhq.value; if (prop->type == NJS_WHITEOUT || !prop->enumerable) { - break; + continue; } if (state->written) { @@ -2228,8 +2162,7 @@ njs_vm_value_dump(njs_vm_t *vm, njs_str_ state->written = 1; - njs_dump_empty(stringify, state, &chain, njs_key_to_index(key), - (state->index > 1) ? njs_key_to_index(&key[-1]) : -1, 1); + njs_dump_empty(stringify, state, &chain, 1); if (!state->array || isnan(njs_key_to_index(key))) { njs_key_string_get(vm, key, &pq.lhq.key); @@ -2268,94 +2201,29 @@ njs_vm_value_dump(njs_vm_t *vm, njs_str_ val = njs_value_arg(&string_set); } } - - if (njs_dump_is_recursive(val)) { - if (njs_slow_path(njs_dump_visited(vm, stringify, val))) { - njs_chb_append_literal(&chain, "[Circular]"); - break; - } - - state = njs_json_push_stringify_state(vm, stringify, val); - if (njs_slow_path(state == NULL)) { - goto exception; - } - - break; + } + + if (njs_dump_is_recursive(val)) { + if (njs_slow_path(njs_dump_visited(vm, stringify, val))) { + njs_chb_append_literal(&chain, "[Circular]"); + continue; } - ret = njs_dump_terminal(stringify, &chain, val, console); - if (njs_slow_path(ret != NJS_OK)) { - if (ret == NJS_DECLINED) { - goto exception; - } - - goto memory_error; - } - - break; - - case NJS_JSON_ARRAY: - if (state->index == 0) { - ret = njs_object_string_tag(vm, &state->value, &tag); - if (njs_slow_path(ret == NJS_ERROR)) { - return ret; - } - - if 
(ret == NJS_OK) { - (void) njs_string_prop(&string, &tag); - njs_chb_append(&chain, string.start, string.size); - njs_chb_append_literal(&chain, " "); - } - - njs_chb_append_literal(&chain, "["); - njs_json_stringify_indent(stringify, &chain, 1); + state = njs_json_push_stringify_state(vm, stringify, val); + if (njs_slow_path(state == NULL)) { + goto exception; } - if (state->index >= njs_array_len(&state->value)) { - njs_json_stringify_indent(stringify, &chain, 0); - njs_chb_append_literal(&chain, "]"); - - state = njs_json_pop_stringify_state(stringify); - if (state == NULL) { - goto done; - } - - break; - } - - if (state->written) { - njs_chb_append_literal(&chain, ","); - njs_json_stringify_indent(stringify, &chain, 1); + continue; + } + + ret = njs_dump_terminal(stringify, &chain, val, console); + if (njs_slow_path(ret != NJS_OK)) { + if (ret == NJS_DECLINED) { + goto exception; } - val = &njs_array_start(&state->value)[state->index++]; - - if (njs_dump_is_recursive(val)) { - if (njs_slow_path(njs_dump_visited(vm, stringify, val))) { - njs_chb_append_literal(&chain, "[Circular]"); - break; - } - - state = njs_json_push_stringify_state(vm, stringify, val); - if (njs_slow_path(state == NULL)) { - goto exception; - } - - break; - } - - state->written = 1; - - ret = njs_dump_terminal(stringify, &chain, val, console); - if (njs_slow_path(ret != NJS_OK)) { - if (ret == NJS_DECLINED) { - goto exception; - } - - goto memory_error; - } - - break; + goto memory_error; } } From xeioex at nginx.com Tue Nov 2 12:40:59 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 02 Nov 2021 12:40:59 +0000 Subject: [njs] Unified creation of objects with custom value slots. Message-ID: details: https://hg.nginx.org/njs/rev/0c5745fc200d branches: changeset: 1737:0c5745fc200d user: Dmitry Volyntsev date: Tue Nov 02 12:38:02 2021 +0000 description: Unified creation of objects with custom value slots. 
diffstat: src/njs_crypto.c | 31 ++----------------------------- src/njs_encoding.c | 52 ++++++++++++---------------------------------------- src/njs_extern.c | 19 ++----------------- src/njs_iterator.c | 22 +++++----------------- 4 files changed, 21 insertions(+), 103 deletions(-) diffs (247 lines): diff -r 3257b9892274 -r 0c5745fc200d src/njs_crypto.c --- a/src/njs_crypto.c Tue Nov 02 12:37:19 2021 +0000 +++ b/src/njs_crypto.c Tue Nov 02 12:38:02 2021 +0000 @@ -129,33 +129,6 @@ static njs_crypto_enc_t njs_encodings[] }; -static njs_object_value_t * -njs_crypto_object_value_alloc(njs_vm_t *vm, njs_object_type_t type) -{ - njs_object_value_t *ov; - - ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t)); - - if (njs_fast_path(ov != NULL)) { - njs_lvlhsh_init(&ov->object.hash); - njs_lvlhsh_init(&ov->object.shared_hash); - ov->object.type = NJS_OBJECT_VALUE; - ov->object.shared = 0; - ov->object.extensible = 1; - ov->object.error_data = 0; - ov->object.fast_array = 0; - - ov->object.__proto__ = &vm->prototypes[type].object; - ov->object.slots = NULL; - return ov; - } - - njs_memory_error(vm); - - return NULL; -} - - static njs_int_t njs_crypto_create_hash(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) @@ -169,7 +142,7 @@ njs_crypto_create_hash(njs_vm_t *vm, njs return NJS_ERROR; } - hash = njs_crypto_object_value_alloc(vm, NJS_OBJ_TYPE_CRYPTO_HASH); + hash = njs_object_value_alloc(vm, NJS_OBJ_TYPE_CRYPTO_HASH, 0, NULL); if (njs_slow_path(hash == NULL)) { return NJS_ERROR; } @@ -510,7 +483,7 @@ njs_crypto_create_hmac(njs_vm_t *vm, njs alg->init(&ctx->u); alg->update(&ctx->u, key_buf, 64); - hmac = njs_crypto_object_value_alloc(vm, NJS_OBJ_TYPE_CRYPTO_HMAC); + hmac = njs_object_value_alloc(vm, NJS_OBJ_TYPE_CRYPTO_HMAC, 0, NULL); if (njs_slow_path(hmac == NULL)) { return NJS_ERROR; } diff -r 3257b9892274 -r 0c5745fc200d src/njs_encoding.c --- a/src/njs_encoding.c Tue Nov 02 12:37:19 2021 +0000 +++ b/src/njs_encoding.c Tue Nov 02 12:38:02 
2021 +0000 @@ -48,34 +48,20 @@ static njs_int_t njs_text_encoder_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { - njs_object_t *proto; - njs_object_value_t *ov; + njs_object_value_t *encoder; if (!vm->top_frame->ctor) { njs_type_error(vm, "Constructor of TextEncoder requires 'new'"); return NJS_ERROR; } - ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t)); - if (njs_slow_path(ov == NULL)) { - njs_memory_error(vm); + encoder = njs_object_value_alloc(vm, NJS_OBJ_TYPE_TEXT_ENCODER, 0, NULL); + if (njs_slow_path(encoder == NULL)) { return NJS_ERROR; } - proto = &vm->prototypes[NJS_OBJ_TYPE_TEXT_ENCODER].object; - - njs_lvlhsh_init(&ov->object.hash); - njs_lvlhsh_init(&ov->object.shared_hash); - ov->object.type = NJS_OBJECT_VALUE; - ov->object.shared = 0; - ov->object.extensible = 1; - ov->object.error_data = 0; - ov->object.fast_array = 0; - ov->object.__proto__ = proto; - ov->object.slots = NULL; - - njs_set_data(&ov->value, NULL, NJS_DATA_TAG_TEXT_ENCODER); - njs_set_object_value(&vm->retval, ov); + njs_set_data(&encoder->value, NULL, NJS_DATA_TAG_TEXT_ENCODER); + njs_set_object_value(&vm->retval, encoder); return NJS_OK; } @@ -330,8 +316,7 @@ njs_text_decoder_constructor(njs_vm_t *v njs_index_t unused) { njs_int_t ret; - njs_object_t *proto; - njs_object_value_t *ov; + njs_object_value_t *decoder; njs_encoding_decode_t *data; if (!vm->top_frame->ctor) { @@ -339,26 +324,13 @@ njs_text_decoder_constructor(njs_vm_t *v return NJS_ERROR; } - ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t) - + sizeof(njs_encoding_decode_t)); - if (njs_slow_path(ov == NULL)) { - njs_memory_error(vm); + decoder = njs_object_value_alloc(vm, NJS_OBJ_TYPE_TEXT_DECODER, + sizeof(njs_encoding_decode_t), NULL); + if (njs_slow_path(decoder == NULL)) { return NJS_ERROR; } - proto = &vm->prototypes[NJS_OBJ_TYPE_TEXT_DECODER].object; - - njs_lvlhsh_init(&ov->object.hash); - njs_lvlhsh_init(&ov->object.shared_hash); - ov->object.type = 
NJS_OBJECT_VALUE; - ov->object.shared = 0; - ov->object.extensible = 1; - ov->object.error_data = 0; - ov->object.fast_array = 0; - ov->object.__proto__ = proto; - ov->object.slots = NULL; - - data = (njs_encoding_decode_t *) ((uint8_t *) ov + data = (njs_encoding_decode_t *) ((uint8_t *) decoder + sizeof(njs_object_value_t)); ret = njs_text_decoder_arg_encoding(vm, args, nargs, data); @@ -373,8 +345,8 @@ njs_text_decoder_constructor(njs_vm_t *v njs_utf8_decode_init(&data->ctx); - njs_set_data(&ov->value, data, NJS_DATA_TAG_TEXT_DECODER); - njs_set_object_value(&vm->retval, ov); + njs_set_data(&decoder->value, data, NJS_DATA_TAG_TEXT_DECODER); + njs_set_object_value(&vm->retval, decoder); return NJS_OK; } diff -r 3257b9892274 -r 0c5745fc200d src/njs_extern.c --- a/src/njs_extern.c Tue Nov 02 12:37:19 2021 +0000 +++ b/src/njs_extern.c Tue Nov 02 12:38:02 2021 +0000 @@ -179,22 +179,14 @@ njs_external_prop_handler(njs_vm_t *vm, *retval = *setval; } else { - ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t)); + ov = njs_object_value_alloc(vm, NJS_OBJ_TYPE_OBJECT, 0, NULL); if (njs_slow_path(ov == NULL)) { - njs_memory_error(vm); return NJS_ERROR; } slots = njs_object(value)->slots + self->value.data.magic16; - njs_lvlhsh_init(&ov->object.hash); ov->object.shared_hash = slots->external_shared_hash; - ov->object.type = NJS_OBJECT; - ov->object.shared = 0; - ov->object.extensible = 1; - ov->object.error_data = 0; - ov->object.fast_array = 0; - ov->object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_OBJECT].object; ov->object.slots = slots; external = njs_vm_external(vm, NJS_PROTO_ID_ANY, value); @@ -308,23 +300,16 @@ njs_vm_external_create(njs_vm_t *vm, njs proto = ((uintptr_t *) vm->protos->start)[proto_id]; - ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t)); + ov = njs_object_value_alloc(vm, NJS_OBJ_TYPE_OBJECT, 0, NULL); if (njs_slow_path(ov == NULL)) { - njs_memory_error(vm); return NJS_ERROR; } protos = (njs_arr_t *) proto; slots = protos->start; - 
njs_lvlhsh_init(&ov->object.hash); ov->object.shared_hash = slots->external_shared_hash; - ov->object.type = NJS_OBJECT; ov->object.shared = shared; - ov->object.extensible = 1; - ov->object.error_data = 0; - ov->object.fast_array = 0; - ov->object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_OBJECT].object; ov->object.slots = slots; njs_set_object_value(value, ov); diff -r 3257b9892274 -r 0c5745fc200d src/njs_iterator.c --- a/src/njs_iterator.c Tue Nov 02 12:37:19 2021 +0000 +++ b/src/njs_iterator.c Tue Nov 02 12:38:02 2021 +0000 @@ -34,27 +34,15 @@ njs_int_t njs_array_iterator_create(njs_vm_t *vm, const njs_value_t *target, njs_value_t *retval, njs_object_enum_t kind) { - njs_object_value_t *ov; + njs_object_value_t *iterator; njs_array_iterator_t *it; - ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t)); - if (njs_slow_path(ov == NULL)) { + iterator = njs_object_value_alloc(vm, NJS_OBJ_TYPE_ARRAY_ITERATOR, 0, NULL); + if (njs_slow_path(iterator == NULL)) { njs_memory_error(vm); return NJS_ERROR; } - njs_lvlhsh_init(&ov->object.hash); - njs_lvlhsh_init(&ov->object.shared_hash); - ov->object.type = NJS_OBJECT_VALUE; - ov->object.shared = 0; - ov->object.extensible = 1; - ov->object.error_data = 0; - ov->object.fast_array = 0; - - ov->object.__proto__ = - &vm->prototypes[NJS_OBJ_TYPE_ARRAY_ITERATOR].object; - ov->object.slots = NULL; - it = njs_mp_alloc(vm->mem_pool, sizeof(njs_array_iterator_t)); if (njs_slow_path(it == NULL)) { njs_memory_error(vm); @@ -66,8 +54,8 @@ njs_array_iterator_create(njs_vm_t *vm, it->next = 0; it->kind = kind; - njs_set_data(&ov->value, it, NJS_DATA_TAG_ARRAY_ITERATOR); - njs_set_object_value(retval, ov); + njs_set_data(&iterator->value, it, NJS_DATA_TAG_ARRAY_ITERATOR); + njs_set_object_value(retval, iterator); return NJS_OK; } From xeioex at nginx.com Tue Nov 2 12:41:00 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 02 Nov 2021 12:41:00 +0000 Subject: [njs] Added njs_date_alloc(). 
Message-ID: details: https://hg.nginx.org/njs/rev/4ddfb2f2227f branches: changeset: 1738:4ddfb2f2227f user: Dmitry Volyntsev date: Tue Nov 02 12:38:42 2021 +0000 description: Added njs_date_alloc(). diffstat: src/njs_date.c | 42 ++++++++++++++++++++++++++++-------------- src/njs_date.h | 1 + 2 files changed, 29 insertions(+), 14 deletions(-) diffs (74 lines): diff -r 0c5745fc200d -r 4ddfb2f2227f src/njs_date.c --- a/src/njs_date.c Tue Nov 02 12:38:02 2021 +0000 +++ b/src/njs_date.c Tue Nov 02 12:38:42 2021 +0000 @@ -346,6 +346,33 @@ njs_date_args(njs_vm_t *vm, njs_value_t } +njs_date_t * +njs_date_alloc(njs_vm_t *vm, double time) +{ + njs_date_t *date; + + date = njs_mp_alloc(vm->mem_pool, sizeof(njs_date_t)); + if (njs_slow_path(date == NULL)) { + njs_memory_error(vm); + return NULL; + } + + njs_lvlhsh_init(&date->object.hash); + njs_lvlhsh_init(&date->object.shared_hash); + date->object.type = NJS_DATE; + date->object.shared = 0; + date->object.extensible = 1; + date->object.error_data = 0; + date->object.fast_array = 0; + date->object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_DATE].object; + date->object.slots = NULL; + + date->time = time; + + return date; +} + + static njs_int_t njs_date_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) @@ -393,24 +420,11 @@ njs_date_constructor(njs_vm_t *vm, njs_v time = njs_make_date(tm, 1); } - date = njs_mp_alloc(vm->mem_pool, sizeof(njs_date_t)); + date = njs_date_alloc(vm, time); if (njs_slow_path(date == NULL)) { - njs_memory_error(vm); return NJS_ERROR; } - njs_lvlhsh_init(&date->object.hash); - njs_lvlhsh_init(&date->object.shared_hash); - date->object.type = NJS_DATE; - date->object.shared = 0; - date->object.extensible = 1; - date->object.error_data = 0; - date->object.fast_array = 0; - date->object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_DATE].object; - date->object.slots = NULL; - - date->time = time; - njs_set_date(&vm->retval, date); return NJS_OK; diff -r 0c5745fc200d -r 
4ddfb2f2227f src/njs_date.h --- a/src/njs_date.h Tue Nov 02 12:38:02 2021 +0000 +++ b/src/njs_date.h Tue Nov 02 12:38:42 2021 +0000 @@ -8,6 +8,7 @@ #define _NJS_DATE_H_INCLUDED_ +njs_date_t *njs_date_alloc(njs_vm_t *vm, double time); njs_int_t njs_date_to_string(njs_vm_t *vm, njs_value_t *retval, const njs_value_t *date); From mdounin at mdounin.ru Tue Nov 2 14:52:24 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 02 Nov 2021 14:52:24 +0000 Subject: [nginx] nginx-1.21.4-RELEASE Message-ID: details: https://hg.nginx.org/nginx/rev/39be8a682c58 branches: changeset: 7952:39be8a682c58 user: Maxim Dounin date: Tue Nov 02 17:49:22 2021 +0300 description: nginx-1.21.4-RELEASE diffstat: docs/xml/nginx/changes.xml | 131 +++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 131 insertions(+), 0 deletions(-) diffs (141 lines): diff -r c7a8bdf5af55 -r 39be8a682c58 docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml Sat Oct 30 02:39:19 2021 +0300 +++ b/docs/xml/nginx/changes.xml Tue Nov 02 17:49:22 2021 +0300 @@ -5,6 +5,137 @@ + + + + +????????? NPN ?????? ALPN ??? ???????????? HTTP/2-?????????? +??????????. + + +support for NPN instead of ALPN to establish HTTP/2 connections +has been removed. + + + + + +?????? nginx ????????? SSL ??????????, ???? ?????? ?????????? ALPN, +?? nginx ?? ???????????? ?? ???? ?? ?????????? ???????? ??????????. + + +now nginx rejects SSL connections if ALPN is used by the client, +but no supported protocols can be negotiated. + + + + + +? ????????? sendfile_max_chunk ???????? ?? ????????? +???????? ?? 2 ?????????. + + +the default value of the "sendfile_max_chunk" directive +was changed to 2 megabytes. + + + + + +????????? proxy_half_close ? ?????? stream. + + +the "proxy_half_close" directive in the stream module. + + + + + +????????? ssl_alpn ? ?????? stream. + + +the "ssl_alpn" directive in the stream module. + + + + + +?????????? $ssl_alpn_protocol. + + +the $ssl_alpn_protocol variable. + + + + + +????????? 
SSL_sendfile() ??? ????????????? OpenSSL 3.0. + + +support for SSL_sendfile() when using OpenSSL 3.0. + + + + + +????????? mp4_start_key_frame ? ?????? ngx_http_mp4_module.
+??????? Tracey Jaquith. +
+ +the "mp4_start_key_frame" directive in the ngx_http_mp4_module.
+Thanks to Tracey Jaquith. +
+
+ + + +? ?????????? $content_length ??? ????????????? chunked transfer encoding. + + +in the $content_length variable when using chunked transfer encoding. + + + + + +??? ????????? ?????? ???????????? ????? ?? ????????????? ??????? +nginx ??? ??? ?? ????? ???????????? ??????????.
+??????? Awdhesh Mathpal. +
+ +after receiving a response with incorrect length from a proxied backend +nginx might nevertheless cache the connection.
+Thanks to Awdhesh Mathpal. +
+
+ + + +???????????? ????????? ?? ???????? +????????????? ?? ?????? info ?????? error; +?????? ????????? ? 1.21.1. + + +invalid headers from backends +were logged at the "info" level instead of "error"; +the bug had appeared in 1.21.1. + + + + + +??? ????????????? HTTP/2 ? ????????? aio_write +??????? ????? ????????. + + +requests might hang +when using HTTP/2 and the "aio_write" directive. + + + +
+ + From mdounin at mdounin.ru Tue Nov 2 14:52:27 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 02 Nov 2021 14:52:27 +0000 Subject: [nginx] release-1.21.4 tag Message-ID: details: https://hg.nginx.org/nginx/rev/82b750b20c52 branches: changeset: 7953:82b750b20c52 user: Maxim Dounin date: Tue Nov 02 17:49:22 2021 +0300 description: release-1.21.4 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r 39be8a682c58 -r 82b750b20c52 .hgtags --- a/.hgtags Tue Nov 02 17:49:22 2021 +0300 +++ b/.hgtags Tue Nov 02 17:49:22 2021 +0300 @@ -464,3 +464,4 @@ df34dcc9ac072ffd0945e5a1f3eb7987e8275375 a68ac0677f8553b1f84d357bc9da114731ab5f47 release-1.21.1 bfbc52374adcbf2f9060afd62de940f6fab3bba5 release-1.21.2 2217a9c1d0b86026f22700b3c089545db1964f55 release-1.21.3 +39be8a682c58308d9399cddd57e37f9fdb7bdf3e release-1.21.4 From tracey at archive.org Wed Nov 3 03:16:19 2021 From: tracey at archive.org (Tracey Jaquith) Date: Tue, 2 Nov 2021 20:16:19 -0700 Subject: [PATCH] Add optional "mp4_exact_start" nginx config off/on to show video between keyframes In-Reply-To: References: <20210628095320.px3ggmmoyjalyv5m@Romans-MacBook-Pro.local> <5F32216C-A041-454C-A73C-0E1C259E434C@archive.org> <20210930134811.epttik4joflf2qj6@Romans-MacBook-Pro.local> <20241A9E-BDF1-42D8-9848-AF628717EFE3@archive.org> Message-ID: Wow, it got merged (the EDTS version). Nice and thanks! ( https://github.com/nginx/nginx/commit/7927071ee26ff6313301b744a90240dccbc836db ) Thanks for filing the Firefox bug and following up with it, Roman. I can move our live services over to the updated nginx once FF fixes the bug (and some time goes by). (No worries, that?s probably about when ubuntu nginx version will get updated, hehe). I haven?t seen timescales like 24 before here in the US (esp. w/ TV - but I believe very rare otherwise out here). Glad you found a good solution. I?m really looking forward to the new: start_key_frame directive! 
You might find this hopefully amusing, but I gave a talk at Demuxed 2021 October 7th, titled: "30,000 fps nginx - To Russia with Love? while things were before the EDTS solution you all found and worked out. (Video should hit YouTube by end of 2021). Kind regards & gratefully! - Tracey > On Oct 20, 2021, at 8:32 PM, Maxim Dounin wrote: > > Hello! > > On Mon, Oct 04, 2021 at 03:41:47PM -0700, Tracey Jaquith wrote: > >> Hi Roman, >> >> OK, thanks! >> >> I?ve tested this on macosx & linux, so far with: chrome, safari, Firefox and iOS. >> >> However, I?m seeing Firefox is having alternate behavior where it plays video from the prior keyframe, >> without audio, until it hits the desired start time in at least one video, though it?s not consistently doing this. >> I suspect it?s the edit list ? a nice solve for this. >> I?ve had minor issues with edit lists in the past, for what that?s worth. > > Thanks for testing. Just for the record: > > https://bugzilla.mozilla.org/show_bug.cgi?id=1735300 > > Hopefully this will be eventually fixed. > > [...] > >> And deep apologies? >>> Another problem is track delay >> >> I *should have* mentioned when I initially wrote in, that I was aware of the a/v sync slight slip >> ? and that in practice and running for over 3 months now, it hasn?t seemed to be any kind of issue. >> >> Assuming: >> * the average (US TV) video might be 29.97 fps >> * and thus timescale / duration of 30000 / 1001 >> * and that a typical max distance between keyframe GOPs w/ ffmpeg encoders and similar is 300 frames or about 10s >> >> Then: >> * with a max of 10s between keyframes >> * and 300 frames max would get ?sped up? from 1001 => 1 >> >> Then we?re looking at a maximum additional video frames duration of 1/100th of a second. >> >> (300 * 1001 / 30000) == 10.01 >> >> (300 * 1 / 30000) == 0.01 >> >> So the most the A/V sync could ?drift? from those early added frames is 1/100th of a second, >> where average might be 2-3x smaller than that. 
>> In practice, it didn?t seem noticeable ? >> but I am quite impressed by your desire to minimize/eliminate that. >> (In practice, from the broadcasters at least in the US, 1/100th of a second A/V slip is not uncommon). > > While it looks quite well with timescale 30000, it is not uncommon > for video tracks to have timescale 25 or so. For example, the > test video in the ticket linked above uses timescale 24. With > such a timescale, resulting desync will be much more noticeable. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -Tracey @tracey_pooh TV Architect https://archive.org/tv -------------- next part -------------- An HTML attachment was scrubbed... URL: From maxim at nginx.com Wed Nov 3 07:48:45 2021 From: maxim at nginx.com (Maxim Konovalov) Date: Wed, 3 Nov 2021 09:48:45 +0200 Subject: [PATCH] Add optional "mp4_exact_start" nginx config off/on to show video between keyframes In-Reply-To: References: <20210628095320.px3ggmmoyjalyv5m@Romans-MacBook-Pro.local> <5F32216C-A041-454C-A73C-0E1C259E434C@archive.org> <20210930134811.epttik4joflf2qj6@Romans-MacBook-Pro.local> <20241A9E-BDF1-42D8-9848-AF628717EFE3@archive.org> Message-ID: <642e8b4e-db98-97c4-dfc6-09d4512aad36@nginx.com> Hi Tracey, On 03.11.2021 05:16, Tracey Jaquith wrote: [...] > You might find this hopefully amusing, but I gave a talk at Demuxed 2021 > October 7th, titled: > ? "30,000 fps nginx - To Russia with Love? > while things were before the EDTS solution you all found and worked out. > (Video should hit YouTube by end of 2021). > Please update us with the link then. Thanks! Maxim -- Maxim Konovalov From xeioex at nginx.com Wed Nov 3 15:51:21 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 03 Nov 2021 15:51:21 +0000 Subject: [njs] Added fs.Stats, fs.stat() and friends. 
Message-ID: details: https://hg.nginx.org/njs/rev/203bc61d8d70 branches: changeset: 1739:203bc61d8d70 user: Dmitry Volyntsev date: Wed Nov 03 15:46:15 2021 +0000 description: Added fs.Stats, fs.stat() and friends. diffstat: auto/stat | 58 +++ configure | 1 + src/njs_builtin.c | 1 + src/njs_fs.c | 722 ++++++++++++++++++++++++++++++++++++++++++++++- src/njs_fs.h | 1 + src/njs_value.h | 1 + src/njs_vm.h | 1 + test/fs/methods.js | 186 ++++++++++++ test/njs_expect_test.exp | 8 +- 9 files changed, 970 insertions(+), 9 deletions(-) diffs (truncated from 1117 to 1000 lines): diff -r 4ddfb2f2227f -r 203bc61d8d70 auto/stat --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/auto/stat Wed Nov 03 15:46:15 2021 +0000 @@ -0,0 +1,58 @@ + +# Copyright (C) Dmitry Volyntsev +# Copyright (C) NGINX, Inc. + + +njs_feature="stat.st_atimespec" +njs_feature_name=NJS_HAVE_STAT_ATIMESPEC +njs_feature_run=no +njs_feature_incs= +njs_feature_libs= +njs_feature_test="#include + + int main(void) { + struct stat st; + + if (fstat(0, &st) != 0) { + return 1; + } + + return (int) st.st_atimespec.tv_sec; + }" +. auto/feature + + +njs_feature="stat.st_birthtim" +njs_feature_name=NJS_HAVE_STAT_BIRTHTIM +njs_feature_incs= +njs_feature_libs= +njs_feature_test="#include + + int main(void) { + struct stat st; + + if (fstat(0, &st) != 0) { + return 1; + } + + return (int) st.st_birthtim.tv_sec; + }" +. auto/feature + + +njs_feature="stat.st_atim" +njs_feature_name=NJS_HAVE_STAT_ATIM +njs_feature_incs= +njs_feature_libs= +njs_feature_test="#include + + int main(void) { + struct stat st; + + if (fstat(0, &st) != 0) { + return 1; + } + + return (int) st.st_atim.tv_sec; + }" +. auto/feature diff -r 4ddfb2f2227f -r 203bc61d8d70 configure --- a/configure Tue Nov 02 12:38:42 2021 +0000 +++ b/configure Wed Nov 03 15:46:15 2021 +0000 @@ -23,6 +23,7 @@ set -u . auto/time . auto/memalign . auto/getrandom +. auto/stat . auto/explicit_bzero . auto/pcre . 
auto/readline diff -r 4ddfb2f2227f -r 203bc61d8d70 src/njs_builtin.c --- a/src/njs_builtin.c Tue Nov 02 12:38:42 2021 +0000 +++ b/src/njs_builtin.c Wed Nov 03 15:46:15 2021 +0000 @@ -89,6 +89,7 @@ static const njs_object_type_init_t *con &njs_iterator_type_init, &njs_array_iterator_type_init, &njs_dirent_type_init, + &njs_stats_type_init, &njs_hash_type_init, &njs_hmac_type_init, &njs_typed_array_type_init, diff -r 4ddfb2f2227f -r 203bc61d8d70 src/njs_fs.c --- a/src/njs_fs.c Tue Nov 02 12:38:42 2021 +0000 +++ b/src/njs_fs.c Wed Nov 03 15:46:15 2021 +0000 @@ -12,12 +12,12 @@ #if (NJS_SOLARIS) #define DT_DIR 0 -#define DT_REG 0 -#define DT_CHR 0 -#define DT_LNK 0 -#define DT_BLK 0 -#define DT_FIFO 0 -#define DT_SOCK 0 +#define DT_REG 1 +#define DT_CHR 2 +#define DT_LNK 3 +#define DT_BLK 4 +#define DT_FIFO 5 +#define DT_SOCK 6 #define NJS_DT_INVALID 0xffffffff #define njs_dentry_type(_dentry) \ @@ -50,9 +50,15 @@ typedef enum { } njs_fs_writemode_t; +typedef enum { + NJS_FS_STAT, + NJS_FS_LSTAT, +} njs_fs_statmode_t; + + typedef struct { - njs_str_t name; - int value; + njs_str_t name; + int value; } njs_fs_entry_t; @@ -74,6 +80,48 @@ typedef enum { } njs_ftw_type_t; +typedef struct { + long tv_sec; + long tv_nsec; +} njs_timespec_t; + + +typedef struct { + uint64_t st_dev; + uint64_t st_mode; + uint64_t st_nlink; + uint64_t st_uid; + uint64_t st_gid; + uint64_t st_rdev; + uint64_t st_ino; + uint64_t st_size; + uint64_t st_blksize; + uint64_t st_blocks; + njs_timespec_t st_atim; + njs_timespec_t st_mtim; + njs_timespec_t st_ctim; + njs_timespec_t st_birthtim; +} njs_stat_t; + + +typedef enum { + NJS_FS_STAT_DEV, + NJS_FS_STAT_INO, + NJS_FS_STAT_MODE, + NJS_FS_STAT_NLINK, + NJS_FS_STAT_UID, + NJS_FS_STAT_GID, + NJS_FS_STAT_RDEV, + NJS_FS_STAT_SIZE, + NJS_FS_STAT_BLKSIZE, + NJS_FS_STAT_BLOCKS, + NJS_FS_STAT_ATIME, + NJS_FS_STAT_BIRTHTIME, + NJS_FS_STAT_CTIME, + NJS_FS_STAT_MTIME, +} njs_stat_prop_t; + + typedef njs_int_t (*njs_file_tree_walk_cb_t)(const char *, const 
struct stat *, njs_ftw_type_t); @@ -105,6 +153,9 @@ static njs_int_t njs_fs_add_event(njs_vm static njs_int_t njs_fs_dirent_create(njs_vm_t *vm, njs_value_t *name, njs_value_t *type, njs_value_t *retval); +static njs_int_t njs_fs_stats_create(njs_vm_t *vm, struct stat *st, + njs_value_t *retval); + static njs_fs_entry_t njs_flags_table[] = { { njs_str("a"), O_APPEND | O_CREAT | O_WRONLY }, { njs_str("a+"), O_APPEND | O_CREAT | O_RDWR }, @@ -1010,6 +1061,102 @@ done: static njs_int_t +njs_fs_stat(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t magic) +{ + njs_int_t ret; + njs_bool_t throw; + struct stat sb; + const char *path; + njs_value_t retval, *callback, *options; + njs_fs_calltype_t calltype; + char path_buf[NJS_MAX_PATH + 1]; + + static const njs_value_t string_bigint = njs_string("bigint"); + static const njs_value_t string_throw = njs_string("throwIfNoEntry"); + + path = njs_fs_path(vm, path_buf, njs_arg(args, nargs, 1), "path"); + if (njs_slow_path(path == NULL)) { + return NJS_ERROR; + } + + callback = NULL; + calltype = magic & 3; + options = njs_arg(args, nargs, 2); + + if (njs_slow_path(calltype == NJS_FS_CALLBACK)) { + callback = njs_arg(args, nargs, njs_min(nargs - 1, 3)); + if (!njs_is_function(callback)) { + njs_type_error(vm, "\"callback\" must be a function"); + return NJS_ERROR; + } + if (options == callback) { + options = njs_value_arg(&njs_value_undefined); + } + } + + throw = 1; + + switch (options->type) { + case NJS_UNDEFINED: + break; + + default: + if (!njs_is_object(options)) { + njs_type_error(vm, "Unknown options type: \"%s\" " + "(an object required)", + njs_type_string(options->type)); + return NJS_ERROR; + } + + ret = njs_value_property(vm, options, njs_value_arg(&string_bigint), + &retval); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + if (njs_bool(&retval)) { + njs_type_error(vm, "\"bigint\" is not supported"); + return NJS_ERROR; + } + + if (calltype == NJS_FS_DIRECT) { + ret = 
njs_value_property(vm, options, njs_value_arg(&string_throw), + &retval); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + throw = njs_bool(&retval); + } + } + + ret = ((magic >> 2) == NJS_FS_STAT) ? stat(path, &sb) : lstat(path, &sb); + if (njs_slow_path(ret != 0)) { + if (errno != ENOENT || throw) { + ret = njs_fs_error(vm, + ((magic >> 2) == NJS_FS_STAT) ? "stat" : "lstat", + strerror(errno), path, errno, &retval); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + } else { + njs_set_undefined(&retval); + } + + return njs_fs_result(vm, &retval, calltype, callback, 2); + } + + ret = njs_fs_stats_create(vm, &sb, &retval); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + + return njs_fs_result(vm, &retval, calltype, callback, 2); +} + + +static njs_int_t njs_fs_fd_read(njs_vm_t *vm, int fd, njs_str_t *data) { u_char *p, *end, *start; @@ -1870,6 +2017,511 @@ const njs_object_type_init_t njs_dirent }; +static void +njs_fs_to_stat(njs_stat_t *dst, struct stat *st) +{ + dst->st_dev = st->st_dev; + dst->st_mode = st->st_mode; + dst->st_nlink = st->st_nlink; + dst->st_uid = st->st_uid; + dst->st_gid = st->st_gid; + dst->st_rdev = st->st_rdev; + dst->st_ino = st->st_ino; + dst->st_size = st->st_size; + dst->st_blksize = st->st_blksize; + dst->st_blocks = st->st_blocks; + +#if (NJS_HAVE_STAT_ATIMESPEC) + + dst->st_atim.tv_sec = st->st_atimespec.tv_sec; + dst->st_atim.tv_nsec = st->st_atimespec.tv_nsec; + dst->st_mtim.tv_sec = st->st_mtimespec.tv_sec; + dst->st_mtim.tv_nsec = st->st_mtimespec.tv_nsec; + dst->st_ctim.tv_sec = st->st_ctimespec.tv_sec; + dst->st_ctim.tv_nsec = st->st_ctimespec.tv_nsec; + dst->st_birthtim.tv_sec = st->st_birthtimespec.tv_sec; + dst->st_birthtim.tv_nsec = st->st_birthtimespec.tv_nsec; + +#elif (NJS_HAVE_STAT_ATIM) + + dst->st_atim.tv_sec = st->st_atim.tv_sec; + dst->st_atim.tv_nsec = st->st_atim.tv_nsec; + dst->st_mtim.tv_sec = st->st_mtim.tv_sec; + dst->st_mtim.tv_nsec = st->st_mtim.tv_nsec; + 
dst->st_ctim.tv_sec = st->st_ctim.tv_sec; + dst->st_ctim.tv_nsec = st->st_ctim.tv_nsec; + +#if (NJS_HAVE_STAT_BIRTHTIM) + dst->st_birthtim.tv_sec = st->st_birthtim.tv_sec; + dst->st_birthtim.tv_nsec = st->st_birthtim.tv_nsec; +#else + dst->st_birthtim.tv_sec = st->st_ctim.tv_sec; + dst->st_birthtim.tv_nsec = st->st_ctim.tv_nsec; +#endif + +#else + + dst->st_atim.tv_sec = st->st_atime; + dst->st_atim.tv_nsec = 0; + dst->st_mtim.tv_sec = st->st_mtime; + dst->st_mtim.tv_nsec = 0; + dst->st_ctim.tv_sec = st->st_ctime; + dst->st_ctim.tv_nsec = 0; + dst->st_birthtim.tv_sec = st->st_ctime; + dst->st_birthtim.tv_nsec = 0; + +#endif +} + + +static njs_int_t +njs_fs_stats_create(njs_vm_t *vm, struct stat *st, njs_value_t *retval) +{ + njs_stat_t *copy; + njs_object_value_t *stat; + + stat = njs_object_value_alloc(vm, NJS_OBJ_TYPE_FS_STATS, 0, NULL); + if (njs_slow_path(stat == NULL)) { + return NJS_ERROR; + } + + stat->object.shared_hash = + vm->prototypes[NJS_OBJ_TYPE_FS_STATS].object.shared_hash; + + copy = njs_mp_alloc(vm->mem_pool, sizeof(njs_stat_t)); + if (copy == NULL) { + njs_memory_error(vm); + return NJS_ERROR; + } + + njs_fs_to_stat(copy, st); + + njs_set_data(&stat->value, copy, NJS_DATA_TAG_FS_STAT); + njs_set_object_value(retval, stat); + + return NJS_OK; +} + + +static njs_int_t +njs_stats_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t unused) +{ + njs_type_error(vm, "Stats is not a constructor"); + return NJS_ERROR; +} + + +static njs_int_t +njs_fs_stats_test(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t testtype) +{ + unsigned mask; + njs_stat_t *st; + njs_value_t *this; + + this = njs_argument(args, 0); + + if (njs_slow_path(!njs_is_object_data(this, NJS_DATA_TAG_FS_STAT))) { + return NJS_DECLINED; + } + + st = njs_object_data(this); + + switch (testtype) { + case DT_DIR: + mask = S_IFDIR; + break; + + case DT_REG: + mask = S_IFREG; + break; + + case DT_CHR: + mask = S_IFCHR; + break; + + case DT_LNK: + mask 
= S_IFLNK; + break; + + case DT_BLK: + mask = S_IFBLK; + break; + + case DT_FIFO: + mask = S_IFIFO; + break; + + case DT_SOCK: + default: + mask = S_IFSOCK; + } + + njs_set_boolean(&vm->retval, (st->st_mode & S_IFMT) == mask); + + return NJS_OK; +} + + +static njs_int_t +njs_fs_stats_prop(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, + njs_value_t *setval, njs_value_t *retval) +{ + double v; + njs_date_t *date; + njs_stat_t *st; + +#define njs_fs_time_ms(ts) ((ts)->tv_sec * 1000.0 + (ts)->tv_nsec / 1000000.0) + + if (njs_slow_path(!njs_is_object_data(value, NJS_DATA_TAG_FS_STAT))) { + return NJS_DECLINED; + } + + st = njs_object_data(value); + + switch (prop->value.data.magic16) { + case NJS_FS_STAT_DEV: + v = st->st_dev; + break; + + case NJS_FS_STAT_INO: + v = st->st_ino; + break; + + case NJS_FS_STAT_MODE: + v = st->st_mode; + break; + + case NJS_FS_STAT_NLINK: + v = st->st_nlink; + break; + + case NJS_FS_STAT_UID: + v = st->st_uid; + break; + + case NJS_FS_STAT_GID: + v = st->st_gid; + break; + + case NJS_FS_STAT_RDEV: + v = st->st_rdev; + break; + + case NJS_FS_STAT_SIZE: + v = st->st_size; + break; + + case NJS_FS_STAT_BLKSIZE: + v = st->st_blksize; + break; + + case NJS_FS_STAT_BLOCKS: + v = st->st_blocks; + break; + + case NJS_FS_STAT_ATIME: + v = njs_fs_time_ms(&st->st_atim); + break; + + case NJS_FS_STAT_BIRTHTIME: + v = njs_fs_time_ms(&st->st_birthtim); + break; + + case NJS_FS_STAT_CTIME: + v = njs_fs_time_ms(&st->st_ctim); + break; + + case NJS_FS_STAT_MTIME: + default: + v = njs_fs_time_ms(&st->st_mtim); + break; + } + + switch (prop->value.data.magic32) { + case NJS_NUMBER: + njs_set_number(retval, v); + break; + + case NJS_DATE: + default: + date = njs_date_alloc(vm, v); + if (njs_slow_path(date == NULL)) { + return NJS_ERROR; + } + + njs_set_date(retval, date); + break; + } + + return NJS_OK; +} + + +static const njs_object_prop_t njs_stats_constructor_properties[] = +{ + { + .type = NJS_PROPERTY, + .name = njs_string("name"), + .value 
= njs_string("Stats"), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("length"), + .value = njs_value(NJS_NUMBER, 1, 0), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("prototype"), + .value = njs_prop_handler(njs_object_prototype_create), + }, +}; + + +const njs_object_init_t njs_stats_constructor_init = { + njs_stats_constructor_properties, + njs_nitems(njs_stats_constructor_properties), +}; + + +static const njs_object_prop_t njs_stats_prototype_properties[] = +{ + { + .type = NJS_PROPERTY, + .name = njs_wellknown_symbol(NJS_SYMBOL_TO_STRING_TAG), + .value = njs_string("Stats"), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("constructor"), + .value = njs_prop_handler(njs_object_prototype_create_constructor), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("isBlockDevice"), + .value = njs_native_function2(njs_fs_stats_test, 0, DT_BLK), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_long_string("isCharacterDevice"), + .value = njs_native_function2(njs_fs_stats_test, 0, DT_CHR), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("isDirectory"), + .value = njs_native_function2(njs_fs_stats_test, 0, DT_DIR), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("isFIFO"), + .value = njs_native_function2(njs_fs_stats_test, 0, DT_FIFO), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("isFile"), + .value = njs_native_function2(njs_fs_stats_test, 0, DT_REG), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("isSocket"), + .value = njs_native_function2(njs_fs_stats_test, 0, DT_SOCK), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("isSymbolicLink"), + .value = 
njs_native_function2(njs_fs_stats_test, 0, DT_LNK), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("dev"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_DEV, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("ino"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_INO, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("mode"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_MODE, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("nlink"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_NLINK, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("uid"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_UID, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("gid"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_GID, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("rdev"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_RDEV, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("size"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_SIZE, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("blksize"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_BLKSIZE, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("blocks"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_BLOCKS, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("atimeMs"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_ATIME, + NJS_NUMBER), + .enumerable = 
1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("birthtimeMs"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_BIRTHTIME, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("ctimeMs"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_CTIME, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("mtimeMs"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_MTIME, + NJS_NUMBER), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("atime"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_ATIME, + NJS_DATE), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("birthtime"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_BIRTHTIME, + NJS_DATE), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("ctime"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_CTIME, + NJS_DATE), + .enumerable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("mtime"), + .value = njs_prop_handler2(njs_fs_stats_prop, NJS_FS_STAT_MTIME, + NJS_DATE), + .enumerable = 1, + }, +}; + + +const njs_object_init_t njs_stats_prototype_init = { + njs_stats_prototype_properties, + njs_nitems(njs_stats_prototype_properties), +}; + + +const njs_object_type_init_t njs_stats_type_init = { + .constructor = njs_native_ctor(njs_stats_constructor, 0, 0), + .prototype_props = &njs_stats_prototype_init, + .constructor_props = &njs_stats_constructor_init, + .prototype_value = { .object = { .type = NJS_OBJECT } }, +}; + + static const njs_object_prop_t njs_fs_promises_properties[] = { { @@ -1940,6 +2592,24 @@ static const njs_object_prop_t njs_fs_p { .type = NJS_PROPERTY, + .name = njs_string("lstat"), + .value = njs_native_function2(njs_fs_stat, 0, + njs_fs_magic(NJS_FS_PROMISE, NJS_FS_LSTAT)), + .writable = 1, + .configurable = 1, + 
}, + + { + .type = NJS_PROPERTY, + .name = njs_string("stat"), + .value = njs_native_function2(njs_fs_stat, 0, + njs_fs_magic(NJS_FS_PROMISE, NJS_FS_STAT)), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, .name = njs_string("symlink"), .value = njs_native_function2(njs_fs_symlink, 0, NJS_FS_PROMISE), .writable = 1, @@ -2231,6 +2901,42 @@ static const njs_object_prop_t njs_fs_o .writable = 1, .configurable = 1, }, + + { + .type = NJS_PROPERTY, + .name = njs_string("lstat"), + .value = njs_native_function2(njs_fs_stat, 0, + njs_fs_magic(NJS_FS_CALLBACK, NJS_FS_LSTAT)), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("lstatSync"), + .value = njs_native_function2(njs_fs_stat, 0, + njs_fs_magic(NJS_FS_DIRECT, NJS_FS_LSTAT)), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("stat"), + .value = njs_native_function2(njs_fs_stat, 0, + njs_fs_magic(NJS_FS_CALLBACK, NJS_FS_STAT)), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("statSync"), + .value = njs_native_function2(njs_fs_stat, 0, + njs_fs_magic(NJS_FS_DIRECT, NJS_FS_STAT)), + .writable = 1, + .configurable = 1, + }, }; diff -r 4ddfb2f2227f -r 203bc61d8d70 src/njs_fs.h --- a/src/njs_fs.h Tue Nov 02 12:38:42 2021 +0000 +++ b/src/njs_fs.h Wed Nov 03 15:46:15 2021 +0000 @@ -11,5 +11,6 @@ extern const njs_object_init_t njs_fs_object_init; extern const njs_object_type_init_t njs_dirent_type_init; +extern const njs_object_type_init_t njs_stats_type_init; #endif /* _NJS_FS_H_INCLUDED_ */ diff -r 4ddfb2f2227f -r 203bc61d8d70 src/njs_value.h --- a/src/njs_value.h Tue Nov 02 12:38:42 2021 +0000 +++ b/src/njs_value.h Wed Nov 03 15:46:15 2021 +0000 @@ -72,6 +72,7 @@ typedef enum { NJS_DATA_TAG_TEXT_ENCODER, NJS_DATA_TAG_TEXT_DECODER, NJS_DATA_TAG_ARRAY_ITERATOR, + NJS_DATA_TAG_FS_STAT, NJS_DATA_TAG_MAX } njs_data_tag_t; diff -r 4ddfb2f2227f -r 203bc61d8d70 src/njs_vm.h 
--- a/src/njs_vm.h Tue Nov 02 12:38:42 2021 +0000 +++ b/src/njs_vm.h Wed Nov 03 15:46:15 2021 +0000 @@ -57,6 +57,7 @@ typedef enum { NJS_OBJ_TYPE_ITERATOR, NJS_OBJ_TYPE_ARRAY_ITERATOR, NJS_OBJ_TYPE_FS_DIRENT, + NJS_OBJ_TYPE_FS_STATS, NJS_OBJ_TYPE_CRYPTO_HASH, NJS_OBJ_TYPE_CRYPTO_HMAC, NJS_OBJ_TYPE_TYPED_ARRAY, diff -r 4ddfb2f2227f -r 203bc61d8d70 test/fs/methods.js --- a/test/fs/methods.js Tue Nov 02 12:38:42 2021 +0000 +++ b/test/fs/methods.js Wed Nov 03 15:46:15 2021 +0000 @@ -355,6 +355,186 @@ let realpathP_tsuite = { tests: realpath_tests, }; +async function stat_test(params) { + if (params.init) { + params.init(params); + } + + let stat = await method(params.method, params).catch(e => ({error:e})); + + if (params.check && !params.check(stat, params)) { + throw Error(`${params.method} failed check`); + } + + return 'SUCCESS'; +} + +function contains(arr, elts) { + return elts.every(el => { + let r = arr.some(v => el == v); + + if (!r) { + throw Error(`${el} is not found`); + } + + return r; + }); +} + +let stat_tests = [ + { args: ["/invalid_path"], + check: (err, params) => { + let e = err.error; + + if (e.syscall != params.method) { + throw Error(`${e.syscall} unexpected syscall`); + } + + if (e.code != "ENOENT") { + throw Error(`${e.code} unexpected code`); + } + + return true; + } }, + + { args: ["@_link"], + init: (params) => { + let lname = params.args[0]; + let fname = lname.slice(0, -5); + + /* making symbolic link. 
*/ + + try { fs.unlinkSync(fname); fs.unlinkSync(lname); } catch (e) {} + + fs.writeFileSync(fname, fname); + + fname = fs.realpathSync(fname); + fs.symlinkSync(fname, lname); + }, + + check: (st, params) => { + switch (params.method) { + case "stat": + if (!st.isFile()) { + throw Error(`${params.args[0]} is not a file`); + } + + break; + + case "lstat": + if (!st.isSymbolicLink()) { + throw Error(`${params.args[0]} is not a link`); + } + + break; + } + + return true; + } }, + + { args: ["./build/"], + check: (st) => contains(Object.keys(st), + [ "atime", "atimeMs", "birthtime", "birthtimeMs", + "blksize", "blocks", "ctime", "ctimeMs", "dev", + "gid", "ino", "mode", "mtime", "mtimeMs","nlink", + "rdev", "size", "uid" ]) }, + + { args: ["./build/"], + check: (st) => Object.keys(st).every(p => { + let v = st[p]; + if (p == 'atime' || p == 'ctime' || p == 'mtime' || p == 'birthtime') { + if (!(v instanceof Date)) { + throw Error(`${p} is not an instance of Date`); + } + + return true; + } + From ranier.vf at gmail.com Fri Nov 5 12:35:18 2021 From: ranier.vf at gmail.com (Ranier Vilela) Date: Fri, 5 Nov 2021 09:35:18 -0300 Subject: Reduce scope some variables Message-ID: Hi, Is there any interest in safely reducing the scope of variables? I think this improves readability and may have some tiny effect on performance. An example is attached. regards, Ranier Vilela -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: ngx_array_reduce_scope.patch Type: application/octet-stream Size: 1014 bytes Desc: not available URL: From greeshma.avadhootha at gmail.com Sat Nov 6 00:02:50 2021 From: greeshma.avadhootha at gmail.com (Greeshma A) Date: Fri, 5 Nov 2021 17:02:50 -0700 Subject: nginx variable for destination IP address in stream layer? 
Message-ID: Hi, Is there any way to get the destination IP address in the stream layer (ngx_stream_core_module) The SNI header of the destination can be gotten from "ssl_preread_server_name" Similarly, how to get the destination IP address in the stream(L4) or L3 layer from the request? Suppose my request is as follows curl https://DOMAIN.EXAMPLE* --resolve 'DOMAIN.EXAMPLE:443:192.0.2.17'* I want to get the IP 192.0.2.17 Which nginx variable would be mapped to this? -Greeshma -------------- next part -------------- An HTML attachment was scrubbed... URL: From greeshma.avadhootha at gmail.com Mon Nov 8 07:20:37 2021 From: greeshma.avadhootha at gmail.com (Greeshma A) Date: Sun, 7 Nov 2021 23:20:37 -0800 Subject: nginx variable for destination IP address in stream layer? In-Reply-To: References: Message-ID: Anyone has any insight on this? Would really appreciate the help! -Greeshma On Fri, Nov 5, 2021, 5:02 PM Greeshma A wrote: > Hi, > Is there any way to get the destination IP address in the stream layer > (ngx_stream_core_module) > > The SNI header of the destination can be gotten from > "ssl_preread_server_name" > > Similarly, how to get the destination IP address in the stream(L4) or L3 > layer from the request? > > Suppose my request is as follows > curl https://DOMAIN.EXAMPLE* --resolve 'DOMAIN.EXAMPLE:443:192.0.2.17'* > > I want to get the IP 192.0.2.17 > Which nginx variable would be mapped to this? > > -Greeshma > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From pluknet at nginx.com Mon Nov 8 16:25:43 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 8 Nov 2021 19:25:43 +0300 Subject: [PATCH 1 of 3] HTTP/3: adjusted QUIC connection finalization In-Reply-To: <8739f475583031399879.1634561308@arut-laptop> References: <8739f475583031399879.1634561308@arut-laptop> Message-ID: <20211108162543.aysjry2ceehooktn@MacBook-Air-Sergey.local> On Mon, Oct 18, 2021 at 03:48:28PM +0300, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1634559753 -10800 > # Mon Oct 18 15:22:33 2021 +0300 > # Branch quic > # Node ID 8739f475583031399879ef0af2eb5af76008449e > # Parent 404de224517e33f685613d6425dcdb3c8ef5b97e > HTTP/3: adjusted QUIC connection finalization. > > When an HTTP/3 function returns an error in context of a QUIC stream, it's > this function's responsibility now to finalize the entire QUIC connection > with the right code, if required. Previously, QUIC connection finalization > could be done both outside and inside such functions. The new rule follows > a similar rule for logging, leads to cleaner code, and allows to provide more > details about the error. > > While here, a few error cases are no longer treated as fatal and QUIC connection > is no longer finalized in these cases. A few other cases now lead to > stream reset instead of connection finalization. 
> > diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c > --- a/src/http/v3/ngx_http_v3.c > +++ b/src/http/v3/ngx_http_v3.c > @@ -33,7 +33,7 @@ ngx_http_v3_init_session(ngx_connection_ > > h3c = ngx_pcalloc(pc->pool, sizeof(ngx_http_v3_session_t)); > if (h3c == NULL) { > - return NGX_ERROR; > + goto failed; > } > > h3c->max_push_id = (uint64_t) -1; > @@ -49,7 +49,7 @@ ngx_http_v3_init_session(ngx_connection_ > > cln = ngx_pool_cleanup_add(pc->pool, 0); > if (cln == NULL) { > - return NGX_ERROR; > + goto failed; > } > > cln->handler = ngx_http_v3_cleanup_session; > @@ -58,6 +58,14 @@ ngx_http_v3_init_session(ngx_connection_ > hc->v3_session = h3c; > > return ngx_http_v3_send_settings(c); > + > +failed: > + > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create http3 session"); > + > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > + "failed to create http3 session"); > + return NGX_ERROR; > } > > > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > --- a/src/http/v3/ngx_http_v3_request.c > +++ b/src/http/v3/ngx_http_v3_request.c > @@ -65,8 +65,6 @@ ngx_http_v3_init(ngx_connection_t *c) > ngx_http_core_srv_conf_t *cscf; > > if (ngx_http_v3_init_session(c) != NGX_OK) { > - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > - "internal error"); > ngx_http_close_connection(c); > return; > } > @@ -110,8 +108,6 @@ ngx_http_v3_init(ngx_connection_t *c) > h3c->goaway = 1; > > if (ngx_http_v3_send_goaway(c, (n + 1) << 2) != NGX_OK) { > - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > - "goaway error"); > ngx_http_close_connection(c); > return; > } > @@ -287,15 +283,14 @@ ngx_http_v3_process_request(ngx_event_t > rc = ngx_http_v3_parse_headers(c, st, b); > > if (rc > 0) { > - ngx_http_v3_finalize_connection(c, rc, > - "could not parse request headers"); > + ngx_quic_reset_stream(c, rc); > + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > + "client sent 
invalid header"); > ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); > break; > } > > if (rc == NGX_ERROR) { > - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > - "internal error"); > ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); > break; > } > @@ -1167,17 +1162,13 @@ ngx_http_v3_request_body_filter(ngx_http > } > > if (rc > 0) { > - ngx_http_v3_finalize_connection(r->connection, rc, > - "client sent invalid body"); > + ngx_quic_reset_stream(r->connection, rc); > ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > "client sent invalid body"); > return NGX_HTTP_BAD_REQUEST; > } > > if (rc == NGX_ERROR) { > - ngx_http_v3_finalize_connection(r->connection, > - NGX_HTTP_V3_ERR_INTERNAL_ERROR, > - "internal error"); > return NGX_HTTP_INTERNAL_SERVER_ERROR; > } > > diff --git a/src/http/v3/ngx_http_v3_streams.c b/src/http/v3/ngx_http_v3_streams.c > --- a/src/http/v3/ngx_http_v3_streams.c > +++ b/src/http/v3/ngx_http_v3_streams.c > @@ -283,7 +283,7 @@ ngx_http_v3_create_push_stream(ngx_conne > > sc = ngx_quic_open_stream(c, 0); > if (sc == NULL) { > - return NULL; > + goto failed; > } > > p = buf; > @@ -318,7 +318,13 @@ ngx_http_v3_create_push_stream(ngx_conne > > failed: > > - ngx_http_v3_close_uni_stream(sc); > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create push stream"); > + > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, > + "failed to create push stream"); > + if (sc) { > + ngx_http_v3_close_uni_stream(sc); > + } > > return NULL; > } > @@ -368,7 +374,7 @@ ngx_http_v3_get_uni_stream(ngx_connectio > > sc = ngx_quic_open_stream(c, 0); > if (sc == NULL) { > - return NULL; > + goto failed; > } > > sc->quic->cancelable = 1; > @@ -405,7 +411,13 @@ ngx_http_v3_get_uni_stream(ngx_connectio > > failed: > > - ngx_http_v3_close_uni_stream(sc); > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create server stream"); > + > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, > 
+ "failed to create server stream"); > + if (sc) { > + ngx_http_v3_close_uni_stream(sc); > + } > > return NULL; > } > @@ -424,7 +436,7 @@ ngx_http_v3_send_settings(ngx_connection > > cc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_CONTROL); > if (cc == NULL) { > - return NGX_DECLINED; > + return NGX_ERROR; > } > > h3scf = ngx_http_v3_get_module_srv_conf(c, ngx_http_v3_module); > @@ -457,6 +469,10 @@ ngx_http_v3_send_settings(ngx_connection > > failed: > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send settings"); > + > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > + "failed to send settings"); > ngx_http_v3_close_uni_stream(cc); > > return NGX_ERROR; > @@ -475,7 +491,7 @@ ngx_http_v3_send_goaway(ngx_connection_t > > cc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_CONTROL); > if (cc == NULL) { > - return NGX_DECLINED; > + return NGX_ERROR; This case now misses sending CONNECTION_CLOSE (no further stream processing), Instead, the last allowed stream connection is now closed while initializing with sending FIN with off:0 in its cleanup function which confuses clients due to H3_FRAME_UNEXPECTED. It probably needs to be converted to goto failed with conditional uni stream close. Same in ngx_http_v3_send_settings() around ngx_http_v3_get_uni_stream(), but there it just leads to control stream silent closure with no visible effects on the wire. Yet, with teh 2nd patch applied that updates stream reset logic, the error causes to emit RESET_STREAM(H3_INTERNAL_ERROR), which is a protocol violation when operating on a critical (control) stream. 
> } > > n = ngx_http_v3_encode_varlen_int(NULL, id); > @@ -495,6 +511,10 @@ ngx_http_v3_send_goaway(ngx_connection_t > > failed: > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send goaway"); > + > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > + "failed to send goaway"); > ngx_http_v3_close_uni_stream(cc); > > return NGX_ERROR; > @@ -510,7 +530,7 @@ ngx_http_v3_send_ack_section(ngx_connect > ngx_http_v3_session_t *h3c; > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > - "http3 client ack section %ui", stream_id); > + "http3 send section acknowledgement %ui", stream_id); > > dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); > if (dc == NULL) { > @@ -524,11 +544,21 @@ ngx_http_v3_send_ack_section(ngx_connect > h3c->total_bytes += n; > > if (dc->send(dc, buf, n) != (ssize_t) n) { > - ngx_http_v3_close_uni_stream(dc); > - return NGX_ERROR; > + goto failed; > } > > return NGX_OK; > + > +failed: > + > + ngx_log_error(NGX_LOG_ERR, c->log, 0, > + "failed to send section acknowledgement"); > + > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > + "failed to send section acknowledgement"); > + ngx_http_v3_close_uni_stream(dc); > + > + return NGX_ERROR; > } > > > @@ -541,7 +571,7 @@ ngx_http_v3_send_cancel_stream(ngx_conne > ngx_http_v3_session_t *h3c; > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > - "http3 client cancel stream %ui", stream_id); > + "http3 send stream cancellation %ui", stream_id); > > dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); > if (dc == NULL) { > @@ -555,11 +585,20 @@ ngx_http_v3_send_cancel_stream(ngx_conne > h3c->total_bytes += n; > > if (dc->send(dc, buf, n) != (ssize_t) n) { > - ngx_http_v3_close_uni_stream(dc); > - return NGX_ERROR; > + goto failed; > } > > return NGX_OK; > + > +failed: > + > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send stream cancellation"); > + > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > + "failed to 
send stream cancellation"); > + ngx_http_v3_close_uni_stream(dc); > + > + return NGX_ERROR; > } > > > @@ -572,7 +611,7 @@ ngx_http_v3_send_inc_insert_count(ngx_co > ngx_http_v3_session_t *h3c; > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > - "http3 client increment insert count %ui", inc); > + "http3 send insert count increment %ui", inc); > > dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); > if (dc == NULL) { > @@ -586,11 +625,21 @@ ngx_http_v3_send_inc_insert_count(ngx_co > h3c->total_bytes += n; > > if (dc->send(dc, buf, n) != (ssize_t) n) { > - ngx_http_v3_close_uni_stream(dc); > - return NGX_ERROR; > + goto failed; > } > > return NGX_OK; > + > +failed: > + > + ngx_log_error(NGX_LOG_ERR, c->log, 0, > + "failed to send insert count increment"); > + > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > + "failed to send insert count increment"); > + ngx_http_v3_close_uni_stream(dc); > + > + return NGX_ERROR; > } > > > diff --git a/src/http/v3/ngx_http_v3_tables.c b/src/http/v3/ngx_http_v3_tables.c > --- a/src/http/v3/ngx_http_v3_tables.c > +++ b/src/http/v3/ngx_http_v3_tables.c > @@ -589,6 +589,10 @@ ngx_http_v3_check_insert_count(ngx_conne > if (h3c->nblocked == h3scf->max_blocked_streams) { > ngx_log_error(NGX_LOG_INFO, c->log, 0, > "client exceeded http3_max_blocked_streams limit"); > + > + ngx_http_v3_finalize_connection(c, > + NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED, > + "too many blocked streams"); > return NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED; > } > From greeshma.avadhootha at gmail.com Mon Nov 8 18:15:19 2021 From: greeshma.avadhootha at gmail.com (Greeshma A) Date: Mon, 8 Nov 2021 10:15:19 -0800 Subject: Which nginx variable holds the value for destination IP address in stream layer Message-ID: Hi, Is there any way to get the destination IP address in the stream layer (ngx_stream_core_module) The SNI header of the destination can be gotten from "ssl_preread_server_name" Similarly, how to get the destination IP address 
in the stream(L4) or L3 layer from the request? Suppose my request is as follows curl https://DOMAIN.EXAMPLE * --resolve 'DOMAIN.EXAMPLE:443:192.0.2.17'* I want to get the IP 192.0.2.17 Which nginx variable would be mapped to this? -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Mon Nov 8 20:28:30 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 8 Nov 2021 23:28:30 +0300 Subject: Which nginx variable holds the value for destination IP address in stream layer In-Reply-To: References: Message-ID: Hello! On Mon, Nov 08, 2021 at 10:15:19AM -0800, Greeshma A wrote: > Hi, > Is there any way to get the destination IP address in the stream layer > (ngx_stream_core_module) > > The SNI header of the destination can be gotten from > "ssl_preread_server_name" > > Similarly, how to get the destination IP address in the stream(L4) or L3 > layer from the request? > > Suppose my request is as follows > curl https://DOMAIN.EXAMPLE * --resolve > 'DOMAIN.EXAMPLE:443:192.0.2.17'* > > I want to get the IP 192.0.2.17 > Which nginx variable would be mapped to this? This mailing list is for nginx developers. Please avoid posting user-level questions here. Thanks for understanding. -- Maxim Dounin http://mdounin.ru/ From greeshma.avadhootha at gmail.com Mon Nov 8 20:39:14 2021 From: greeshma.avadhootha at gmail.com (Greeshma A) Date: Mon, 8 Nov 2021 12:39:14 -0800 Subject: Which nginx variable holds the value for destination IP address in stream layer In-Reply-To: References: Message-ID: @Maxim, thanks for pointing this out. Please share the forum/mailing list that would be appropriate. Really appreciate it. Haven't had much luck with the documentation regarding this. -Greeshma On Mon, Nov 8, 2021 at 12:29 PM Maxim Dounin wrote: > Hello! 
> > On Mon, Nov 08, 2021 at 10:15:19AM -0800, Greeshma A wrote: > > > Hi, > > Is there any way to get the destination IP address in the stream layer > > (ngx_stream_core_module) > > > > The SNI header of the destination can be gotten from > > "ssl_preread_server_name" > > > > Similarly, how to get the destination IP address in the stream(L4) or L3 > > layer from the request? > > > > Suppose my request is as follows > > curl https://DOMAIN.EXAMPLE * --resolve > > 'DOMAIN.EXAMPLE:443:192.0.2.17'* > > > > I want to get the IP 192.0.2.17 > > Which nginx variable would be mapped to this? > > This mailing list is for nginx developers. Please avoid posting > user-level questions here. Thanks for understanding. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Mon Nov 8 20:58:34 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 8 Nov 2021 23:58:34 +0300 Subject: Which nginx variable holds the value for destination IP address in stream layer In-Reply-To: References: Message-ID: Hello! On Mon, Nov 08, 2021 at 12:39:14PM -0800, Greeshma A wrote: > @Maxim, thanks for pointing this out. Please share the forum/mailing list > that would be appropriate. Really appreciate it. Haven't had much luck with > the documentation regarding this. Relevant mailing list can be found here: http://nginx.org/en/support.html Note well that it might be a good idea to re-check the documentation before re-posting your question to another mailing list. 
-- Maxim Dounin http://mdounin.ru/ From arut at nginx.com Tue Nov 9 07:32:12 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 9 Nov 2021 10:32:12 +0300 Subject: [PATCH 1 of 3] HTTP/3: adjusted QUIC connection finalization In-Reply-To: <20211108162543.aysjry2ceehooktn@MacBook-Air-Sergey.local> References: <8739f475583031399879.1634561308@arut-laptop> <20211108162543.aysjry2ceehooktn@MacBook-Air-Sergey.local> Message-ID: <20211109073212.bgvqnihgkvieygiy@Romans-MacBook-Pro.local> On Mon, Nov 08, 2021 at 07:25:43PM +0300, Sergey Kandaurov wrote: > On Mon, Oct 18, 2021 at 03:48:28PM +0300, Roman Arutyunyan wrote: > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1634559753 -10800 > > # Mon Oct 18 15:22:33 2021 +0300 > > # Branch quic > > # Node ID 8739f475583031399879ef0af2eb5af76008449e > > # Parent 404de224517e33f685613d6425dcdb3c8ef5b97e > > HTTP/3: adjusted QUIC connection finalization. > > > > When an HTTP/3 function returns an error in context of a QUIC stream, it's > > this function's responsibility now to finalize the entire QUIC connection > > with the right code, if required. Previously, QUIC connection finalization > > could be done both outside and inside such functions. The new rule follows > > a similar rule for logging, leads to cleaner code, and allows to provide more > > details about the error. > > > > While here, a few error cases are no longer treated as fatal and QUIC connection > > is no longer finalized in these cases. A few other cases now lead to > > stream reset instead of connection finalization. 
> > > > diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c > > --- a/src/http/v3/ngx_http_v3.c > > +++ b/src/http/v3/ngx_http_v3.c > > @@ -33,7 +33,7 @@ ngx_http_v3_init_session(ngx_connection_ > > > > h3c = ngx_pcalloc(pc->pool, sizeof(ngx_http_v3_session_t)); > > if (h3c == NULL) { > > - return NGX_ERROR; > > + goto failed; > > } > > > > h3c->max_push_id = (uint64_t) -1; > > @@ -49,7 +49,7 @@ ngx_http_v3_init_session(ngx_connection_ > > > > cln = ngx_pool_cleanup_add(pc->pool, 0); > > if (cln == NULL) { > > - return NGX_ERROR; > > + goto failed; > > } > > > > cln->handler = ngx_http_v3_cleanup_session; > > @@ -58,6 +58,14 @@ ngx_http_v3_init_session(ngx_connection_ > > hc->v3_session = h3c; > > > > return ngx_http_v3_send_settings(c); > > + > > +failed: > > + > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create http3 session"); > > + > > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > > + "failed to create http3 session"); > > + return NGX_ERROR; > > } > > > > > > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > > --- a/src/http/v3/ngx_http_v3_request.c > > +++ b/src/http/v3/ngx_http_v3_request.c > > @@ -65,8 +65,6 @@ ngx_http_v3_init(ngx_connection_t *c) > > ngx_http_core_srv_conf_t *cscf; > > > > if (ngx_http_v3_init_session(c) != NGX_OK) { > > - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > > - "internal error"); > > ngx_http_close_connection(c); > > return; > > } > > @@ -110,8 +108,6 @@ ngx_http_v3_init(ngx_connection_t *c) > > h3c->goaway = 1; > > > > if (ngx_http_v3_send_goaway(c, (n + 1) << 2) != NGX_OK) { > > - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > > - "goaway error"); > > ngx_http_close_connection(c); > > return; > > } > > @@ -287,15 +283,14 @@ ngx_http_v3_process_request(ngx_event_t > > rc = ngx_http_v3_parse_headers(c, st, b); > > > > if (rc > 0) { > > - ngx_http_v3_finalize_connection(c, rc, > > - "could not parse 
request headers"); > > + ngx_quic_reset_stream(c, rc); > > + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > > + "client sent invalid header"); > > ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); > > break; > > } > > > > if (rc == NGX_ERROR) { > > - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > > - "internal error"); > > ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); > > break; > > } > > @@ -1167,17 +1162,13 @@ ngx_http_v3_request_body_filter(ngx_http > > } > > > > if (rc > 0) { > > - ngx_http_v3_finalize_connection(r->connection, rc, > > - "client sent invalid body"); > > + ngx_quic_reset_stream(r->connection, rc); > > ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > > "client sent invalid body"); > > return NGX_HTTP_BAD_REQUEST; > > } > > > > if (rc == NGX_ERROR) { > > - ngx_http_v3_finalize_connection(r->connection, > > - NGX_HTTP_V3_ERR_INTERNAL_ERROR, > > - "internal error"); > > return NGX_HTTP_INTERNAL_SERVER_ERROR; > > } > > > > diff --git a/src/http/v3/ngx_http_v3_streams.c b/src/http/v3/ngx_http_v3_streams.c > > --- a/src/http/v3/ngx_http_v3_streams.c > > +++ b/src/http/v3/ngx_http_v3_streams.c > > @@ -283,7 +283,7 @@ ngx_http_v3_create_push_stream(ngx_conne > > > > sc = ngx_quic_open_stream(c, 0); > > if (sc == NULL) { > > - return NULL; > > + goto failed; > > } > > > > p = buf; > > @@ -318,7 +318,13 @@ ngx_http_v3_create_push_stream(ngx_conne > > > > failed: > > > > - ngx_http_v3_close_uni_stream(sc); > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create push stream"); > > + > > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, > > + "failed to create push stream"); > > + if (sc) { > > + ngx_http_v3_close_uni_stream(sc); > > + } > > > > return NULL; > > } > > @@ -368,7 +374,7 @@ ngx_http_v3_get_uni_stream(ngx_connectio > > > > sc = ngx_quic_open_stream(c, 0); > > if (sc == NULL) { > > - return NULL; > > + goto failed; > > } > > > > sc->quic->cancelable = 1; > > @@ -405,7 
+411,13 @@ ngx_http_v3_get_uni_stream(ngx_connectio > > > > failed: > > > > - ngx_http_v3_close_uni_stream(sc); > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create server stream"); > > + > > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, > > + "failed to create server stream"); > > + if (sc) { > > + ngx_http_v3_close_uni_stream(sc); > > + } > > > > return NULL; > > } > > @@ -424,7 +436,7 @@ ngx_http_v3_send_settings(ngx_connection > > > > cc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_CONTROL); > > if (cc == NULL) { > > - return NGX_DECLINED; > > + return NGX_ERROR; > > } > > > > h3scf = ngx_http_v3_get_module_srv_conf(c, ngx_http_v3_module); > > @@ -457,6 +469,10 @@ ngx_http_v3_send_settings(ngx_connection > > > > failed: > > > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send settings"); > > + > > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > > + "failed to send settings"); > > ngx_http_v3_close_uni_stream(cc); > > > > return NGX_ERROR; > > @@ -475,7 +491,7 @@ ngx_http_v3_send_goaway(ngx_connection_t > > > > cc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_CONTROL); > > if (cc == NULL) { > > - return NGX_DECLINED; > > + return NGX_ERROR; > > This case now misses sending CONNECTION_CLOSE (no further stream processing), > Instead, the last allowed stream connection is now closed while initializing > with sending FIN with off:0 in its cleanup function which confuses clients > due to H3_FRAME_UNEXPECTED. It probably needs to be converted to goto failed > with conditional uni stream close. > > Same in ngx_http_v3_send_settings() around ngx_http_v3_get_uni_stream(), but > there it just leads to control stream silent closure with no visible effects > on the wire. Yet, with teh 2nd patch applied that updates stream reset logic, > the error causes to emit RESET_STREAM(H3_INTERNAL_ERROR), which is a protocol > violation when operating on a critical (control) stream. 
Can you elaborate on these? The ngx_http_v3_get_uni_stream() function now closes the QUIC connection by calling ngx_http_v3_finalize_connection() in case of error. So its callers don't need to do this. > > } > > > > n = ngx_http_v3_encode_varlen_int(NULL, id); > > @@ -495,6 +511,10 @@ ngx_http_v3_send_goaway(ngx_connection_t > > > > failed: > > > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send goaway"); > > + > > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > > + "failed to send goaway"); > > ngx_http_v3_close_uni_stream(cc); > > > > return NGX_ERROR; > > @@ -510,7 +530,7 @@ ngx_http_v3_send_ack_section(ngx_connect > > ngx_http_v3_session_t *h3c; > > > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > > - "http3 client ack section %ui", stream_id); > > + "http3 send section acknowledgement %ui", stream_id); > > > > dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); > > if (dc == NULL) { > > @@ -524,11 +544,21 @@ ngx_http_v3_send_ack_section(ngx_connect > > h3c->total_bytes += n; > > > > if (dc->send(dc, buf, n) != (ssize_t) n) { > > - ngx_http_v3_close_uni_stream(dc); > > - return NGX_ERROR; > > + goto failed; > > } > > > > return NGX_OK; > > + > > +failed: > > + > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, > > + "failed to send section acknowledgement"); > > + > > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > > + "failed to send section acknowledgement"); > > + ngx_http_v3_close_uni_stream(dc); > > + > > + return NGX_ERROR; > > } > > > > > > @@ -541,7 +571,7 @@ ngx_http_v3_send_cancel_stream(ngx_conne > > ngx_http_v3_session_t *h3c; > > > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > > - "http3 client cancel stream %ui", stream_id); > > + "http3 send stream cancellation %ui", stream_id); > > > > dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); > > if (dc == NULL) { > > @@ -555,11 +585,20 @@ ngx_http_v3_send_cancel_stream(ngx_conne > > h3c->total_bytes += n; > > > > if 
(dc->send(dc, buf, n) != (ssize_t) n) { > > - ngx_http_v3_close_uni_stream(dc); > > - return NGX_ERROR; > > + goto failed; > > } > > > > return NGX_OK; > > + > > +failed: > > + > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send stream cancellation"); > > + > > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > > + "failed to send stream cancellation"); > > + ngx_http_v3_close_uni_stream(dc); > > + > > + return NGX_ERROR; > > } > > > > > > @@ -572,7 +611,7 @@ ngx_http_v3_send_inc_insert_count(ngx_co > > ngx_http_v3_session_t *h3c; > > > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > > - "http3 client increment insert count %ui", inc); > > + "http3 send insert count increment %ui", inc); > > > > dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); > > if (dc == NULL) { > > @@ -586,11 +625,21 @@ ngx_http_v3_send_inc_insert_count(ngx_co > > h3c->total_bytes += n; > > > > if (dc->send(dc, buf, n) != (ssize_t) n) { > > - ngx_http_v3_close_uni_stream(dc); > > - return NGX_ERROR; > > + goto failed; > > } > > > > return NGX_OK; > > + > > +failed: > > + > > + ngx_log_error(NGX_LOG_ERR, c->log, 0, > > + "failed to send insert count increment"); > > + > > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > > + "failed to send insert count increment"); > > + ngx_http_v3_close_uni_stream(dc); > > + > > + return NGX_ERROR; > > } > > > > > > diff --git a/src/http/v3/ngx_http_v3_tables.c b/src/http/v3/ngx_http_v3_tables.c > > --- a/src/http/v3/ngx_http_v3_tables.c > > +++ b/src/http/v3/ngx_http_v3_tables.c > > @@ -589,6 +589,10 @@ ngx_http_v3_check_insert_count(ngx_conne > > if (h3c->nblocked == h3scf->max_blocked_streams) { > > ngx_log_error(NGX_LOG_INFO, c->log, 0, > > "client exceeded http3_max_blocked_streams limit"); > > + > > + ngx_http_v3_finalize_connection(c, > > + NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED, > > + "too many blocked streams"); > > return NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED; > > } > > > 
_______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From pluknet at nginx.com Tue Nov 9 16:01:32 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 9 Nov 2021 19:01:32 +0300 Subject: [PATCH 1 of 3] HTTP/3: adjusted QUIC connection finalization In-Reply-To: <20211109073212.bgvqnihgkvieygiy@Romans-MacBook-Pro.local> References: <8739f475583031399879.1634561308@arut-laptop> <20211108162543.aysjry2ceehooktn@MacBook-Air-Sergey.local> <20211109073212.bgvqnihgkvieygiy@Romans-MacBook-Pro.local> Message-ID: <4FA13320-2BA5-47E6-A57D-C9A5FE20A490@nginx.com> > On 9 Nov 2021, at 10:32, Roman Arutyunyan wrote: > > On Mon, Nov 08, 2021 at 07:25:43PM +0300, Sergey Kandaurov wrote: >> On Mon, Oct 18, 2021 at 03:48:28PM +0300, Roman Arutyunyan wrote: >>> # HG changeset patch >>> # User Roman Arutyunyan >>> # Date 1634559753 -10800 >>> # Mon Oct 18 15:22:33 2021 +0300 >>> # Branch quic >>> # Node ID 8739f475583031399879ef0af2eb5af76008449e >>> # Parent 404de224517e33f685613d6425dcdb3c8ef5b97e >>> HTTP/3: adjusted QUIC connection finalization. >>> >>> When an HTTP/3 function returns an error in context of a QUIC stream, it's >>> this function's responsibility now to finalize the entire QUIC connection >>> with the right code, if required. Previously, QUIC connection finalization >>> could be done both outside and inside such functions. The new rule follows >>> a similar rule for logging, leads to cleaner code, and allows to provide more >>> details about the error. >>> >>> While here, a few error cases are no longer treated as fatal and QUIC connection >>> is no longer finalized in these cases. A few other cases now lead to >>> stream reset instead of connection finalization. 
>>> >>> diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c >>> --- a/src/http/v3/ngx_http_v3.c >>> +++ b/src/http/v3/ngx_http_v3.c >>> @@ -33,7 +33,7 @@ ngx_http_v3_init_session(ngx_connection_ >>> >>> h3c = ngx_pcalloc(pc->pool, sizeof(ngx_http_v3_session_t)); >>> if (h3c == NULL) { >>> - return NGX_ERROR; >>> + goto failed; >>> } >>> >>> h3c->max_push_id = (uint64_t) -1; >>> @@ -49,7 +49,7 @@ ngx_http_v3_init_session(ngx_connection_ >>> >>> cln = ngx_pool_cleanup_add(pc->pool, 0); >>> if (cln == NULL) { >>> - return NGX_ERROR; >>> + goto failed; >>> } >>> >>> cln->handler = ngx_http_v3_cleanup_session; >>> @@ -58,6 +58,14 @@ ngx_http_v3_init_session(ngx_connection_ >>> hc->v3_session = h3c; >>> >>> return ngx_http_v3_send_settings(c); >>> + >>> +failed: >>> + >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create http3 session"); >>> + >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, >>> + "failed to create http3 session"); >>> + return NGX_ERROR; >>> } >>> >>> >>> diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c >>> --- a/src/http/v3/ngx_http_v3_request.c >>> +++ b/src/http/v3/ngx_http_v3_request.c >>> @@ -65,8 +65,6 @@ ngx_http_v3_init(ngx_connection_t *c) >>> ngx_http_core_srv_conf_t *cscf; >>> >>> if (ngx_http_v3_init_session(c) != NGX_OK) { >>> - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, >>> - "internal error"); >>> ngx_http_close_connection(c); >>> return; >>> } >>> @@ -110,8 +108,6 @@ ngx_http_v3_init(ngx_connection_t *c) >>> h3c->goaway = 1; >>> >>> if (ngx_http_v3_send_goaway(c, (n + 1) << 2) != NGX_OK) { >>> - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, >>> - "goaway error"); >>> ngx_http_close_connection(c); >>> return; >>> } >>> @@ -287,15 +283,14 @@ ngx_http_v3_process_request(ngx_event_t >>> rc = ngx_http_v3_parse_headers(c, st, b); >>> >>> if (rc > 0) { >>> - ngx_http_v3_finalize_connection(c, rc, >>> - "could not parse 
request headers"); >>> + ngx_quic_reset_stream(c, rc); >>> + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, >>> + "client sent invalid header"); >>> ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); >>> break; >>> } Note that ngx_http_v3_parse_headers() returned > 0 will reset a stream, then it will try again, now as part of closing a (fake) http connection. This still works due to a wev->error check in ngx_quic_reset_stream(). >>> >>> if (rc == NGX_ERROR) { >>> - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, >>> - "internal error"); >>> ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); >>> break; >>> } >>> @@ -1167,17 +1162,13 @@ ngx_http_v3_request_body_filter(ngx_http >>> } >>> >>> if (rc > 0) { >>> - ngx_http_v3_finalize_connection(r->connection, rc, >>> - "client sent invalid body"); >>> + ngx_quic_reset_stream(r->connection, rc); >>> ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, >>> "client sent invalid body"); >>> return NGX_HTTP_BAD_REQUEST; >>> } >>> >>> if (rc == NGX_ERROR) { >>> - ngx_http_v3_finalize_connection(r->connection, >>> - NGX_HTTP_V3_ERR_INTERNAL_ERROR, >>> - "internal error"); >>> return NGX_HTTP_INTERNAL_SERVER_ERROR; >>> } >>> Note that NGX_ERROR conversion to emitting a special response on request finalization looks contrary to just resetting a stream (above) on less fatal logical errors, with rc > 0 (I see only H3_FRAME_UNEXPECTED there). Not sure about that. OTOH, handling NGX_ERROR this way seems to follow our others HTTP protocol version implementations. 
>>> diff --git a/src/http/v3/ngx_http_v3_streams.c b/src/http/v3/ngx_http_v3_streams.c >>> --- a/src/http/v3/ngx_http_v3_streams.c >>> +++ b/src/http/v3/ngx_http_v3_streams.c >>> @@ -283,7 +283,7 @@ ngx_http_v3_create_push_stream(ngx_conne >>> >>> sc = ngx_quic_open_stream(c, 0); >>> if (sc == NULL) { >>> - return NULL; >>> + goto failed; >>> } >>> >>> p = buf; >>> @@ -318,7 +318,13 @@ ngx_http_v3_create_push_stream(ngx_conne >>> >>> failed: >>> >>> - ngx_http_v3_close_uni_stream(sc); >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create push stream"); >>> + >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, >>> + "failed to create push stream"); >>> + if (sc) { >>> + ngx_http_v3_close_uni_stream(sc); >>> + } >>> >>> return NULL; >>> } >>> @@ -368,7 +374,7 @@ ngx_http_v3_get_uni_stream(ngx_connectio >>> >>> sc = ngx_quic_open_stream(c, 0); >>> if (sc == NULL) { >>> - return NULL; >>> + goto failed; >>> } >>> >>> sc->quic->cancelable = 1; >>> @@ -405,7 +411,13 @@ ngx_http_v3_get_uni_stream(ngx_connectio >>> >>> failed: >>> >>> - ngx_http_v3_close_uni_stream(sc); >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create server stream"); >>> + >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, >>> + "failed to create server stream"); >>> + if (sc) { >>> + ngx_http_v3_close_uni_stream(sc); >>> + } >>> >>> return NULL; >>> } >>> @@ -424,7 +436,7 @@ ngx_http_v3_send_settings(ngx_connection >>> >>> cc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_CONTROL); >>> if (cc == NULL) { >>> - return NGX_DECLINED; >>> + return NGX_ERROR; >>> } >>> >>> h3scf = ngx_http_v3_get_module_srv_conf(c, ngx_http_v3_module); >>> @@ -457,6 +469,10 @@ ngx_http_v3_send_settings(ngx_connection >>> >>> failed: >>> >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send settings"); >>> + >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, >>> + "failed to send settings"); >>> 
ngx_http_v3_close_uni_stream(cc); >>> >>> return NGX_ERROR; >>> @@ -475,7 +491,7 @@ ngx_http_v3_send_goaway(ngx_connection_t >>> >>> cc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_CONTROL); >>> if (cc == NULL) { >>> - return NGX_DECLINED; >>> + return NGX_ERROR; >> >> This case now misses sending CONNECTION_CLOSE (no further stream processing), >> Instead, the last allowed stream connection is now closed while initializing >> with sending FIN with off:0 in its cleanup function which confuses clients >> due to H3_FRAME_UNEXPECTED. It probably needs to be converted to goto failed >> with conditional uni stream close. >> >> Same in ngx_http_v3_send_settings() around ngx_http_v3_get_uni_stream(), but >> there it just leads to control stream silent closure with no visible effects >> on the wire. Yet, with teh 2nd patch applied that updates stream reset logic, >> the error causes to emit RESET_STREAM(H3_INTERNAL_ERROR), which is a protocol >> violation when operating on a critical (control) stream. > > Can you elaborate on these? The ngx_http_v3_get_uni_stream() function now > closes the QUIC connection by calling ngx_http_v3_finalize_connection() in case > of error. So its callers don't need to do this. Okay, somehow I've missed that. It looks good. 
> >>> } >>> >>> n = ngx_http_v3_encode_varlen_int(NULL, id); >>> @@ -495,6 +511,10 @@ ngx_http_v3_send_goaway(ngx_connection_t >>> >>> failed: >>> >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send goaway"); >>> + >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, >>> + "failed to send goaway"); >>> ngx_http_v3_close_uni_stream(cc); >>> >>> return NGX_ERROR; >>> @@ -510,7 +530,7 @@ ngx_http_v3_send_ack_section(ngx_connect >>> ngx_http_v3_session_t *h3c; >>> >>> ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, >>> - "http3 client ack section %ui", stream_id); >>> + "http3 send section acknowledgement %ui", stream_id); >>> >>> dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); >>> if (dc == NULL) { >>> @@ -524,11 +544,21 @@ ngx_http_v3_send_ack_section(ngx_connect >>> h3c->total_bytes += n; >>> >>> if (dc->send(dc, buf, n) != (ssize_t) n) { >>> - ngx_http_v3_close_uni_stream(dc); >>> - return NGX_ERROR; >>> + goto failed; >>> } >>> >>> return NGX_OK; >>> + >>> +failed: >>> + >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, >>> + "failed to send section acknowledgement"); >>> + >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, >>> + "failed to send section acknowledgement"); >>> + ngx_http_v3_close_uni_stream(dc); >>> + >>> + return NGX_ERROR; >>> } >>> >>> >>> @@ -541,7 +571,7 @@ ngx_http_v3_send_cancel_stream(ngx_conne >>> ngx_http_v3_session_t *h3c; >>> >>> ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, >>> - "http3 client cancel stream %ui", stream_id); >>> + "http3 send stream cancellation %ui", stream_id); >>> >>> dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); >>> if (dc == NULL) { >>> @@ -555,11 +585,20 @@ ngx_http_v3_send_cancel_stream(ngx_conne >>> h3c->total_bytes += n; >>> >>> if (dc->send(dc, buf, n) != (ssize_t) n) { >>> - ngx_http_v3_close_uni_stream(dc); >>> - return NGX_ERROR; >>> + goto failed; >>> } >>> >>> return NGX_OK; >>> + >>> +failed: >>> + >>> + 
ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send stream cancellation"); >>> + >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, >>> + "failed to send stream cancellation"); >>> + ngx_http_v3_close_uni_stream(dc); >>> + >>> + return NGX_ERROR; >>> } >>> >>> >>> @@ -572,7 +611,7 @@ ngx_http_v3_send_inc_insert_count(ngx_co >>> ngx_http_v3_session_t *h3c; >>> >>> ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, >>> - "http3 client increment insert count %ui", inc); >>> + "http3 send insert count increment %ui", inc); >>> >>> dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); >>> if (dc == NULL) { >>> @@ -586,11 +625,21 @@ ngx_http_v3_send_inc_insert_count(ngx_co >>> h3c->total_bytes += n; >>> >>> if (dc->send(dc, buf, n) != (ssize_t) n) { >>> - ngx_http_v3_close_uni_stream(dc); >>> - return NGX_ERROR; >>> + goto failed; >>> } >>> >>> return NGX_OK; >>> + >>> +failed: >>> + >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, >>> + "failed to send insert count increment"); >>> + >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, >>> + "failed to send insert count increment"); >>> + ngx_http_v3_close_uni_stream(dc); >>> + >>> + return NGX_ERROR; >>> } >>> >>> >>> diff --git a/src/http/v3/ngx_http_v3_tables.c b/src/http/v3/ngx_http_v3_tables.c >>> --- a/src/http/v3/ngx_http_v3_tables.c >>> +++ b/src/http/v3/ngx_http_v3_tables.c >>> @@ -589,6 +589,10 @@ ngx_http_v3_check_insert_count(ngx_conne >>> if (h3c->nblocked == h3scf->max_blocked_streams) { >>> ngx_log_error(NGX_LOG_INFO, c->log, 0, >>> "client exceeded http3_max_blocked_streams limit"); >>> + >>> + ngx_http_v3_finalize_connection(c, >>> + NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED, >>> + "too many blocked streams"); >>> return NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED; >>> } >>> Similar to the very beginning comment above, after closing QUIC connection there and handling decompression error as part of parsing headers, it still tries to reset a stream to no avail (up to two 
times, see above). -- Sergey Kandaurov From xeioex at nginx.com Tue Nov 9 18:02:30 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 09 Nov 2021 18:02:30 +0000 Subject: [njs] PCRE: removed unused context functions. Message-ID: details: https://hg.nginx.org/njs/rev/384e1bb55a33 branches: changeset: 1740:384e1bb55a33 user: Dmitry Volyntsev date: Tue Nov 09 17:59:46 2021 +0000 description: PCRE: removed unused context functions. diffstat: src/njs_pcre.c | 21 --------------------- 1 files changed, 0 insertions(+), 21 deletions(-) diffs (45 lines): diff -r 203bc61d8d70 -r 384e1bb55a33 src/njs_pcre.c --- a/src/njs_pcre.c Wed Nov 03 15:46:15 2021 +0000 +++ b/src/njs_pcre.c Tue Nov 09 17:59:46 2021 +0000 @@ -10,8 +10,6 @@ static void *njs_pcre_malloc(size_t size); static void njs_pcre_free(void *p); -static void *njs_pcre_default_malloc(size_t size, void *memory_data); -static void njs_pcre_default_free(void *p, void *memory_data); static njs_regex_context_t *regex_context; @@ -23,11 +21,6 @@ njs_regex_context_create(njs_pcre_malloc { njs_regex_context_t *ctx; - if (private_malloc == NULL) { - private_malloc = njs_pcre_default_malloc; - private_free = njs_pcre_default_free; - } - ctx = private_malloc(sizeof(njs_regex_context_t), memory_data); if (njs_fast_path(ctx != NULL)) { @@ -263,20 +256,6 @@ njs_pcre_free(void *p) } -static void * -njs_pcre_default_malloc(size_t size, void *memory_data) -{ - return malloc(size); -} - - -static void -njs_pcre_default_free(void *p, void *memory_data) -{ - free(p); -} - - njs_int_t njs_regex_match(njs_regex_t *regex, const u_char *subject, size_t off, size_t len, njs_regex_match_data_t *match_data, njs_regex_context_t *ctx) From xeioex at nginx.com Tue Nov 9 18:02:32 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 09 Nov 2021 18:02:32 +0000 Subject: [njs] Removed unused regular expression API. 
Message-ID: details: https://hg.nginx.org/njs/rev/2b07f8810d31 branches: changeset: 1741:2b07f8810d31 user: Dmitry Volyntsev date: Tue Nov 09 17:59:58 2021 +0000 description: Removed unused regular expression API. diffstat: src/njs_pcre.c | 14 -------------- src/njs_regex.h | 2 -- 2 files changed, 0 insertions(+), 16 deletions(-) diffs (36 lines): diff -r 384e1bb55a33 -r 2b07f8810d31 src/njs_pcre.c --- a/src/njs_pcre.c Tue Nov 09 17:59:46 2021 +0000 +++ b/src/njs_pcre.c Tue Nov 09 17:59:58 2021 +0000 @@ -170,20 +170,6 @@ njs_regex_is_valid(njs_regex_t *regex) } -njs_uint_t -njs_regex_ncaptures(njs_regex_t *regex) -{ - return regex->ncaptures; -} - - -njs_uint_t -njs_regex_backrefs(njs_regex_t *regex) -{ - return regex->backrefmax; -} - - njs_int_t njs_regex_named_captures(njs_regex_t *regex, njs_str_t *name, int n) { diff -r 384e1bb55a33 -r 2b07f8810d31 src/njs_regex.h --- a/src/njs_regex.h Tue Nov 09 17:59:46 2021 +0000 +++ b/src/njs_regex.h Tue Nov 09 17:59:58 2021 +0000 @@ -30,8 +30,6 @@ NJS_EXPORT njs_regex_context_t * NJS_EXPORT njs_int_t njs_regex_compile(njs_regex_t *regex, u_char *source, size_t len, njs_uint_t options, njs_regex_context_t *ctx); NJS_EXPORT njs_bool_t njs_regex_is_valid(njs_regex_t *regex); -NJS_EXPORT njs_uint_t njs_regex_ncaptures(njs_regex_t *regex); -NJS_EXPORT njs_uint_t njs_regex_backrefs(njs_regex_t *regex); NJS_EXPORT njs_int_t njs_regex_named_captures(njs_regex_t *regex, njs_str_t *name, int n); NJS_EXPORT njs_regex_match_data_t *njs_regex_match_data(njs_regex_t *regex, From arut at nginx.com Wed Nov 10 12:21:01 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 10 Nov 2021 15:21:01 +0300 Subject: [PATCH 1 of 3] HTTP/3: adjusted QUIC connection finalization In-Reply-To: <4FA13320-2BA5-47E6-A57D-C9A5FE20A490@nginx.com> References: <8739f475583031399879.1634561308@arut-laptop> <20211108162543.aysjry2ceehooktn@MacBook-Air-Sergey.local> <20211109073212.bgvqnihgkvieygiy@Romans-MacBook-Pro.local> 
<4FA13320-2BA5-47E6-A57D-C9A5FE20A490@nginx.com> Message-ID: <20211110122101.y5vuw5kgiug3fsmz@Romans-MacBook-Pro.local> On Tue, Nov 09, 2021 at 07:01:32PM +0300, Sergey Kandaurov wrote: > > > On 9 Nov 2021, at 10:32, Roman Arutyunyan wrote: > > > > On Mon, Nov 08, 2021 at 07:25:43PM +0300, Sergey Kandaurov wrote: > >> On Mon, Oct 18, 2021 at 03:48:28PM +0300, Roman Arutyunyan wrote: > >>> # HG changeset patch > >>> # User Roman Arutyunyan > >>> # Date 1634559753 -10800 > >>> # Mon Oct 18 15:22:33 2021 +0300 > >>> # Branch quic > >>> # Node ID 8739f475583031399879ef0af2eb5af76008449e > >>> # Parent 404de224517e33f685613d6425dcdb3c8ef5b97e > >>> HTTP/3: adjusted QUIC connection finalization. > >>> > >>> When an HTTP/3 function returns an error in context of a QUIC stream, it's > >>> this function's responsibility now to finalize the entire QUIC connection > >>> with the right code, if required. Previously, QUIC connection finalization > >>> could be done both outside and inside such functions. The new rule follows > >>> a similar rule for logging, leads to cleaner code, and allows to provide more > >>> details about the error. > >>> > >>> While here, a few error cases are no longer treated as fatal and QUIC connection > >>> is no longer finalized in these cases. A few other cases now lead to > >>> stream reset instead of connection finalization. 
> >>> > >>> diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c > >>> --- a/src/http/v3/ngx_http_v3.c > >>> +++ b/src/http/v3/ngx_http_v3.c > >>> @@ -33,7 +33,7 @@ ngx_http_v3_init_session(ngx_connection_ > >>> > >>> h3c = ngx_pcalloc(pc->pool, sizeof(ngx_http_v3_session_t)); > >>> if (h3c == NULL) { > >>> - return NGX_ERROR; > >>> + goto failed; > >>> } > >>> > >>> h3c->max_push_id = (uint64_t) -1; > >>> @@ -49,7 +49,7 @@ ngx_http_v3_init_session(ngx_connection_ > >>> > >>> cln = ngx_pool_cleanup_add(pc->pool, 0); > >>> if (cln == NULL) { > >>> - return NGX_ERROR; > >>> + goto failed; > >>> } > >>> > >>> cln->handler = ngx_http_v3_cleanup_session; > >>> @@ -58,6 +58,14 @@ ngx_http_v3_init_session(ngx_connection_ > >>> hc->v3_session = h3c; > >>> > >>> return ngx_http_v3_send_settings(c); > >>> + > >>> +failed: > >>> + > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create http3 session"); > >>> + > >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > >>> + "failed to create http3 session"); > >>> + return NGX_ERROR; > >>> } > >>> > >>> > >>> diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > >>> --- a/src/http/v3/ngx_http_v3_request.c > >>> +++ b/src/http/v3/ngx_http_v3_request.c > >>> @@ -65,8 +65,6 @@ ngx_http_v3_init(ngx_connection_t *c) > >>> ngx_http_core_srv_conf_t *cscf; > >>> > >>> if (ngx_http_v3_init_session(c) != NGX_OK) { > >>> - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > >>> - "internal error"); > >>> ngx_http_close_connection(c); > >>> return; > >>> } > >>> @@ -110,8 +108,6 @@ ngx_http_v3_init(ngx_connection_t *c) > >>> h3c->goaway = 1; > >>> > >>> if (ngx_http_v3_send_goaway(c, (n + 1) << 2) != NGX_OK) { > >>> - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > >>> - "goaway error"); > >>> ngx_http_close_connection(c); > >>> return; > >>> } > >>> @@ -287,15 +283,14 @@ ngx_http_v3_process_request(ngx_event_t > >>> rc = 
ngx_http_v3_parse_headers(c, st, b); > >>> > >>> if (rc > 0) { > >>> - ngx_http_v3_finalize_connection(c, rc, > >>> - "could not parse request headers"); > >>> + ngx_quic_reset_stream(c, rc); > >>> + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > >>> + "client sent invalid header"); > >>> ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); > >>> break; > >>> } > > Note that ngx_http_v3_parse_headers() returned > 0 will reset a stream, > then it will try again, now as part of closing a (fake) http connection. > This still works due to a wev->error check in ngx_quic_reset_stream(). Yes, resetting the stream will set wev->error, which will lead to errors in send_chain() while sending HTTP 400 page. As a consequence, we'll try to reset the stream again, which will lead to nothing because of duplicate reset protection. > >>> > >>> if (rc == NGX_ERROR) { > >>> - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, > >>> - "internal error"); > >>> ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); Here the stream will be reset as part of closing the request because ngx_http_close_request() does not send HTTP 500 unlike ngx_http_finalize_request(). 
> >>> break; > >>> } > >>> @@ -1167,17 +1162,13 @@ ngx_http_v3_request_body_filter > >>> } > >>> > >>> if (rc > 0) { > >>> - ngx_http_v3_finalize_connection(r->connection, rc, > >>> - "client sent invalid body"); > >>> + ngx_quic_reset_stream(r->connection, rc); > >>> ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, > >>> "client sent invalid body"); > >>> return NGX_HTTP_BAD_REQUEST; > >>> } > >>> > >>> if (rc == NGX_ERROR) { > >>> - ngx_http_v3_finalize_connection(r->connection, > >>> - NGX_HTTP_V3_ERR_INTERNAL_ERROR, > >>> - "internal error"); > >>> return NGX_HTTP_INTERNAL_SERVER_ERROR; > >>> } > >>> > > Note that NGX_ERROR conversion to emitting a special response on request > finalization looks contrary to just resetting a stream (above) on less > fatal logical errors, with rc > 0 (I see only H3_FRAME_UNEXPECTED there). > Not sure about that. OTOH, handling NGX_ERROR this way seems to follow > our other HTTP protocol version implementations. When we have an error code, we have to reset the stream, because there's no other way to send that code to client (except closing the QUIC connection). When we have no code, we can return HTTP 500 instead which may look inconsistent. However, when ngx_http_v3_parse_XXX() return NGX_ERROR, it's not because of parsing (in this case we have an error code), but because of allocation etc. This is similar to an allocation error in any other part of the code, including common HTTP code where we just return HTTP 500. So I don't think we need to reset the stream here. Actually, ngx_http_v3_parse_data() does not currently return NGX_ERROR at all, but this fact does not change the API. 
> >>> diff --git a/src/http/v3/ngx_http_v3_streams.c b/src/http/v3/ngx_http_v3_streams.c > >>> --- a/src/http/v3/ngx_http_v3_streams.c > >>> +++ b/src/http/v3/ngx_http_v3_streams.c > >>> @@ -283,7 +283,7 @@ ngx_http_v3_create_push_stream(ngx_conne > >>> > >>> sc = ngx_quic_open_stream(c, 0); > >>> if (sc == NULL) { > >>> - return NULL; > >>> + goto failed; > >>> } > >>> > >>> p = buf; > >>> @@ -318,7 +318,13 @@ ngx_http_v3_create_push_stream(ngx_conne > >>> > >>> failed: > >>> > >>> - ngx_http_v3_close_uni_stream(sc); > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create push stream"); > >>> + > >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, > >>> + "failed to create push stream"); > >>> + if (sc) { > >>> + ngx_http_v3_close_uni_stream(sc); > >>> + } > >>> > >>> return NULL; > >>> } > >>> @@ -368,7 +374,7 @@ ngx_http_v3_get_uni_stream(ngx_connectio > >>> > >>> sc = ngx_quic_open_stream(c, 0); > >>> if (sc == NULL) { > >>> - return NULL; > >>> + goto failed; > >>> } > >>> > >>> sc->quic->cancelable = 1; > >>> @@ -405,7 +411,13 @@ ngx_http_v3_get_uni_stream(ngx_connectio > >>> > >>> failed: > >>> > >>> - ngx_http_v3_close_uni_stream(sc); > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create server stream"); > >>> + > >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, > >>> + "failed to create server stream"); > >>> + if (sc) { > >>> + ngx_http_v3_close_uni_stream(sc); > >>> + } > >>> > >>> return NULL; > >>> } > >>> @@ -424,7 +436,7 @@ ngx_http_v3_send_settings(ngx_connection > >>> > >>> cc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_CONTROL); > >>> if (cc == NULL) { > >>> - return NGX_DECLINED; > >>> + return NGX_ERROR; > >>> } > >>> > >>> h3scf = ngx_http_v3_get_module_srv_conf(c, ngx_http_v3_module); > >>> @@ -457,6 +469,10 @@ ngx_http_v3_send_settings(ngx_connection > >>> > >>> failed: > >>> > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send settings"); > >>> + > 
>>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > >>> + "failed to send settings"); > >>> ngx_http_v3_close_uni_stream(cc); > >>> > >>> return NGX_ERROR; > >>> @@ -475,7 +491,7 @@ ngx_http_v3_send_goaway(ngx_connection_t > >>> > >>> cc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_CONTROL); > >>> if (cc == NULL) { > >>> - return NGX_DECLINED; > >>> + return NGX_ERROR; > >> > >> This case now misses sending CONNECTION_CLOSE (no further stream processing), > >> Instead, the last allowed stream connection is now closed while initializing > >> with sending FIN with off:0 in its cleanup function which confuses clients > >> due to H3_FRAME_UNEXPECTED. It probably needs to be converted to goto failed > >> with conditional uni stream close. > >> > >> Same in ngx_http_v3_send_settings() around ngx_http_v3_get_uni_stream(), but > >> there it just leads to control stream silent closure with no visible effects > >> on the wire. Yet, with the 2nd patch applied that updates stream reset logic, > >> the error causes to emit RESET_STREAM(H3_INTERNAL_ERROR), which is a protocol > >> violation when operating on a critical (control) stream. > > > > Can you elaborate on these? The ngx_http_v3_get_uni_stream() function now > > closes the QUIC connection by calling ngx_http_v3_finalize_connection() in case > > of error. So its callers don't need to do this. > > Okay, somehow I've missed that. > It looks good. 
> > > > >>> } > >>> > >>> n = ngx_http_v3_encode_varlen_int(NULL, id); > >>> @@ -495,6 +511,10 @@ ngx_http_v3_send_goaway(ngx_connection_t > >>> > >>> failed: > >>> > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send goaway"); > >>> + > >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > >>> + "failed to send goaway"); > >>> ngx_http_v3_close_uni_stream(cc); > >>> > >>> return NGX_ERROR; > >>> @@ -510,7 +530,7 @@ ngx_http_v3_send_ack_section(ngx_connect > >>> ngx_http_v3_session_t *h3c; > >>> > >>> ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > >>> - "http3 client ack section %ui", stream_id); > >>> + "http3 send section acknowledgement %ui", stream_id); > >>> > >>> dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); > >>> if (dc == NULL) { > >>> @@ -524,11 +544,21 @@ ngx_http_v3_send_ack_section(ngx_connect > >>> h3c->total_bytes += n; > >>> > >>> if (dc->send(dc, buf, n) != (ssize_t) n) { > >>> - ngx_http_v3_close_uni_stream(dc); > >>> - return NGX_ERROR; > >>> + goto failed; > >>> } > >>> > >>> return NGX_OK; > >>> + > >>> +failed: > >>> + > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, > >>> + "failed to send section acknowledgement"); > >>> + > >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > >>> + "failed to send section acknowledgement"); > >>> + ngx_http_v3_close_uni_stream(dc); > >>> + > >>> + return NGX_ERROR; > >>> } > >>> > >>> > >>> @@ -541,7 +571,7 @@ ngx_http_v3_send_cancel_stream(ngx_conne > >>> ngx_http_v3_session_t *h3c; > >>> > >>> ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > >>> - "http3 client cancel stream %ui", stream_id); > >>> + "http3 send stream cancellation %ui", stream_id); > >>> > >>> dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); > >>> if (dc == NULL) { > >>> @@ -555,11 +585,20 @@ ngx_http_v3_send_cancel_stream(ngx_conne > >>> h3c->total_bytes += n; > >>> > >>> if (dc->send(dc, buf, n) != (ssize_t) n) { > >>> - ngx_http_v3_close_uni_stream(dc); > 
>>> - return NGX_ERROR; > >>> + goto failed; > >>> } > >>> > >>> return NGX_OK; > >>> + > >>> +failed: > >>> + > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send stream cancellation"); > >>> + > >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > >>> + "failed to send stream cancellation"); > >>> + ngx_http_v3_close_uni_stream(dc); > >>> + > >>> + return NGX_ERROR; > >>> } > >>> > >>> > >>> @@ -572,7 +611,7 @@ ngx_http_v3_send_inc_insert_count(ngx_co > >>> ngx_http_v3_session_t *h3c; > >>> > >>> ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, > >>> - "http3 client increment insert count %ui", inc); > >>> + "http3 send insert count increment %ui", inc); > >>> > >>> dc = ngx_http_v3_get_uni_stream(c, NGX_HTTP_V3_STREAM_DECODER); > >>> if (dc == NULL) { > >>> @@ -586,11 +625,21 @@ ngx_http_v3_send_inc_insert_count(ngx_co > >>> h3c->total_bytes += n; > >>> > >>> if (dc->send(dc, buf, n) != (ssize_t) n) { > >>> - ngx_http_v3_close_uni_stream(dc); > >>> - return NGX_ERROR; > >>> + goto failed; > >>> } > >>> > >>> return NGX_OK; > >>> + > >>> +failed: > >>> + > >>> + ngx_log_error(NGX_LOG_ERR, c->log, 0, > >>> + "failed to send insert count increment"); > >>> + > >>> + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, > >>> + "failed to send insert count increment"); > >>> + ngx_http_v3_close_uni_stream(dc); > >>> + > >>> + return NGX_ERROR; > >>> } > >>> > >>> > >>> diff --git a/src/http/v3/ngx_http_v3_tables.c b/src/http/v3/ngx_http_v3_tables.c > >>> --- a/src/http/v3/ngx_http_v3_tables.c > >>> +++ b/src/http/v3/ngx_http_v3_tables.c > >>> @@ -589,6 +589,10 @@ ngx_http_v3_check_insert_count(ngx_conne > >>> if (h3c->nblocked == h3scf->max_blocked_streams) { > >>> ngx_log_error(NGX_LOG_INFO, c->log, 0, > >>> "client exceeded http3_max_blocked_streams limit"); > >>> + > >>> + ngx_http_v3_finalize_connection(c, > >>> + NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED, > >>> + "too many blocked streams"); > >>> return 
NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED; > >>> } > >>> > > Similar to the very beginning comment above, after closing QUIC connection > there and handling decompression error as part of parsing headers, it still > tries to reset a stream to no avail (up to two times, see above). Yes. But this does not seem to break anything. -- Roman Arutyunyan From pluknet at nginx.com Wed Nov 10 12:59:39 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 10 Nov 2021 15:59:39 +0300 Subject: [PATCH 2 of 3] HTTP/3: allowed QUIC stream connection reuse In-Reply-To: <8ae53c592c719af4f3ba.1634561309@arut-laptop> References: <8ae53c592c719af4f3ba.1634561309@arut-laptop> Message-ID: > On 18 Oct 2021, at 15:48, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1634561226 -10800 > # Mon Oct 18 15:47:06 2021 +0300 > # Branch quic > # Node ID 8ae53c592c719af4f3ba47dbd85f78be27aaf7db > # Parent 8739f475583031399879ef0af2eb5af76008449e > HTTP/3: allowed QUIC stream connection reuse. > > A QUIC stream connection is treated as reusable until first bytes of request > arrive, which is also when the request object is now allocated. A connection > closed as a result of draining, is reset with the error code > H3_REQUEST_REJECTED. Such behavior is allowed by quic-http-34: > > Once a request stream has been opened, the request MAY be cancelled > by either endpoint. Clients cancel requests if the response is no > longer of interest; servers cancel requests if they are unable to or > choose not to respond. > > When the server cancels a request without performing any application > processing, the request is considered "rejected." The server SHOULD > abort its response stream with the error code H3_REQUEST_REJECTED. > > The client can treat requests rejected by the server as though they had > never been sent at all, thereby allowing them to be retried later. > Looks good. See below for minor comments. 
BTW, if we still hit the worker_connections limit, this leads to an entire QUIC connection close, but I doubt we can easily improve this. > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c > --- a/src/http/ngx_http_request.c > +++ b/src/http/ngx_http_request.c > @@ -3743,15 +3743,14 @@ ngx_http_free_request(ngx_http_request_t > > log->action = "closing request"; > > - if (r->connection->timedout) { > + if (r->connection->timedout > +#if (NGX_HTTP_QUIC) > + && r->connection->quic == NULL > +#endif > + ) > + { > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > > -#if (NGX_HTTP_V3) > - if (r->connection->quic) { > - (void) ngx_quic_reset_stream(r->connection, > - NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR); > - } else > -#endif > if (clcf->reset_timedout_connection) { > linger.l_onoff = 1; > linger.l_linger = 0; > @@ -3763,14 +3762,6 @@ ngx_http_free_request(ngx_http_request_t > "setsockopt(SO_LINGER) failed"); > } > } > - > - } else if (!r->response_sent) { > -#if (NGX_HTTP_V3) > - if (r->connection->quic) { > - (void) ngx_quic_reset_stream(r->connection, > - NGX_HTTP_V3_ERR_INTERNAL_ERROR); > - } > -#endif > } > > /* the various request strings were allocated from r->pool */ > @@ -3830,6 +3821,12 @@ ngx_http_close_connection(ngx_connection > > #endif > > +#if (NGX_HTTP_V3) > + if (ngx_http_v3_connection(c)) { > + ngx_http_v3_reset_connection(c); > + } > +#endif > + > #if (NGX_STAT_STUB) > (void) ngx_atomic_fetch_add(ngx_stat_active, -1); > #endif > diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h > --- a/src/http/v3/ngx_http_v3.h > +++ b/src/http/v3/ngx_http_v3.h > @@ -90,6 +90,9 @@ > #define ngx_http_v3_shutdown_connection(c, code, reason) \ > ngx_quic_shutdown_connection(c->quic->parent, code, reason) > > +#define ngx_http_v3_connection(c) \ > + ((c)->quic ? 
ngx_http_quic_get_connection(c)->addr_conf->http3 : 0) > + > > typedef struct { > size_t max_table_capacity; > @@ -138,6 +141,7 @@ struct ngx_http_v3_session_s { > > > void ngx_http_v3_init(ngx_connection_t *c); > +void ngx_http_v3_reset_connection(ngx_connection_t *c); > ngx_int_t ngx_http_v3_init_session(ngx_connection_t *c); > ngx_int_t ngx_http_v3_check_flood(ngx_connection_t *c); > > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > --- a/src/http/v3/ngx_http_v3_request.c > +++ b/src/http/v3/ngx_http_v3_request.c > @@ -10,6 +10,7 @@ > #include > > > +static void ngx_http_v3_wait_request_handler(ngx_event_t *rev); > static void ngx_http_v3_cleanup_request(void *data); > static void ngx_http_v3_process_request(ngx_event_t *rev); > static ngx_int_t ngx_http_v3_process_header(ngx_http_request_t *r, > @@ -53,12 +54,8 @@ static const struct { > void > ngx_http_v3_init(ngx_connection_t *c) > { > - size_t size; > uint64_t n; > - ngx_buf_t *b; > ngx_event_t *rev; > - ngx_pool_cleanup_t *cln; > - ngx_http_request_t *r; > ngx_http_connection_t *hc; > ngx_http_v3_session_t *h3c; > ngx_http_core_loc_conf_t *clcf; > @@ -96,7 +93,7 @@ ngx_http_v3_init(ngx_connection_t *c) > h3c = ngx_http_v3_get_session(c); > > if (h3c->goaway) { > - ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_REQUEST_REJECTED); > + c->close = 1; > ngx_http_close_connection(c); > return; > } > @@ -116,21 +113,57 @@ ngx_http_v3_init(ngx_connection_t *c) > "reached maximum number of requests"); > } > > - cln = ngx_pool_cleanup_add(c->pool, 0); > - if (cln == NULL) { > + rev = c->read; > + rev->handler = ngx_http_v3_wait_request_handler; > + c->write->handler = ngx_http_empty_handler; > + > + if (rev->ready) { > + rev->handler(rev); > + return; > + } > + > + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); > + > + ngx_add_timer(rev, cscf->client_header_timeout); > + ngx_reusable_connection(c, 1); > + > + if (ngx_handle_read_event(rev, 0) != NGX_OK) { > + 
ngx_http_close_connection(c); > + return; > + } > +} > + > + > +static void > +ngx_http_v3_wait_request_handler(ngx_event_t *rev) > +{ > + size_t size; > + ssize_t n; > + ngx_buf_t *b; > + ngx_connection_t *c; > + ngx_pool_cleanup_t *cln; > + ngx_http_request_t *r; > + ngx_http_connection_t *hc; > + ngx_http_v3_session_t *h3c; > + ngx_http_core_srv_conf_t *cscf; > + > + c = rev->data; > + > + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 wait request handler"); > + > + if (rev->timedout) { > + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out"); > + c->timedout = 1; > ngx_http_close_connection(c); > return; > } > > - cln->handler = ngx_http_v3_cleanup_request; > - cln->data = c; > - > - h3c->nrequests++; > - > - if (h3c->keepalive.timer_set) { > - ngx_del_timer(&h3c->keepalive); > + if (c->close) { > + ngx_http_close_connection(c); > + return; > } > > + hc = c->data; > cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); > > size = cscf->client_header_buffer_size; > @@ -159,8 +192,49 @@ ngx_http_v3_init(ngx_connection_t *c) > b->end = b->last + size; > } > > + n = c->recv(c, b->last, size); > + > + if (n == NGX_AGAIN) { > + > + if (!rev->timer_set) { > + ngx_add_timer(rev, cscf->client_header_timeout); > + ngx_reusable_connection(c, 1); > + } > + > + if (ngx_handle_read_event(rev, 0) != NGX_OK) { > + ngx_http_close_connection(c); > + return; > + } > + > + /* > + * We are trying to not hold c->buffer's memory for an idle connection. 
> + */ > + > + if (ngx_pfree(c->pool, b->start) == NGX_OK) { > + b->start = NULL; > + } > + > + return; > + } > + > + if (n == NGX_ERROR) { > + ngx_http_close_connection(c); > + return; > + } > + > + if (n == 0) { > + ngx_log_error(NGX_LOG_INFO, c->log, 0, > + "client closed connection"); > + ngx_http_close_connection(c); > + return; > + } > + > + b->last += n; > + > c->log->action = "reading client request"; > > + ngx_reusable_connection(c, 0); > + > r = ngx_http_create_request(c); > if (r == NULL) { > ngx_http_close_connection(c); > @@ -171,7 +245,7 @@ ngx_http_v3_init(ngx_connection_t *c) > > r->v3_parse = ngx_pcalloc(r->pool, sizeof(ngx_http_v3_parse_t)); > if (r->v3_parse == NULL) { > - ngx_http_close_connection(c); > + ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); > return; > } > Since we now defer request initialization until first bytes, that's fine. > @@ -179,23 +253,59 @@ ngx_http_v3_init(ngx_connection_t *c) > * cscf->large_client_header_buffers.num; > > c->data = r; > - c->requests = n + 1; > + c->requests = (c->quic->id >> 2) + 1; > + > + cln = ngx_pool_cleanup_add(r->pool, 0); > + if (cln == NULL) { > + ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); > + return; > + } > + > + cln->handler = ngx_http_v3_cleanup_request; > + cln->data = r; > + > + h3c = ngx_http_v3_get_session(c); > + h3c->nrequests++; > + > + if (h3c->keepalive.timer_set) { > + ngx_del_timer(&h3c->keepalive); > + } > > - rev = c->read; > rev->handler = ngx_http_v3_process_request; > + ngx_http_v3_process_request(rev); > +} > > - ngx_http_v3_process_request(rev); > + > +void > +ngx_http_v3_reset_connection(ngx_connection_t *c) > +{ > + if (c->timedout) { > + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR); > + > + } else if (c->close) { > + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_REQUEST_REJECTED); > + > + } else if (c->requests == 0 || c->error) { > + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR); > + } > } The c->requests check 
looks suspicious, is it something to catch a not yet initialized request? > > > static void > ngx_http_v3_cleanup_request(void *data) > { > - ngx_connection_t *c = data; > + ngx_http_request_t *r = data; > > + ngx_connection_t *c; > ngx_http_v3_session_t *h3c; > ngx_http_core_loc_conf_t *clcf; > > + c = r->connection; > + > + if (!r->response_sent) { > + c->error = 1; > + } > + > h3c = ngx_http_v3_get_session(c); > > if (--h3c->nrequests == 0) { > diff --git a/src/http/v3/ngx_http_v3_streams.c b/src/http/v3/ngx_http_v3_streams.c > --- a/src/http/v3/ngx_http_v3_streams.c > +++ b/src/http/v3/ngx_http_v3_streams.c > @@ -49,7 +49,8 @@ ngx_http_v3_init_uni_stream(ngx_connecti > ngx_http_v3_finalize_connection(c, > NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, > "reached maximum number of uni streams"); > - ngx_http_close_connection(c); > + c->data = NULL; > + ngx_http_v3_close_uni_stream(c); > return; > } > > @@ -57,7 +58,11 @@ ngx_http_v3_init_uni_stream(ngx_connecti > > us = ngx_pcalloc(c->pool, sizeof(ngx_http_v3_uni_stream_t)); > if (us == NULL) { > - ngx_http_close_connection(c); > + ngx_http_v3_finalize_connection(c, > + NGX_HTTP_V3_ERR_INTERNAL_ERROR, > + "memory allocation error"); > + c->data = NULL; > + ngx_http_v3_close_uni_stream(c); > return; > } > > @@ -79,12 +84,12 @@ ngx_http_v3_close_uni_stream(ngx_connect > ngx_http_v3_session_t *h3c; > ngx_http_v3_uni_stream_t *us; > > - us = c->data; > - h3c = ngx_http_v3_get_session(c); > - > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 close stream"); > > - if (us->index >= 0) { > + us = c->data; > + > + if (us && us->index >= 0) { > + h3c = ngx_http_v3_get_session(c); > h3c->known_streams[us->index] = NULL; > } > Is there any difference after switching to ngx_http_v3_close_uni_stream(), besides ngx_stat_active now no longer decremented? This itself looks like a right change, since ngx_stat_active isn't incremented for uni streams. 
BTW, we need additional checks to prevent processing new streams after ngx_http_v3_finalize_connection(). # HG changeset patch # User Sergey Kandaurov # Date 1636549164 -10800 # Wed Nov 10 15:59:24 2021 +0300 # Branch quic # Node ID 924ee879f8befa1ec574d41a3979e5c5c5db8639 # Parent c6386afdd1552105c18ce5c47b4b5cd6f6de8b88 QUIC: stop processing new client streams on quic connection error. diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -314,7 +314,7 @@ ngx_quic_create_client_stream(ngx_connec qc = ngx_quic_get_connection(c); - if (qc->shutdown) { + if (qc->shutdown || qc->error) { return NGX_QUIC_STREAM_GONE; } @@ -385,7 +385,7 @@ ngx_quic_create_client_stream(ngx_connec return NULL; } - if (qc->shutdown) { + if (qc->shutdown || qc->error) { return NGX_QUIC_STREAM_GONE; } } -- Sergey Kandaurov From xeioex at nginx.com Wed Nov 10 14:52:43 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 10 Nov 2021 14:52:43 +0000 Subject: [njs] Tests: adapting fs tests introduced in 685adce92af1 for various OS. Message-ID: details: https://hg.nginx.org/njs/rev/dedadba0ee87 branches: changeset: 1742:dedadba0ee87 user: Dmitry Volyntsev date: Wed Nov 10 14:50:16 2021 +0000 description: Tests: adapting fs tests introduced in 685adce92af1 for various OS. 
diffstat: test/fs/methods.js | 68 +++++++++++++++++++++++++++++++++++++++++++---------- 1 files changed, 55 insertions(+), 13 deletions(-) diffs (113 lines): diff -r 2b07f8810d31 -r dedadba0ee87 test/fs/methods.js --- a/test/fs/methods.js Tue Nov 09 17:59:58 2021 +0000 +++ b/test/fs/methods.js Wed Nov 10 14:50:16 2021 +0000 @@ -188,7 +188,7 @@ async function write_test(params) { try { fs.unlinkSync(fname); } catch (e) {} - let data = await method("writeFile", params); + let data = await method("writeFile", params).catch(e => ({error:e})); if (!data) { data = fs.readFileSync(fname); @@ -196,8 +196,18 @@ async function write_test(params) { try { fs.unlinkSync(fname); } catch (e) {} - if (data.compare(params.expected) != 0) { - throw Error(`writeFile unexpected data`); + if (params.check) { + if (!params.check(data, params)) { + throw Error(`writeFile failed check`); + } + + } else if (params.exception) { + throw data.error; + + } else { + if (data.compare(params.expected) != 0) { + throw Error(`writeFile unexpected data`); + } } return 'SUCCESS'; @@ -219,9 +229,20 @@ let write_tests = [ { args: ["@", "eHl6", {encoding: "base64url"}], expected: Buffer.from("xyz"), optional: true }, { args: ["@", Symbol("XYZ")], exception: "TypeError: Cannot convert a Symbol value to a string"}, - { args: ["/invalid_path", "XYZ"], stringify: true, - expected: Buffer.from('{"errno":13,"code":"EACCES","path":"/invalid_path","syscall":"open"}'), - exception: "Error: No such file or directory" }, + { args: ["/invalid_path", "XYZ"], + check: (err, params) => { + let e = err.error; + + if (e.syscall != 'open') { + throw Error(`${e.syscall} unexpected syscall`); + } + + if (e.code != "EACCES" && e.code != "EROFS") { + throw Error(`${e.code} unexpected code`); + } + + return true; + } }, ]; let writeFile_tsuite = { @@ -253,8 +274,8 @@ async function append_test(params) { try { fs.unlinkSync(fname); } catch (e) {} - let data = await method("appendFile", params); - data = await 
method("appendFile", params); + let data = await method("appendFile", params).catch(e => ({error:e})); + data = await method("appendFile", params).catch(e => ({error:e})); if (!data) { data = fs.readFileSync(fname); @@ -262,8 +283,18 @@ async function append_test(params) { try { fs.unlinkSync(fname); } catch (e) {} - if (data.compare(params.expected) != 0) { - throw Error(`appendFile unexpected data`); + if (params.check) { + if (!params.check(data, params)) { + throw Error(`appendFile failed check`); + } + + } else if (params.exception) { + throw data.error; + + } else { + if (data.compare(params.expected) != 0) { + throw Error(`appendFile unexpected data`); + } } return 'SUCCESS'; @@ -285,9 +316,20 @@ let append_tests = [ { args: ["@", "eHl6", {encoding: "base64url"}], expected: Buffer.from("xyzxyz"), optional: true }, { args: ["@", Symbol("XYZ")], exception: "TypeError: Cannot convert a Symbol value to a string"}, - { args: ["/invalid_path", "XYZ"], stringify: true, - expected: Buffer.from('{"errno":13,"code":"EACCES","path":"/invalid_path","syscall":"open"}'), - exception: "Error: No such file or directory" }, + { args: ["/invalid_path", "XYZ"], + check: (err, params) => { + let e = err.error; + + if (e.syscall != 'open') { + throw Error(`${e.syscall} unexpected syscall`); + } + + if (e.code != "EACCES" && e.code != "EROFS") { + throw Error(`${e.code} unexpected code`); + } + + return true; + } }, ]; let appendFile_tsuite = { From pluknet at nginx.com Wed Nov 10 15:59:01 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 10 Nov 2021 18:59:01 +0300 Subject: [PATCH 3 of 3] HTTP/3: send Stream Cancellation instruction In-Reply-To: <9018cf33137a19df69e7.1634561310@arut-laptop> References: <9018cf33137a19df69e7.1634561310@arut-laptop> Message-ID: > On 18 Oct 2021, at 15:48, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1634557691 -10800 > # Mon Oct 18 14:48:11 2021 +0300 > # Branch quic > # Node ID 
9018cf33137a19df69e70ee4a274164c226e7cbd > # Parent 8ae53c592c719af4f3ba47dbd85f78be27aaf7db > HTTP/3: send Stream Cancellation instruction. > > As per quic-qpack-21: > > When a stream is reset or reading is abandoned, the decoder emits a > Stream Cancellation instruction. > > Previously the instruction was not sent. Now it's sent when closing QUIC > stream connection if dynamic table capacity is non-zero and eof was not > received from client. The latter condition means that a trailers section > may still be on its way from client and the stream needs to be cancelled. > > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > --- a/src/http/v3/ngx_http_v3_request.c > +++ b/src/http/v3/ngx_http_v3_request.c > @@ -279,6 +279,14 @@ ngx_http_v3_wait_request_handler(ngx_eve > void > ngx_http_v3_reset_connection(ngx_connection_t *c) > { > + ngx_http_v3_srv_conf_t *h3scf; > + > + h3scf = ngx_http_v3_get_module_srv_conf(c, ngx_http_v3_module); > + > + if (h3scf->max_table_capacity > 0 && !c->read->eof) { > + (void) ngx_http_v3_send_cancel_stream(c, c->quic->id); > + } > + > if (c->timedout) { > ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR); > Looks good. -- Sergey Kandaurov From arut at nginx.com Wed Nov 10 21:42:18 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 11 Nov 2021 00:42:18 +0300 Subject: [PATCH 2 of 3] HTTP/3: allowed QUIC stream connection reuse In-Reply-To: References: <8ae53c592c719af4f3ba.1634561309@arut-laptop> Message-ID: <20211110214218.wq7bozp3ppy72kgf@Romans-MacBook-Pro.local> On Wed, Nov 10, 2021 at 03:59:39PM +0300, Sergey Kandaurov wrote: > > > On 18 Oct 2021, at 15:48, Roman Arutyunyan wrote: > > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1634561226 -10800 > > # Mon Oct 18 15:47:06 2021 +0300 > > # Branch quic > > # Node ID 8ae53c592c719af4f3ba47dbd85f78be27aaf7db > > # Parent 8739f475583031399879ef0af2eb5af76008449e > > HTTP/3: allowed QUIC stream connection reuse. 
> > > > A QUIC stream connection is treated as reusable until first bytes of request > > arrive, which is also when the request object is now allocated. A connection > > closed as a result of draining, is reset with the error code > > H3_REQUEST_REJECTED. Such behavior is allowed by quic-http-34: > > > > Once a request stream has been opened, the request MAY be cancelled > > by either endpoint. Clients cancel requests if the response is no > > longer of interest; servers cancel requests if they are unable to or > > choose not to respond. > > > > When the server cancels a request without performing any application > > processing, the request is considered "rejected." The server SHOULD > > abort its response stream with the error code H3_REQUEST_REJECTED. > > > > The client can treat requests rejected by the server as though they had > > never been sent at all, thereby allowing them to be retried later. > > > > Looks good. See below for minor comments. > BTW, if we still hit the worker_connections limit, this leads to > an entire QUIC connection close, but I doubt we can easily improve this. When there's not enough worker_connections for a new QUIC stream, we can send H3_REQUEST_REJECTED to client without creating a stream. We can discuss this later. 
> > diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c > > --- a/src/http/ngx_http_request.c > > +++ b/src/http/ngx_http_request.c > > @@ -3743,15 +3743,14 @@ ngx_http_free_request(ngx_http_request_t > > > > log->action = "closing request"; > > > > - if (r->connection->timedout) { > > + if (r->connection->timedout > > +#if (NGX_HTTP_QUIC) > > + && r->connection->quic == NULL > > +#endif > > + ) > > + { > > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > > > > -#if (NGX_HTTP_V3) > > - if (r->connection->quic) { > > - (void) ngx_quic_reset_stream(r->connection, > > - NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR); > > - } else > > -#endif > > if (clcf->reset_timedout_connection) { > > linger.l_onoff = 1; > > linger.l_linger = 0; > > @@ -3763,14 +3762,6 @@ ngx_http_free_request(ngx_http_request_t > > "setsockopt(SO_LINGER) failed"); > > } > > } > > - > > - } else if (!r->response_sent) { > > -#if (NGX_HTTP_V3) > > - if (r->connection->quic) { > > - (void) ngx_quic_reset_stream(r->connection, > > - NGX_HTTP_V3_ERR_INTERNAL_ERROR); > > - } > > -#endif > > } > > > > /* the various request strings were allocated from r->pool */ > > @@ -3830,6 +3821,12 @@ ngx_http_close_connection(ngx_connection > > > > #endif > > > > +#if (NGX_HTTP_V3) > > + if (ngx_http_v3_connection(c)) { > > + ngx_http_v3_reset_connection(c); > > + } > > +#endif > > + > > #if (NGX_STAT_STUB) > > (void) ngx_atomic_fetch_add(ngx_stat_active, -1); > > #endif > > diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h > > --- a/src/http/v3/ngx_http_v3.h > > +++ b/src/http/v3/ngx_http_v3.h > > @@ -90,6 +90,9 @@ > > #define ngx_http_v3_shutdown_connection(c, code, reason) \ > > ngx_quic_shutdown_connection(c->quic->parent, code, reason) > > > > +#define ngx_http_v3_connection(c) \ > > + ((c)->quic ? 
ngx_http_quic_get_connection(c)->addr_conf->http3 : 0) > > + > > > > typedef struct { > > size_t max_table_capacity; > > @@ -138,6 +141,7 @@ struct ngx_http_v3_session_s { > > > > > > void ngx_http_v3_init(ngx_connection_t *c); > > +void ngx_http_v3_reset_connection(ngx_connection_t *c); > > ngx_int_t ngx_http_v3_init_session(ngx_connection_t *c); > > ngx_int_t ngx_http_v3_check_flood(ngx_connection_t *c); > > > > diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c > > --- a/src/http/v3/ngx_http_v3_request.c > > +++ b/src/http/v3/ngx_http_v3_request.c > > @@ -10,6 +10,7 @@ > > #include > > > > > > +static void ngx_http_v3_wait_request_handler(ngx_event_t *rev); > > static void ngx_http_v3_cleanup_request(void *data); > > static void ngx_http_v3_process_request(ngx_event_t *rev); > > static ngx_int_t ngx_http_v3_process_header(ngx_http_request_t *r, > > @@ -53,12 +54,8 @@ static const struct { > > void > > ngx_http_v3_init(ngx_connection_t *c) > > { > > - size_t size; > > uint64_t n; > > - ngx_buf_t *b; > > ngx_event_t *rev; > > - ngx_pool_cleanup_t *cln; > > - ngx_http_request_t *r; > > ngx_http_connection_t *hc; > > ngx_http_v3_session_t *h3c; > > ngx_http_core_loc_conf_t *clcf; > > @@ -96,7 +93,7 @@ ngx_http_v3_init(ngx_connection_t *c) > > h3c = ngx_http_v3_get_session(c); > > > > if (h3c->goaway) { > > - ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_REQUEST_REJECTED); > > + c->close = 1; > > ngx_http_close_connection(c); > > return; > > } > > @@ -116,21 +113,57 @@ ngx_http_v3_init(ngx_connection_t *c) > > "reached maximum number of requests"); > > } > > > > - cln = ngx_pool_cleanup_add(c->pool, 0); > > - if (cln == NULL) { > > + rev = c->read; > > + rev->handler = ngx_http_v3_wait_request_handler; > > + c->write->handler = ngx_http_empty_handler; > > + > > + if (rev->ready) { > > + rev->handler(rev); > > + return; > > + } > > + > > + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); > > + > > + ngx_add_timer(rev, 
cscf->client_header_timeout); > > + ngx_reusable_connection(c, 1); > > + > > + if (ngx_handle_read_event(rev, 0) != NGX_OK) { > > + ngx_http_close_connection(c); > > + return; > > + } > > +} > > + > > + > > +static void > > +ngx_http_v3_wait_request_handler(ngx_event_t *rev) > > +{ > > + size_t size; > > + ssize_t n; > > + ngx_buf_t *b; > > + ngx_connection_t *c; > > + ngx_pool_cleanup_t *cln; > > + ngx_http_request_t *r; > > + ngx_http_connection_t *hc; > > + ngx_http_v3_session_t *h3c; > > + ngx_http_core_srv_conf_t *cscf; > > + > > + c = rev->data; > > + > > + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 wait request handler"); > > + > > + if (rev->timedout) { > > + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out"); > > + c->timedout = 1; > > ngx_http_close_connection(c); > > return; > > } > > > > - cln->handler = ngx_http_v3_cleanup_request; > > - cln->data = c; > > - > > - h3c->nrequests++; > > - > > - if (h3c->keepalive.timer_set) { > > - ngx_del_timer(&h3c->keepalive); > > + if (c->close) { > > + ngx_http_close_connection(c); > > + return; > > } > > > > + hc = c->data; > > cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); > > > > size = cscf->client_header_buffer_size; > > @@ -159,8 +192,49 @@ ngx_http_v3_init(ngx_connection_t *c) > > b->end = b->last + size; > > } > > > > + n = c->recv(c, b->last, size); > > + > > + if (n == NGX_AGAIN) { > > + > > + if (!rev->timer_set) { > > + ngx_add_timer(rev, cscf->client_header_timeout); > > + ngx_reusable_connection(c, 1); > > + } > > + > > + if (ngx_handle_read_event(rev, 0) != NGX_OK) { > > + ngx_http_close_connection(c); > > + return; > > + } > > + > > + /* > > + * We are trying to not hold c->buffer's memory for an idle connection. 
> > + */ > > + > > + if (ngx_pfree(c->pool, b->start) == NGX_OK) { > > + b->start = NULL; > > + } > > + > > + return; > > + } > > + > > + if (n == NGX_ERROR) { > > + ngx_http_close_connection(c); > > + return; > > + } > > + > > + if (n == 0) { > > + ngx_log_error(NGX_LOG_INFO, c->log, 0, > > + "client closed connection"); > > + ngx_http_close_connection(c); > > + return; > > + } > > + > > + b->last += n; > > + > > c->log->action = "reading client request"; > > > > + ngx_reusable_connection(c, 0); > > + > > r = ngx_http_create_request(c); > > if (r == NULL) { > > ngx_http_close_connection(c); > > @@ -171,7 +245,7 @@ ngx_http_v3_init(ngx_connection_t *c) > > > > r->v3_parse = ngx_pcalloc(r->pool, sizeof(ngx_http_v3_parse_t)); > > if (r->v3_parse == NULL) { > > - ngx_http_close_connection(c); > > + ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); > > return; > > } > > > > Since we now defer request initialization until first bytes, that's fine. > > > @@ -179,23 +253,59 @@ ngx_http_v3_init(ngx_connection_t *c) > > * cscf->large_client_header_buffers.num; > > > > c->data = r; > > - c->requests = n + 1; > > + c->requests = (c->quic->id >> 2) + 1; > > + > > + cln = ngx_pool_cleanup_add(r->pool, 0); > > + if (cln == NULL) { > > + ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); > > + return; > > + } > > + > > + cln->handler = ngx_http_v3_cleanup_request; > > + cln->data = r; > > + > > + h3c = ngx_http_v3_get_session(c); > > + h3c->nrequests++; > > + > > + if (h3c->keepalive.timer_set) { > > + ngx_del_timer(&h3c->keepalive); > > + } > > > > - rev = c->read; > > rev->handler = ngx_http_v3_process_request; > > + ngx_http_v3_process_request(rev); > > +} > > > > - ngx_http_v3_process_request(rev); > > + > > +void > > +ngx_http_v3_reset_connection(ngx_connection_t *c) > > +{ > > + if (c->timedout) { > > + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR); > > + > > + } else if (c->close) { > > + ngx_quic_reset_stream(c, 
NGX_HTTP_V3_ERR_REQUEST_REJECTED); > > + > > + } else if (c->requests == 0 || c->error) { > > + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR); > > + } > > } > > The c->requests check looks suspicious, > is it something to catch a not yet initialized request? If for whatever reason we close the stream connection before creating a request (thus c->requests == 0), that is a clear signal we need to reset the stream with NGX_HTTP_V3_ERR_INTERNAL_ERROR error code. The exceptions are timeout and connection reuse, but these cases are handled just before. All other cases are errors (failed allocation, recv() failure etc). > > static void > > ngx_http_v3_cleanup_request(void *data) > > { > > - ngx_connection_t *c = data; > > + ngx_http_request_t *r = data; > > > > + ngx_connection_t *c; > > ngx_http_v3_session_t *h3c; > > ngx_http_core_loc_conf_t *clcf; > > > > + c = r->connection; > > + > > + if (!r->response_sent) { > > + c->error = 1; > > + } > > + > > h3c = ngx_http_v3_get_session(c); > > > > if (--h3c->nrequests == 0) { > > diff --git a/src/http/v3/ngx_http_v3_streams.c b/src/http/v3/ngx_http_v3_streams.c > > --- a/src/http/v3/ngx_http_v3_streams.c > > +++ b/src/http/v3/ngx_http_v3_streams.c > > @@ -49,7 +49,8 @@ ngx_http_v3_init_uni_stream(ngx_connecti > > ngx_http_v3_finalize_connection(c, > > NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, > > "reached maximum number of uni streams"); > > - ngx_http_close_connection(c); > > + c->data = NULL; > > + ngx_http_v3_close_uni_stream(c); > > return; > > } > > > > @@ -57,7 +58,11 @@ ngx_http_v3_init_uni_stream(ngx_connecti > > > > us = ngx_pcalloc(c->pool, sizeof(ngx_http_v3_uni_stream_t)); > > if (us == NULL) { > > - ngx_http_close_connection(c); > > + ngx_http_v3_finalize_connection(c, > > + NGX_HTTP_V3_ERR_INTERNAL_ERROR, > > + "memory allocation error"); > > + c->data = NULL; > > + ngx_http_v3_close_uni_stream(c); > > return; > > } > > > > @@ -79,12 +84,12 @@ ngx_http_v3_close_uni_stream(ngx_connect > > 
ngx_http_v3_session_t *h3c; > > ngx_http_v3_uni_stream_t *us; > > > > - us = c->data; > > - h3c = ngx_http_v3_get_session(c); > > - > > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 close stream"); > > > > - if (us->index >= 0) { > > + us = c->data; > > + > > + if (us && us->index >= 0) { > > + h3c = ngx_http_v3_get_session(c); > > h3c->known_streams[us->index] = NULL; > > } > > > > Is there any difference after switching to ngx_http_v3_close_uni_stream(), > besides ngx_stat_active now no longer decremented? This itself looks like > a right change, since ngx_stat_active isn't incremented for uni streams. ngx_stat_active is indeed a good reason. The other reason is ngx_http_v3_reset_connection() which is called when closing an HTTP/3 stream connection. A uni stream connection is also an HTTP/3 stream connection, but we don't want to call ngx_http_v3_reset_connection() for it. We only want to call it for request streams. We could add another condition, but it's easier not to call common http code for uni streams at all. > BTW, we need additional checks to prevent processing new streams > after ngx_http_v3_finalize_connection(). Makes sense. > # HG changeset patch > # User Sergey Kandaurov > # Date 1636549164 -10800 > # Wed Nov 10 15:59:24 2021 +0300 > # Branch quic > # Node ID 924ee879f8befa1ec574d41a3979e5c5c5db8639 > # Parent c6386afdd1552105c18ce5c47b4b5cd6f6de8b88 > QUIC: stop processing new client streams on quic connection error. > > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > --- a/src/event/quic/ngx_event_quic_streams.c > +++ b/src/event/quic/ngx_event_quic_streams.c > @@ -314,7 +314,7 @@ ngx_quic_create_client_stream(ngx_connec > > qc = ngx_quic_get_connection(c); > > - if (qc->shutdown) { > + if (qc->shutdown || qc->error) { Why not qc->closing? 
> return NGX_QUIC_STREAM_GONE; > } > > @@ -385,7 +385,7 @@ ngx_quic_create_client_stream > return NULL; > } > > - if (qc->shutdown) { > + if (qc->shutdown || qc->error) { > return NGX_QUIC_STREAM_GONE; > } > } > > > -- > Sergey Kandaurov > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From mdounin at mdounin.ru Thu Nov 11 03:10:21 2021 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Thu, 11 Nov 2021 06:10:21 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling Message-ID: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> # HG changeset patch # User Maxim Dounin # Date 1636599377 -10800 # Thu Nov 11 05:56:17 2021 +0300 # Node ID 76e072a6947a221868705c13973de15319c0d921 # Parent 82b750b20c5205d685e59031247fe898f011394e HTTP/2: fixed sendfile() aio handling. With sendfile() in threads ("aio threads; sendfile on;"), client connection can block on writing, waiting for sendfile() to complete. In HTTP/2 this might result in the request hang, since an attempt to continue processing in thread event handler will call request's write event handler, which is usually stopped by ngx_http_v2_send_chain(): it does nothing if there are no additional data and stream->queued is set. Further, HTTP/2 resets stream's c->write->ready to 0 if writing blocks, so just fixing ngx_http_v2_send_chain() is not enough. Can be reproduced with test suite on Linux with: TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t The following tests currently fail: h2_keepalive.t, h2_priority.t, h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. Similarly, sendfile() with AIO preloading on FreeBSD can block as well, with similar results. This is, however, harder to reproduce, especially on modern FreeBSD systems, since sendfile() usually does not return EBUSY.
Fix is to post a write event on HTTP/2 connection in the thread event handler (and aio preload handler). This ensures that sendfile() will be completed and stream processing will be resumed by HTTP/2 code. diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c +++ b/src/http/ngx_http_copy_filter_module.c @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler r->aio = 0; ev->complete = 0; +#if (NGX_HTTP_V2) + + if (r->stream) { + /* + * for HTTP/2, trigger a write event on the main connection + * to handle sendfile() preload + */ + + ngx_post_event(r->stream->connection->connection->write, + &ngx_posted_events); + return; + } + +#endif + r->connection->write->handler(r->connection->write); } @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e r->main->blocked--; r->aio = 0; +#if (NGX_HTTP_V2) + + if (r->stream) { + /* + * for HTTP/2, trigger a write event on the main connection + * to handle sendfile() in threads + */ + + ngx_post_event(r->stream->connection->connection->write, + &ngx_posted_events); + } + +#endif + if (r->done) { /* * trigger connection event handler if the subrequest was diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -3905,6 +3905,20 @@ ngx_http_upstream_thread_event_handler(n r->main->blocked--; r->aio = 0; +#if (NGX_HTTP_V2) + + if (r->stream) { + /* + * for HTTP/2, trigger a write event on the main connection + * to handle sendfile() in threads + */ + + ngx_post_event(r->stream->connection->connection->write, + &ngx_posted_events); + } + +#endif + if (r->done) { /* * trigger connection event handler if the subrequest was From dnj0496 at gmail.com Thu Nov 11 03:22:50 2021 From: dnj0496 at gmail.com (Dk Jack) Date: Wed, 10 Nov 2021 19:22:50 -0800 Subject: ngx_http_cleanup_add Message-ID: Hi, In my module, I am allocating some memory for each request 
and saving the ptr in my module context. I am registering a callback using ngx_http_cleanup_add to perform cleanup for each request. I am releasing the memory for the allocation in the cleanup callback. In the log phase, I want to make available the contents of the memory I allocated as an nginx variable. However, when I try to print this variable in the log, the memory has already been de-allocated. It looks like the clean-up handlers are called before the log phase. Since I have released the memory in the cleanup handler, I have nothing to log in the log phase. Is there a way to defer the calling of the clean-up handler until after the log phase? Dk. -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Nov 11 03:47:27 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 11 Nov 2021 06:47:27 +0300 Subject: ngx_http_cleanup_add In-Reply-To: References: Message-ID: Hello! On Wed, Nov 10, 2021 at 07:22:50PM -0800, Dk Jack wrote: > Hi, > In my module, I am allocating some memory for each request and saving the > ptr in my module context. I am registering a callback using > ngx_http_cleanup_add to perform cleanup for each request. I am releasing > the memory for the allocation in the cleanup callback. In the log phase, I > want to make available the contents of the memory I allocated as an nginx > variable. > > However, when I try to print this variable in the log, the memory has > already been de-allocated. It looks like the clean-up handlers are called > before the log phase. Since I have released the memory in the cleanup > handler, I have nothing to log in the log phase. Is there a way to defer > the calling of the clean-up handler until after the log phase? Request cleanup handlers as allocated by ngx_http_cleanup_add() are expected to be called before logging: these are used to stop various activities which might still be going on when nginx decides to terminate the request. 
Notably, it is used by the upstream module to close upstream connections. If you want to free the memory, a better idea would be to use pool cleanup handlers instead, which are registered by the ngx_pool_cleanup_add() function. These are called while actually freeing request memory, after all request processing is completely finished, including logging. See here for details: http://nginx.org/en/docs/dev/development_guide.html#memory_management Note well that for per-request allocations it is usually better to simply allocate memory from the request pool via ngx_palloc(), so it is freed automatically when the request pool is destroyed. Pool cleanup handlers are mostly needed to free various external resources, such as file descriptors or memory from external libraries. -- Maxim Dounin http://mdounin.ru/ From dnj0496 at gmail.com Thu Nov 11 04:03:28 2021 From: dnj0496 at gmail.com (Dk Jack) Date: Wed, 10 Nov 2021 20:03:28 -0800 Subject: ngx_http_cleanup_add In-Reply-To: References: Message-ID: Thanks Maxim, Thanks for the suggestion about using ngx_pool_cleanup_add. Since this is external library memory I couldn't use ngx_pcalloc. Thanks again. On Wed, Nov 10, 2021 at 7:47 PM Maxim Dounin wrote: > Hello! > > On Wed, Nov 10, 2021 at 07:22:50PM -0800, Dk Jack wrote: > > > Hi, > > In my module, I am allocating some memory for each request and saving the > > ptr in my module context. I am registering a callback using > > ngx_http_cleanup_add to perform cleanup for each request. I am releasing > > the memory for the allocation in the cleanup callback. In the log phase, > I > > want to make available the contents of the memory I allocated as an nginx > > variable. > > > > However, when I try to print this variable in the log, the memory has > > already been de-allocated. It looks like the clean-up handlers are called > > before the log phase. Since I have released the memory in the cleanup > > handler, I have nothing to log in the log phase.
Is there a way to defer > > the calling of the clean-up handler until after the log phase? > > Request cleanup handlers as allocated by ngx_http_cleanup_add() > are expected to be called before logging: these are used to stop > various activities which might still be going on when nginx > decides to terminate the request. Notably, it used by the > upstream module to close upstream connections. > > If you want to free the memory, a better idea would be to use pool > cleanup handlers instead, which are registered by the > ngx_pool_cleanup_add() function. These are called while actually > freeing request memory, after all request processing is completely > finished, including logging. See here for details: > > http://nginx.org/en/docs/dev/development_guide.html#memory_management > > Note well that for per-request allocations it is usually better to > simply allocate memory from the request pool via ngx_palloc(), so > it is freed automatically when the request pool is destroyed. > Pool cleanup handlers are mostly needed to free various external > resources, such as file descriptors or memory from external > libraries. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Nov 11 04:21:11 2021 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Thu, 11 Nov 2021 07:21:11 +0300 Subject: [PATCH 3 of 4] SSL: SSL_sendfile(SF_NODISKIO) support In-Reply-To: References: Message-ID: <98d3beb63f32cbb68d1c.1636604471@vm-bsd.mdounin.ru> # HG changeset patch # User Maxim Dounin # Date 1636603895 -10800 # Thu Nov 11 07:11:35 2021 +0300 # Node ID 98d3beb63f32cbb68d1cdcec385614d32129cad0 # Parent 4a954e89b1ae8539bbe08c5afc1d5c9828d82d6f SSL: SSL_sendfile(SF_NODISKIO) support. 
diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -2942,7 +2942,7 @@ ngx_ssl_sendfile(ngx_connection_t *c, ng { #ifdef BIO_get_ktls_send - int sslerr; + int sslerr, flags; ssize_t n; ngx_err_t err; @@ -2954,8 +2954,14 @@ ngx_ssl_sendfile(ngx_connection_t *c, ng ngx_set_errno(0); +#if (NGX_HAVE_SENDFILE_NODISKIO) + flags = (c->busy_count <= 2) ? SF_NODISKIO : 0; +#else + flags = 0; +#endif + n = SSL_sendfile(c->ssl->connection, file->file->fd, file->file_pos, - size, 0); + size, flags); ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_sendfile: %d", n); @@ -2974,6 +2980,10 @@ ngx_ssl_sendfile(ngx_connection_t *c, ng ngx_post_event(c->read, &ngx_posted_events); } +#if (NGX_HAVE_SENDFILE_NODISKIO) + c->busy_count = 0; +#endif + c->sent += n; return n; @@ -3038,6 +3048,23 @@ ngx_ssl_sendfile(ngx_connection_t *c, ng ngx_post_event(c->read, &ngx_posted_events); } +#if (NGX_HAVE_SENDFILE_NODISKIO) + + if (ngx_errno == EBUSY) { + c->busy_count++; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL_sendfile() busy, count:%d", c->busy_count); + + if (c->write->posted) { + ngx_delete_posted_event(c->write); + } + + ngx_post_event(c->write, &ngx_posted_next_events); + } + +#endif + c->write->ready = 0; return NGX_AGAIN; } From mdounin at mdounin.ru Thu Nov 11 04:21:08 2021 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Thu, 11 Nov 2021 07:21:08 +0300 Subject: [PATCH 0 of 4] sendfile(SF_NODISKIO) on FreeBSD Message-ID: Hello! Here are some experimental patches which remove AIO preloading previously used with sendfile(SF_NODISKIO) on FreeBSD, so the code instead relies on non-blocking loading data from disk by sendfile() itself, as available since FreeBSD 11 (that is, on all supported FreeBSD versions). Review and comments appreciated. 
-- Maxim Dounin From mdounin at mdounin.ru Thu Nov 11 04:21:12 2021 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Thu, 11 Nov 2021 07:21:12 +0300 Subject: [PATCH 4 of 4] Support for sendfile(SF_NOCACHE) In-Reply-To: References: Message-ID: <10f96e74ae73e1c53a3f.1636604472@vm-bsd.mdounin.ru> # HG changeset patch # User Maxim Dounin # Date 1636603897 -10800 # Thu Nov 11 07:11:37 2021 +0300 # Node ID 10f96e74ae73e1c53a3fd08e7e1c26754c8969ed # Parent 98d3beb63f32cbb68d1cdcec385614d32129cad0 Support for sendfile(SF_NOCACHE). The SF_NOCACHE flag, introduced in FreeBSD 11 along with the new non-blocking sendfile() implementation by glebius@, makes it possible to use sendfile() along with the "directio" directive. diff --git a/src/core/ngx_output_chain.c b/src/core/ngx_output_chain.c --- a/src/core/ngx_output_chain.c +++ b/src/core/ngx_output_chain.c @@ -256,9 +256,11 @@ ngx_output_chain_as_is(ngx_output_chain_ } #endif +#if !(NGX_HAVE_SENDFILE_NODISKIO) if (buf->in_file && buf->file->directio) { return 0; } +#endif sendfile = ctx->sendfile; diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -2955,7 +2955,13 @@ ngx_ssl_sendfile(ngx_connection_t *c, ng ngx_set_errno(0); #if (NGX_HAVE_SENDFILE_NODISKIO) + flags = (c->busy_count <= 2) ? SF_NODISKIO : 0; + + if (file->file->directio) { + flags |= SF_NOCACHE; + } + #else flags = 0; #endif diff --git a/src/os/unix/ngx_freebsd_sendfile_chain.c b/src/os/unix/ngx_freebsd_sendfile_chain.c --- a/src/os/unix/ngx_freebsd_sendfile_chain.c +++ b/src/os/unix/ngx_freebsd_sendfile_chain.c @@ -174,7 +174,13 @@ ngx_freebsd_sendfile_chain(ngx_connectio sent = 0; #if (NGX_HAVE_SENDFILE_NODISKIO) + flags = (c->busy_count <= 2) ? 
SF_NODISKIO : 0; + + if (file->file->directio) { + flags |= SF_NOCACHE; + } + #endif rc = sendfile(file->file->fd, c->fd, file->file_pos, From mdounin at mdounin.ru Thu Nov 11 04:21:09 2021 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Thu, 11 Nov 2021 07:21:09 +0300 Subject: [PATCH 1 of 4] Removed "aio sendfile", deprecated since 1.7.11 In-Reply-To: References: Message-ID: <0fb75ef9dbca698e5e85.1636604469@vm-bsd.mdounin.ru> # HG changeset patch # User Maxim Dounin # Date 1636600255 -10800 # Thu Nov 11 06:10:55 2021 +0300 # Node ID 0fb75ef9dbca698e5e855145cf6a12180a36d400 # Parent 82b750b20c5205d685e59031247fe898f011394e Removed "aio sendfile", deprecated since 1.7.11. diff --git a/src/http/ngx_http_core_module.c b/src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c +++ b/src/http/ngx_http_core_module.c @@ -4568,19 +4568,6 @@ ngx_http_core_set_aio(ngx_conf_t *cf, ng #endif } -#if (NGX_HAVE_AIO_SENDFILE) - - if (ngx_strcmp(value[1].data, "sendfile") == 0) { - clcf->aio = NGX_HTTP_AIO_ON; - - ngx_conf_log_error(NGX_LOG_WARN, cf, 0, - "the \"sendfile\" parameter of " - "the \"aio\" directive is deprecated"); - return NGX_CONF_OK; - } - -#endif - if (ngx_strncmp(value[1].data, "threads", 7) == 0 && (value[1].len == 7 || value[1].data[7] == '=')) { From mdounin at mdounin.ru Thu Nov 11 04:21:10 2021 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Thu, 11 Nov 2021 07:21:10 +0300 Subject: [PATCH 2 of 4] Simplified sendfile(SF_NODISKIO) usage In-Reply-To: References: Message-ID: <4a954e89b1ae8539bbe0.1636604470@vm-bsd.mdounin.ru> # HG changeset patch # User Maxim Dounin # Date 1636603886 -10800 # Thu Nov 11 07:11:26 2021 +0300 # Node ID 4a954e89b1ae8539bbe08c5afc1d5c9828d82d6f # Parent 0fb75ef9dbca698e5e855145cf6a12180a36d400 Simplified sendfile(SF_NODISKIO) usage. Starting with FreeBSD 11, there is no need to use AIO operations to preload data into cache for sendfile(SF_NODISKIO) to work. 
Instead, sendfile() handles non-blocking loading data from disk by itself. It still can, however, return EBUSY if a page is already being loaded (for example, by a different process). If this happens, we now post an event for the next event loop iteration, so sendfile() is retried "after a short period", as manpage recommends. The limit of the number of EBUSY tolerated without any progress is preserved, but now it does not result in an alert, since on an idle system event loop iteration might be very short and EBUSY can happen many times in a row. Instead, SF_NODISKIO is simply disabled for one call once the limit is reached. With this change, sendfile(SF_NODISKIO) is now used automatically as long as sendfile() is enabled, and no longer requires "aio on;". diff --git a/auto/os/freebsd b/auto/os/freebsd --- a/auto/os/freebsd +++ b/auto/os/freebsd @@ -44,12 +44,10 @@ if [ $osreldate -gt 300007 ]; then CORE_SRCS="$CORE_SRCS $FREEBSD_SENDFILE_SRCS" fi -if [ $NGX_FILE_AIO = YES ]; then - if [ $osreldate -gt 502103 ]; then - echo " + sendfile()'s SF_NODISKIO found" +if [ $osreldate -gt 1100000 ]; then + echo " + sendfile()'s SF_NODISKIO found" - have=NGX_HAVE_AIO_SENDFILE . auto/have - fi + have=NGX_HAVE_SENDFILE_NODISKIO . 
auto/have fi # POSIX semaphores diff --git a/src/core/ngx_buf.h b/src/core/ngx_buf.h --- a/src/core/ngx_buf.h +++ b/src/core/ngx_buf.h @@ -90,9 +90,6 @@ struct ngx_output_chain_ctx_s { #if (NGX_HAVE_FILE_AIO || NGX_COMPAT) ngx_output_chain_aio_pt aio_handler; -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) - ssize_t (*aio_preload)(ngx_buf_t *file); -#endif #endif #if (NGX_THREADS || NGX_COMPAT) diff --git a/src/core/ngx_connection.h b/src/core/ngx_connection.h --- a/src/core/ngx_connection.h +++ b/src/core/ngx_connection.h @@ -185,7 +185,7 @@ struct ngx_connection_s { unsigned need_last_buf:1; -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) +#if (NGX_HAVE_SENDFILE_NODISKIO || NGX_COMPAT) unsigned busy_count:2; #endif diff --git a/src/core/ngx_module.h b/src/core/ngx_module.h --- a/src/core/ngx_module.h +++ b/src/core/ngx_module.h @@ -41,7 +41,7 @@ #define NGX_MODULE_SIGNATURE_3 "0" #endif -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) +#if (NGX_HAVE_SENDFILE_NODISKIO || NGX_COMPAT) #define NGX_MODULE_SIGNATURE_4 "1" #else #define NGX_MODULE_SIGNATURE_4 "0" diff --git a/src/core/ngx_output_chain.c b/src/core/ngx_output_chain.c --- a/src/core/ngx_output_chain.c +++ b/src/core/ngx_output_chain.c @@ -29,10 +29,6 @@ static ngx_inline ngx_int_t ngx_output_chain_as_is(ngx_output_chain_ctx_t *ctx, ngx_buf_t *buf); -#if (NGX_HAVE_AIO_SENDFILE) -static ngx_int_t ngx_output_chain_aio_setup(ngx_output_chain_ctx_t *ctx, - ngx_file_t *file); -#endif static ngx_int_t ngx_output_chain_add_copy(ngx_pool_t *pool, ngx_chain_t **chain, ngx_chain_t *in); static ngx_int_t ngx_output_chain_align_file_buf(ngx_output_chain_ctx_t *ctx, @@ -283,12 +279,6 @@ ngx_output_chain_as_is(ngx_output_chain_ buf->in_file = 0; } -#if (NGX_HAVE_AIO_SENDFILE) - if (ctx->aio_preload && buf->in_file) { - (void) ngx_output_chain_aio_setup(ctx, buf->file); - } -#endif - if (ctx->need_in_memory && !ngx_buf_in_memory(buf)) { return 0; } @@ -301,28 +291,6 @@ ngx_output_chain_as_is(ngx_output_chain_ } -#if 
(NGX_HAVE_AIO_SENDFILE) - -static ngx_int_t -ngx_output_chain_aio_setup(ngx_output_chain_ctx_t *ctx, ngx_file_t *file) -{ - ngx_event_aio_t *aio; - - if (file->aio == NULL && ngx_file_aio_init(file, ctx->pool) != NGX_OK) { - return NGX_ERROR; - } - - aio = file->aio; - - aio->data = ctx->filter_ctx; - aio->preload_handler = ctx->aio_preload; - - return NGX_OK; -} - -#endif - - static ngx_int_t ngx_output_chain_add_copy(ngx_pool_t *pool, ngx_chain_t **chain, ngx_chain_t *in) diff --git a/src/event/ngx_event.h b/src/event/ngx_event.h --- a/src/event/ngx_event.h +++ b/src/event/ngx_event.h @@ -147,10 +147,6 @@ struct ngx_event_aio_s { ngx_fd_t fd; -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) - ssize_t (*preload_handler)(ngx_buf_t *file); -#endif - #if (NGX_HAVE_EVENTFD) int64_t res; #endif diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c +++ b/src/http/ngx_http_copy_filter_module.c @@ -19,10 +19,6 @@ typedef struct { static void ngx_http_copy_aio_handler(ngx_output_chain_ctx_t *ctx, ngx_file_t *file); static void ngx_http_copy_aio_event_handler(ngx_event_t *ev); -#if (NGX_HAVE_AIO_SENDFILE) -static ssize_t ngx_http_copy_aio_sendfile_preload(ngx_buf_t *file); -static void ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev); -#endif #endif #if (NGX_THREADS) static ngx_int_t ngx_http_copy_thread_handler(ngx_thread_task_t *task, @@ -128,9 +124,6 @@ ngx_http_copy_filter(ngx_http_request_t #if (NGX_HAVE_FILE_AIO) if (ngx_file_aio && clcf->aio == NGX_HTTP_AIO_ON) { ctx->aio_handler = ngx_http_copy_aio_handler; -#if (NGX_HAVE_AIO_SENDFILE) - ctx->aio_preload = ngx_http_copy_aio_sendfile_preload; -#endif } #endif @@ -207,53 +200,6 @@ ngx_http_copy_aio_event_handler(ngx_even ngx_http_run_posted_requests(c); } - -#if (NGX_HAVE_AIO_SENDFILE) - -static ssize_t -ngx_http_copy_aio_sendfile_preload(ngx_buf_t *file) -{ - ssize_t n; - static u_char buf[1]; - ngx_event_aio_t *aio; - 
ngx_http_request_t *r; - ngx_output_chain_ctx_t *ctx; - - n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); - - if (n == NGX_AGAIN) { - aio = file->file->aio; - aio->handler = ngx_http_copy_aio_sendfile_event_handler; - - r = aio->data; - r->main->blocked++; - r->aio = 1; - - ctx = ngx_http_get_module_ctx(r, ngx_http_copy_filter_module); - ctx->aio = 1; - } - - return n; -} - - -static void -ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev) -{ - ngx_event_aio_t *aio; - ngx_http_request_t *r; - - aio = ev->data; - r = aio->data; - - r->main->blocked--; - r->aio = 0; - ev->complete = 0; - - r->connection->write->handler(r->connection->write); -} - -#endif #endif diff --git a/src/os/unix/ngx_freebsd_sendfile_chain.c b/src/os/unix/ngx_freebsd_sendfile_chain.c --- a/src/os/unix/ngx_freebsd_sendfile_chain.c +++ b/src/os/unix/ngx_freebsd_sendfile_chain.c @@ -32,22 +32,21 @@ ngx_chain_t * ngx_freebsd_sendfile_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit) { - int rc, flags; - off_t send, prev_send, sent; - size_t file_size; - ssize_t n; - ngx_uint_t eintr, eagain; - ngx_err_t err; - ngx_buf_t *file; - ngx_event_t *wev; - ngx_chain_t *cl; - ngx_iovec_t header, trailer; - struct sf_hdtr hdtr; - struct iovec headers[NGX_IOVS_PREALLOCATE]; - struct iovec trailers[NGX_IOVS_PREALLOCATE]; -#if (NGX_HAVE_AIO_SENDFILE) - ngx_uint_t ebusy; - ngx_event_aio_t *aio; + int rc, flags; + off_t send, prev_send, sent; + size_t file_size; + ssize_t n; + ngx_uint_t eintr, eagain; + ngx_err_t err; + ngx_buf_t *file; + ngx_event_t *wev; + ngx_chain_t *cl; + ngx_iovec_t header, trailer; + struct sf_hdtr hdtr; + struct iovec headers[NGX_IOVS_PREALLOCATE]; + struct iovec trailers[NGX_IOVS_PREALLOCATE]; +#if (NGX_HAVE_SENDFILE_NODISKIO) + ngx_uint_t ebusy; #endif wev = c->write; @@ -77,11 +76,6 @@ ngx_freebsd_sendfile_chain(ngx_connectio eagain = 0; flags = 0; -#if (NGX_HAVE_AIO_SENDFILE && NGX_SUPPRESS_WARN) - aio = NULL; - file = NULL; -#endif - header.iovs = 
headers; header.nalloc = NGX_IOVS_PREALLOCATE; @@ -90,7 +84,7 @@ ngx_freebsd_sendfile_chain(ngx_connectio for ( ;; ) { eintr = 0; -#if (NGX_HAVE_AIO_SENDFILE) +#if (NGX_HAVE_SENDFILE_NODISKIO) ebusy = 0; #endif prev_send = send; @@ -179,9 +173,8 @@ ngx_freebsd_sendfile_chain(ngx_connectio sent = 0; -#if (NGX_HAVE_AIO_SENDFILE) - aio = file->file->aio; - flags = (aio && aio->preload_handler) ? SF_NODISKIO : 0; +#if (NGX_HAVE_SENDFILE_NODISKIO) + flags = (c->busy_count <= 2) ? SF_NODISKIO : 0; #endif rc = sendfile(file->file->fd, c->fd, file->file_pos, @@ -199,7 +192,7 @@ ngx_freebsd_sendfile_chain(ngx_connectio eintr = 1; break; -#if (NGX_HAVE_AIO_SENDFILE) +#if (NGX_HAVE_SENDFILE_NODISKIO) case NGX_EBUSY: ebusy = 1; break; @@ -252,54 +245,30 @@ ngx_freebsd_sendfile_chain(ngx_connectio in = ngx_chain_update_sent(in, sent); -#if (NGX_HAVE_AIO_SENDFILE) +#if (NGX_HAVE_SENDFILE_NODISKIO) if (ebusy) { - if (aio->event.active) { - /* - * tolerate duplicate calls; they can happen due to subrequests - * or multiple calls of the next body filter from a filter - */ - - if (sent) { - c->busy_count = 0; - } - - return in; - } - if (sent == 0) { c->busy_count++; - if (c->busy_count > 2) { - ngx_log_error(NGX_LOG_ALERT, c->log, 0, - "sendfile(%V) returned busy again", - &file->file->name); - - c->busy_count = 0; - aio->preload_handler = NULL; - - send = prev_send; - continue; - } + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "sendfile() busy, count:%d", c->busy_count); } else { c->busy_count = 0; } - n = aio->preload_handler(file); - - if (n > 0) { - send = prev_send + sent; - continue; + if (wev->posted) { + ngx_delete_posted_event(wev); } + ngx_post_event(wev, &ngx_posted_next_events); + + wev->ready = 0; return in; } - if (flags == SF_NODISKIO) { - c->busy_count = 0; - } + c->busy_count = 0; #endif From pluknet at nginx.com Thu Nov 11 11:48:32 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 11 Nov 2021 14:48:32 +0300 Subject: [PATCH 2 of 3] HTTP/3: allowed 
QUIC stream connection reuse In-Reply-To: <20211110214218.wq7bozp3ppy72kgf@Romans-MacBook-Pro.local> References: <8ae53c592c719af4f3ba.1634561309@arut-laptop> <20211110214218.wq7bozp3ppy72kgf@Romans-MacBook-Pro.local> Message-ID: <31C3455F-41FD-4474-8F3C-0CCEAE8D13CE@nginx.com> > On 11 Nov 2021, at 00:42, Roman Arutyunyan wrote: > > On Wed, Nov 10, 2021 at 03:59:39PM +0300, Sergey Kandaurov wrote: >> >>> On 18 Oct 2021, at 15:48, Roman Arutyunyan wrote: >>> >>> # HG changeset patch >>> # User Roman Arutyunyan >>> # Date 1634561226 -10800 >>> # Mon Oct 18 15:47:06 2021 +0300 >>> # Branch quic >>> # Node ID 8ae53c592c719af4f3ba47dbd85f78be27aaf7db >>> # Parent 8739f475583031399879ef0af2eb5af76008449e >>> HTTP/3: allowed QUIC stream connection reuse. >>> >>> A QUIC stream connection is treated as reusable until first bytes of request >>> arrive, which is also when the request object is now allocated. A connection >>> closed as a result of draining, is reset with the error code >>> H3_REQUEST_REJECTED. Such behavior is allowed by quic-http-34: >>> >>> Once a request stream has been opened, the request MAY be cancelled >>> by either endpoint. Clients cancel requests if the response is no >>> longer of interest; servers cancel requests if they are unable to or >>> choose not to respond. >>> >>> When the server cancels a request without performing any application >>> processing, the request is considered "rejected." The server SHOULD >>> abort its response stream with the error code H3_REQUEST_REJECTED. >>> >>> The client can treat requests rejected by the server as though they had >>> never been sent at all, thereby allowing them to be retried later. >>> >> >> Looks good. See below for minor comments. >> BTW, if we still hit the worker_connections limit, this leads to >> an entire QUIC connection close, but I doubt we can easily improve this. 
> > When there's not enough worker_connections for a new QUIC stream, we can > send H3_REQUEST_REJECTED to client without creating a stream. We can discuss > this later. > >>> diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c >>> --- a/src/http/ngx_http_request.c >>> +++ b/src/http/ngx_http_request.c >>> @@ -3743,15 +3743,14 @@ ngx_http_free_request(ngx_http_request_t >>> >>> log->action = "closing request"; >>> >>> - if (r->connection->timedout) { >>> + if (r->connection->timedout >>> +#if (NGX_HTTP_QUIC) >>> + && r->connection->quic == NULL >>> +#endif >>> + ) >>> + { >>> clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); >>> >>> -#if (NGX_HTTP_V3) >>> - if (r->connection->quic) { >>> - (void) ngx_quic_reset_stream(r->connection, >>> - NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR); >>> - } else >>> -#endif >>> if (clcf->reset_timedout_connection) { >>> linger.l_onoff = 1; >>> linger.l_linger = 0; >>> @@ -3763,14 +3762,6 @@ ngx_http_free_request(ngx_http_request_t >>> "setsockopt(SO_LINGER) failed"); >>> } >>> } >>> - >>> - } else if (!r->response_sent) { >>> -#if (NGX_HTTP_V3) >>> - if (r->connection->quic) { >>> - (void) ngx_quic_reset_stream(r->connection, >>> - NGX_HTTP_V3_ERR_INTERNAL_ERROR); >>> - } >>> -#endif >>> } >>> >>> /* the various request strings were allocated from r->pool */ >>> @@ -3830,6 +3821,12 @@ ngx_http_close_connection(ngx_connection >>> >>> #endif >>> >>> +#if (NGX_HTTP_V3) >>> + if (ngx_http_v3_connection(c)) { >>> + ngx_http_v3_reset_connection(c); >>> + } >>> +#endif >>> + >>> #if (NGX_STAT_STUB) >>> (void) ngx_atomic_fetch_add(ngx_stat_active, -1); >>> #endif >>> diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h >>> --- a/src/http/v3/ngx_http_v3.h >>> +++ b/src/http/v3/ngx_http_v3.h >>> @@ -90,6 +90,9 @@ >>> #define ngx_http_v3_shutdown_connection(c, code, reason) \ >>> ngx_quic_shutdown_connection(c->quic->parent, code, reason) >>> >>> +#define ngx_http_v3_connection(c) \ >>> + ((c)->quic ? 
ngx_http_quic_get_connection(c)->addr_conf->http3 : 0) >>> + >>> >>> typedef struct { >>> size_t max_table_capacity; >>> @@ -138,6 +141,7 @@ struct ngx_http_v3_session_s { >>> >>> >>> void ngx_http_v3_init(ngx_connection_t *c); >>> +void ngx_http_v3_reset_connection(ngx_connection_t *c); >>> ngx_int_t ngx_http_v3_init_session(ngx_connection_t *c); >>> ngx_int_t ngx_http_v3_check_flood(ngx_connection_t *c); >>> >>> diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c >>> --- a/src/http/v3/ngx_http_v3_request.c >>> +++ b/src/http/v3/ngx_http_v3_request.c >>> @@ -10,6 +10,7 @@ >>> #include >>> >>> >>> +static void ngx_http_v3_wait_request_handler(ngx_event_t *rev); >>> static void ngx_http_v3_cleanup_request(void *data); >>> static void ngx_http_v3_process_request(ngx_event_t *rev); >>> static ngx_int_t ngx_http_v3_process_header(ngx_http_request_t *r, >>> @@ -53,12 +54,8 @@ static const struct { >>> void >>> ngx_http_v3_init(ngx_connection_t *c) >>> { >>> - size_t size; >>> uint64_t n; >>> - ngx_buf_t *b; >>> ngx_event_t *rev; >>> - ngx_pool_cleanup_t *cln; >>> - ngx_http_request_t *r; >>> ngx_http_connection_t *hc; >>> ngx_http_v3_session_t *h3c; >>> ngx_http_core_loc_conf_t *clcf; >>> @@ -96,7 +93,7 @@ ngx_http_v3_init(ngx_connection_t *c) >>> h3c = ngx_http_v3_get_session(c); >>> >>> if (h3c->goaway) { >>> - ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_REQUEST_REJECTED); >>> + c->close = 1; >>> ngx_http_close_connection(c); >>> return; >>> } >>> @@ -116,21 +113,57 @@ ngx_http_v3_init(ngx_connection_t *c) >>> "reached maximum number of requests"); >>> } >>> >>> - cln = ngx_pool_cleanup_add(c->pool, 0); >>> - if (cln == NULL) { >>> + rev = c->read; >>> + rev->handler = ngx_http_v3_wait_request_handler; >>> + c->write->handler = ngx_http_empty_handler; >>> + >>> + if (rev->ready) { >>> + rev->handler(rev); >>> + return; >>> + } >>> + >>> + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); >>> + >>> + ngx_add_timer(rev, 
cscf->client_header_timeout); >>> + ngx_reusable_connection(c, 1); >>> + >>> + if (ngx_handle_read_event(rev, 0) != NGX_OK) { >>> + ngx_http_close_connection(c); >>> + return; >>> + } >>> +} >>> + >>> + >>> +static void >>> +ngx_http_v3_wait_request_handler(ngx_event_t *rev) >>> +{ >>> + size_t size; >>> + ssize_t n; >>> + ngx_buf_t *b; >>> + ngx_connection_t *c; >>> + ngx_pool_cleanup_t *cln; >>> + ngx_http_request_t *r; >>> + ngx_http_connection_t *hc; >>> + ngx_http_v3_session_t *h3c; >>> + ngx_http_core_srv_conf_t *cscf; >>> + >>> + c = rev->data; >>> + >>> + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 wait request handler"); >>> + >>> + if (rev->timedout) { >>> + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out"); >>> + c->timedout = 1; >>> ngx_http_close_connection(c); >>> return; >>> } >>> >>> - cln->handler = ngx_http_v3_cleanup_request; >>> - cln->data = c; >>> - >>> - h3c->nrequests++; >>> - >>> - if (h3c->keepalive.timer_set) { >>> - ngx_del_timer(&h3c->keepalive); >>> + if (c->close) { >>> + ngx_http_close_connection(c); >>> + return; >>> } >>> >>> + hc = c->data; >>> cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); >>> >>> size = cscf->client_header_buffer_size; >>> @@ -159,8 +192,49 @@ ngx_http_v3_init(ngx_connection_t *c) >>> b->end = b->last + size; >>> } >>> >>> + n = c->recv(c, b->last, size); >>> + >>> + if (n == NGX_AGAIN) { >>> + >>> + if (!rev->timer_set) { >>> + ngx_add_timer(rev, cscf->client_header_timeout); >>> + ngx_reusable_connection(c, 1); >>> + } >>> + >>> + if (ngx_handle_read_event(rev, 0) != NGX_OK) { >>> + ngx_http_close_connection(c); >>> + return; >>> + } >>> + >>> + /* >>> + * We are trying to not hold c->buffer's memory for an idle connection. 
>>> + */ >>> + >>> + if (ngx_pfree(c->pool, b->start) == NGX_OK) { >>> + b->start = NULL; >>> + } >>> + >>> + return; >>> + } >>> + >>> + if (n == NGX_ERROR) { >>> + ngx_http_close_connection(c); >>> + return; >>> + } >>> + >>> + if (n == 0) { >>> + ngx_log_error(NGX_LOG_INFO, c->log, 0, >>> + "client closed connection"); >>> + ngx_http_close_connection(c); >>> + return; >>> + } >>> + >>> + b->last += n; >>> + >>> c->log->action = "reading client request"; >>> >>> + ngx_reusable_connection(c, 0); >>> + >>> r = ngx_http_create_request(c); >>> if (r == NULL) { >>> ngx_http_close_connection(c); >>> @@ -171,7 +245,7 @@ ngx_http_v3_init(ngx_connection_t *c) >>> >>> r->v3_parse = ngx_pcalloc(r->pool, sizeof(ngx_http_v3_parse_t)); >>> if (r->v3_parse == NULL) { >>> - ngx_http_close_connection(c); >>> + ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); >>> return; >>> } >>> >> >> Since we now defer request initialization until first bytes, that's fine. >> >>> @@ -179,23 +253,59 @@ ngx_http_v3_init(ngx_connection_t *c) >>> * cscf->large_client_header_buffers.num; >>> >>> c->data = r; >>> - c->requests = n + 1; >>> + c->requests = (c->quic->id >> 2) + 1; >>> + >>> + cln = ngx_pool_cleanup_add(r->pool, 0); >>> + if (cln == NULL) { >>> + ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); >>> + return; >>> + } >>> + >>> + cln->handler = ngx_http_v3_cleanup_request; >>> + cln->data = r; >>> + >>> + h3c = ngx_http_v3_get_session(c); >>> + h3c->nrequests++; >>> + >>> + if (h3c->keepalive.timer_set) { >>> + ngx_del_timer(&h3c->keepalive); >>> + } >>> >>> - rev = c->read; >>> rev->handler = ngx_http_v3_process_request; >>> + ngx_http_v3_process_request(rev); >>> +} >>> >>> - ngx_http_v3_process_request(rev); >>> + >>> +void >>> +ngx_http_v3_reset_connection(ngx_connection_t *c) >>> +{ >>> + if (c->timedout) { >>> + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_GENERAL_PROTOCOL_ERROR); >>> + >>> + } else if (c->close) { >>> + ngx_quic_reset_stream(c, 
NGX_HTTP_V3_ERR_REQUEST_REJECTED); >>> + >>> + } else if (c->requests == 0 || c->error) { >>> + ngx_quic_reset_stream(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR); >>> + } >>> } >> >> The c->requests check looks suspicious, >> is it something to catch a not yet initialized request? > > If for whatever reason we close the stream connection before creating a request > (thus c->requests == 0), that is a clear signal we need to reset the stream > with NGX_HTTP_V3_ERR_INTERNAL_ERROR error code. The exceptions are > timeout and connection reuse, but these cases are handled just before. > All other cases are errors (failed allocation, recv() failure etc). > >>> static void >>> ngx_http_v3_cleanup_request(void *data) >>> { >>> - ngx_connection_t *c = data; >>> + ngx_http_request_t *r = data; >>> >>> + ngx_connection_t *c; >>> ngx_http_v3_session_t *h3c; >>> ngx_http_core_loc_conf_t *clcf; >>> >>> + c = r->connection; >>> + >>> + if (!r->response_sent) { >>> + c->error = 1; >>> + } >>> + >>> h3c = ngx_http_v3_get_session(c); >>> >>> if (--h3c->nrequests == 0) { >>> diff --git a/src/http/v3/ngx_http_v3_streams.c b/src/http/v3/ngx_http_v3_streams.c >>> --- a/src/http/v3/ngx_http_v3_streams.c >>> +++ b/src/http/v3/ngx_http_v3_streams.c >>> @@ -49,7 +49,8 @@ ngx_http_v3_init_uni_stream(ngx_connecti >>> ngx_http_v3_finalize_connection(c, >>> NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, >>> "reached maximum number of uni streams"); >>> - ngx_http_close_connection(c); >>> + c->data = NULL; >>> + ngx_http_v3_close_uni_stream(c); >>> return; >>> } >>> >>> @@ -57,7 +58,11 @@ ngx_http_v3_init_uni_stream(ngx_connecti >>> >>> us = ngx_pcalloc(c->pool, sizeof(ngx_http_v3_uni_stream_t)); >>> if (us == NULL) { >>> - ngx_http_close_connection(c); >>> + ngx_http_v3_finalize_connection(c, >>> + NGX_HTTP_V3_ERR_INTERNAL_ERROR, >>> + "memory allocation error"); >>> + c->data = NULL; >>> + ngx_http_v3_close_uni_stream(c); >>> return; >>> } >>> >>> @@ -79,12 +84,12 @@ ngx_http_v3_close_uni_stream(ngx_connect >>> 
ngx_http_v3_session_t *h3c; >>> ngx_http_v3_uni_stream_t *us; >>> >>> - us = c->data; >>> - h3c = ngx_http_v3_get_session(c); >>> - >>> ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 close stream"); >>> >>> - if (us->index >= 0) { >>> + us = c->data; >>> + >>> + if (us && us->index >= 0) { >>> + h3c = ngx_http_v3_get_session(c); >>> h3c->known_streams[us->index] = NULL; >>> } >>> >> >> Is there any difference after switching to ngx_http_v3_close_uni_stream(), >> besides ngx_stat_active now no longer decremented? This itself looks like >> a right change, since ngx_stat_active isn't incremented for uni streams. > > ngx_stat_active is indeed a good reason. The other reason is > ngx_http_v3_reset_connection() which is called when closing an HTTP/3 stream > connection. A uni stream connection is also an HTTP/3 stream connection, but > we don't want to call ngx_http_v3_reset_connection() for it. We only > want to call it for request streams. We could add another condition, but it's > easier not to call common http code for uni streams at all. The point is QUIC connection finalization includes ngx_quic_close_streams(), which prevents further I/O on streams, so stream reset won't have an effect. In that sense, the patch could be simplified. On the other hand, with 3rd patch applied, this is certainly no longer true: calling ngx_http_close_connection() on unidirectional streams won't only mean a unidirectional stream cancellation non-sense, but that would also invoke creating a decoder stream as part of closing the given uni stream in case it doesn't exist yet. That stream cancellation would even be sent after CC, since calling ngx_quic_close_streams() already happened. Still, I don't think it should normally happen such that we would need an additional check in ngx_quic_open_stream() in addition to the below patch. To sum up, your change looks good. > >> BTW, we need additional checks to prevent processing new streams >> after ngx_http_v3_finalize_connection(). 
> > Makes sense. > >> # HG changeset patch >> # User Sergey Kandaurov >> # Date 1636549164 -10800 >> # Wed Nov 10 15:59:24 2021 +0300 >> # Branch quic >> # Node ID 924ee879f8befa1ec574d41a3979e5c5c5db8639 >> # Parent c6386afdd1552105c18ce5c47b4b5cd6f6de8b88 >> QUIC: stop processing new client streams on quic connection error. >> >> diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c >> --- a/src/event/quic/ngx_event_quic_streams.c >> +++ b/src/event/quic/ngx_event_quic_streams.c >> @@ -314,7 +314,7 @@ ngx_quic_create_client_stream(ngx_connec >> >> qc = ngx_quic_get_connection(c); >> >> - if (qc->shutdown) { >> + if (qc->shutdown || qc->error) { > > Why not qc->closing? Yes, indeed. > >> return NGX_QUIC_STREAM_GONE; >> } >> >> @@ -385,7 +385,7 @@ ngx_quic_create_client_stream(ngx_connec >> return NULL; >> } >> >> - if (qc->shutdown) { >> + if (qc->shutdown || qc->error) { >> return NGX_QUIC_STREAM_GONE; >> } >> } >> -- Sergey Kandaurov From xeioex at nginx.com Thu Nov 11 14:31:00 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 11 Nov 2021 14:31:00 +0000 Subject: [njs] RegExp: incapsulating PCRE API. Message-ID: details: https://hg.nginx.org/njs/rev/67ee2e4907a8 branches: changeset: 1743:67ee2e4907a8 user: Dmitry Volyntsev date: Thu Nov 11 14:26:30 2021 +0000 description: RegExp: incapsulating PCRE API. 
diffstat: src/njs_main.h | 1 - src/njs_parser.c | 2 +- src/njs_pcre.c | 214 +++++++++++++++++++++++++++++++------ src/njs_pcre.h | 40 ------- src/njs_regex.h | 65 +++++++++-- src/njs_regexp.c | 262 +++++++++++++--------------------------------- src/njs_regexp.h | 16 +-- src/njs_string.c | 31 ++-- src/njs_vm.h | 3 +- src/test/njs_unit_test.c | 2 + 10 files changed, 326 insertions(+), 310 deletions(-) diffs (truncated from 1159 to 1000 lines): diff -r dedadba0ee87 -r 67ee2e4907a8 src/njs_main.h --- a/src/njs_main.h Wed Nov 10 14:50:16 2021 +0000 +++ b/src/njs_main.h Thu Nov 11 14:26:30 2021 +0000 @@ -38,7 +38,6 @@ #include #include -#include #include #include diff -r dedadba0ee87 -r 67ee2e4907a8 src/njs_parser.c --- a/src/njs_parser.c Wed Nov 10 14:50:16 2021 +0000 +++ b/src/njs_parser.c Thu Nov 11 14:26:30 2021 +0000 @@ -1198,7 +1198,7 @@ njs_parser_regexp_literal(njs_parser_t * njs_int_t ret; njs_lexer_t *lexer; njs_value_t *value, retval; - njs_regexp_flags_t flags; + njs_regex_flags_t flags; njs_regexp_pattern_t *pattern; static const njs_value_t string_message = njs_string("message"); diff -r dedadba0ee87 -r 67ee2e4907a8 src/njs_pcre.c --- a/src/njs_pcre.c Wed Nov 10 14:50:16 2021 +0000 +++ b/src/njs_pcre.c Thu Nov 11 14:26:30 2021 +0000 @@ -7,21 +7,23 @@ #include +#include + static void *njs_pcre_malloc(size_t size); static void njs_pcre_free(void *p); -static njs_regex_context_t *regex_context; +static njs_regex_generic_ctx_t *regex_context; -njs_regex_context_t * -njs_regex_context_create(njs_pcre_malloc_t private_malloc, +njs_regex_generic_ctx_t * +njs_regex_generic_ctx_create(njs_pcre_malloc_t private_malloc, njs_pcre_free_t private_free, void *memory_data) { - njs_regex_context_t *ctx; + njs_regex_generic_ctx_t *ctx; - ctx = private_malloc(sizeof(njs_regex_context_t), memory_data); + ctx = private_malloc(sizeof(njs_regex_generic_ctx_t), memory_data); if (njs_fast_path(ctx != NULL)) { ctx->private_malloc = private_malloc; @@ -33,15 +35,138 @@ 
njs_regex_context_create(njs_pcre_malloc } +njs_regex_compile_ctx_t * +njs_regex_compile_ctx_create(njs_regex_generic_ctx_t *ctx) +{ + return ctx; +} + + +/* + * 1) PCRE with PCRE_JAVASCRIPT_COMPAT flag rejects regexps with + * lone closing square brackets as invalid. Whereas according + * to ES6: 11.8.5 it is a valid regexp expression. + * + * 2) escaping zero byte characters as "\u0000". + * + * Escaping it here as a workaround. + */ + +njs_int_t +njs_regex_escape(njs_mp_t *mp, njs_str_t *text) +{ + size_t brackets, zeros; + u_char *p, *dst, *start, *end; + njs_bool_t in; + + start = text->start; + end = text->start + text->length; + + in = 0; + zeros = 0; + brackets = 0; + + for (p = start; p < end; p++) { + + switch (*p) { + case '[': + in = 1; + break; + + case ']': + if (!in) { + brackets++; + } + + in = 0; + break; + + case '\\': + p++; + + if (p == end || *p != '\0') { + break; + } + + /* Fall through. */ + + case '\0': + zeros++; + break; + } + } + + if (!brackets && !zeros) { + return NJS_OK; + } + + text->length = text->length + brackets + zeros * njs_length("\\u0000"); + + text->start = njs_mp_alloc(mp, text->length); + if (njs_slow_path(text->start == NULL)) { + return NJS_ERROR; + } + + in = 0; + dst = text->start; + + for (p = start; p < end; p++) { + + switch (*p) { + case '[': + in = 1; + break; + + case ']': + if (!in) { + *dst++ = '\\'; + } + + in = 0; + break; + + case '\\': + *dst++ = *p++; + + if (p == end) { + goto done; + } + + if (*p != '\0') { + break; + } + + /* Fall through. 
*/ + + case '\0': + dst = njs_cpymem(dst, "\\u0000", 6); + continue; + } + + *dst++ = *p; + } + +done: + + text->length = dst - text->start; + + return NJS_OK; +} + + njs_int_t njs_regex_compile(njs_regex_t *regex, u_char *source, size_t len, - njs_uint_t options, njs_regex_context_t *ctx) + njs_regex_flags_t flags, njs_regex_compile_ctx_t *cctx, njs_trace_t *trace) { - int ret, err, erroff; - char *pattern, *error; - void *(*saved_malloc)(size_t size); - void (*saved_free)(void *p); - const char *errstr; + int ret, err, erroff; + char *pattern, *error; + void *(*saved_malloc)(size_t size); + void (*saved_free)(void *p); + njs_uint_t options; + const char *errstr; + njs_regex_generic_ctx_t *ctx; + + ctx = cctx; ret = NJS_ERROR; @@ -51,31 +176,43 @@ njs_regex_compile(njs_regex_t *regex, u_ pcre_free = njs_pcre_free; regex_context = ctx; - if (len == 0) { - pattern = (char *) source; +#ifdef PCRE_JAVASCRIPT_COMPAT + /* JavaScript compatibility has been introduced in PCRE-7.7. */ + options = PCRE_JAVASCRIPT_COMPAT; +#else + options = 0; +#endif + + if ((flags & NJS_REGEX_IGNORE_CASE)) { + options |= PCRE_CASELESS; + } - } else { - pattern = ctx->private_malloc(len + 1, ctx->memory_data); - if (njs_slow_path(pattern == NULL)) { - goto done; - } + if ((flags & NJS_REGEX_MULTILINE)) { + options |= PCRE_MULTILINE; + } - memcpy(pattern, source, len); - pattern[len] = '\0'; + if ((flags & NJS_REGEX_STICKY)) { + options |= PCRE_ANCHORED; } + if ((flags & NJS_REGEX_UTF8)) { + options |= PCRE_UTF8; + } + + pattern = (char *) source; + regex->code = pcre_compile(pattern, options, &errstr, &erroff, NULL); if (njs_slow_path(regex->code == NULL)) { error = pattern + erroff; if (*error != '\0') { - njs_alert(ctx->trace, NJS_LEVEL_ERROR, + njs_alert(trace, NJS_LEVEL_ERROR, "pcre_compile(\"%s\") failed: %s at \"%s\"", pattern, errstr, error); } else { - njs_alert(ctx->trace, NJS_LEVEL_ERROR, + njs_alert(trace, NJS_LEVEL_ERROR, "pcre_compile(\"%s\") failed: %s", pattern, errstr); } @@ 
-87,7 +224,7 @@ njs_regex_compile(njs_regex_t *regex, u_ regex->extra = pcre_study(regex->code, 0, &errstr); if (njs_slow_path(errstr != NULL)) { - njs_alert(ctx->trace, NJS_LEVEL_WARN, + njs_alert(trace, NJS_LEVEL_WARN, "pcre_study(\"%s\") failed: %s", pattern, errstr); } @@ -95,7 +232,7 @@ njs_regex_compile(njs_regex_t *regex, u_ ®ex->ncaptures); if (njs_slow_path(err < 0)) { - njs_alert(ctx->trace, NJS_LEVEL_ERROR, + njs_alert(trace, NJS_LEVEL_ERROR, "pcre_fullinfo(\"%s\", PCRE_INFO_CAPTURECOUNT) failed: %d", pattern, err); @@ -106,7 +243,7 @@ njs_regex_compile(njs_regex_t *regex, u_ ®ex->backrefmax); if (njs_slow_path(err < 0)) { - njs_alert(ctx->trace, NJS_LEVEL_ERROR, + njs_alert(trace, NJS_LEVEL_ERROR, "pcre_fullinfo(\"%s\", PCRE_INFO_BACKREFMAX) failed: %d", pattern, err); @@ -121,7 +258,7 @@ njs_regex_compile(njs_regex_t *regex, u_ ®ex->nentries); if (njs_slow_path(err < 0)) { - njs_alert(ctx->trace, NJS_LEVEL_ERROR, + njs_alert(trace, NJS_LEVEL_ERROR, "pcre_fullinfo(\"%s\", PCRE_INFO_NAMECOUNT) failed: %d", pattern, err); @@ -133,7 +270,7 @@ njs_regex_compile(njs_regex_t *regex, u_ ®ex->entry_size); if (njs_slow_path(err < 0)) { - njs_alert(ctx->trace, NJS_LEVEL_ERROR, "pcre_fullinfo(\"%s\", " + njs_alert(trace, NJS_LEVEL_ERROR, "pcre_fullinfo(\"%s\", " "PCRE_INFO_NAMEENTRYSIZE) failed: %d", pattern, err); goto done; @@ -143,7 +280,7 @@ njs_regex_compile(njs_regex_t *regex, u_ ®ex->entries); if (njs_slow_path(err < 0)) { - njs_alert(ctx->trace, NJS_LEVEL_ERROR, "pcre_fullinfo(\"%s\", " + njs_alert(trace, NJS_LEVEL_ERROR, "pcre_fullinfo(\"%s\", " "PCRE_INFO_NAMETABLE) failed: %d", pattern, err); goto done; @@ -193,7 +330,7 @@ njs_regex_named_captures(njs_regex_t *re njs_regex_match_data_t * -njs_regex_match_data(njs_regex_t *regex, njs_regex_context_t *ctx) +njs_regex_match_data(njs_regex_t *regex, njs_regex_generic_ctx_t *ctx) { size_t size; njs_uint_t ncaptures; @@ -222,7 +359,7 @@ njs_regex_match_data(njs_regex_t *regex, void 
njs_regex_match_data_free(njs_regex_match_data_t *match_data, - njs_regex_context_t *ctx) + njs_regex_generic_ctx_t *ctx) { ctx->private_free(match_data, ctx->memory_data); } @@ -244,25 +381,28 @@ njs_pcre_free(void *p) njs_int_t njs_regex_match(njs_regex_t *regex, const u_char *subject, size_t off, - size_t len, njs_regex_match_data_t *match_data, njs_regex_context_t *ctx) + size_t len, njs_regex_match_data_t *match_data, njs_trace_t *trace) { int ret; ret = pcre_exec(regex->code, regex->extra, (const char *) subject, len, off, 0, match_data->captures, match_data->ncaptures); - /* PCRE_ERROR_NOMATCH is -1. */ + if (ret <= PCRE_ERROR_NOMATCH) { + if (ret == PCRE_ERROR_NOMATCH) { + return NJS_DECLINED; + } - if (njs_slow_path(ret < PCRE_ERROR_NOMATCH)) { - njs_alert(ctx->trace, NJS_LEVEL_ERROR, "pcre_exec() failed: %d", ret); + njs_alert(trace, NJS_LEVEL_ERROR, "pcre_exec() failed: %d", ret); + return NJS_ERROR; } return ret; } -int * -njs_regex_captures(njs_regex_match_data_t *match_data) +size_t +njs_regex_capture(njs_regex_match_data_t *match_data, njs_uint_t n) { - return match_data->captures; + return match_data->captures[n]; } diff -r dedadba0ee87 -r 67ee2e4907a8 src/njs_pcre.h --- a/src/njs_pcre.h Wed Nov 10 14:50:16 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,40 +0,0 @@ - -/* - * Copyright (C) Igor Sysoev - * Copyright (C) NGINX, Inc. - */ - -#ifndef _NJS_PCRE_H_INCLUDED_ -#define _NJS_PCRE_H_INCLUDED_ - - -#include - - -#define NJS_REGEX_NOMATCH PCRE_ERROR_NOMATCH - - -struct njs_regex_s { - pcre *code; - pcre_extra *extra; - int ncaptures; - int backrefmax; - int nentries; - int entry_size; - char *entries; -}; - - -struct njs_regex_match_data_s { - int ncaptures; - /* - * Each capture is stored in 3 "int" vector elements. - * The N capture positions are stored in [n * 2] and [n * 2 + 1] elements. - * The 3rd bookkeeping elements are at the end of the vector. - * The first vector is for the "$0" capture and it is always allocated. 
- */ - int captures[3]; -}; - - -#endif /* _NJS_PCRE_H_INCLUDED_ */ diff -r dedadba0ee87 -r 67ee2e4907a8 src/njs_regex.h --- a/src/njs_regex.h Wed Nov 10 14:50:16 2021 +0000 +++ b/src/njs_regex.h Thu Nov 11 14:26:30 2021 +0000 @@ -7,39 +7,78 @@ #ifndef _NJS_REGEX_H_INCLUDED_ #define _NJS_REGEX_H_INCLUDED_ +#define NJS_REGEX_UNSET (size_t) (-1) + + +typedef enum { + NJS_REGEX_INVALID_FLAG = -1, + NJS_REGEX_NO_FLAGS = 0, + NJS_REGEX_GLOBAL = 1, + NJS_REGEX_IGNORE_CASE = 2, + NJS_REGEX_MULTILINE = 4, + NJS_REGEX_STICKY = 8, + NJS_REGEX_UTF8 = 16, +} njs_regex_flags_t; + typedef void *(*njs_pcre_malloc_t)(size_t size, void *memory_data); typedef void (*njs_pcre_free_t)(void *p, void *memory_data); -typedef struct njs_regex_s njs_regex_t; -typedef struct njs_regex_match_data_s njs_regex_match_data_t; - - typedef struct { njs_pcre_malloc_t private_malloc; njs_pcre_free_t private_free; void *memory_data; - njs_trace_t *trace; -} njs_regex_context_t; +} njs_regex_generic_ctx_t; + + +#define njs_regex_compile_ctx_t void + + +typedef struct { + void *code; + void *extra; + int ncaptures; + int backrefmax; + int nentries; + int entry_size; + char *entries; +} njs_regex_t; -NJS_EXPORT njs_regex_context_t * - njs_regex_context_create(njs_pcre_malloc_t private_malloc, +typedef struct { + int ncaptures; + /* + * Each capture is stored in 3 "int" vector elements. + * The N capture positions are stored in [n * 2] and [n * 2 + 1] elements. + * The 3rd bookkeeping elements are at the end of the vector. + * The first vector is for the "$0" capture and it is always allocated. 
+ */ + int captures[3]; +} njs_regex_match_data_t; + + +NJS_EXPORT njs_regex_generic_ctx_t * + njs_regex_generic_ctx_create(njs_pcre_malloc_t private_malloc, njs_pcre_free_t private_free, void *memory_data); +NJS_EXPORT njs_regex_compile_ctx_t *njs_regex_compile_ctx_create( + njs_regex_generic_ctx_t *ctx); +NJS_EXPORT njs_int_t njs_regex_escape(njs_mp_t *mp, njs_str_t *text); NJS_EXPORT njs_int_t njs_regex_compile(njs_regex_t *regex, u_char *source, - size_t len, njs_uint_t options, njs_regex_context_t *ctx); + size_t len, njs_regex_flags_t flags, njs_regex_compile_ctx_t *ctx, + njs_trace_t *trace); NJS_EXPORT njs_bool_t njs_regex_is_valid(njs_regex_t *regex); NJS_EXPORT njs_int_t njs_regex_named_captures(njs_regex_t *regex, njs_str_t *name, int n); NJS_EXPORT njs_regex_match_data_t *njs_regex_match_data(njs_regex_t *regex, - njs_regex_context_t *ctx); + njs_regex_generic_ctx_t *ctx); NJS_EXPORT void njs_regex_match_data_free(njs_regex_match_data_t *match_data, - njs_regex_context_t *ctx); + njs_regex_generic_ctx_t *ctx); NJS_EXPORT njs_int_t njs_regex_match(njs_regex_t *regex, const u_char *subject, size_t off, size_t len, njs_regex_match_data_t *match_data, - njs_regex_context_t *ctx); -NJS_EXPORT int *njs_regex_captures(njs_regex_match_data_t *match_data); + njs_trace_t *trace); +NJS_EXPORT size_t njs_regex_capture(njs_regex_match_data_t *match_data, + njs_uint_t n); #endif /* _NJS_REGEX_H_INCLUDED_ */ diff -r dedadba0ee87 -r 67ee2e4907a8 src/njs_regexp.c --- a/src/njs_regexp.c Wed Nov 10 14:50:16 2021 +0000 +++ b/src/njs_regexp.c Thu Nov 11 14:26:30 2021 +0000 @@ -20,7 +20,7 @@ static void njs_regexp_free(void *p, voi static njs_int_t njs_regexp_prototype_source(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused); static int njs_regexp_pattern_compile(njs_vm_t *vm, njs_regex_t *regex, - u_char *source, int options); + u_char *source, size_t len, njs_regex_flags_t flags); static u_char *njs_regexp_compile_trace_handler(njs_trace_t *trace, 
njs_trace_data_t *td, u_char *start); static u_char *njs_regexp_match_trace_handler(njs_trace_t *trace, @@ -37,21 +37,26 @@ const njs_value_t njs_string_lindex = n njs_int_t njs_regexp_init(njs_vm_t *vm) { - vm->regex_context = njs_regex_context_create(njs_regexp_malloc, - njs_regexp_free, vm->mem_pool); - if (njs_slow_path(vm->regex_context == NULL)) { + vm->regex_generic_ctx = njs_regex_generic_ctx_create(njs_regexp_malloc, + njs_regexp_free, + vm->mem_pool); + if (njs_slow_path(vm->regex_generic_ctx == NULL)) { njs_memory_error(vm); return NJS_ERROR; } - vm->single_match_data = njs_regex_match_data(NULL, vm->regex_context); + vm->regex_compile_ctx = njs_regex_compile_ctx_create(vm->regex_generic_ctx); + if (njs_slow_path(vm->regex_compile_ctx == NULL)) { + njs_memory_error(vm); + return NJS_ERROR; + } + + vm->single_match_data = njs_regex_match_data(NULL, vm->regex_generic_ctx); if (njs_slow_path(vm->single_match_data == NULL)) { njs_memory_error(vm); return NJS_ERROR; } - vm->regex_context->trace = &vm->trace; - return NJS_OK; } @@ -70,10 +75,10 @@ njs_regexp_free(void *p, void *memory_da } -static njs_regexp_flags_t +static njs_regex_flags_t njs_regexp_value_flags(njs_vm_t *vm, const njs_value_t *regexp) { - njs_regexp_flags_t flags; + njs_regex_flags_t flags; njs_regexp_pattern_t *pattern; flags = 0; @@ -81,19 +86,19 @@ njs_regexp_value_flags(njs_vm_t *vm, con pattern = njs_regexp_pattern(regexp); if (pattern->global) { - flags |= NJS_REGEXP_GLOBAL; + flags |= NJS_REGEX_GLOBAL; } if (pattern->ignore_case) { - flags |= NJS_REGEXP_IGNORE_CASE; + flags |= NJS_REGEX_IGNORE_CASE; } if (pattern->multiline) { - flags |= NJS_REGEXP_MULTILINE; + flags |= NJS_REGEX_MULTILINE; } if (pattern->sticky) { - flags |= NJS_REGEXP_STICKY; + flags |= NJS_REGEX_STICKY; } return flags; @@ -108,7 +113,7 @@ njs_regexp_constructor(njs_vm_t *vm, njs njs_int_t ret; njs_str_t string; njs_value_t source, *pattern, *flags; - njs_regexp_flags_t re_flags; + njs_regex_flags_t re_flags; 
pattern = njs_arg(args, nargs, 1); @@ -168,7 +173,7 @@ njs_regexp_constructor(njs_vm_t *vm, njs njs_int_t njs_regexp_create(njs_vm_t *vm, njs_value_t *value, u_char *start, - size_t length, njs_regexp_flags_t flags) + size_t length, njs_regex_flags_t flags) { njs_regexp_t *regexp; njs_regexp_pattern_t *pattern; @@ -200,143 +205,30 @@ njs_regexp_create(njs_vm_t *vm, njs_valu } -/* - * 1) PCRE with PCRE_JAVASCRIPT_COMPAT flag rejects regexps with - * lone closing square brackets as invalid. Whereas according - * to ES6: 11.8.5 it is a valid regexp expression. - * - * 2) escaping zero byte characters as "\u0000". - * - * Escaping it here as a workaround. - */ - -njs_inline njs_int_t -njs_regexp_escape(njs_vm_t *vm, njs_str_t *text) -{ - size_t brackets, zeros; - u_char *p, *dst, *start, *end; - njs_bool_t in; - - start = text->start; - end = text->start + text->length; - - in = 0; - zeros = 0; - brackets = 0; - - for (p = start; p < end; p++) { - - switch (*p) { - case '[': - in = 1; - break; - - case ']': - if (!in) { - brackets++; - } - - in = 0; - break; - - case '\\': - p++; - - if (p == end || *p != '\0') { - break; - } - - /* Fall through. */ - - case '\0': - zeros++; - break; - } - } - - if (!brackets && !zeros) { - return NJS_OK; - } - - text->length = text->length + brackets + zeros * njs_length("\\u0000"); - - text->start = njs_mp_alloc(vm->mem_pool, text->length); - if (njs_slow_path(text->start == NULL)) { - njs_memory_error(vm); - return NJS_ERROR; - } - - in = 0; - dst = text->start; - - for (p = start; p < end; p++) { - - switch (*p) { - case '[': - in = 1; - break; - - case ']': - if (!in) { - *dst++ = '\\'; - } - - in = 0; - break; - - case '\\': - *dst++ = *p++; - - if (p == end) { - goto done; - } - - if (*p != '\0') { - break; - } - - /* Fall through. 
*/ - - case '\0': - dst = njs_cpymem(dst, "\\u0000", 6); - continue; - } - - *dst++ = *p; - } - -done: - - text->length = dst - text->start; - - return NJS_OK; -} - - -njs_regexp_flags_t +njs_regex_flags_t njs_regexp_flags(u_char **start, u_char *end) { - u_char *p; - njs_regexp_flags_t flags, flag; + u_char *p; + njs_regex_flags_t flags, flag; - flags = NJS_REGEXP_NO_FLAGS; + flags = NJS_REGEX_NO_FLAGS; for (p = *start; p < end; p++) { switch (*p) { case 'g': - flag = NJS_REGEXP_GLOBAL; + flag = NJS_REGEX_GLOBAL; break; case 'i': - flag = NJS_REGEXP_IGNORE_CASE; + flag = NJS_REGEX_IGNORE_CASE; break; case 'm': - flag = NJS_REGEXP_MULTILINE; + flag = NJS_REGEX_MULTILINE; break; case 'y': - flag = NJS_REGEXP_STICKY; + flag = NJS_REGEX_STICKY; break; default: @@ -364,15 +256,15 @@ invalid: *start = p + 1; - return NJS_REGEXP_INVALID_FLAG; + return NJS_REGEX_INVALID_FLAG; } njs_regexp_pattern_t * njs_regexp_pattern_create(njs_vm_t *vm, u_char *start, size_t length, - njs_regexp_flags_t flags) + njs_regex_flags_t flags) { - int options, ret; + int ret; u_char *p, *end; size_t size; njs_str_t text; @@ -382,15 +274,16 @@ njs_regexp_pattern_create(njs_vm_t *vm, njs_regexp_pattern_t *pattern; size = 1; /* A trailing "/". 
*/ - size += ((flags & NJS_REGEXP_GLOBAL) != 0); - size += ((flags & NJS_REGEXP_IGNORE_CASE) != 0); - size += ((flags & NJS_REGEXP_MULTILINE) != 0); + size += ((flags & NJS_REGEX_GLOBAL) != 0); + size += ((flags & NJS_REGEX_IGNORE_CASE) != 0); + size += ((flags & NJS_REGEX_MULTILINE) != 0); text.start = start; text.length = length; - ret = njs_regexp_escape(vm, &text); + ret = njs_regex_escape(vm->mem_pool, &text); if (njs_slow_path(ret != NJS_OK)) { + njs_memory_error(vm); return NULL; } @@ -412,39 +305,27 @@ njs_regexp_pattern_create(njs_vm_t *vm, end = p; *p++ = '\0'; - pattern->global = ((flags & NJS_REGEXP_GLOBAL) != 0); + pattern->global = ((flags & NJS_REGEX_GLOBAL) != 0); if (pattern->global) { *p++ = 'g'; } -#ifdef PCRE_JAVASCRIPT_COMPAT - /* JavaScript compatibility has been introduced in PCRE-7.7. */ - options = PCRE_JAVASCRIPT_COMPAT; -#else - options = 0; -#endif - - pattern->ignore_case = ((flags & NJS_REGEXP_IGNORE_CASE) != 0); + pattern->ignore_case = ((flags & NJS_REGEX_IGNORE_CASE) != 0); if (pattern->ignore_case) { *p++ = 'i'; - options |= PCRE_CASELESS; } - pattern->multiline = ((flags & NJS_REGEXP_MULTILINE) != 0); + pattern->multiline = ((flags & NJS_REGEX_MULTILINE) != 0); if (pattern->multiline) { *p++ = 'm'; - options |= PCRE_MULTILINE; } - pattern->sticky = ((flags & NJS_REGEXP_STICKY) != 0); - if (pattern->sticky) { - options |= PCRE_ANCHORED; - } + pattern->sticky = ((flags & NJS_REGEX_STICKY) != 0); *p++ = '\0'; ret = njs_regexp_pattern_compile(vm, &pattern->regex[0], - &pattern->source[1], options); + &pattern->source[1], text.length, flags); if (njs_fast_path(ret >= 0)) { pattern->ncaptures = ret; @@ -454,7 +335,8 @@ njs_regexp_pattern_create(njs_vm_t *vm, } ret = njs_regexp_pattern_compile(vm, &pattern->regex[1], - &pattern->source[1], options | PCRE_UTF8); + &pattern->source[1], text.length, + flags | NJS_REGEX_UTF8); if (njs_fast_path(ret >= 0)) { if (njs_slow_path(njs_regex_is_valid(&pattern->regex[0]) @@ -519,7 +401,7 @@ fail: 
static int njs_regexp_pattern_compile(njs_vm_t *vm, njs_regex_t *regex, u_char *source, - int options) + size_t len, njs_regex_flags_t flags) { njs_int_t ret; njs_trace_handler_t handler; @@ -527,8 +409,8 @@ njs_regexp_pattern_compile(njs_vm_t *vm, handler = vm->trace.handler; vm->trace.handler = njs_regexp_compile_trace_handler; - /* Zero length means a zero-terminated string. */ - ret = njs_regex_compile(regex, source, 0, options, vm->regex_context); + ret = njs_regex_compile(regex, source, len, flags, vm->regex_compile_ctx, + &vm->trace); vm->trace.handler = handler; @@ -568,8 +450,7 @@ njs_regexp_match(njs_vm_t *vm, njs_regex handler = vm->trace.handler; vm->trace.handler = njs_regexp_match_trace_handler; - ret = njs_regex_match(regex, subject, off, len, match_data, - vm->regex_context); + ret = njs_regex_match(regex, subject, off, len, match_data, &vm->trace); vm->trace.handler = handler; @@ -742,19 +623,19 @@ njs_regexp_prototype_flag(njs_vm_t *vm, pattern = njs_regexp_pattern(this); switch (flag) { - case NJS_REGEXP_GLOBAL: + case NJS_REGEX_GLOBAL: yn = pattern->global; break; - case NJS_REGEXP_IGNORE_CASE: + case NJS_REGEX_IGNORE_CASE: yn = pattern->ignore_case; break; - case NJS_REGEXP_MULTILINE: + case NJS_REGEX_MULTILINE: yn = pattern->multiline; break; - case NJS_REGEXP_STICKY: + case NJS_REGEX_STICKY: default: yn = pattern->sticky; break; @@ -996,7 +877,8 @@ njs_regexp_builtin_exec(njs_vm_t *vm, nj goto not_found; } - match_data = njs_regex_match_data(&pattern->regex[type], vm->regex_context); + match_data = njs_regex_match_data(&pattern->regex[type], + vm->regex_generic_ctx); if (njs_slow_path(match_data == NULL)) { njs_memory_error(vm); return NJS_ERROR; @@ -1023,9 +905,8 @@ njs_regexp_builtin_exec(njs_vm_t *vm, nj return NJS_OK; } - if (njs_slow_path(ret != NJS_REGEX_NOMATCH)) { - njs_regex_match_data_free(match_data, vm->regex_context); - + if (njs_slow_path(ret == NJS_ERROR)) { + njs_regex_match_data_free(match_data, vm->regex_generic_ctx); return 
NJS_ERROR; } @@ -1050,8 +931,8 @@ static njs_array_t * njs_regexp_exec_result(njs_vm_t *vm, njs_value_t *r, njs_utf8_t utf8, njs_string_prop_t *string, njs_regex_match_data_t *match_data) { - int *captures; u_char *start; + size_t c; int32_t size, length; uint32_t index; njs_int_t ret; @@ -1076,14 +957,13 @@ njs_regexp_exec_result(njs_vm_t *vm, njs goto fail; } - captures = njs_regex_captures(match_data); - for (i = 0; i < pattern->ncaptures; i++) { n = 2 * i; + c = njs_regex_capture(match_data, n); - if (captures[n] != -1) { - start = &string->start[captures[n]]; - size = captures[n + 1] - captures[n]; + if (c != NJS_REGEX_UNSET) { + start = &string->start[c]; + size = njs_regex_capture(match_data, n + 1) - c; if (utf8 == NJS_STRING_UTF8) { length = njs_max(njs_utf8_length(start, size), 0); @@ -1109,21 +989,25 @@ njs_regexp_exec_result(njs_vm_t *vm, njs goto fail; } + c = njs_regex_capture(match_data, 0); + if (utf8 == NJS_STRING_UTF8) { - index = njs_string_index(string, captures[0]); + index = njs_string_index(string, c); } else { - index = captures[0]; + index = c; } njs_set_number(&prop->value, index); if (pattern->global || pattern->sticky) { + c = njs_regex_capture(match_data, 1); + if (utf8 == NJS_STRING_UTF8) { - index = njs_string_index(string, captures[1]); + index = njs_string_index(string, c); } else { - index = captures[1]; + index = c; } njs_set_number(&value, index); @@ -1226,7 +1110,7 @@ fail: done: - njs_regex_match_data_free(match_data, vm->regex_context); + njs_regex_match_data_free(match_data, vm->regex_generic_ctx); return (ret == NJS_OK) ? 
array : NULL; } @@ -1919,7 +1803,7 @@ static const njs_object_prop_t njs_rege .name = njs_string("global"), .value = njs_value(NJS_INVALID, 1, NAN), .getter = njs_native_function2(njs_regexp_prototype_flag, 0, - NJS_REGEXP_GLOBAL), + NJS_REGEX_GLOBAL), .setter = njs_value(NJS_UNDEFINED, 0, NAN), From xeioex at nginx.com Thu Nov 11 14:31:02 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 11 Nov 2021 14:31:02 +0000 Subject: [njs] RegExp: improved source string treatment. Message-ID: details: https://hg.nginx.org/njs/rev/cf9e73e05aaf branches: changeset: 1744:cf9e73e05aaf user: Dmitry Volyntsev date: Thu Nov 11 14:26:41 2021 +0000 description: RegExp: improved source string treatment. Previously, njs_regexp_pattern_create() in addition to a pattern compilation made a string representation for a RegExp which was returned by RegExp.prototype.toString() as is. After 02444445df29 (0.6.0), RegExp.prototype.toString() was implemented according to the spec, and since then it creates a RegExp string on the fly. This patch removes the extra code which was left. In addition, as a source string may not be a valid UTF-8 string (in RegExp literals), RegExp.prototype.toString() now ensures that a valid UTF-8 string is returned. diffstat: src/njs_regexp.c | 99 ++++++++++++++++++++++++----------------------- src/njs_regexp_pattern.h | 11 +---- 2 files changed, 52 insertions(+), 58 deletions(-) diffs (213 lines): diff -r 67ee2e4907a8 -r cf9e73e05aaf src/njs_regexp.c --- a/src/njs_regexp.c Thu Nov 11 14:26:30 2021 +0000 +++ b/src/njs_regexp.c Thu Nov 11 14:26:41 2021 +0000 @@ -265,7 +265,7 @@ njs_regexp_pattern_create(njs_vm_t *vm, njs_regex_flags_t flags) { int ret; - u_char *p, *end; + u_char *p; size_t size; njs_str_t text; njs_uint_t n; @@ -273,11 +273,6 @@ njs_regexp_pattern_create(njs_vm_t *vm, njs_regexp_group_t *group; njs_regexp_pattern_t *pattern; - size = 1; /* A trailing "/". 
*/ - size += ((flags & NJS_REGEX_GLOBAL) != 0); - size += ((flags & NJS_REGEX_IGNORE_CASE) != 0); - size += ((flags & NJS_REGEX_MULTILINE) != 0); - text.start = start; text.length = length; @@ -287,45 +282,28 @@ njs_regexp_pattern_create(njs_vm_t *vm, return NULL; } - pattern = njs_mp_zalloc(vm->mem_pool, sizeof(njs_regexp_pattern_t) + 1 - + text.length + size + 1); + pattern = njs_mp_alloc(vm->mem_pool, sizeof(njs_regexp_pattern_t) + + text.length + 1); if (njs_slow_path(pattern == NULL)) { njs_memory_error(vm); return NULL; } - pattern->flags = size; + njs_memzero(pattern, sizeof(njs_regexp_pattern_t)); p = (u_char *) pattern + sizeof(njs_regexp_pattern_t); pattern->source = p; - *p++ = '/'; - p = memcpy(p, text.start, text.length); - p += text.length; - end = p; + p = njs_cpymem(p, text.start, text.length); *p++ = '\0'; pattern->global = ((flags & NJS_REGEX_GLOBAL) != 0); - if (pattern->global) { - *p++ = 'g'; - } - pattern->ignore_case = ((flags & NJS_REGEX_IGNORE_CASE) != 0); - if (pattern->ignore_case) { - *p++ = 'i'; - } - pattern->multiline = ((flags & NJS_REGEX_MULTILINE) != 0); - if (pattern->multiline) { - *p++ = 'm'; - } - pattern->sticky = ((flags & NJS_REGEX_STICKY) != 0); - *p++ = '\0'; - ret = njs_regexp_pattern_compile(vm, &pattern->regex[0], - &pattern->source[1], text.length, flags); + &pattern->source[0], text.length, flags); if (njs_fast_path(ret >= 0)) { pattern->ncaptures = ret; @@ -335,7 +313,7 @@ njs_regexp_pattern_create(njs_vm_t *vm, } ret = njs_regexp_pattern_compile(vm, &pattern->regex[1], - &pattern->source[1], text.length, + &pattern->source[0], text.length, flags | NJS_REGEX_UTF8); if (njs_fast_path(ret >= 0)) { @@ -362,8 +340,6 @@ njs_regexp_pattern_create(njs_vm_t *vm, goto fail; } - *end = '/'; - pattern->ngroups = njs_regex_named_captures(regex, NULL, 0); if (pattern->ngroups != 0) { @@ -651,9 +627,7 @@ static njs_int_t njs_regexp_prototype_source(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { - u_char 
*source; - int32_t length; - uint32_t size; + njs_str_t src; njs_value_t *this; njs_regexp_pattern_t *pattern; @@ -674,13 +648,11 @@ njs_regexp_prototype_source(njs_vm_t *vm } pattern = njs_regexp_pattern(this); - /* Skip starting "/". */ - source = pattern->source + 1; - size = njs_strlen(source) - pattern->flags; - length = njs_utf8_length(source, size); + src.start = pattern->source; + src.length = njs_strlen(pattern->source); - return njs_regexp_string_create(vm, &vm->retval, source, size, length); + return njs_string_decode_utf8(vm, &vm->retval, &src); } @@ -756,25 +728,56 @@ njs_int_t njs_regexp_to_string(njs_vm_t *vm, njs_value_t *retval, const njs_value_t *value) { - u_char *p, *source; + u_char *p, *start; + size_t size, extra; int32_t length; - uint32_t size; + njs_str_t s; njs_regexp_pattern_t *pattern; + njs_unicode_decode_t ctx; pattern = njs_regexp_pattern(value); - source = pattern->source; - size = njs_strlen(source); - length = njs_utf8_length(source, size); + s.start = pattern->source; + s.length = njs_strlen(pattern->source); + + length = njs_decode_utf8_length(&s, &size); - length = (length >= 0) ? (length + (pattern->sticky != 0)): 0; + extra = njs_length("//"); + extra += (pattern->global != 0); + extra += (pattern->ignore_case != 0); + extra += (pattern->multiline != 0); + extra += (pattern->sticky != 0); - p = njs_string_alloc(vm, retval, size + (pattern->sticky != 0), length); - if (njs_slow_path(p == NULL)) { + size += extra; + + length = (length >= 0) ? 
(length + extra) : 0; + + start = njs_string_alloc(vm, retval, size, length); + if (njs_slow_path(start == NULL)) { return NJS_ERROR; } - p = njs_cpymem(p, source, size); + njs_utf8_decode_init(&ctx); + + p = start; + + *p++ = '/'; + + p = njs_utf8_stream_encode(&ctx, s.start, &s.start[s.length], p, 1, 0); + + *p++ = '/'; + + if (pattern->global) { + *p++ = 'g'; + } + + if (pattern->ignore_case) { + *p++ = 'i'; + } + + if (pattern->multiline) { + *p++ = 'm'; + } if (pattern->sticky) { *p++ = 'y'; diff -r 67ee2e4907a8 -r cf9e73e05aaf src/njs_regexp_pattern.h --- a/src/njs_regexp_pattern.h Thu Nov 11 14:26:30 2021 +0000 +++ b/src/njs_regexp_pattern.h Thu Nov 11 14:26:41 2021 +0000 @@ -20,21 +20,12 @@ typedef struct njs_regexp_group_s njs_r struct njs_regexp_pattern_s { njs_regex_t regex[2]; - /* - * A pattern source is used by RegExp.prototype.toString() method and - * RegExp.prototype.source and RegExp.prototype.flags accessor properties. - * So it is is stored in form "/pattern/flags" - * and as zero-terminated C string but not as value, because retrieving - * it is very seldom operation. To get just a pattern string for - * RegExp.source property a length of flags part "/flags" is stored - * in flags field. - */ + /* A zero-terminated C string. */ u_char *source; uint16_t ncaptures; uint16_t ngroups; - uint8_t flags; /* 2 bits */ uint8_t global; /* 1 bit */ uint8_t ignore_case; /* 1 bit */ uint8_t multiline; /* 1 bit */ From xeioex at nginx.com Thu Nov 11 14:31:04 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 11 Nov 2021 14:31:04 +0000 Subject: [njs] Introduced PCRE2 RegExp backend. Message-ID: details: https://hg.nginx.org/njs/rev/728c3741f556 branches: changeset: 1745:728c3741f556 user: Dmitry Volyntsev date: Thu Nov 11 14:27:15 2021 +0000 description: Introduced PCRE2 RegExp backend. 
diffstat: auto/options | 4 + auto/pcre | 76 ++++++++++---- auto/sources | 15 ++- nginx/config.make | 2 +- src/njs_pcre2.c | 240 +++++++++++++++++++++++++++++++++++++++++++++++ src/njs_regex.h | 28 +++- src/test/njs_unit_test.c | 98 +++++++++++++----- 7 files changed, 399 insertions(+), 64 deletions(-) diffs (701 lines): diff -r cf9e73e05aaf -r 728c3741f556 auto/options --- a/auto/options Thu Nov 11 14:26:41 2021 +0000 +++ b/auto/options Thu Nov 11 14:27:15 2021 +0000 @@ -11,6 +11,8 @@ NJS_DEBUG_MEMORY=NO NJS_ADDRESS_SANITIZER=NO NJS_TEST262=YES +NJS_TRY_PCRE2=YES + NJS_CONFIGURE_OPTIONS= for njs_option @@ -31,6 +33,8 @@ do --debug-memory=*) NJS_DEBUG_MEMORY="$value" ;; --test262=*) NJS_TEST262="$value" ;; + --no-pcre2) NJS_TRY_PCRE2=NO ;; + --help) . auto/help exit 0 diff -r cf9e73e05aaf -r 728c3741f556 auto/pcre --- a/auto/pcre Thu Nov 11 14:26:41 2021 +0000 +++ b/auto/pcre Thu Nov 11 14:27:15 2021 +0000 @@ -2,33 +2,67 @@ # Copyright (C) Igor Sysoev # Copyright (C) NGINX, Inc. +njs_found=no +NJS_HAVE_PCRE2=NO -NJS_PCRE_CFLAGS= -NJS_PCRE_LIB= +if [ $NJS_TRY_PCRE2 = YES ]; then + if /bin/sh -c "(pcre2-config --version)" >> $NJS_AUTOCONF_ERR 2>&1; then + + NJS_PCRE_CFLAGS=`pcre2-config --cflags` + NJS_PCRE_LIB=`pcre2-config --libs8` -njs_found=no + njs_feature="PCRE2 library" + njs_feature_name=NJS_HAVE_PCRE2 + njs_feature_run=no + njs_feature_incs="-DPCRE2_CODE_UNIT_WIDTH=8 $NJS_PCRE_CFLAGS" + njs_feature_libs=$NJS_PCRE_LIB + njs_feature_test="#include -if /bin/sh -c "(pcre-config --version)" >> $NJS_AUTOCONF_ERR 2>&1; then + int main(void) { + pcre2_code *re; - NJS_PCRE_CFLAGS=`pcre-config --cflags` - NJS_PCRE_LIB=`pcre-config --libs` + re = pcre2_compile((PCRE2_SPTR)\"\", + PCRE2_ZERO_TERMINATED, 0, + NULL, NULL, NULL); + return (re == NULL); + }" + + . 
auto/feature - njs_feature="PCRE library" - njs_feature_name=NJS_HAVE_PCRE - njs_feature_run=no - njs_feature_incs=$NJS_PCRE_CFLAGS - njs_feature_libs=$NJS_PCRE_LIB - njs_feature_test="#include + if [ $njs_found = yes ]; then + NJS_HAVE_PCRE2=YES + echo " + PCRE2 version: `pcre2-config --version`" + fi + fi +fi + +if [ $njs_found = no ]; then + if /bin/sh -c "(pcre-config --version)" >> $NJS_AUTOCONF_ERR 2>&1; then + + NJS_PCRE_CFLAGS=`pcre-config --cflags` + NJS_PCRE_LIB=`pcre-config --libs` - int main(void) { - pcre *re; + njs_feature="PCRE library" + njs_feature_name=NJS_HAVE_PCRE + njs_feature_run=no + njs_feature_incs=$NJS_PCRE_CFLAGS + njs_feature_libs=$NJS_PCRE_LIB + njs_feature_test="#include + + int main(void) { + pcre *re; - re = pcre_compile(NULL, 0, NULL, 0, NULL); - if (re == NULL) - return 1; - return 0; - }" - . auto/feature + re = pcre_compile(NULL, 0, NULL, 0, NULL); + if (re == NULL) + return 1; + return 0; + }" + . auto/feature + + if [ $njs_found = yes ]; then + echo " + PCRE version: `pcre-config --version`" + fi + fi fi if [ $njs_found = no ]; then @@ -37,5 +71,3 @@ if [ $njs_found = no ]; then echo exit 1; fi - -echo " + PCRE version: `pcre-config --version`" diff -r cf9e73e05aaf -r 728c3741f556 auto/sources --- a/auto/sources Thu Nov 11 14:26:41 2021 +0000 +++ b/auto/sources Thu Nov 11 14:27:15 2021 +0000 @@ -16,7 +16,6 @@ NJS_LIB_SRCS=" \ src/njs_md5.c \ src/njs_sha1.c \ src/njs_sha2.c \ - src/njs_pcre.c \ src/njs_time.c \ src/njs_file.c \ src/njs_malloc.c \ @@ -64,6 +63,14 @@ NJS_LIB_SRCS=" \ src/njs_async.c \ " +NJS_LIB_PCRE_SRCS=" \ + src/njs_pcre.c \ +" + +NJS_LIB_PCRE2_SRCS=" \ + src/njs_pcre2.c \ +" + NJS_LIB_TEST_SRCS=" \ src/test/lvlhsh_unit_test.c \ src/test/random_unit_test.c \ @@ -76,6 +83,12 @@ NJS_TEST_SRCS=" \ src/test/njs_benchmark.c \ " +if [ "$NJS_HAVE_PCRE2" = "YES" ]; then + NJS_LIB_SRCS="$NJS_LIB_SRCS $NJS_LIB_PCRE2_SRCS" +else + NJS_LIB_SRCS="$NJS_LIB_SRCS $NJS_LIB_PCRE_SRCS" +fi + NJS_TS_SRCS=$(find ts/ -name "*.d.ts" 
-o -name "*.json") NJS_TEST_TS_SRCS=$(find test/ts/ -name "*.ts" -o -name "*.json") diff -r cf9e73e05aaf -r 728c3741f556 nginx/config.make --- a/nginx/config.make Thu Nov 11 14:26:41 2021 +0000 +++ b/nginx/config.make Thu Nov 11 14:27:15 2021 +0000 @@ -3,7 +3,7 @@ cat << END $ngx_addon_dir/../build/libnjs.a: $NGX_MAKEFILE cd $ngx_addon_dir/.. \\ && if [ -f build/Makefile ]; then \$(MAKE) clean; fi \\ - && CFLAGS="\$(CFLAGS)" CC="\$(CC)" ./configure \\ + && CFLAGS="\$(CFLAGS)" CC="\$(CC)" ./configure --no-pcre2 \\ && \$(MAKE) END diff -r cf9e73e05aaf -r 728c3741f556 src/njs_pcre2.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/njs_pcre2.c Thu Nov 11 14:27:15 2021 +0000 @@ -0,0 +1,240 @@ + +/* + * Copyright (C) Dmitry Volyntsev + * Copyright (C) NGINX, Inc. + */ + + +#include + +#define PCRE2_CODE_UNIT_WIDTH 8 +#include + + +static const u_char* njs_regex_pcre2_error(int errcode, u_char buffer[128]); + + +njs_regex_generic_ctx_t * +njs_regex_generic_ctx_create(njs_pcre_malloc_t private_malloc, + njs_pcre_free_t private_free, void *memory_data) +{ + return pcre2_general_context_create(private_malloc, private_free, + memory_data); +} + + +njs_regex_compile_ctx_t * +njs_regex_compile_ctx_create(njs_regex_generic_ctx_t *ctx) +{ + return pcre2_compile_context_create(ctx); +} + + +njs_int_t +njs_regex_escape(njs_mp_t *mp, njs_str_t *text) +{ + return NJS_OK; +} + + +njs_int_t +njs_regex_compile(njs_regex_t *regex, u_char *source, size_t len, + njs_regex_flags_t flags, njs_regex_compile_ctx_t *ctx, njs_trace_t *trace) +{ + int ret; + u_char *error; + size_t erroff; + njs_uint_t options; + u_char errstr[128]; + + options = PCRE2_ALT_BSUX | PCRE2_MATCH_UNSET_BACKREF; + + if ((flags & NJS_REGEX_IGNORE_CASE)) { + options |= PCRE2_CASELESS; + } + + if ((flags & NJS_REGEX_MULTILINE)) { + options |= PCRE2_MULTILINE; + } + + if ((flags & NJS_REGEX_STICKY)) { + options |= PCRE2_ANCHORED; + } + + if ((flags & NJS_REGEX_UTF8)) { + options |= PCRE2_UTF; + } + + regex->code = 
pcre2_compile(source, len, options, &ret, &erroff, ctx); + + if (njs_slow_path(regex->code == NULL)) { + error = &source[erroff]; + + njs_alert(trace, NJS_LEVEL_ERROR, + "pcre_compile2(\"%s\") failed: %s at \"%s\"", + source, njs_regex_pcre2_error(ret, errstr), error); + + return NJS_DECLINED; + } + + ret = pcre2_pattern_info(regex->code, PCRE2_INFO_CAPTURECOUNT, + ®ex->ncaptures); + + if (njs_slow_path(ret < 0)) { + njs_alert(trace, NJS_LEVEL_ERROR, + "pcre2_pattern_info(\"%s\", PCRE2_INFO_CAPTURECOUNT) failed: %s", + source, njs_regex_pcre2_error(ret, errstr)); + + return NJS_ERROR; + } + + ret = pcre2_pattern_info(regex->code, PCRE2_INFO_BACKREFMAX, + ®ex->backrefmax); + + if (njs_slow_path(ret < 0)) { + njs_alert(trace, NJS_LEVEL_ERROR, + "pcre2_pattern_info(\"%s\", PCRE2_INFO_BACKREFMAX) failed: %s", + source, njs_regex_pcre2_error(ret, errstr)); + + return NJS_ERROR; + } + + /* Reserve additional elements for the first "$0" capture. */ + regex->ncaptures++; + + if (regex->ncaptures > 1) { + ret = pcre2_pattern_info(regex->code, PCRE2_INFO_NAMECOUNT, + ®ex->nentries); + + if (njs_slow_path(ret < 0)) { + njs_alert(trace, NJS_LEVEL_ERROR, + "pcre2_pattern_info(\"%s\", PCRE2_INFO_NAMECOUNT) failed: %s", + source, njs_regex_pcre2_error(ret, errstr)); + + return NJS_ERROR; + } + + if (regex->nentries != 0) { + ret = pcre2_pattern_info(regex->code, PCRE2_INFO_NAMEENTRYSIZE, + ®ex->entry_size); + + if (njs_slow_path(ret < 0)) { + njs_alert(trace, NJS_LEVEL_ERROR, + "pcre2_pattern_info(\"%s\", PCRE2_INFO_NAMEENTRYSIZE)" + " failed: %s", source, + njs_regex_pcre2_error(ret, errstr)); + + return NJS_ERROR; + } + + ret = pcre2_pattern_info(regex->code, PCRE2_INFO_NAMETABLE, + ®ex->entries); + + if (njs_slow_path(ret < 0)) { + njs_alert(trace, NJS_LEVEL_ERROR, + "pcre2_pattern_info(\"%s\", PCRE2_INFO_NAMETABLE) " + "failed: %s", source, + njs_regex_pcre2_error(ret, errstr)); + + return NJS_ERROR; + } + } + } + + return NJS_OK; +} + + +njs_bool_t 
+njs_regex_is_valid(njs_regex_t *regex) +{ + return (regex->code != NULL); +} + + +njs_int_t +njs_regex_named_captures(njs_regex_t *regex, njs_str_t *name, int n) +{ + char *entry; + + if (name == NULL) { + return regex->nentries; + } + + if (n >= regex->nentries) { + return NJS_ERROR; + } + + entry = regex->entries + regex->entry_size * n; + + name->start = (u_char *) entry + 2; + name->length = njs_strlen(name->start); + + return (entry[0] << 8) + entry[1]; +} + + +njs_regex_match_data_t * +njs_regex_match_data(njs_regex_t *regex, njs_regex_generic_ctx_t *ctx) +{ + if (regex != NULL) { + return pcre2_match_data_create_from_pattern(regex->code, ctx); + } + + return pcre2_match_data_create(0, ctx); +} + + +void +njs_regex_match_data_free(njs_regex_match_data_t *match_data, + njs_regex_generic_ctx_t *unused) +{ + pcre2_match_data_free(match_data); +} + + +njs_int_t +njs_regex_match(njs_regex_t *regex, const u_char *subject, size_t off, + size_t len, njs_regex_match_data_t *match_data, njs_trace_t *trace) +{ + int ret; + u_char errstr[128]; + + ret = pcre2_match(regex->code, subject, len, off, 0, match_data, NULL); + + if (ret < 0) { + if (ret == PCRE2_ERROR_NOMATCH) { + return NJS_DECLINED; + } + + njs_alert(trace, NJS_LEVEL_ERROR, "pcre2_match() failed: %s", + njs_regex_pcre2_error(ret, errstr)); + return NJS_ERROR; + } + + return ret; +} + + +size_t +njs_regex_capture(njs_regex_match_data_t *match_data, njs_uint_t n) +{ + size_t c; + + c = pcre2_get_ovector_pointer(match_data)[n]; + + if (c == PCRE2_UNSET) { + return NJS_REGEX_UNSET; + } + + return c; +} + + +static const u_char * +njs_regex_pcre2_error(int errcode, u_char buffer[128]) +{ + pcre2_get_error_message(errcode, buffer, 128); + + return buffer; +} diff -r cf9e73e05aaf -r 728c3741f556 src/njs_regex.h --- a/src/njs_regex.h Thu Nov 11 14:26:41 2021 +0000 +++ b/src/njs_regex.h Thu Nov 11 14:27:15 2021 +0000 @@ -26,16 +26,6 @@ typedef void (*njs_pcre_free_t)(void *p, typedef struct { - njs_pcre_malloc_t 
private_malloc; - njs_pcre_free_t private_free; - void *memory_data; -} njs_regex_generic_ctx_t; - - -#define njs_regex_compile_ctx_t void - - -typedef struct { void *code; void *extra; int ncaptures; @@ -46,6 +36,22 @@ typedef struct { } njs_regex_t; +#ifdef NJS_HAVE_PCRE2 + +#define njs_regex_generic_ctx_t void +#define njs_regex_compile_ctx_t void +#define njs_regex_match_data_t void + +#else + +typedef struct { + njs_pcre_malloc_t private_malloc; + njs_pcre_free_t private_free; + void *memory_data; +} njs_regex_generic_ctx_t; + +#define njs_regex_compile_ctx_t void + typedef struct { int ncaptures; /* @@ -57,6 +63,8 @@ typedef struct { int captures[3]; } njs_regex_match_data_t; +#endif + NJS_EXPORT njs_regex_generic_ctx_t * njs_regex_generic_ctx_create(njs_pcre_malloc_t private_malloc, diff -r cf9e73e05aaf -r 728c3741f556 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Thu Nov 11 14:26:41 2021 +0000 +++ b/src/test/njs_unit_test.c Thu Nov 11 14:27:15 2021 +0000 @@ -6,7 +6,9 @@ #include +#ifndef NJS_HAVE_PCRE2 #include +#endif #include "njs_externals_test.h" @@ -19,6 +21,12 @@ #define njs_evar(little, big) big #endif +#ifdef NJS_HAVE_PCRE2 +#define njs_pcre_var(pcre2, pcre) pcre2 +#else +#define njs_pcre_var(pcre2, pcre) pcre +#endif + #define njs_declare_sparse_array(nm, sz) \ "var " nm " = Array(" njs_stringify(sz) "); " \ @@ -8632,9 +8640,6 @@ static njs_unit_test_t njs_test[] = { njs_str("String.bytesFrom([255,149,15,97,95]).replace(/_/g, 'X')[4]"), njs_str("X") }, - { njs_str("/]/"), - njs_str("/\\]/") }, - { njs_str("/=/"), njs_str("/=/") }, @@ -8647,12 +8652,13 @@ static njs_unit_test_t njs_test[] = { njs_str("/\\s*;\\s*/"), njs_str("/\\s*;\\s*/") }, +#ifndef NJS_HAVE_PCRE2 + { njs_str("/]/"), + njs_str("/\\]/") }, + { njs_str("RegExp(']')"), njs_str("/\\]/") }, - { njs_str("RegExp('[\\\\')"), - njs_str("SyntaxError: pcre_compile(\"[\\\") failed: \\ at end of pattern") }, - { njs_str("RegExp('[\\\\\\\\]]')"), njs_str("/[\\\\]\\]/") }, @@ -8673,6 
+8679,12 @@ static njs_unit_test_t njs_test[] = { njs_str("/]cd/"), njs_str("/\\]cd/") }, +#endif + + { njs_str("RegExp('[\\\\')"), + njs_str("SyntaxError: " + njs_pcre_var("pcre_compile2(\"[\\\") failed: \\ at end of pattern at \"\"", + "pcre_compile(\"[\\\") failed: \\ at end of pattern")) }, { njs_str("RegExp('\\\\0').source[1]"), njs_str("0") }, @@ -10698,8 +10710,10 @@ static njs_unit_test_t njs_test[] = { njs_str("/a\r/"), njs_str("SyntaxError: Unterminated RegExp \"/a\" in 1") }, +#ifndef NJS_HAVE_PCRE2 { njs_str("/a\\q/"), njs_str("/a\\q/") }, +#endif { njs_str("/\\\\/"), njs_str("/\\\\/") }, @@ -10750,17 +10764,23 @@ static njs_unit_test_t njs_test[] = ".every(ch=>/[\\]\\[!\"#$%&'()*+,.\\/:;<=>?@\\^_`{|}-]/.test(ch))"), njs_str("true") }, +#ifndef NJS_HAVE_PCRE2 { njs_str("/a\\q/.test('a\\q')"), njs_str("true") }, +#endif { njs_str("/(\\.(?!com|org)|\\/)/.test('ah.info')"), njs_str("true") }, { njs_str("/(/.test('')"), - njs_str("SyntaxError: pcre_compile(\"(\") failed: missing ) in 1") }, + njs_str("SyntaxError: " + njs_pcre_var("pcre_compile2(\"(\") failed: missing closing parenthesis at \"\" in 1", + "pcre_compile(\"(\") failed: missing ) in 1")) }, { njs_str("/+/.test('')"), - njs_str("SyntaxError: pcre_compile(\"+\") failed: nothing to repeat at \"+\" in 1") }, + njs_str("SyntaxError: " + njs_pcre_var("pcre_compile2(\"+\") failed: quantifier does not follow a repeatable item at \"+\" in 1", + "pcre_compile(\"+\") failed: nothing to repeat at \"+\" in 1")) }, { njs_str("/^$/.test('')"), njs_str("true") }, @@ -11040,17 +11060,27 @@ static njs_unit_test_t njs_test[] = njs_str("true") }, { njs_str("new RegExp('[')"), - njs_str("SyntaxError: pcre_compile(\"[\") failed: missing terminating ] for character class") }, + njs_str("SyntaxError: " + njs_pcre_var("pcre_compile2(\"[\") failed: missing terminating ] for character class at \"\"", + "pcre_compile(\"[\") failed: missing terminating ] for character class")) }, { njs_str("new RegExp('['.repeat(16))"), - 
njs_str("SyntaxError: pcre_compile(\"[[[[[[[[[[[[[[[[\") failed: missing terminating ] for character class") }, + njs_str("SyntaxError: " + njs_pcre_var("pcre_compile2(\"[[[[[[[[[[[[[[[[\") failed: missing terminating ] for character class at \"\"", + "pcre_compile(\"[[[[[[[[[[[[[[[[\") failed: missing terminating ] for character class")) }, { njs_str("new RegExp('\\\\')"), - njs_str("SyntaxError: pcre_compile(\"\\\") failed: \\ at end of pattern") }, + njs_str("SyntaxError: " + njs_pcre_var("pcre_compile2(\"\\\") failed: \\ at end of pattern at \"\"", + "pcre_compile(\"\\\") failed: \\ at end of pattern")) }, { njs_str("[0].map(RegExp().toString)"), njs_str("TypeError: \"this\" argument is not an object") }, + { njs_str("var arr = /\\1(A)/.exec('AA');" + "[arr[0], arr[1]]"), + njs_str("A,A") }, + /* Non-standard ECMA-262 features. */ /* 0x10400 is not a surrogate pair of 0xD801 and 0xDC00. */ @@ -21230,7 +21260,7 @@ static njs_unit_test_t njs_tz_test[] = }; -static njs_unit_test_t njs_regexp_test[] = +static njs_unit_test_t njs_regexp_optional_tests[] = { { njs_str("/[\\\\u02E0-\\\\u02E4]/"), njs_str("/[\\\\u02E0-\\\\u02E4]/") }, @@ -21262,6 +21292,7 @@ static njs_unit_test_t njs_regexp_test[ { njs_str("RegExp('\x00').test('\0')"), njs_str("true") }, +#ifndef NJS_HAVE_PCRE2 { njs_str("RegExp('\x00\\\\x00').source"), njs_str("\\u0000\\x00") }, @@ -21270,6 +21301,7 @@ static njs_unit_test_t njs_regexp_test[ { njs_str("RegExp('\\\\\\0').source"), njs_str("\\\\u0000") }, +#endif { njs_str("RegExp('[\0]').test('\0')"), njs_str("true") }, @@ -22154,9 +22186,11 @@ static njs_int_t njs_regexp_optional_test(njs_unit_test_t tests[], size_t num, njs_str_t *name, njs_opts_t *opts, njs_stat_t *stat) { + njs_bool_t safe; + +#ifndef NJS_HAVE_PCRE2 int erroff; pcre *re1, *re2; - njs_int_t ret; const char *errstr; /* @@ -22169,6 +22203,10 @@ njs_regexp_optional_test(njs_unit_test_t re1 = pcre_compile("/[\\u0410]/", PCRE_JAVASCRIPT_COMPAT, &errstr, &erroff, NULL); + if (re1 != 
NULL) { + pcre_free(re1); + } + /* * pcre-7.8 fails to compile unicode escape codes inside square brackets * even when PCRE_UTF8 option is provided. @@ -22176,24 +22214,24 @@ njs_regexp_optional_test(njs_unit_test_t re2 = pcre_compile("/[\\u0410]/", PCRE_JAVASCRIPT_COMPAT | PCRE_UTF8, &errstr, &erroff, NULL); - if (re1 == NULL && re2 != NULL) { - ret = njs_unit_test(tests, num, name, opts, stat); - if (ret != NJS_OK) { - return ret; - } - - } else { - njs_printf("njs unicode regexp tests skipped, libpcre fails\n"); - } - - if (re1 != NULL) { - pcre_free(re1); - } - if (re2 != NULL) { pcre_free(re2); } + safe = (re1 == NULL && re2 != NULL); + +#else + + safe = 1; + +#endif + + if (safe) { + return njs_unit_test(tests, num, name, opts, stat); + } + + njs_printf("regexp optional tests skipped\n"); + return NJS_OK; } @@ -23331,10 +23369,10 @@ static njs_test_suite_t njs_suites[] = njs_nitems(njs_tz_test), njs_timezone_optional_test }, - { njs_str("regexp"), + { njs_str("regexp optional"), { .repeat = 1, .unsafe = 1 }, - njs_regexp_test, - njs_nitems(njs_regexp_test), + njs_regexp_optional_tests, + njs_nitems(njs_regexp_optional_tests), njs_regexp_optional_test }, { njs_str("vm_json"), From pluknet at nginx.com Thu Nov 11 15:17:43 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 11 Nov 2021 18:17:43 +0300 Subject: [PATCH 1 of 2] QUIC: allowed main QUIC connection for some operations In-Reply-To: <8b049432ef2dcdb8d1a8.1634823659@arut-laptop> References: <8b049432ef2dcdb8d1a8.1634823659@arut-laptop> Message-ID: > On 21 Oct 2021, at 16:40, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1634219818 -10800 > # Thu Oct 14 16:56:58 2021 +0300 > # Branch quic > # Node ID 8b049432ef2dcdb8d1a8ec1a5e41c0a340285b65 > # Parent 404de224517e33f685613d6425dcdb3c8ef5b97e > QUIC: allowed main QUIC connection for some operations. 
> > Operations like ngx_quic_open_stream(), ngx_http_quic_get_connection(), > ngx_http_v3_finalize_connection(), ngx_http_v3_shutdown_connection() used to > receive a QUIC stream connection. Now they can receive the main QUIC > connection as well. This is useful when calling them out of a stream context. > > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > --- a/src/event/quic/ngx_event_quic_streams.c > +++ b/src/event/quic/ngx_event_quic_streams.c > @@ -35,11 +35,12 @@ ngx_connection_t * > ngx_quic_open_stream(ngx_connection_t *c, ngx_uint_t bidi) > { > uint64_t id; > - ngx_quic_stream_t *qs, *nqs; > + ngx_connection_t *pc; > + ngx_quic_stream_t *nqs; > ngx_quic_connection_t *qc; > > - qs = c->quic; > - qc = ngx_quic_get_connection(qs->parent); > + pc = c->quic ? c->quic->parent : c; > + qc = ngx_quic_get_connection(pc); is it really needed? > > if (bidi) { > if (qc->streams.server_streams_bidi > @@ -85,7 +86,7 @@ ngx_quic_open_stream(ngx_connection_t *c > qc->streams.server_streams_uni++; > } > > - nqs = ngx_quic_create_stream(qs->parent, id); > + nqs = ngx_quic_create_stream(pc, id); > if (nqs == NULL) { > return NULL; > } > diff --git a/src/http/modules/ngx_http_quic_module.h b/src/http/modules/ngx_http_quic_module.h > --- a/src/http/modules/ngx_http_quic_module.h > +++ b/src/http/modules/ngx_http_quic_module.h > @@ -19,7 +19,8 @@ > > > #define ngx_http_quic_get_connection(c) \ > - ((ngx_http_connection_t *) (c)->quic->parent->data) > + ((ngx_http_connection_t *) ((c)->quic ? (c)->quic->parent->data \ > + : (c)->data)) > Looks like this is the only change useful in the 2nd patch. To avoid it, ngx_http_v3_inc_insert_count_handler() could get h3c from the decoder stream, but ngx_http_v3_get_uni_stream() isn't external. 
> > ngx_int_t ngx_http_quic_init(ngx_connection_t *c); > diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c > --- a/src/http/v3/ngx_http_v3.c > +++ b/src/http/v3/ngx_http_v3.c > @@ -70,8 +70,8 @@ ngx_http_v3_keepalive_handler(ngx_event_ > > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 keepalive handler"); > > - ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, > - "keepalive timeout"); > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, > + "keepalive timeout"); > } > > > diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h > --- a/src/http/v3/ngx_http_v3.h > +++ b/src/http/v3/ngx_http_v3.h > @@ -85,10 +85,12 @@ > module) > > #define ngx_http_v3_finalize_connection(c, code, reason) \ > - ngx_quic_finalize_connection(c->quic->parent, code, reason) > + ngx_quic_finalize_connection((c)->quic ? (c)->quic->parent : (c), \ > + code, reason) > > #define ngx_http_v3_shutdown_connection(c, code, reason) \ > - ngx_quic_shutdown_connection(c->quic->parent, code, reason) > + ngx_quic_shutdown_connection((c)->quic ? (c)->quic->parent : (c), \ > + code, reason) > > > typedef struct { -- Sergey Kandaurov From pluknet at nginx.com Thu Nov 11 15:20:59 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 11 Nov 2021 18:20:59 +0300 Subject: [PATCH 2 of 2] HTTP/3: delayed Insert Count Increment instruction In-Reply-To: References: Message-ID: <1267BB21-7BFC-483B-AF39-12CD2546B3AD@nginx.com> > On 21 Oct 2021, at 16:41, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1634804424 -10800 > # Thu Oct 21 11:20:24 2021 +0300 > # Branch quic > # Node ID e2d65b59ccb9035cbd619358a121ba5bcca3404a > # Parent 8b049432ef2dcdb8d1a8ec1a5e41c0a340285b65 > HTTP/3: delayed Insert Count Increment instruction. > > Sending the instruction is delayed until the end of the current event cycle. > Delaying the instruction is allowed by quic-qpack-21, section 2.2.2.3. 
> The goal is to reduce the amount of data sent back to client by accumulating > inserts. Or it may be not sent at all if there are no blocked streams. > > diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c > --- a/src/http/v3/ngx_http_v3.c > +++ b/src/http/v3/ngx_http_v3.c > @@ -47,6 +47,10 @@ ngx_http_v3_init_session(ngx_connection_ > h3c->keepalive.handler = ngx_http_v3_keepalive_handler; > h3c->keepalive.cancelable = 1; > > + h3c->table.send_insert_count.log = pc->log; > + h3c->table.send_insert_count.data = pc; > + h3c->table.send_insert_count.handler = ngx_http_v3_inc_insert_count_handler; > + > cln = ngx_pool_cleanup_add(pc->pool, 0); > if (cln == NULL) { > return NGX_ERROR; > @@ -85,6 +89,10 @@ ngx_http_v3_cleanup_session(void *data) > if (h3c->keepalive.timer_set) { > ngx_del_timer(&h3c->keepalive); > } > + > + if (h3c->table.send_insert_count.posted) { > + ngx_delete_posted_event(&h3c->table.send_insert_count); > + } > } > > > diff --git a/src/http/v3/ngx_http_v3_parse.c b/src/http/v3/ngx_http_v3_parse.c > --- a/src/http/v3/ngx_http_v3_parse.c > +++ b/src/http/v3/ngx_http_v3_parse.c > @@ -395,6 +395,8 @@ done: > if (ngx_http_v3_send_ack_section(c, c->quic->id) != NGX_OK) { > return NGX_ERROR; > } > + > + ngx_http_v3_ack_insert_count(c, st->prefix.insert_count); > } > > st->state = sw_start; > diff --git a/src/http/v3/ngx_http_v3_tables.c b/src/http/v3/ngx_http_v3_tables.c > --- a/src/http/v3/ngx_http_v3_tables.c > +++ b/src/http/v3/ngx_http_v3_tables.c > @@ -232,11 +232,9 @@ ngx_http_v3_insert(ngx_connection_t *c, > dt->elts[dt->nelts++] = field; > dt->size += size; > > - /* TODO increment can be sent less often */ > + dt->insert_count++; > > - if (ngx_http_v3_send_inc_insert_count(c, 1) != NGX_OK) { > - return NGX_ERROR; > - } > + ngx_post_event(&dt->send_insert_count, &ngx_posted_events); > > if (ngx_http_v3_new_entry(c) != NGX_OK) { > return NGX_ERROR; > @@ -246,6 +244,34 @@ ngx_http_v3_insert(ngx_connection_t *c, > } > > > +void > 
+ngx_http_v3_inc_insert_count_handler(ngx_event_t *ev) > +{ > + ngx_connection_t *c; > + ngx_http_v3_session_t *h3c; > + ngx_http_v3_dynamic_table_t *dt; > + > + c = ev->data; > + > + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, > + "http3 inc insert count handler"); > + > + h3c = ngx_http_v3_get_session(c); > + dt = &h3c->table; > + > + if (dt->insert_count > dt->ack_insert_count) { > + if (ngx_http_v3_send_inc_insert_count(c, > + dt->insert_count - dt->ack_insert_count) > + != NGX_OK) > + { > + return; > + } > + > + dt->ack_insert_count = dt->insert_count; > + } > +} > + > + > ngx_int_t > ngx_http_v3_set_capacity(ngx_connection_t *c, ngx_uint_t capacity) > { > @@ -603,6 +629,21 @@ ngx_http_v3_check_insert_count(ngx_conne > } > > > +void > +ngx_http_v3_ack_insert_count(ngx_connection_t *c, uint64_t insert_count) > +{ > + ngx_http_v3_session_t *h3c; > + ngx_http_v3_dynamic_table_t *dt; > + > + h3c = ngx_http_v3_get_session(c); > + dt = &h3c->table; > + > + if (dt->ack_insert_count < insert_count) { > + dt->ack_insert_count = insert_count; > + } > +} > + > + > static void > ngx_http_v3_unblock(void *data) > { > diff --git a/src/http/v3/ngx_http_v3_tables.h b/src/http/v3/ngx_http_v3_tables.h > --- a/src/http/v3/ngx_http_v3_tables.h > +++ b/src/http/v3/ngx_http_v3_tables.h > @@ -26,9 +26,13 @@ typedef struct { > ngx_uint_t base; > size_t size; > size_t capacity; > + uint64_t insert_count; > + uint64_t ack_insert_count; > + ngx_event_t send_insert_count; > } ngx_http_v3_dynamic_table_t; > > > +void ngx_http_v3_inc_insert_count_handler(ngx_event_t *ev); > void ngx_http_v3_cleanup_table(ngx_http_v3_session_t *h3c); > ngx_int_t ngx_http_v3_ref_insert(ngx_connection_t *c, ngx_uint_t dynamic, > ngx_uint_t index, ngx_str_t *value); > @@ -46,6 +50,7 @@ ngx_int_t ngx_http_v3_decode_insert_coun > ngx_uint_t *insert_count); > ngx_int_t ngx_http_v3_check_insert_count(ngx_connection_t *c, > ngx_uint_t insert_count); > +void ngx_http_v3_ack_insert_count(ngx_connection_t *c, 
uint64_t insert_count); > ngx_int_t ngx_http_v3_set_param(ngx_connection_t *c, uint64_t id, > uint64_t value); > Looks good. -- Sergey Kandaurov From greeshma.avadhootha at gmail.com Thu Nov 11 19:26:02 2021 From: greeshma.avadhootha at gmail.com (Greeshma A) Date: Thu, 11 Nov 2021 11:26:02 -0800 Subject: Which nginx variable holds the value for destination IP address in stream layer In-Reply-To: References: Message-ID: Hi, Apologise for reviving this thread. I have gone through the documentation and I did not find anything that would capture the destination IP address in the request. I see $sever_addr, but that captures the Destination hostname, not the destination IP. If you have anything on the top of your mind regarding this, please let me know. I have been stuck at this for a while now. Apart from that, I have been trying to subscribe to the nginx at nginx.org. But there has been no confirmation and my mails are bouncing back. Would really appreciate any leads on this. Thanks, -Greeshma On Mon, Nov 8, 2021 at 12:58 PM Maxim Dounin wrote: > Hello! > > On Mon, Nov 08, 2021 at 12:39:14PM -0800, Greeshma A wrote: > > > @Maxim, thanks for pointing this out. Please share the forum/mailing list > > that would be appropriate. Really appreciate it. Haven't had much luck > with > > the documentation regarding this. > > Relevant mailing list can be found here: > > http://nginx.org/en/support.html > > Note well that it might be a good idea to re-check the > documentation before re-posting your question to another mailing > list. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From arut at nginx.com Mon Nov 15 12:33:25 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 15 Nov 2021 15:33:25 +0300 Subject: [PATCH 2 of 3] HTTP/3: allowed QUIC stream connection reuse In-Reply-To: <31C3455F-41FD-4474-8F3C-0CCEAE8D13CE@nginx.com> References: <8ae53c592c719af4f3ba.1634561309@arut-laptop> <20211110214218.wq7bozp3ppy72kgf@Romans-MacBook-Pro.local> <31C3455F-41FD-4474-8F3C-0CCEAE8D13CE@nginx.com> Message-ID: <20211115123325.6azp4uve7qfeedna@Romans-MacBook-Pro.local> On Thu, Nov 11, 2021 at 02:48:32PM +0300, Sergey Kandaurov wrote: > > > On 11 Nov 2021, at 00:42, Roman Arutyunyan wrote: > > > > On Wed, Nov 10, 2021 at 03:59:39PM +0300, Sergey Kandaurov wrote: > >> > >>> On 18 Oct 2021, at 15:48, Roman Arutyunyan wrote: > >>> > >>> # HG changeset patch > >>> # User Roman Arutyunyan > >>> # Date 1634561226 -10800 > >>> # Mon Oct 18 15:47:06 2021 +0300 > >>> # Branch quic > >>> # Node ID 8ae53c592c719af4f3ba47dbd85f78be27aaf7db > >>> # Parent 8739f475583031399879ef0af2eb5af76008449e > >>> HTTP/3: allowed QUIC stream connection reuse. > >>> > >>> A QUIC stream connection is treated as reusable until first bytes of request > >>> arrive, which is also when the request object is now allocated. A connection > >>> closed as a result of draining, is reset with the error code > >>> H3_REQUEST_REJECTED. Such behavior is allowed by quic-http-34: > >>> > >>> Once a request stream has been opened, the request MAY be cancelled > >>> by either endpoint. Clients cancel requests if the response is no > >>> longer of interest; servers cancel requests if they are unable to or > >>> choose not to respond. > >>> > >>> When the server cancels a request without performing any application > >>> processing, the request is considered "rejected." The server SHOULD > >>> abort its response stream with the error code H3_REQUEST_REJECTED. 
> >>> > >>> The client can treat requests rejected by the server as though they had > >>> never been sent at all, thereby allowing them to be retried later. > >>> > >> > >> Looks good. See below for minor comments. > >> BTW, if we still hit the worker_connections limit, this leads to > >> an entire QUIC connection close, but I doubt we can easily improve this. > > > > When there's not enough worker_connections for a new QUIC stream, we can > > send H3_REQUEST_REJECTED to client without creating a stream. We can discuss > > this later. Here's a patch that addresses this. [..] -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1636646820 -10800 # Thu Nov 11 19:07:00 2021 +0300 # Branch quic # Node ID 801103b7645d93d0d06f63019e54d9e76f1baa6c # Parent d2c193aa84800da00314f1af72ae722d964445a4 QUIC: reject streams which we could not create. The reasons why a stream may not be created by server currently include hitting worker_connections limit and memory allocation error. Previously in these cases the entire QUIC connection was closed and all its streams were shut down. Now the new stream is rejected and existing streams continue working. To reject an HTTP/3 request stream, RESET_STREAM and STOP_SENDING with H3_REQUEST_REJECTED error code are sent to client. HTTP/3 uni streams and Stream streams are not rejected. 
diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h --- a/src/event/quic/ngx_event_quic.h +++ b/src/event/quic/ngx_event_quic.h @@ -61,6 +61,9 @@ typedef struct { ngx_flag_t retry; ngx_flag_t gso_enabled; ngx_str_t host_key; + ngx_int_t close_stream_code; + ngx_int_t reject_uni_stream_code; + ngx_int_t reject_bidi_stream_code; u_char av_token_key[NGX_QUIC_AV_KEY_LEN]; u_char sr_token_key[NGX_QUIC_SR_KEY_LEN]; } ngx_quic_conf_t; diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -15,6 +15,7 @@ static ngx_quic_stream_t *ngx_quic_create_client_stream(ngx_connection_t *c, uint64_t id); +static ngx_int_t ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id); static ngx_int_t ngx_quic_init_stream(ngx_quic_stream_t *qs); static void ngx_quic_init_streams_handler(ngx_connection_t *c); static ngx_quic_stream_t *ngx_quic_create_stream(ngx_connection_t *c, @@ -377,8 +378,13 @@ ngx_quic_create_client_stream(ngx_connec for ( /* void */ ; min_id < id; min_id += 0x04) { qs = ngx_quic_create_stream(c, min_id); + if (qs == NULL) { - return NULL; + if (ngx_quic_reject_stream(c, min_id) != NGX_OK) { + return NULL; + } + + continue; } if (ngx_quic_init_stream(qs) != NGX_OK) { @@ -390,7 +396,66 @@ ngx_quic_create_client_stream(ngx_connec } } - return ngx_quic_create_stream(c, id); + qs = ngx_quic_create_stream(c, id); + + if (qs == NULL) { + if (ngx_quic_reject_stream(c, id) != NGX_OK) { + return NULL; + } + + return NGX_QUIC_STREAM_GONE; + } + + return qs; +} + + +static ngx_int_t +ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id) +{ + uint64_t code; + ngx_quic_frame_t *frame; + ngx_quic_connection_t *qc; + + qc = ngx_quic_get_connection(c); + + code = (id & NGX_QUIC_STREAM_UNIDIRECTIONAL) + ? 
qc->conf->reject_uni_stream_code + : qc->conf->reject_bidi_stream_code; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic stream id:0x%xL reject err:0x%xL", id, code); + + if (code == 0) { + return NGX_DECLINED; + } + + frame = ngx_quic_alloc_frame(c); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_RESET_STREAM; + frame->u.reset_stream.id = id; + frame->u.reset_stream.error_code = code; + frame->u.reset_stream.final_size = 0; + + ngx_quic_queue_frame(qc, frame); + + frame = ngx_quic_alloc_frame(c); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_STOP_SENDING; + frame->u.stop_sending.id = id; + frame->u.stop_sending.error_code = code; + + ngx_quic_queue_frame(qc, frame); + + return NGX_OK; } @@ -866,7 +931,9 @@ ngx_quic_stream_cleanup_handler(void *da if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0 || (qs->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) == 0) { - if (!c->read->pending_eof && !c->read->error) { + if (!c->read->pending_eof && !c->read->error + && qc->conf->close_stream_code) + { frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { goto done; @@ -875,7 +942,7 @@ ngx_quic_stream_cleanup_handler(void *da frame->level = ssl_encryption_application; frame->type = NGX_QUIC_FT_STOP_SENDING; frame->u.stop_sending.id = qs->id; - frame->u.stop_sending.error_code = 0x100; /* HTTP/3 no error */ + frame->u.stop_sending.error_code = qc->conf->close_stream_code; ngx_quic_queue_frame(qc, frame); } diff --git a/src/http/modules/ngx_http_quic_module.c b/src/http/modules/ngx_http_quic_module.c --- a/src/http/modules/ngx_http_quic_module.c +++ b/src/http/modules/ngx_http_quic_module.c @@ -314,6 +314,7 @@ ngx_http_quic_create_srv_conf(ngx_conf_t * conf->tp.sr_enabled = 0 * conf->tp.preferred_address = NULL * conf->host_key = { 0, NULL } + * cong->reject_uni_stream_code = 0; */ conf->tp.max_idle_timeout = 
NGX_CONF_UNSET_MSEC; @@ -331,6 +332,8 @@ ngx_http_quic_create_srv_conf(ngx_conf_t conf->retry = NGX_CONF_UNSET; conf->gso_enabled = NGX_CONF_UNSET; + conf->close_stream_code = NGX_HTTP_V3_ERR_NO_ERROR; + conf->reject_bidi_stream_code = NGX_HTTP_V3_ERR_REQUEST_REJECTED; return conf; } diff --git a/src/stream/ngx_stream_quic_module.c b/src/stream/ngx_stream_quic_module.c --- a/src/stream/ngx_stream_quic_module.c +++ b/src/stream/ngx_stream_quic_module.c @@ -241,6 +241,9 @@ ngx_stream_quic_create_srv_conf(ngx_conf * conf->tp.retry_scid = { 0, NULL }; * conf->tp.preferred_address = NULL * conf->host_key = { 0, NULL } + * conf->close_stream_code = 0; + * conf->reject_uni_stream_code = 0; + * conf->reject_bidi_stream_code = 0; */ conf->tp.max_idle_timeout = NGX_CONF_UNSET_MSEC; From vl at nginx.com Tue Nov 16 09:18:47 2021 From: vl at nginx.com (Vladimir Homutov) Date: Tue, 16 Nov 2021 12:18:47 +0300 Subject: [PATCH 2 of 3] HTTP/3: allowed QUIC stream connection reuse In-Reply-To: <20211115123325.6azp4uve7qfeedna@Romans-MacBook-Pro.local> References: <8ae53c592c719af4f3ba.1634561309@arut-laptop> <20211110214218.wq7bozp3ppy72kgf@Romans-MacBook-Pro.local> <31C3455F-41FD-4474-8F3C-0CCEAE8D13CE@nginx.com> <20211115123325.6azp4uve7qfeedna@Romans-MacBook-Pro.local> Message-ID: On Mon, Nov 15, 2021 at 03:33:25PM +0300, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1636646820 -10800 > # Thu Nov 11 19:07:00 2021 +0300 > # Branch quic > # Node ID 801103b7645d93d0d06f63019e54d9e76f1baa6c > # Parent d2c193aa84800da00314f1af72ae722d964445a4 > QUIC: reject streams which we could not create. > > The reasons why a stream may not be created by server currently include hitting > worker_connections limit and memory allocation error. Previously in these > cases the entire QUIC connection was closed and all its streams were shut down. > Now the new stream is rejected and existing streams continue working. 
> > To reject an HTTP/3 request stream, RESET_STREAM and STOP_SENDING with > H3_REQUEST_REJECTED error code are sent to client. HTTP/3 uni streams and > Stream streams are not rejected. > > diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h > --- a/src/event/quic/ngx_event_quic.h > +++ b/src/event/quic/ngx_event_quic.h > @@ -61,6 +61,9 @@ typedef struct { > ngx_flag_t retry; > ngx_flag_t gso_enabled; > ngx_str_t host_key; > + ngx_int_t close_stream_code; > + ngx_int_t reject_uni_stream_code; > + ngx_int_t reject_bidi_stream_code; i would prefer stream_close_code and stream_reject_code_uni|bidi, a bit similar to transport parameter naming like 'initial_max_stream_data_bidi_local', YMMV > u_char av_token_key[NGX_QUIC_AV_KEY_LEN]; > u_char sr_token_key[NGX_QUIC_SR_KEY_LEN]; > } ngx_quic_conf_t; > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > --- a/src/event/quic/ngx_event_quic_streams.c > +++ b/src/event/quic/ngx_event_quic_streams.c > @@ -15,6 +15,7 @@ > > static ngx_quic_stream_t *ngx_quic_create_client_stream(ngx_connection_t *c, > uint64_t id); > +static ngx_int_t ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id); > static ngx_int_t ngx_quic_init_stream(ngx_quic_stream_t *qs); > static void ngx_quic_init_streams_handler(ngx_connection_t *c); > static ngx_quic_stream_t *ngx_quic_create_stream(ngx_connection_t *c, > @@ -377,8 +378,13 @@ ngx_quic_create_client_stream(ngx_connec > for ( /* void */ ; min_id < id; min_id += 0x04) { > > qs = ngx_quic_create_stream(c, min_id); > + > if (qs == NULL) { > - return NULL; > + if (ngx_quic_reject_stream(c, min_id) != NGX_OK) { > + return NULL; > + } > + > + continue; > } > > if (ngx_quic_init_stream(qs) != NGX_OK) { > @@ -390,7 +396,66 @@ ngx_quic_create_client_stream(ngx_connec > } > } > > - return ngx_quic_create_stream(c, id); > + qs = ngx_quic_create_stream(c, id); > + > + if (qs == NULL) { > + if (ngx_quic_reject_stream(c, id) != NGX_OK) 
{ > + return NULL; > + } > + > + return NGX_QUIC_STREAM_GONE; > + } > + > + return qs; > +} > + > + > +static ngx_int_t > +ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id) > +{ > + uint64_t code; > + ngx_quic_frame_t *frame; > + ngx_quic_connection_t *qc; > + > + qc = ngx_quic_get_connection(c); > + > + code = (id & NGX_QUIC_STREAM_UNIDIRECTIONAL) > + ? qc->conf->reject_uni_stream_code > + : qc->conf->reject_bidi_stream_code; > + > + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, > + "quic stream id:0x%xL reject err:0x%xL", id, code); Here we may decline stream rejection, but have already logged it. I suggest putting debug below 'code == 0' test. > + > + if (code == 0) { > + return NGX_DECLINED; > + } > + > + frame = ngx_quic_alloc_frame(c); > + if (frame == NULL) { > + return NGX_ERROR; > + } > + > + frame->level = ssl_encryption_application; > + frame->type = NGX_QUIC_FT_RESET_STREAM; > + frame->u.reset_stream.id = id; > + frame->u.reset_stream.error_code = code; > + frame->u.reset_stream.final_size = 0; > + > + ngx_quic_queue_frame(qc, frame); > + > + frame = ngx_quic_alloc_frame(c); > + if (frame == NULL) { > + return NGX_ERROR; > + } > + > + frame->level = ssl_encryption_application; > + frame->type = NGX_QUIC_FT_STOP_SENDING; > + frame->u.stop_sending.id = id; > + frame->u.stop_sending.error_code = code; > + > + ngx_quic_queue_frame(qc, frame); > + > + return NGX_OK; > } > > > @@ -866,7 +931,9 @@ ngx_quic_stream_cleanup_handler(void *da > if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0 > || (qs->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) == 0) > { > - if (!c->read->pending_eof && !c->read->error) { > + if (!c->read->pending_eof && !c->read->error > + && qc->conf->close_stream_code) > + { > frame = ngx_quic_alloc_frame(pc); > if (frame == NULL) { > goto done; > @@ -875,7 +942,7 @@ ngx_quic_stream_cleanup_handler(void *da > frame->level = ssl_encryption_application; > frame->type = NGX_QUIC_FT_STOP_SENDING; > frame->u.stop_sending.id = qs->id; > - 
frame->u.stop_sending.error_code = 0x100; /* HTTP/3 no error */ > + frame->u.stop_sending.error_code = qc->conf->close_stream_code; > > ngx_quic_queue_frame(qc, frame); > } > diff --git a/src/http/modules/ngx_http_quic_module.c b/src/http/modules/ngx_http_quic_module.c > --- a/src/http/modules/ngx_http_quic_module.c > +++ b/src/http/modules/ngx_http_quic_module.c > @@ -314,6 +314,7 @@ ngx_http_quic_create_srv_conf(ngx_conf_t > * conf->tp.sr_enabled = 0 > * conf->tp.preferred_address = NULL > * conf->host_key = { 0, NULL } > + * cong->reject_uni_stream_code = 0; > */ > > conf->tp.max_idle_timeout = NGX_CONF_UNSET_MSEC; > @@ -331,6 +332,8 @@ ngx_http_quic_create_srv_conf(ngx_conf_t > > conf->retry = NGX_CONF_UNSET; > conf->gso_enabled = NGX_CONF_UNSET; > + conf->close_stream_code = NGX_HTTP_V3_ERR_NO_ERROR; > + conf->reject_bidi_stream_code = NGX_HTTP_V3_ERR_REQUEST_REJECTED; > > return conf; > } > diff --git a/src/stream/ngx_stream_quic_module.c b/src/stream/ngx_stream_quic_module.c > --- a/src/stream/ngx_stream_quic_module.c > +++ b/src/stream/ngx_stream_quic_module.c > @@ -241,6 +241,9 @@ ngx_stream_quic_create_srv_conf(ngx_conf > * conf->tp.retry_scid = { 0, NULL }; > * conf->tp.preferred_address = NULL > * conf->host_key = { 0, NULL } > + * conf->close_stream_code = 0; > + * conf->reject_uni_stream_code = 0; > + * conf->reject_bidi_stream_code = 0; > */ > > conf->tp.max_idle_timeout = NGX_CONF_UNSET_MSEC; Overal patch looks good to me From pluknet at nginx.com Tue Nov 16 11:59:46 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 16 Nov 2021 14:59:46 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> Message-ID: <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> > On 11 Nov 2021, at 06:10, Maxim Dounin wrote: > > # HG changeset patch > # User Maxim Dounin > # Date 1636599377 -10800 > # Thu Nov 11 05:56:17 2021 +0300 
> # Node ID 76e072a6947a221868705c13973de15319c0d921 > # Parent 82b750b20c5205d685e59031247fe898f011394e > HTTP/2: fixed sendfile() aio handling. > > With sendfile() in threads ("aio threads; sendfile on;"), client connection > can block on writing, waiting for sendfile() to complete. In HTTP/2 this > might result in the request hang, since an attempt to continue processig processing > in thread event handler will call request's write event handler, which > is usually stopped by ngx_http_v2_send_chain(): it does nothing if there > are no additional data and stream->queued is set. Further, HTTP/2 resets > stream's c->write->ready to 0 if writing blocks, so just fixing > ngx_http_v2_send_chain() is not enough. > > Can be reproduced with test suite on Linux with: > > TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t > > The following tests currently fail: h2_keepalive.t, h2_priority.t, > h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. > > Similarly, sendfile() with AIO preloading on FreeBSD can block as well, > with similar results. This is, however, harder to reproduce, especially > on modern FreeBSD systems, since sendfile() usually do not return EBUSY. does not > > Fix is to post a write event on HTTP/2 connection in the thread event > handler (and aio preload handler). This ensures that sendfile() will be > completed and stream processing will be resumed by HTTP/2 code. 
> > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c > +++ b/src/http/ngx_http_copy_filter_module.c > @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler > r->aio = 0; > ev->complete = 0; > > +#if (NGX_HTTP_V2) > + > + if (r->stream) { > + /* > + * for HTTP/2, trigger a write event on the main connection > + * to handle sendfile() preload > + */ > + > + ngx_post_event(r->stream->connection->connection->write, > + &ngx_posted_events); > + return; > + } > + > +#endif > + > r->connection->write->handler(r->connection->write); > } > > @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e > r->main->blocked--; > r->aio = 0; > > +#if (NGX_HTTP_V2) > + > + if (r->stream) { > + /* > + * for HTTP/2, trigger a write event on the main connection > + * to handle sendfile() in threads > + */ > + > + ngx_post_event(r->stream->connection->connection->write, > + &ngx_posted_events); > + } > + > +#endif > + This thread event handler is used not only for sendfile() completion, but also to complete reading in threads a buffered upstream response. In this case, posting a write event on HTTP/2 connection looks unnecessary, since there is no sendfile() in action, it will do nothing. On the other hand, if it is indeed used to complete a sendfile() task, which needs to invoke http2 write handler, calling write_event_handler() directly from thread event handler seems to be redundant: it could be optimized away since http2 write handler will normally end up in posting a write event on the main connection, anyway, see the call sequence ngx_http_v2_write_handler() -> ngx_http_v2_send_output_queue() -> ngx_http_v2_data_frame_handler() -> ngx_http_v2_handle_stream(). So, currently it looks as below. 2021/11/16 11:30:21 [debug] 3704204#3704206: run task #1 in thread pool "default" 2021/11/16 11:30:21 [debug] 3704204#3704206: linux sendfile thread handler .. 
2021/11/16 11:30:21 [debug] 3704204#3704204: run completion handler for task #1 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 http thread: "/1?" done:0 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 post event 00005613DD6E9A90 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 http upstream process downstream ... 2021/11/16 11:30:21 [debug] 3704204#3704204: posted event 00005613DD6E9A90 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 delete posted event 00005613DD6E9A90 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 http2 write handler ... 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 http2:1 DATA frame 00005613DD596C88 was sent 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 post event 00005613DD58D9A0 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 http2 frame sent: 00005613DD596C88 sid:1 bl:1 len:8192 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 update posted event 00005613DD58D9A0 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 http2:1 DATA frame 00005613DD597750 was sent partially 2021/11/16 11:30:21 [debug] 3704204#3704204: posted event 00005613DD58D9A0 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 delete posted event 00005613DD58D9A0 2021/11/16 11:30:21 [debug] 3704204#3704204: *1 http run request: "/1?" 
2021/11/16 11:30:21 [debug] 3704204#3704204: *1 http upstream process downstream So, it could be narrowed down, something like the aio preload handler: diff -r 76e072a6947a -r 5f48b9a797d1 src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c Thu Nov 11 05:56:17 2021 +0300 +++ b/src/http/ngx_http_copy_filter_module.c Mon Nov 15 21:04:26 2021 +0000 @@ -340,7 +340,7 @@ #if (NGX_HTTP_V2) - if (r->stream) { + if (r->stream && r->stream->connection->connection->sendfile_task) { /* * for HTTP/2, trigger a write event on the main connection * to handle sendfile() in threads @@ -348,6 +348,7 @@ ngx_post_event(r->stream->connection->connection->write, &ngx_posted_events); + return; } #endif > if (r->done) { > /* > * trigger connection event handler if the subrequest was > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -3905,6 +3905,20 @@ ngx_http_upstream_thread_event_handler(n > r->main->blocked--; > r->aio = 0; > > +#if (NGX_HTTP_V2) > + > + if (r->stream) { > + /* > + * for HTTP/2, trigger a write event on the main connection > + * to handle sendfile() in threads > + */ > + > + ngx_post_event(r->stream->connection->connection->write, > + &ngx_posted_events); > + } > + > +#endif > + > if (r->done) { > /* > * trigger connection event handler if the subrequest was > I could not figure out, how this part is related, since upstream thread handler is only enabled with "aio_write on;" to write down a buffered upstream response to disk. It doesn't seem to be used with sendfile(). 
-- Sergey Kandaurov From arut at nginx.com Tue Nov 16 12:45:22 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 16 Nov 2021 15:45:22 +0300 Subject: [PATCH 1 of 2] QUIC: allowed main QUIC connection for some operations In-Reply-To: References: <8b049432ef2dcdb8d1a8.1634823659@arut-laptop> Message-ID: <20211116124522.xejjgxn455udjlf6@Romans-MacBook-Pro.local> On Thu, Nov 11, 2021 at 06:17:43PM +0300, Sergey Kandaurov wrote: > > > On 21 Oct 2021, at 16:40, Roman Arutyunyan wrote: > > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1634219818 -10800 > > # Thu Oct 14 16:56:58 2021 +0300 > > # Branch quic > > # Node ID 8b049432ef2dcdb8d1a8ec1a5e41c0a340285b65 > > # Parent 404de224517e33f685613d6425dcdb3c8ef5b97e > > QUIC: allowed main QUIC connection for some operations. > > > > Operations like ngx_quic_open_stream(), ngx_http_quic_get_connection(), > > ngx_http_v3_finalize_connection(), ngx_http_v3_shutdown_connection() used to > > receive a QUIC stream connection. Now they can receive the main QUIC > > connection as well. This is useful when calling them out of a stream context. > > > > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > > --- a/src/event/quic/ngx_event_quic_streams.c > > +++ b/src/event/quic/ngx_event_quic_streams.c > > @@ -35,11 +35,12 @@ ngx_connection_t * > > ngx_quic_open_stream(ngx_connection_t *c, ngx_uint_t bidi) > > { > > uint64_t id; > > - ngx_quic_stream_t *qs, *nqs; > > + ngx_connection_t *pc; > > + ngx_quic_stream_t *nqs; > > ngx_quic_connection_t *qc; > > > > - qs = c->quic; > > - qc = ngx_quic_get_connection(qs->parent); > > + pc = c->quic ? c->quic->parent : c; > > + qc = ngx_quic_get_connection(pc); > > is it really needed? ngx_http_v3_inc_insert_count_handler() calls ngx_http_v3_send_inc_insert_count(), which calls ngx_http_v3_get_uni_stream(), which may call ngx_quic_open_stream(). All these calls are made in the context of parent QUIC connection. 
So we need ngx_quic_open_stream() to be able to operate on it. Maybe it's even better to support only parent QUIC connection in ngx_quic_open_stream() and drop stream support. > > if (bidi) { > > if (qc->streams.server_streams_bidi > > @@ -85,7 +86,7 @@ ngx_quic_open_stream(ngx_connection_t *c > > qc->streams.server_streams_uni++; > > } > > > > - nqs = ngx_quic_create_stream(qs->parent, id); > > + nqs = ngx_quic_create_stream(pc, id); > > if (nqs == NULL) { > > return NULL; > > } > > diff --git a/src/http/modules/ngx_http_quic_module.h b/src/http/modules/ngx_http_quic_module.h > > --- a/src/http/modules/ngx_http_quic_module.h > > +++ b/src/http/modules/ngx_http_quic_module.h > > @@ -19,7 +19,8 @@ > > > > > > #define ngx_http_quic_get_connection(c) \ > > - ((ngx_http_connection_t *) (c)->quic->parent->data) > > + ((ngx_http_connection_t *) ((c)->quic ? (c)->quic->parent->data \ > > + : (c)->data)) > > > > Looks like this is the only change useful in the 2nd patch. > To avoid it, ngx_http_v3_inc_insert_count_handler() could get h3c from > the decoder stream, but ngx_http_v3_get_uni_stream() isn't external. ngx_http_v3_get_uni_stream() calls ngx_http_v3_get_session(), which calls ngx_http_quic_get_connection() anyway. 
> > ngx_int_t ngx_http_quic_init(ngx_connection_t *c); > > diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c > > --- a/src/http/v3/ngx_http_v3.c > > +++ b/src/http/v3/ngx_http_v3.c > > @@ -70,8 +70,8 @@ ngx_http_v3_keepalive_handler(ngx_event_ > > > > ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 keepalive handler"); > > > > - ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, > > - "keepalive timeout"); > > + ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, > > + "keepalive timeout"); > > } > > > > > > diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h > > --- a/src/http/v3/ngx_http_v3.h > > +++ b/src/http/v3/ngx_http_v3.h > > @@ -85,10 +85,12 @@ > > module) > > > > #define ngx_http_v3_finalize_connection(c, code, reason) \ > > - ngx_quic_finalize_connection(c->quic->parent, code, reason) > > + ngx_quic_finalize_connection((c)->quic ? (c)->quic->parent : (c), \ > > + code, reason) > > > > #define ngx_http_v3_shutdown_connection(c, code, reason) \ > > - ngx_quic_shutdown_connection(c->quic->parent, code, reason) > > + ngx_quic_shutdown_connection((c)->quic ? (c)->quic->parent : (c), \ > > + code, reason) Maybe we need to eliminate these at all. 
> -- > Sergey Kandaurov > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From arut at nginx.com Tue Nov 16 12:56:18 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 16 Nov 2021 15:56:18 +0300 Subject: [PATCH 2 of 2] HTTP/3: delayed Insert Count Increment instruction In-Reply-To: <1267BB21-7BFC-483B-AF39-12CD2546B3AD@nginx.com> References: <1267BB21-7BFC-483B-AF39-12CD2546B3AD@nginx.com> Message-ID: <20211116125618.ztijjkg576p3z2en@Romans-MacBook-Pro.local> On Thu, Nov 11, 2021 at 06:20:59PM +0300, Sergey Kandaurov wrote: > > > On 21 Oct 2021, at 16:41, Roman Arutyunyan wrote: > > > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1634804424 -10800 > > # Thu Oct 21 11:20:24 2021 +0300 > > # Branch quic > > # Node ID e2d65b59ccb9035cbd619358a121ba5bcca3404a > > # Parent 8b049432ef2dcdb8d1a8ec1a5e41c0a340285b65 > > HTTP/3: delayed Insert Count Increment instruction. > > > > Sending the instruction is delayed until the end of the current event cycle. > > Delaying the instruction is allowed by quic-qpack-21, section 2.2.2.3. > > The goal is to reduce the amount of data sent back to client by accumulating > > inserts. > > Or it may be not sent at all if there are no blocked streams. Sure. HTTP/3: delayed Insert Count Increment instruction. Sending the instruction is delayed until the end of the current event cycle. Delaying the instruction is allowed by quic-qpack-21, section 2.2.2.3. The goal is to reduce the amount of data sent back to client by accumulating several inserts in one instruction and sometimes not sending the instruction at all, if Section Acknowledgement was sent just before it. 
> > > > diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c > > --- a/src/http/v3/ngx_http_v3.c > > +++ b/src/http/v3/ngx_http_v3.c > > @@ -47,6 +47,10 @@ ngx_http_v3_init_session(ngx_connection_ > > h3c->keepalive.handler = ngx_http_v3_keepalive_handler; > > h3c->keepalive.cancelable = 1; > > > > + h3c->table.send_insert_count.log = pc->log; > > + h3c->table.send_insert_count.data = pc; > > + h3c->table.send_insert_count.handler = ngx_http_v3_inc_insert_count_handler; > > + > > cln = ngx_pool_cleanup_add(pc->pool, 0); > > if (cln == NULL) { > > return NGX_ERROR; > > @@ -85,6 +89,10 @@ ngx_http_v3_cleanup_session(void *data) > > if (h3c->keepalive.timer_set) { > > ngx_del_timer(&h3c->keepalive); > > } > > + > > + if (h3c->table.send_insert_count.posted) { > > + ngx_delete_posted_event(&h3c->table.send_insert_count); > > + } > > } > > > > > > diff --git a/src/http/v3/ngx_http_v3_parse.c b/src/http/v3/ngx_http_v3_parse.c > > --- a/src/http/v3/ngx_http_v3_parse.c > > +++ b/src/http/v3/ngx_http_v3_parse.c > > @@ -395,6 +395,8 @@ done: > > if (ngx_http_v3_send_ack_section(c, c->quic->id) != NGX_OK) { > > return NGX_ERROR; > > } > > + > > + ngx_http_v3_ack_insert_count(c, st->prefix.insert_count); > > } > > > > st->state = sw_start; > > diff --git a/src/http/v3/ngx_http_v3_tables.c b/src/http/v3/ngx_http_v3_tables.c > > --- a/src/http/v3/ngx_http_v3_tables.c > > +++ b/src/http/v3/ngx_http_v3_tables.c > > @@ -232,11 +232,9 @@ ngx_http_v3_insert(ngx_connection_t *c, > > dt->elts[dt->nelts++] = field; > > dt->size += size; > > > > - /* TODO increment can be sent less often */ > > + dt->insert_count++; > > > > - if (ngx_http_v3_send_inc_insert_count(c, 1) != NGX_OK) { > > - return NGX_ERROR; > > - } > > + ngx_post_event(&dt->send_insert_count, &ngx_posted_events); > > > > if (ngx_http_v3_new_entry(c) != NGX_OK) { > > return NGX_ERROR; > > @@ -246,6 +244,34 @@ ngx_http_v3_insert(ngx_connection_t *c, > > } > > > > > > +void > > 
+ngx_http_v3_inc_insert_count_handler(ngx_event_t *ev) > > +{ > > + ngx_connection_t *c; > > + ngx_http_v3_session_t *h3c; > > + ngx_http_v3_dynamic_table_t *dt; > > + > > + c = ev->data; > > + > > + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, > > + "http3 inc insert count handler"); > > + > > + h3c = ngx_http_v3_get_session(c); > > + dt = &h3c->table; > > + > > + if (dt->insert_count > dt->ack_insert_count) { > > + if (ngx_http_v3_send_inc_insert_count(c, > > + dt->insert_count - dt->ack_insert_count) > > + != NGX_OK) > > + { > > + return; > > + } > > + > > + dt->ack_insert_count = dt->insert_count; > > + } > > +} > > + > > + > > ngx_int_t > > ngx_http_v3_set_capacity(ngx_connection_t *c, ngx_uint_t capacity) > > { > > @@ -603,6 +629,21 @@ ngx_http_v3_check_insert_count(ngx_conne > > } > > > > > > +void > > +ngx_http_v3_ack_insert_count(ngx_connection_t *c, uint64_t insert_count) > > +{ > > + ngx_http_v3_session_t *h3c; > > + ngx_http_v3_dynamic_table_t *dt; > > + > > + h3c = ngx_http_v3_get_session(c); > > + dt = &h3c->table; > > + > > + if (dt->ack_insert_count < insert_count) { > > + dt->ack_insert_count = insert_count; > > + } > > +} > > + > > + > > static void > > ngx_http_v3_unblock(void *data) > > { > > diff --git a/src/http/v3/ngx_http_v3_tables.h b/src/http/v3/ngx_http_v3_tables.h > > --- a/src/http/v3/ngx_http_v3_tables.h > > +++ b/src/http/v3/ngx_http_v3_tables.h > > @@ -26,9 +26,13 @@ typedef struct { > > ngx_uint_t base; > > size_t size; > > size_t capacity; > > + uint64_t insert_count; > > + uint64_t ack_insert_count; > > + ngx_event_t send_insert_count; > > } ngx_http_v3_dynamic_table_t; > > > > > > +void ngx_http_v3_inc_insert_count_handler(ngx_event_t *ev); > > void ngx_http_v3_cleanup_table(ngx_http_v3_session_t *h3c); > > ngx_int_t ngx_http_v3_ref_insert(ngx_connection_t *c, ngx_uint_t dynamic, > > ngx_uint_t index, ngx_str_t *value); > > @@ -46,6 +50,7 @@ ngx_int_t ngx_http_v3_decode_insert_coun > > ngx_uint_t *insert_count); > > ngx_int_t 
ngx_http_v3_check_insert_count(ngx_connection_t *c, > > ngx_uint_t insert_count); > > +void ngx_http_v3_ack_insert_count(ngx_connection_t *c, uint64_t insert_count); > > ngx_int_t ngx_http_v3_set_param(ngx_connection_t *c, uint64_t id, > > uint64_t value); > > > > Looks good. > > -- > Sergey Kandaurov > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From pluknet at nginx.com Tue Nov 16 12:58:18 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 16 Nov 2021 15:58:18 +0300 Subject: [PATCH 2 of 2] HTTP/3: delayed Insert Count Increment instruction In-Reply-To: <20211116125618.ztijjkg576p3z2en@Romans-MacBook-Pro.local> References: <1267BB21-7BFC-483B-AF39-12CD2546B3AD@nginx.com> <20211116125618.ztijjkg576p3z2en@Romans-MacBook-Pro.local> Message-ID: > On 16 Nov 2021, at 15:56, Roman Arutyunyan wrote: > > On Thu, Nov 11, 2021 at 06:20:59PM +0300, Sergey Kandaurov wrote: >> >>> On 21 Oct 2021, at 16:41, Roman Arutyunyan wrote: >>> >>> # HG changeset patch >>> # User Roman Arutyunyan >>> # Date 1634804424 -10800 >>> # Thu Oct 21 11:20:24 2021 +0300 >>> # Branch quic >>> # Node ID e2d65b59ccb9035cbd619358a121ba5bcca3404a >>> # Parent 8b049432ef2dcdb8d1a8ec1a5e41c0a340285b65 >>> HTTP/3: delayed Insert Count Increment instruction. >>> >>> Sending the instruction is delayed until the end of the current event cycle. >>> Delaying the instruction is allowed by quic-qpack-21, section 2.2.2.3. >>> The goal is to reduce the amount of data sent back to client by accumulating >>> inserts. >> >> Or it may be not sent at all if there are no blocked streams. > > Sure. > > HTTP/3: delayed Insert Count Increment instruction. > > Sending the instruction is delayed until the end of the current event cycle. > Delaying the instruction is allowed by quic-qpack-21, section 2.2.2.3. 
> The goal is to reduce the amount of data sent back to client by accumulating > several inserts in one instruction and sometimes not sending the instruction at > all, if Section Acknowledgement was sent just before it. > Looks fine. >>> >>> diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c >>> --- a/src/http/v3/ngx_http_v3.c >>> +++ b/src/http/v3/ngx_http_v3.c >>> @@ -47,6 +47,10 @@ ngx_http_v3_init_session(ngx_connection_ >>> h3c->keepalive.handler = ngx_http_v3_keepalive_handler; >>> h3c->keepalive.cancelable = 1; >>> >>> + h3c->table.send_insert_count.log = pc->log; >>> + h3c->table.send_insert_count.data = pc; >>> + h3c->table.send_insert_count.handler = ngx_http_v3_inc_insert_count_handler; >>> + >>> cln = ngx_pool_cleanup_add(pc->pool, 0); >>> if (cln == NULL) { >>> return NGX_ERROR; >>> @@ -85,6 +89,10 @@ ngx_http_v3_cleanup_session(void *data) >>> if (h3c->keepalive.timer_set) { >>> ngx_del_timer(&h3c->keepalive); >>> } >>> + >>> + if (h3c->table.send_insert_count.posted) { >>> + ngx_delete_posted_event(&h3c->table.send_insert_count); >>> + } >>> } >>> >>> >>> diff --git a/src/http/v3/ngx_http_v3_parse.c b/src/http/v3/ngx_http_v3_parse.c >>> --- a/src/http/v3/ngx_http_v3_parse.c >>> +++ b/src/http/v3/ngx_http_v3_parse.c >>> @@ -395,6 +395,8 @@ done: >>> if (ngx_http_v3_send_ack_section(c, c->quic->id) != NGX_OK) { >>> return NGX_ERROR; >>> } >>> + >>> + ngx_http_v3_ack_insert_count(c, st->prefix.insert_count); >>> } >>> >>> st->state = sw_start; >>> diff --git a/src/http/v3/ngx_http_v3_tables.c b/src/http/v3/ngx_http_v3_tables.c >>> --- a/src/http/v3/ngx_http_v3_tables.c >>> +++ b/src/http/v3/ngx_http_v3_tables.c >>> @@ -232,11 +232,9 @@ ngx_http_v3_insert(ngx_connection_t *c, >>> dt->elts[dt->nelts++] = field; >>> dt->size += size; >>> >>> - /* TODO increment can be sent less often */ >>> + dt->insert_count++; >>> >>> - if (ngx_http_v3_send_inc_insert_count(c, 1) != NGX_OK) { >>> - return NGX_ERROR; >>> - } >>> + 
ngx_post_event(&dt->send_insert_count, &ngx_posted_events); >>> >>> if (ngx_http_v3_new_entry(c) != NGX_OK) { >>> return NGX_ERROR; >>> @@ -246,6 +244,34 @@ ngx_http_v3_insert(ngx_connection_t *c, >>> } >>> >>> >>> +void >>> +ngx_http_v3_inc_insert_count_handler(ngx_event_t *ev) >>> +{ >>> + ngx_connection_t *c; >>> + ngx_http_v3_session_t *h3c; >>> + ngx_http_v3_dynamic_table_t *dt; >>> + >>> + c = ev->data; >>> + >>> + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, >>> + "http3 inc insert count handler"); >>> + >>> + h3c = ngx_http_v3_get_session(c); >>> + dt = &h3c->table; >>> + >>> + if (dt->insert_count > dt->ack_insert_count) { >>> + if (ngx_http_v3_send_inc_insert_count(c, >>> + dt->insert_count - dt->ack_insert_count) >>> + != NGX_OK) >>> + { >>> + return; >>> + } >>> + >>> + dt->ack_insert_count = dt->insert_count; >>> + } >>> +} >>> + >>> + >>> ngx_int_t >>> ngx_http_v3_set_capacity(ngx_connection_t *c, ngx_uint_t capacity) >>> { >>> @@ -603,6 +629,21 @@ ngx_http_v3_check_insert_count(ngx_conne >>> } >>> >>> >>> +void >>> +ngx_http_v3_ack_insert_count(ngx_connection_t *c, uint64_t insert_count) >>> +{ >>> + ngx_http_v3_session_t *h3c; >>> + ngx_http_v3_dynamic_table_t *dt; >>> + >>> + h3c = ngx_http_v3_get_session(c); >>> + dt = &h3c->table; >>> + >>> + if (dt->ack_insert_count < insert_count) { >>> + dt->ack_insert_count = insert_count; >>> + } >>> +} >>> + >>> + >>> static void >>> ngx_http_v3_unblock(void *data) >>> { >>> diff --git a/src/http/v3/ngx_http_v3_tables.h b/src/http/v3/ngx_http_v3_tables.h >>> --- a/src/http/v3/ngx_http_v3_tables.h >>> +++ b/src/http/v3/ngx_http_v3_tables.h >>> @@ -26,9 +26,13 @@ typedef struct { >>> ngx_uint_t base; >>> size_t size; >>> size_t capacity; >>> + uint64_t insert_count; >>> + uint64_t ack_insert_count; >>> + ngx_event_t send_insert_count; >>> } ngx_http_v3_dynamic_table_t; >>> >>> >>> +void ngx_http_v3_inc_insert_count_handler(ngx_event_t *ev); >>> void ngx_http_v3_cleanup_table(ngx_http_v3_session_t *h3c); >>> 
ngx_int_t ngx_http_v3_ref_insert(ngx_connection_t *c, ngx_uint_t dynamic, >>> ngx_uint_t index, ngx_str_t *value); >>> @@ -46,6 +50,7 @@ ngx_int_t ngx_http_v3_decode_insert_coun >>> ngx_uint_t *insert_count); >>> ngx_int_t ngx_http_v3_check_insert_count(ngx_connection_t *c, >>> ngx_uint_t insert_count); >>> +void ngx_http_v3_ack_insert_count(ngx_connection_t *c, uint64_t insert_count); >>> ngx_int_t ngx_http_v3_set_param(ngx_connection_t *c, uint64_t id, >>> uint64_t value); >>> >> >> Looks good. -- Sergey Kandaurov From mdounin at mdounin.ru Tue Nov 16 14:41:02 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 17:41:02 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> Message-ID: Hello! On Tue, Nov 16, 2021 at 02:59:46PM +0300, Sergey Kandaurov wrote: > > > On 11 Nov 2021, at 06:10, Maxim Dounin wrote: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1636599377 -10800 > > # Thu Nov 11 05:56:17 2021 +0300 > > # Node ID 76e072a6947a221868705c13973de15319c0d921 > > # Parent 82b750b20c5205d685e59031247fe898f011394e > > HTTP/2: fixed sendfile() aio handling. > > > > With sendfile() in threads ("aio threads; sendfile on;"), client connection > > can block on writing, waiting for sendfile() to complete. In HTTP/2 this > > might result in the request hang, since an attempt to continue processig > > processing Fixed, thnx. > > in thread event handler will call request's write event handler, which > > is usually stopped by ngx_http_v2_send_chain(): it does nothing if there > > are no additional data and stream->queued is set. Further, HTTP/2 resets > > stream's c->write->ready to 0 if writing blocks, so just fixing > > ngx_http_v2_send_chain() is not enough. 
> > > > Can be reproduced with test suite on Linux with: > > > > TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t > > > > The following tests currently fail: h2_keepalive.t, h2_priority.t, > > h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. > > > > Similarly, sendfile() with AIO preloading on FreeBSD can block as well, > > with similar results. This is, however, harder to reproduce, especially > > on modern FreeBSD systems, since sendfile() usually do not return EBUSY. > > does not Fixed, thnx. > > Fix is to post a write event on HTTP/2 connection in the thread event > > handler (and aio preload handler). This ensures that sendfile() will be > > completed and stream processing will be resumed by HTTP/2 code. > > > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > > --- a/src/http/ngx_http_copy_filter_module.c > > +++ b/src/http/ngx_http_copy_filter_module.c > > @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler > > r->aio = 0; > > ev->complete = 0; > > > > +#if (NGX_HTTP_V2) > > + > > + if (r->stream) { > > + /* > > + * for HTTP/2, trigger a write event on the main connection > > + * to handle sendfile() preload > > + */ > > + > > + ngx_post_event(r->stream->connection->connection->write, > > + &ngx_posted_events); > > + return; > > + } > > + > > +#endif > > + > > r->connection->write->handler(r->connection->write); > > } > > > > @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e > > r->main->blocked--; > > r->aio = 0; > > > > +#if (NGX_HTTP_V2) > > + > > + if (r->stream) { > > + /* > > + * for HTTP/2, trigger a write event on the main connection > > + * to handle sendfile() in threads > > + */ > > + > > + ngx_post_event(r->stream->connection->connection->write, > > + &ngx_posted_events); > > + } > > + > > +#endif > > + > > This thread event handler is used not only for sendfile() completion, > but also to complete reading in threads a buffered upstream response. 
> In this case, posting a write event on HTTP/2 connection looks > unnecessary, since there is no sendfile() in action, it will do nothing. > On the other hand, if it is indeed used to complete a sendfile() task, > which needs to invoke http2 write handler, calling write_event_handler() > directly from thread event handler seems to be redundant: it could be > optimized away since http2 write handler will normally end up in posting > a write event on the main connection, anyway, see the call sequence > ngx_http_v2_write_handler() -> ngx_http_v2_send_output_queue() > -> ngx_http_v2_data_frame_handler() -> ngx_http_v2_handle_stream(). [...] > So, it could be narrowed down, something like the aio preload handler: > > diff -r 76e072a6947a -r 5f48b9a797d1 src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c Thu Nov 11 05:56:17 2021 +0300 > +++ b/src/http/ngx_http_copy_filter_module.c Mon Nov 15 21:04:26 2021 +0000 > @@ -340,7 +340,7 @@ > > #if (NGX_HTTP_V2) > > - if (r->stream) { > + if (r->stream && r->stream->connection->connection->sendfile_task) { > /* > * for HTTP/2, trigger a write event on the main connection > * to handle sendfile() in threads > @@ -348,6 +348,7 @@ > > ngx_post_event(r->stream->connection->connection->write, > &ngx_posted_events); > + return; > } > > #endif This "return" won't work, since even with sendfile() enabled and being used, the handler can be called for non-sendfile operations as well. That is, both posting an event to the main connection _and_ calling request write handler are required. This might be redundant in some cases, but there is no reasonable way to avoid this with sendfile() enabled. Checking sendfile_task might be used to avoid extra posted event with sendfile disabled, but it looks overcomplicated to me and I don't think it worth the effort. It's at most a minor optimization. 
> > if (r->done) { > > /* > > * trigger connection event handler if the subrequest was > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > > --- a/src/http/ngx_http_upstream.c > > +++ b/src/http/ngx_http_upstream.c > > @@ -3905,6 +3905,20 @@ ngx_http_upstream_thread_event_handler(n > > r->main->blocked--; > > r->aio = 0; > > > > +#if (NGX_HTTP_V2) > > + > > + if (r->stream) { > > + /* > > + * for HTTP/2, trigger a write event on the main connection > > + * to handle sendfile() in threads > > + */ > > + > > + ngx_post_event(r->stream->connection->connection->write, > > + &ngx_posted_events); > > + } > > + > > +#endif > > + > > if (r->done) { > > /* > > * trigger connection event handler if the subrequest was > > > > I could not figure out, how this part is related, since upstream > thread handler is only enabled with "aio_write on;" to write down > a buffered upstream response to disk. It doesn't seem to be used > with sendfile(). Thread handlers are set on per-file basis. As a result, if aio_write is enabled, the ngx_http_upstream_thread_event_handler() handler can be used for sendfile() as well. Also note the following "trigger connection event handler..." part: it is also only needed for sendfile(), yet present in the ngx_http_upstream_thread_event_handler(). -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Nov 16 14:50:42 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:50:42 +0000 Subject: [nginx] Version bump. Message-ID: details: https://hg.nginx.org/nginx/rev/e9a98fb95c48 branches: stable-1.20 changeset: 7954:e9a98fb95c48 user: Maxim Dounin date: Tue Nov 16 16:57:52 2021 +0300 description: Version bump. 
diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r c14b92e0e626 -r e9a98fb95c48 src/core/nginx.h --- a/src/core/nginx.h Tue May 25 15:35:38 2021 +0300 +++ b/src/core/nginx.h Tue Nov 16 16:57:52 2021 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1020001 -#define NGINX_VERSION "1.20.1" +#define nginx_version 1020002 +#define NGINX_VERSION "1.20.2" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From mdounin at mdounin.ru Tue Nov 16 14:50:45 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:50:45 +0000 Subject: [nginx] SSL: ngx_ssl_shutdown() rework. Message-ID: details: https://hg.nginx.org/nginx/rev/0601a4e793bf branches: stable-1.20 changeset: 7955:0601a4e793bf user: Maxim Dounin date: Tue Jun 01 17:37:49 2021 +0300 description: SSL: ngx_ssl_shutdown() rework. Instead of calling SSL_free() with each return point, introduced a single place where cleanup happens. As a positive side effect, this fixes two potential memory leaks on ngx_handle_read_event() and ngx_handle_write_event() errors where there were no SSL_free() calls (though unlikely practical, as errors there are only expected to happen due to bugs or kernel issues). diffstat: src/event/ngx_event_openssl.c | 45 +++++++++++++++++++++--------------------- 1 files changed, 22 insertions(+), 23 deletions(-) diffs (95 lines): diff -r e9a98fb95c48 -r 0601a4e793bf src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Nov 16 16:57:52 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Jun 01 17:37:49 2021 +0300 @@ -2896,9 +2896,12 @@ ngx_int_t ngx_ssl_shutdown(ngx_connection_t *c) { int n, sslerr, mode; + ngx_int_t rc; ngx_err_t err; ngx_uint_t tries; + rc = NGX_OK; + ngx_ssl_ocsp_cleanup(c); if (SSL_in_init(c->ssl->connection)) { @@ -2908,11 +2911,7 @@ ngx_ssl_shutdown(ngx_connection_t *c) * Avoid calling SSL_shutdown() if handshake wasn't completed. 
*/ - SSL_free(c->ssl->connection); - c->ssl = NULL; - c->recv = ngx_recv; - - return NGX_OK; + goto done; } if (c->timedout || c->error || c->buffered) { @@ -2954,11 +2953,7 @@ ngx_ssl_shutdown(ngx_connection_t *c) ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_shutdown: %d", n); if (n == 1) { - SSL_free(c->ssl->connection); - c->ssl = NULL; - c->recv = ngx_recv; - - return NGX_OK; + goto done; } if (n == 0 && tries-- > 1) { @@ -2984,11 +2979,11 @@ ngx_ssl_shutdown(ngx_connection_t *c) } if (ngx_handle_read_event(c->read, 0) != NGX_OK) { - return NGX_ERROR; + goto failed; } if (ngx_handle_write_event(c->write, 0) != NGX_OK) { - return NGX_ERROR; + goto failed; } ngx_add_timer(c->read, 3000); @@ -2997,23 +2992,27 @@ ngx_ssl_shutdown(ngx_connection_t *c) } if (sslerr == SSL_ERROR_ZERO_RETURN || ERR_peek_error() == 0) { - SSL_free(c->ssl->connection); - c->ssl = NULL; - c->recv = ngx_recv; - - return NGX_OK; + goto done; } err = (sslerr == SSL_ERROR_SYSCALL) ? ngx_errno : 0; ngx_ssl_connection_error(c, sslerr, err, "SSL_shutdown() failed"); - SSL_free(c->ssl->connection); - c->ssl = NULL; - c->recv = ngx_recv; - - return NGX_ERROR; - } + break; + } + +failed: + + rc = NGX_ERROR; + +done: + + SSL_free(c->ssl->connection); + c->ssl = NULL; + c->recv = ngx_recv; + + return rc; } From mdounin at mdounin.ru Tue Nov 16 14:50:48 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:50:48 +0000 Subject: [nginx] Fixed SSL logging with lingering close. Message-ID: details: https://hg.nginx.org/nginx/rev/ae70fcb8ac93 branches: stable-1.20 changeset: 7956:ae70fcb8ac93 user: Maxim Dounin date: Tue Jun 01 17:37:51 2021 +0300 description: Fixed SSL logging with lingering close. Recent fixes to SSL shutdown with lingering close (554c6ae25ffc, 1.19.5) broke logging of SSL variables. To make sure logging of SSL variables works properly, avoid freeing c->ssl when doing an SSL shutdown before lingering close. 
Reported by Reinis Rozitis (http://mailman.nginx.org/pipermail/nginx/2021-May/060670.html). diffstat: src/event/ngx_event_openssl.c | 6 ++++++ src/event/ngx_event_openssl.h | 1 + src/http/ngx_http_request.c | 2 ++ 3 files changed, 9 insertions(+), 0 deletions(-) diffs (39 lines): diff -r 0601a4e793bf -r ae70fcb8ac93 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Jun 01 17:37:49 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Jun 01 17:37:51 2021 +0300 @@ -3008,6 +3008,12 @@ failed: done: + if (c->ssl->shutdown_without_free) { + c->ssl->shutdown_without_free = 0; + c->recv = ngx_recv; + return rc; + } + SSL_free(c->ssl->connection); c->ssl = NULL; c->recv = ngx_recv; diff -r 0601a4e793bf -r ae70fcb8ac93 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Jun 01 17:37:49 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Jun 01 17:37:51 2021 +0300 @@ -100,6 +100,7 @@ struct ngx_ssl_connection_s { unsigned buffer:1; unsigned no_wait_shutdown:1; unsigned no_send_shutdown:1; + unsigned shutdown_without_free:1; unsigned handshake_buffer_set:1; unsigned try_early_data:1; unsigned in_early:1; diff -r 0601a4e793bf -r ae70fcb8ac93 src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Tue Jun 01 17:37:49 2021 +0300 +++ b/src/http/ngx_http_request.c Tue Jun 01 17:37:51 2021 +0300 @@ -3398,6 +3398,8 @@ ngx_http_set_lingering_close(ngx_connect if (c->ssl) { ngx_int_t rc; + c->ssl->shutdown_without_free = 1; + rc = ngx_ssl_shutdown(c); if (rc == NGX_ERROR) { From mdounin at mdounin.ru Tue Nov 16 14:50:51 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:50:51 +0000 Subject: [nginx] gRPC: handling GOAWAY with a higher last stream identifier. Message-ID: details: https://hg.nginx.org/nginx/rev/f5732fa038ad branches: stable-1.20 changeset: 7957:f5732fa038ad user: Sergey Kandaurov date: Thu Jun 17 11:43:55 2021 +0300 description: gRPC: handling GOAWAY with a higher last stream identifier. 
Previously, once received from upstream, it couldn't limit opening additional streams in a cached keepalive connection. diffstat: src/http/modules/ngx_http_grpc_module.c | 9 +++++++++ 1 files changed, 9 insertions(+), 0 deletions(-) diffs (61 lines): diff -r ae70fcb8ac93 -r f5732fa038ad src/http/modules/ngx_http_grpc_module.c --- a/src/http/modules/ngx_http_grpc_module.c Tue Jun 01 17:37:51 2021 +0300 +++ b/src/http/modules/ngx_http_grpc_module.c Thu Jun 17 11:43:55 2021 +0300 @@ -124,6 +124,7 @@ typedef struct { unsigned done:1; unsigned status:1; unsigned rst:1; + unsigned goaway:1; ngx_http_request_t *request; @@ -1213,6 +1214,7 @@ ngx_http_grpc_reinit_request(ngx_http_re ctx->done = 0; ctx->status = 0; ctx->rst = 0; + ctx->goaway = 0; ctx->connection = NULL; return NGX_OK; @@ -1568,6 +1570,7 @@ ngx_http_grpc_body_output_filter(void *d && ctx->out == NULL && ctx->output_closed && !ctx->output_blocked + && !ctx->goaway && ctx->state == ngx_http_grpc_st_start) { u->keepalive = 1; @@ -1717,6 +1720,8 @@ ngx_http_grpc_process_header(ngx_http_re return NGX_HTTP_UPSTREAM_INVALID_HEADER; } + ctx->goaway = 1; + continue; } @@ -1910,6 +1915,7 @@ ngx_http_grpc_process_header(ngx_http_re && ctx->out == NULL && ctx->output_closed && !ctx->output_blocked + && !ctx->goaway && b->last == b->pos) { u->keepalive = 1; @@ -2038,6 +2044,7 @@ ngx_http_grpc_filter(void *data, ssize_t if (ctx->in == NULL && ctx->output_closed && !ctx->output_blocked + && !ctx->goaway && ctx->state == ngx_http_grpc_st_start) { u->keepalive = 1; @@ -2207,6 +2214,8 @@ ngx_http_grpc_filter(void *data, ssize_t return NGX_ERROR; } + ctx->goaway = 1; + continue; } From mdounin at mdounin.ru Tue Nov 16 14:50:53 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:50:53 +0000 Subject: [nginx] SSL: set events ready flags after handshake. 
Message-ID: details: https://hg.nginx.org/nginx/rev/9b72da2b5b57 branches: stable-1.20 changeset: 7958:9b72da2b5b57 user: Maxim Dounin date: Tue Aug 03 20:50:30 2021 +0300 description: SSL: set events ready flags after handshake. The c->read->ready and c->write->ready flags might be reset during the handshake, and not set again if the handshake was finished on the other event. At the same time, some data might be read from the socket during the handshake, so missing c->read->ready flag might result in a connection hang, for example, when waiting for an SMTP greeting (which was already received during the handshake). Found by Sergey Kandaurov. diffstat: src/event/ngx_event_openssl.c | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diffs (23 lines): diff -r f5732fa038ad -r 9b72da2b5b57 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Thu Jun 17 11:43:55 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 03 20:50:30 2021 +0300 @@ -1740,6 +1740,9 @@ ngx_ssl_handshake(ngx_connection_t *c) c->recv_chain = ngx_ssl_recv_chain; c->send_chain = ngx_ssl_send_chain; + c->read->ready = 1; + c->write->ready = 1; + #ifndef SSL_OP_NO_RENEGOTIATION #if OPENSSL_VERSION_NUMBER < 0x10100000L #ifdef SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS @@ -1885,6 +1888,9 @@ ngx_ssl_try_early_data(ngx_connection_t c->recv_chain = ngx_ssl_recv_chain; c->send_chain = ngx_ssl_send_chain; + c->read->ready = 1; + c->write->ready = 1; + rc = ngx_ssl_ocsp_validate(c); if (rc == NGX_ERROR) { From mdounin at mdounin.ru Tue Nov 16 14:50:56 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:50:56 +0000 Subject: [nginx] SSL: SSL_CTX_set_tmp_dh() error handling. Message-ID: details: https://hg.nginx.org/nginx/rev/efbcecbe5805 branches: stable-1.20 changeset: 7959:efbcecbe5805 user: Sergey Kandaurov date: Wed Aug 04 21:27:51 2021 +0300 description: SSL: SSL_CTX_set_tmp_dh() error handling. For example, it can fail due to weak DH parameters. 
diffstat: src/event/ngx_event_openssl.c | 8 +++++++- 1 files changed, 7 insertions(+), 1 deletions(-) diffs (18 lines): diff -r 9b72da2b5b57 -r efbcecbe5805 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 03 20:50:30 2021 +0300 +++ b/src/event/ngx_event_openssl.c Wed Aug 04 21:27:51 2021 +0300 @@ -1376,7 +1376,13 @@ ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_ return NGX_ERROR; } - SSL_CTX_set_tmp_dh(ssl->ctx, dh); + if (SSL_CTX_set_tmp_dh(ssl->ctx, dh) != 1) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_set_tmp_dh(\"%s\") failed", file->data); + DH_free(dh); + BIO_free(bio); + return NGX_ERROR; + } DH_free(dh); BIO_free(bio); From mdounin at mdounin.ru Tue Nov 16 14:50:59 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:50:59 +0000 Subject: [nginx] SSL: RSA data type is deprecated in OpenSSL 3.0. Message-ID: details: https://hg.nginx.org/nginx/rev/ec2798eb3648 branches: stable-1.20 changeset: 7960:ec2798eb3648 user: Sergey Kandaurov date: Tue Aug 10 23:42:59 2021 +0300 description: SSL: RSA data type is deprecated in OpenSSL 3.0. The only consumer is a callback function for SSL_CTX_set_tmp_rsa_callback() deprecated in OpenSSL 1.1.0. Now the function is conditionally compiled too. 
diffstat: src/event/ngx_event_openssl.c | 6 +++++- src/event/ngx_event_openssl.h | 2 ++ 2 files changed, 7 insertions(+), 1 deletions(-) diffs (44 lines): diff -r efbcecbe5805 -r ec2798eb3648 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Aug 04 21:27:51 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:42:59 2021 +0300 @@ -1116,6 +1116,8 @@ ngx_ssl_info_callback(const ngx_ssl_conn } +#if (OPENSSL_VERSION_NUMBER < 0x10100001L && !defined LIBRESSL_VERSION_NUMBER) + RSA * ngx_ssl_rsa512_key_callback(ngx_ssl_conn_t *ssl_conn, int is_export, int key_length) @@ -1126,7 +1128,7 @@ ngx_ssl_rsa512_key_callback(ngx_ssl_conn return NULL; } -#if (OPENSSL_VERSION_NUMBER < 0x10100003L && !defined OPENSSL_NO_DEPRECATED) +#ifndef OPENSSL_NO_DEPRECATED if (key == NULL) { key = RSA_generate_key(512, RSA_F4, NULL, NULL); @@ -1137,6 +1139,8 @@ ngx_ssl_rsa512_key_callback(ngx_ssl_conn return key; } +#endif + ngx_array_t * ngx_ssl_read_password_file(ngx_conf_t *cf, ngx_str_t *file) diff -r efbcecbe5805 -r ec2798eb3648 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Wed Aug 04 21:27:51 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Aug 10 23:42:59 2021 +0300 @@ -196,8 +196,10 @@ ngx_int_t ngx_ssl_ocsp_validate(ngx_conn ngx_int_t ngx_ssl_ocsp_get_status(ngx_connection_t *c, const char **s); void ngx_ssl_ocsp_cleanup(ngx_connection_t *c); ngx_int_t ngx_ssl_ocsp_cache_init(ngx_shm_zone_t *shm_zone, void *data); +#if (OPENSSL_VERSION_NUMBER < 0x10100001L && !defined LIBRESSL_VERSION_NUMBER) RSA *ngx_ssl_rsa512_key_callback(ngx_ssl_conn_t *ssl_conn, int is_export, int key_length); +#endif ngx_array_t *ngx_ssl_read_password_file(ngx_conf_t *cf, ngx_str_t *file); ngx_array_t *ngx_ssl_preserve_passwords(ngx_conf_t *cf, ngx_array_t *passwords); From mdounin at mdounin.ru Tue Nov 16 14:51:02 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:02 +0000 Subject: [nginx] SSL: SSL_get_peer_certificate() is deprecated in 
OpenSSL 3.0. Message-ID: details: https://hg.nginx.org/nginx/rev/c7c6a87c068d branches: stable-1.20 changeset: 7961:c7c6a87c068d user: Sergey Kandaurov date: Tue Aug 10 23:43:16 2021 +0300 description: SSL: SSL_get_peer_certificate() is deprecated in OpenSSL 3.0. Switch to SSL_get1_peer_certificate() when building with OpenSSL 3.0 and OPENSSL_NO_DEPRECATED defined. diffstat: src/event/ngx_event_openssl.h | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (15 lines): diff -r ec2798eb3648 -r c7c6a87c068d src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Aug 10 23:42:59 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 @@ -64,6 +64,11 @@ #endif +#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && !defined SSL_get_peer_certificate) +#define SSL_get_peer_certificate(s) SSL_get1_peer_certificate(s) +#endif + + typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t; From mdounin at mdounin.ru Tue Nov 16 14:51:05 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:05 +0000 Subject: [nginx] SSL: using SSL_CTX_set0_tmp_dh_pkey() with OpenSSL 3.0 in dhparam. Message-ID: details: https://hg.nginx.org/nginx/rev/ddfad46492b5 branches: stable-1.20 changeset: 7962:ddfad46492b5 user: Sergey Kandaurov date: Tue Aug 10 23:43:16 2021 +0300 description: SSL: using SSL_CTX_set0_tmp_dh_pkey() with OpenSSL 3.0 in dhparam. 
Using PEM_read_bio_DHparams() and SSL_CTX_set_tmp_dh() is deprecated as part of deprecating the low level DH functions in favor of EVP_PKEY: https://git.openssl.org/?p=openssl.git;a=commitdiff;h=163f6dc diffstat: src/event/ngx_event_openssl.c | 32 +++++++++++++++++++++++++++++++- 1 files changed, 31 insertions(+), 1 deletions(-) diffs (56 lines): diff -r c7c6a87c068d -r ddfad46492b5 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 @@ -1354,7 +1354,6 @@ ngx_ssl_passwords_cleanup(void *data) ngx_int_t ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file) { - DH *dh; BIO *bio; if (file->len == 0) { @@ -1372,6 +1371,10 @@ ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_ return NGX_ERROR; } +#ifdef SSL_CTX_set_tmp_dh + { + DH *dh; + dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL); if (dh == NULL) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, @@ -1389,6 +1392,33 @@ ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_ } DH_free(dh); + } +#else + { + EVP_PKEY *dh; + + /* + * PEM_read_bio_DHparams() and SSL_CTX_set_tmp_dh() + * are deprecated in OpenSSL 3.0 + */ + + dh = PEM_read_bio_Parameters(bio, NULL); + if (dh == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "PEM_read_bio_Parameters(\"%s\") failed", file->data); + BIO_free(bio); + return NGX_ERROR; + } + + if (SSL_CTX_set0_tmp_dh_pkey(ssl->ctx, dh) != 1) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_set0_tmp_dh_pkey(\%s\") failed", file->data); + BIO_free(bio); + return NGX_ERROR; + } + } +#endif + BIO_free(bio); return NGX_OK; From mdounin at mdounin.ru Tue Nov 16 14:51:08 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:08 +0000 Subject: [nginx] SSL: ERR_peek_error_line_data() compatibility with OpenSSL 3.0. 
Message-ID: details: https://hg.nginx.org/nginx/rev/9b9299494238 branches: stable-1.20 changeset: 7963:9b9299494238 user: Sergey Kandaurov date: Tue Aug 10 23:43:16 2021 +0300 description: SSL: ERR_peek_error_line_data() compatibility with OpenSSL 3.0. ERR_peek_error_line_data() was deprecated in favour of ERR_peek_error_all(). Here we use the ERR_peek_error_data() helper to pass only used arguments. diffstat: src/event/ngx_event_openssl.c | 2 +- src/event/ngx_event_openssl.h | 5 +++++ 2 files changed, 6 insertions(+), 1 deletions(-) diffs (27 lines): diff -r ddfad46492b5 -r 9b9299494238 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 @@ -3280,7 +3280,7 @@ ngx_ssl_error(ngx_uint_t level, ngx_log_ for ( ;; ) { - n = ERR_peek_error_line_data(NULL, NULL, &data, &flags); + n = ERR_peek_error_data(&data, &flags); if (n == 0) { break; diff -r ddfad46492b5 -r 9b9299494238 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 @@ -69,6 +69,11 @@ #endif +#if (OPENSSL_VERSION_NUMBER < 0x30000000L && !defined ERR_peek_error_data) +#define ERR_peek_error_data(d, f) ERR_peek_error_line_data(NULL, NULL, d, f) +#endif + + typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t; From mdounin at mdounin.ru Tue Nov 16 14:51:11 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:11 +0000 Subject: [nginx] SSL: silenced warnings when building with OpenSSL 3.0. Message-ID: details: https://hg.nginx.org/nginx/rev/7b79f0944197 branches: stable-1.20 changeset: 7964:7b79f0944197 user: Sergey Kandaurov date: Tue Aug 10 23:43:16 2021 +0300 description: SSL: silenced warnings when building with OpenSSL 3.0. The OPENSSL_SUPPRESS_DEPRECATED macro is used to suppress deprecation warnings. 
This covers Session Tickets keys, SSL Engine, DH low level API for DHE ciphers. Unlike OPENSSL_API_COMPAT, it works well with OpenSSL built with no-deprecated. In particular, it doesn't unhide various macros in OpenSSL includes, which are meant to be hidden under OPENSSL_NO_DEPRECATED. diffstat: src/event/ngx_event_openssl.h | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diffs (12 lines): diff -r 9b9299494238 -r 7b79f0944197 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 @@ -12,6 +12,8 @@ #include #include +#define OPENSSL_SUPPRESS_DEPRECATED + #include #include #include From mdounin at mdounin.ru Tue Nov 16 14:51:14 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:14 +0000 Subject: [nginx] SSL: use of the SSL_OP_IGNORE_UNEXPECTED_EOF option. Message-ID: details: https://hg.nginx.org/nginx/rev/f2bbbc0ccdfb branches: stable-1.20 changeset: 7965:f2bbbc0ccdfb user: Sergey Kandaurov date: Tue Aug 10 23:43:17 2021 +0300 description: SSL: use of the SSL_OP_IGNORE_UNEXPECTED_EOF option. A new behaviour was introduced in OpenSSL 1.1.1e, when a peer does not send close_notify before closing the connection. Previously, it was to return SSL_ERROR_SYSCALL with errno 0, known since at least OpenSSL 0.9.7, and is handled gracefully in nginx. Now it returns SSL_ERROR_SSL with a distinct reason SSL_R_UNEXPECTED_EOF_WHILE_READING ("unexpected eof while reading"). This leads to critical errors seen in nginx within various routines such as SSL_do_handshake(), SSL_read(), SSL_shutdown(). The behaviour was restored in OpenSSL 1.1.1f, but presents in OpenSSL 3.0 by default. 
Use of the SSL_OP_IGNORE_UNEXPECTED_EOF option added in OpenSSL 3.0 allows to set a compatible behaviour to return SSL_ERROR_ZERO_RETURN: https://git.openssl.org/?p=openssl.git;a=commitdiff;h=09b90e0 See for additional details: https://github.com/openssl/openssl/issues/11381 diffstat: src/event/ngx_event_openssl.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diffs (14 lines): diff -r 7b79f0944197 -r f2bbbc0ccdfb src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:43:17 2021 +0300 @@ -378,6 +378,10 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_ SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_CLIENT_RENEGOTIATION); #endif +#ifdef SSL_OP_IGNORE_UNEXPECTED_EOF + SSL_CTX_set_options(ssl->ctx, SSL_OP_IGNORE_UNEXPECTED_EOF); +#endif + #ifdef SSL_MODE_RELEASE_BUFFERS SSL_CTX_set_mode(ssl->ctx, SSL_MODE_RELEASE_BUFFERS); #endif From mdounin at mdounin.ru Tue Nov 16 14:51:16 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:16 +0000 Subject: [nginx] Upstream: fixed timeouts with gRPC, SSL and select (ticket #2229). Message-ID: details: https://hg.nginx.org/nginx/rev/5d09596909c6 branches: stable-1.20 changeset: 7966:5d09596909c6 user: Maxim Dounin date: Fri Aug 20 03:53:56 2021 +0300 description: Upstream: fixed timeouts with gRPC, SSL and select (ticket #2229). With SSL it is possible that an established connection is ready for reading after the handshake. Further, events might be already disabled in case of level-triggered event methods. If this happens and ngx_http_upstream_send_request() blocks waiting for some data from the upstream, such as flow control in case of gRPC, the connection will time out due to no read events on the upstream connection. Fix is to explicitly check the c->read->ready flag if sending request blocks and post a read event if it is set. 
Note that while it is possible to modify ngx_ssl_handshake() to keep read events active, this won't completely resolve the issue, since there can be data already received during the SSL handshake (see 573bd30e46b4). diffstat: src/http/ngx_http_upstream.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diffs (14 lines): diff -r f2bbbc0ccdfb -r 5d09596909c6 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Tue Aug 10 23:43:17 2021 +0300 +++ b/src/http/ngx_http_upstream.c Fri Aug 20 03:53:56 2021 +0300 @@ -2062,6 +2062,10 @@ ngx_http_upstream_send_request(ngx_http_ c->tcp_nopush = NGX_TCP_NOPUSH_UNSET; } + if (c->read->ready) { + ngx_post_event(c->read, &ngx_posted_events); + } + return; } From mdounin at mdounin.ru Tue Nov 16 14:51:19 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:19 +0000 Subject: [nginx] Updated OpenSSL used for win32 builds. Message-ID: details: https://hg.nginx.org/nginx/rev/5de6a960632e branches: stable-1.20 changeset: 7967:5de6a960632e user: Maxim Dounin date: Tue Aug 31 17:54:54 2021 +0300 description: Updated OpenSSL used for win32 builds. diffstat: misc/GNUmakefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 5d09596909c6 -r 5de6a960632e misc/GNUmakefile --- a/misc/GNUmakefile Fri Aug 20 03:53:56 2021 +0300 +++ b/misc/GNUmakefile Tue Aug 31 17:54:54 2021 +0300 @@ -6,7 +6,7 @@ TEMP = tmp CC = cl OBJS = objs.msvc8 -OPENSSL = openssl-1.1.1k +OPENSSL = openssl-1.1.1l ZLIB = zlib-1.2.11 PCRE = pcre-8.44 From mdounin at mdounin.ru Tue Nov 16 14:51:22 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:22 +0000 Subject: [nginx] Fixed $content_length cacheability with chunked (ticket #2252). Message-ID: details: https://hg.nginx.org/nginx/rev/5354bf552520 branches: stable-1.20 changeset: 7968:5354bf552520 user: Maxim Dounin date: Wed Oct 06 18:01:42 2021 +0300 description: Fixed $content_length cacheability with chunked (ticket #2252). 
diffstat: src/http/ngx_http_variables.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diffs (14 lines): diff -r 5de6a960632e -r 5354bf552520 src/http/ngx_http_variables.c --- a/src/http/ngx_http_variables.c Tue Aug 31 17:54:54 2021 +0300 +++ b/src/http/ngx_http_variables.c Wed Oct 06 18:01:42 2021 +0300 @@ -1179,6 +1179,10 @@ ngx_http_variable_content_length(ngx_htt v->no_cacheable = 0; v->not_found = 0; + } else if (r->headers_in.chunked) { + v->not_found = 1; + v->no_cacheable = 1; + } else { v->not_found = 1; } From mdounin at mdounin.ru Tue Nov 16 14:51:25 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:25 +0000 Subject: [nginx] Changed ngx_chain_update_chains() to test tag first (ticket #2248). Message-ID: details: https://hg.nginx.org/nginx/rev/84c60a3cd12a branches: stable-1.20 changeset: 7969:84c60a3cd12a user: Maxim Dounin date: Sat Oct 30 02:39:19 2021 +0300 description: Changed ngx_chain_update_chains() to test tag first (ticket #2248). Without this change, aio used with HTTP/2 can result in connection hang, as observed with "aio threads; aio_write on;" and proxying (ticket #2248). The problem is that HTTP/2 updates buffers outside of the output filters (notably, marks them as sent), and then posts a write event to call output filters. If a filter does not call the next one for some reason (for example, because of an AIO operation in progress), this might result in a state when the owner of a buffer already called ngx_chain_update_chains() and can reuse the buffer, while the same buffer is still sitting in the busy chain of some other filter. In the particular case a buffer was sitting in output chain's ctx->busy, and was reused by event pipe. Output chain's ctx->busy was permanently blocked by it, and this resulted in connection hang. Fix is to change ngx_chain_update_chains() to skip buffers from other modules unconditionally, without trying to wait for these buffers to become empty. 
diffstat: src/core/ngx_buf.c | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diffs (24 lines): diff -r 5354bf552520 -r 84c60a3cd12a src/core/ngx_buf.c --- a/src/core/ngx_buf.c Wed Oct 06 18:01:42 2021 +0300 +++ b/src/core/ngx_buf.c Sat Oct 30 02:39:19 2021 +0300 @@ -203,16 +203,16 @@ ngx_chain_update_chains(ngx_pool_t *p, n while (*busy) { cl = *busy; - if (ngx_buf_size(cl->buf) != 0) { - break; - } - if (cl->buf->tag != tag) { *busy = cl->next; ngx_free_chain(p, cl); continue; } + if (ngx_buf_size(cl->buf) != 0) { + break; + } + cl->buf->pos = cl->buf->start; cl->buf->last = cl->buf->start; From mdounin at mdounin.ru Tue Nov 16 14:51:28 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:28 +0000 Subject: [nginx] nginx-1.20.2-RELEASE Message-ID: details: https://hg.nginx.org/nginx/rev/50777834e8c2 branches: stable-1.20 changeset: 7970:50777834e8c2 user: Maxim Dounin date: Tue Nov 16 17:44:02 2021 +0300 description: nginx-1.20.2-RELEASE diffstat: docs/xml/nginx/changes.xml | 78 ++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 78 insertions(+), 0 deletions(-) diffs (88 lines): diff -r 84c60a3cd12a -r 50777834e8c2 docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml Sat Oct 30 02:39:19 2021 +0300 +++ b/docs/xml/nginx/changes.xml Tue Nov 16 17:44:02 2021 +0300 @@ -5,6 +5,84 @@ + + + + +????????????? ? OpenSSL 3.0. + + +OpenSSL 3.0 compatibility. + + + + + +SSL-?????????? ????? ???? ??????? ??? ?????? ? ???; +?????? ????????? ? 1.19.5. + + +SSL variables might be empty when used in logs; +the bug had appeared in 1.19.5. + + + + + +keepalive-?????????? ? gRPC-????????? ????? ?? ??????????? +????? ????????? GOAWAY-??????. + + +keepalive connections with gRPC backends might not be closed +after receiving a GOAWAY frame. + + + + + +SSL-?????????? ? ???????? ? ?????? stream +????? ???????? ????? SSL handshake. + + +backend SSL connections in the stream module +might hang after an SSL handshake. 
+ + + + + +SSL-?????????? ? gRPC-????????? ????? ????????, +???? ?????????????? ?????? select, poll ??? /dev/poll. + + +SSL connections with gRPC backends might hang +if select, poll, or /dev/poll methods were used. + + + + + +? ?????????? $content_length ??? ????????????? chunked transfer encoding. + + +in the $content_length variable when using chunked transfer encoding. + + + + + +??? ????????????? HTTP/2 ? ????????? aio_write +??????? ????? ????????. + + +requests might hang +when using HTTP/2 and the "aio_write" directive. + + + + + + From mdounin at mdounin.ru Tue Nov 16 14:51:31 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Nov 2021 14:51:31 +0000 Subject: [nginx] release-1.20.2 tag Message-ID: details: https://hg.nginx.org/nginx/rev/b5c87e0e57ef branches: stable-1.20 changeset: 7971:b5c87e0e57ef user: Maxim Dounin date: Tue Nov 16 17:44:02 2021 +0300 description: release-1.20.2 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r 50777834e8c2 -r b5c87e0e57ef .hgtags --- a/.hgtags Tue Nov 16 17:44:02 2021 +0300 +++ b/.hgtags Tue Nov 16 17:44:02 2021 +0300 @@ -462,3 +462,4 @@ da571b8eaf8f30f36c43b3c9b25e01e31f47149c ffcbb9980ee2bad27b4d7b1cd680b14ff47b29aa release-1.19.10 1df854b66534be699b0b9bbe337d4f799ebf5d13 release-1.20.0 39a422cf9876828e917c3b75747a6c276100976d release-1.20.1 +50777834e8c254c640fad8f3da7e6a73b088de3f release-1.20.2 From arut at nginx.com Wed Nov 17 07:12:57 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 17 Nov 2021 10:12:57 +0300 Subject: [PATCH 2 of 3] HTTP/3: allowed QUIC stream connection reuse In-Reply-To: References: <8ae53c592c719af4f3ba.1634561309@arut-laptop> <20211110214218.wq7bozp3ppy72kgf@Romans-MacBook-Pro.local> <31C3455F-41FD-4474-8F3C-0CCEAE8D13CE@nginx.com> <20211115123325.6azp4uve7qfeedna@Romans-MacBook-Pro.local> Message-ID: <20211117071257.cjaxjx3fkxlayaxq@Romans-MacBook-Pro.local> On Tue, Nov 16, 2021 at 12:18:47PM +0300, Vladimir Homutov 
wrote: > On Mon, Nov 15, 2021 at 03:33:25PM +0300, Roman Arutyunyan wrote: > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1636646820 -10800 > > # Thu Nov 11 19:07:00 2021 +0300 > > # Branch quic > > # Node ID 801103b7645d93d0d06f63019e54d9e76f1baa6c > > # Parent d2c193aa84800da00314f1af72ae722d964445a4 > > QUIC: reject streams which we could not create. > > > > The reasons why a stream may not be created by server currently include hitting > > worker_connections limit and memory allocation error. Previously in these > > cases the entire QUIC connection was closed and all its streams were shut down. > > Now the new stream is rejected and existing streams continue working. > > > > To reject an HTTP/3 request stream, RESET_STREAM and STOP_SENDING with > > H3_REQUEST_REJECTED error code are sent to client. HTTP/3 uni streams and > > Stream streams are not rejected. > > > > diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h > > --- a/src/event/quic/ngx_event_quic.h > > +++ b/src/event/quic/ngx_event_quic.h > > @@ -61,6 +61,9 @@ typedef struct { > > ngx_flag_t retry; > > ngx_flag_t gso_enabled; > > ngx_str_t host_key; > > + ngx_int_t close_stream_code; > > + ngx_int_t reject_uni_stream_code; > > + ngx_int_t reject_bidi_stream_code; > > i would prefer stream_close_code and stream_reject_code_uni|bidi, > a bit similar to transport parameter naming like > 'initial_max_stream_data_bidi_local', YMMV OK, let's do this. 
> > u_char av_token_key[NGX_QUIC_AV_KEY_LEN]; > > u_char sr_token_key[NGX_QUIC_SR_KEY_LEN]; > > } ngx_quic_conf_t; > > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > > --- a/src/event/quic/ngx_event_quic_streams.c > > +++ b/src/event/quic/ngx_event_quic_streams.c > > @@ -15,6 +15,7 @@ > > > > static ngx_quic_stream_t *ngx_quic_create_client_stream(ngx_connection_t *c, > > uint64_t id); > > +static ngx_int_t ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id); > > static ngx_int_t ngx_quic_init_stream(ngx_quic_stream_t *qs); > > static void ngx_quic_init_streams_handler(ngx_connection_t *c); > > static ngx_quic_stream_t *ngx_quic_create_stream(ngx_connection_t *c, > > @@ -377,8 +378,13 @@ ngx_quic_create_client_stream(ngx_connec > > for ( /* void */ ; min_id < id; min_id += 0x04) { > > > > qs = ngx_quic_create_stream(c, min_id); > > + > > if (qs == NULL) { > > - return NULL; > > + if (ngx_quic_reject_stream(c, min_id) != NGX_OK) { > > + return NULL; > > + } > > + > > + continue; > > } > > > > if (ngx_quic_init_stream(qs) != NGX_OK) { > > @@ -390,7 +396,66 @@ ngx_quic_create_client_stream(ngx_connec > > } > > } > > > > - return ngx_quic_create_stream(c, id); > > + qs = ngx_quic_create_stream(c, id); > > + > > + if (qs == NULL) { > > + if (ngx_quic_reject_stream(c, id) != NGX_OK) { > > + return NULL; > > + } > > + > > + return NGX_QUIC_STREAM_GONE; > > + } > > + > > + return qs; > > +} > > + > > + > > +static ngx_int_t > > +ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id) > > +{ > > + uint64_t code; > > + ngx_quic_frame_t *frame; > > + ngx_quic_connection_t *qc; > > + > > + qc = ngx_quic_get_connection(c); > > + > > + code = (id & NGX_QUIC_STREAM_UNIDIRECTIONAL) > > + ? 
qc->conf->reject_uni_stream_code > > + : qc->conf->reject_bidi_stream_code; > > + > > + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, > > + "quic stream id:0x%xL reject err:0x%xL", id, code); > > Here we may decline stream rejection, but have already logged it. > I suggest putting debug below 'code == 0' test. Zero code still carries some information. If it looks misleading, then yes, let's move it below. > > + if (code == 0) { > > + return NGX_DECLINED; > > + } > > + > > + frame = ngx_quic_alloc_frame(c); > > + if (frame == NULL) { > > + return NGX_ERROR; > > + } > > + > > + frame->level = ssl_encryption_application; > > + frame->type = NGX_QUIC_FT_RESET_STREAM; > > + frame->u.reset_stream.id = id; > > + frame->u.reset_stream.error_code = code; > > + frame->u.reset_stream.final_size = 0; > > + > > + ngx_quic_queue_frame(qc, frame); > > + > > + frame = ngx_quic_alloc_frame(c); > > + if (frame == NULL) { > > + return NGX_ERROR; > > + } > > + > > + frame->level = ssl_encryption_application; > > + frame->type = NGX_QUIC_FT_STOP_SENDING; > > + frame->u.stop_sending.id = id; > > + frame->u.stop_sending.error_code = code; > > + > > + ngx_quic_queue_frame(qc, frame); > > + > > + return NGX_OK; > > } > > > > > > @@ -866,7 +931,9 @@ ngx_quic_stream_cleanup_handler(void *da > > if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0 > > || (qs->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) == 0) > > { > > - if (!c->read->pending_eof && !c->read->error) { > > + if (!c->read->pending_eof && !c->read->error > > + && qc->conf->close_stream_code) > > + { > > frame = ngx_quic_alloc_frame(pc); > > if (frame == NULL) { > > goto done; > > @@ -875,7 +942,7 @@ ngx_quic_stream_cleanup_handler(void *da > > frame->level = ssl_encryption_application; > > frame->type = NGX_QUIC_FT_STOP_SENDING; > > frame->u.stop_sending.id = qs->id; > > - frame->u.stop_sending.error_code = 0x100; /* HTTP/3 no error */ > > + frame->u.stop_sending.error_code = qc->conf->close_stream_code; > > > > 
ngx_quic_queue_frame(qc, frame); > > } > > diff --git a/src/http/modules/ngx_http_quic_module.c b/src/http/modules/ngx_http_quic_module.c > > --- a/src/http/modules/ngx_http_quic_module.c > > +++ b/src/http/modules/ngx_http_quic_module.c > > @@ -314,6 +314,7 @@ ngx_http_quic_create_srv_conf(ngx_conf_t > > * conf->tp.sr_enabled = 0 > > * conf->tp.preferred_address = NULL > > * conf->host_key = { 0, NULL } > > + * cong->reject_uni_stream_code = 0; > > */ > > > > conf->tp.max_idle_timeout = NGX_CONF_UNSET_MSEC; > > @@ -331,6 +332,8 @@ ngx_http_quic_create_srv_conf(ngx_conf_t > > > > conf->retry = NGX_CONF_UNSET; > > conf->gso_enabled = NGX_CONF_UNSET; > > + conf->close_stream_code = NGX_HTTP_V3_ERR_NO_ERROR; > > + conf->reject_bidi_stream_code = NGX_HTTP_V3_ERR_REQUEST_REJECTED; > > > > return conf; > > } > > diff --git a/src/stream/ngx_stream_quic_module.c b/src/stream/ngx_stream_quic_module.c > > --- a/src/stream/ngx_stream_quic_module.c > > +++ b/src/stream/ngx_stream_quic_module.c > > @@ -241,6 +241,9 @@ ngx_stream_quic_create_srv_conf(ngx_conf > > * conf->tp.retry_scid = { 0, NULL }; > > * conf->tp.preferred_address = NULL > > * conf->host_key = { 0, NULL } > > + * conf->close_stream_code = 0; > > + * conf->reject_uni_stream_code = 0; > > + * conf->reject_bidi_stream_code = 0; > > */ > > > > conf->tp.max_idle_timeout = NGX_CONF_UNSET_MSEC; > > > Overal patch looks good to me > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1636646820 -10800 # Thu Nov 11 19:07:00 2021 +0300 # Branch quic # Node ID 4ad8fc79cb33257c928a9098a87324b350576551 # Parent d2c193aa84800da00314f1af72ae722d964445a4 QUIC: reject streams which we could not create. 
The reasons why a stream may not be created by server currently include hitting worker_connections limit and memory allocation error. Previously in these cases the entire QUIC connection was closed and all its streams were shut down. Now the new stream is rejected and existing streams continue working. To reject an HTTP/3 request stream, RESET_STREAM and STOP_SENDING with H3_REQUEST_REJECTED error code are sent to client. HTTP/3 uni streams and Stream streams are not rejected. diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h --- a/src/event/quic/ngx_event_quic.h +++ b/src/event/quic/ngx_event_quic.h @@ -61,6 +61,9 @@ typedef struct { ngx_flag_t retry; ngx_flag_t gso_enabled; ngx_str_t host_key; + ngx_int_t stream_close_code; + ngx_int_t stream_reject_code_uni; + ngx_int_t stream_reject_code_bidi; u_char av_token_key[NGX_QUIC_AV_KEY_LEN]; u_char sr_token_key[NGX_QUIC_SR_KEY_LEN]; } ngx_quic_conf_t; diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -15,6 +15,7 @@ static ngx_quic_stream_t *ngx_quic_create_client_stream(ngx_connection_t *c, uint64_t id); +static ngx_int_t ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id); static ngx_int_t ngx_quic_init_stream(ngx_quic_stream_t *qs); static void ngx_quic_init_streams_handler(ngx_connection_t *c); static ngx_quic_stream_t *ngx_quic_create_stream(ngx_connection_t *c, @@ -377,8 +378,13 @@ ngx_quic_create_client_stream(ngx_connec for ( /* void */ ; min_id < id; min_id += 0x04) { qs = ngx_quic_create_stream(c, min_id); + if (qs == NULL) { - return NULL; + if (ngx_quic_reject_stream(c, min_id) != NGX_OK) { + return NULL; + } + + continue; } if (ngx_quic_init_stream(qs) != NGX_OK) { @@ -390,7 +396,66 @@ ngx_quic_create_client_stream(ngx_connec } } - return ngx_quic_create_stream(c, id); + qs = ngx_quic_create_stream(c, id); + + if (qs == NULL) { + if 
(ngx_quic_reject_stream(c, id) != NGX_OK) { + return NULL; + } + + return NGX_QUIC_STREAM_GONE; + } + + return qs; +} + + +static ngx_int_t +ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id) +{ + uint64_t code; + ngx_quic_frame_t *frame; + ngx_quic_connection_t *qc; + + qc = ngx_quic_get_connection(c); + + code = (id & NGX_QUIC_STREAM_UNIDIRECTIONAL) + ? qc->conf->stream_reject_code_uni + : qc->conf->stream_reject_code_bidi; + + if (code == 0) { + return NGX_DECLINED; + } + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic stream id:0x%xL reject err:0x%xL", id, code); + + frame = ngx_quic_alloc_frame(c); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_RESET_STREAM; + frame->u.reset_stream.id = id; + frame->u.reset_stream.error_code = code; + frame->u.reset_stream.final_size = 0; + + ngx_quic_queue_frame(qc, frame); + + frame = ngx_quic_alloc_frame(c); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_STOP_SENDING; + frame->u.stop_sending.id = id; + frame->u.stop_sending.error_code = code; + + ngx_quic_queue_frame(qc, frame); + + return NGX_OK; } @@ -866,7 +931,9 @@ ngx_quic_stream_cleanup_handler(void *da if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0 || (qs->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) == 0) { - if (!c->read->pending_eof && !c->read->error) { + if (!c->read->pending_eof && !c->read->error + && qc->conf->stream_close_code) + { frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { goto done; @@ -875,7 +942,7 @@ ngx_quic_stream_cleanup_handler(void *da frame->level = ssl_encryption_application; frame->type = NGX_QUIC_FT_STOP_SENDING; frame->u.stop_sending.id = qs->id; - frame->u.stop_sending.error_code = 0x100; /* HTTP/3 no error */ + frame->u.stop_sending.error_code = qc->conf->stream_close_code; ngx_quic_queue_frame(qc, frame); } diff --git a/src/http/modules/ngx_http_quic_module.c 
b/src/http/modules/ngx_http_quic_module.c --- a/src/http/modules/ngx_http_quic_module.c +++ b/src/http/modules/ngx_http_quic_module.c @@ -314,6 +314,7 @@ ngx_http_quic_create_srv_conf(ngx_conf_t * conf->tp.sr_enabled = 0 * conf->tp.preferred_address = NULL * conf->host_key = { 0, NULL } + * cong->stream_reject_code_uni = 0; */ conf->tp.max_idle_timeout = NGX_CONF_UNSET_MSEC; @@ -331,6 +332,8 @@ ngx_http_quic_create_srv_conf(ngx_conf_t conf->retry = NGX_CONF_UNSET; conf->gso_enabled = NGX_CONF_UNSET; + conf->stream_close_code = NGX_HTTP_V3_ERR_NO_ERROR; + conf->stream_reject_code_bidi = NGX_HTTP_V3_ERR_REQUEST_REJECTED; return conf; } diff --git a/src/stream/ngx_stream_quic_module.c b/src/stream/ngx_stream_quic_module.c --- a/src/stream/ngx_stream_quic_module.c +++ b/src/stream/ngx_stream_quic_module.c @@ -241,6 +241,9 @@ ngx_stream_quic_create_srv_conf(ngx_conf * conf->tp.retry_scid = { 0, NULL }; * conf->tp.preferred_address = NULL * conf->host_key = { 0, NULL } + * conf->stream_close_code = 0; + * conf->stream_reject_code_uni = 0; + * conf->stream_reject_code_bidi= 0; */ conf->tp.max_idle_timeout = NGX_CONF_UNSET_MSEC; From arut at nginx.com Wed Nov 17 07:31:00 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 17 Nov 2021 10:31:00 +0300 Subject: [PATCH 0 of 2] QUIC flow control update Message-ID: The series improves flow control updates. 
- the first patch adds stream flow control update on STREAM_DATA_BLOCKED - the second patch adds DATA_BLOCKED support and does similar things for it From arut at nginx.com Wed Nov 17 07:31:01 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 17 Nov 2021 10:31:01 +0300 Subject: [PATCH 1 of 2] QUIC: update stream flow control credit on STREAM_DATA_BLOCKED In-Reply-To: References: Message-ID: <4e3a7fc0533192f51a01.1637134261@arut-laptop> # HG changeset patch # User Roman Arutyunyan # Date 1637133234 -10800 # Wed Nov 17 10:13:54 2021 +0300 # Branch quic # Node ID 4e3a7fc0533192f51a01042a1e9dd2b595881420 # Parent 4ad8fc79cb33257c928a9098a87324b350576551 QUIC: update stream flow control credit on STREAM_DATA_BLOCKED. Previously, after receiving STREAM_DATA_BLOCKED, current flow control limit was sent to client. Now, if the limit can be updated to the full window size, it is updated and the new value is sent to client, otherwise nothing is sent. The change lets client update flow control credit on demand. Also, it saves traffic by not sending MAX_STREAM_DATA with the same value twice. 
diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -31,6 +31,7 @@ static size_t ngx_quic_max_stream_flow(n static void ngx_quic_stream_cleanup_handler(void *data); static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); +static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); ngx_connection_t * @@ -1190,8 +1191,6 @@ ngx_int_t ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f) { - uint64_t limit; - ngx_quic_frame_t *frame; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -1217,29 +1216,10 @@ ngx_quic_handle_stream_data_blocked_fram return NGX_OK; } - limit = qs->recv_max_data; - - if (ngx_quic_init_stream(qs) != NGX_OK) { - return NGX_ERROR; - } - - } else { - limit = qs->recv_max_data; + return ngx_quic_init_stream(qs); } - frame = ngx_quic_alloc_frame(c); - if (frame == NULL) { - return NGX_ERROR; - } - - frame->level = pkt->level; - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; - frame->u.max_stream_data.id = f->id; - frame->u.max_stream_data.limit = limit; - - ngx_quic_queue_frame(qc, frame); - - return NGX_OK; + return ngx_quic_update_max_stream_data(qs->connection); } @@ -1587,22 +1567,9 @@ ngx_quic_update_flow(ngx_connection_t *c if (!rev->pending_eof && !rev->error && qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) { - qs->recv_max_data = qs->recv_offset + qs->recv_window; - - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic flow update msd:%uL", qs->recv_max_data); - - frame = ngx_quic_alloc_frame(pc); - if (frame == NULL) { + if (ngx_quic_update_max_stream_data(c) != NGX_OK) { return NGX_ERROR; } - - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; - frame->u.max_stream_data.id = 
qs->id; - frame->u.max_stream_data.limit = qs->recv_max_data; - - ngx_quic_queue_frame(qc, frame); } qc->streams.recv_offset += len; @@ -1632,6 +1599,44 @@ ngx_quic_update_flow(ngx_connection_t *c } +static ngx_int_t +ngx_quic_update_max_stream_data(ngx_connection_t *c) +{ + uint64_t recv_max_data; + ngx_quic_frame_t *frame; + ngx_quic_stream_t *qs; + ngx_quic_connection_t *qc; + + qs = c->quic; + qc = ngx_quic_get_connection(qs->parent); + + recv_max_data = qs->recv_offset + qs->recv_window; + + if (qs->recv_max_data == recv_max_data) { + return NGX_OK; + } + + qs->recv_max_data = recv_max_data; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic flow update msd:%uL", qs->recv_max_data); + + frame = ngx_quic_alloc_frame(c); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; + frame->u.max_stream_data.id = qs->id; + frame->u.max_stream_data.limit = qs->recv_max_data; + + ngx_quic_queue_frame(qc, frame); + + return NGX_OK; +} + + ngx_int_t ngx_quic_handle_read_event(ngx_event_t *rev, ngx_uint_t flags) { From arut at nginx.com Wed Nov 17 07:31:02 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 17 Nov 2021 10:31:02 +0300 Subject: [PATCH 2 of 2] QUIC: handle DATA_BLOCKED frame from client In-Reply-To: References: Message-ID: <0fb2613594f6bd8dd8f0.1637134262@arut-laptop> # HG changeset patch # User Roman Arutyunyan # Date 1637086755 -10800 # Tue Nov 16 21:19:15 2021 +0300 # Branch quic # Node ID 0fb2613594f6bd8dd8f07a30c69900866b573158 # Parent 4e3a7fc0533192f51a01042a1e9dd2b595881420 QUIC: handle DATA_BLOCKED frame from client. Previously the frame was not handled and connection was closed with an error. Now, after receiving this frame, global flow control is updated and new flow control credit is sent to client. 
diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c --- a/src/event/quic/ngx_event_quic.c +++ b/src/event/quic/ngx_event_quic.c @@ -1252,6 +1252,17 @@ ngx_quic_handle_frames(ngx_connection_t break; + case NGX_QUIC_FT_DATA_BLOCKED: + + if (ngx_quic_handle_data_blocked_frame(c, pkt, + &frame.u.data_blocked) + != NGX_OK) + { + return NGX_ERROR; + } + + break; + case NGX_QUIC_FT_STREAM_DATA_BLOCKED: if (ngx_quic_handle_stream_data_blocked_frame(c, pkt, diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -32,6 +32,7 @@ static void ngx_quic_stream_cleanup_hand static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); +static ngx_int_t ngx_quic_update_max_data(ngx_connection_t *c); ngx_connection_t * @@ -1188,6 +1189,14 @@ ngx_quic_handle_streams_blocked_frame(ng ngx_int_t +ngx_quic_handle_data_blocked_frame(ngx_connection_t *c, + ngx_quic_header_t *pkt, ngx_quic_data_blocked_frame_t *f) +{ + return ngx_quic_update_max_data(c); +} + + +ngx_int_t ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f) { @@ -1544,7 +1553,6 @@ ngx_quic_update_flow(ngx_connection_t *c uint64_t len; ngx_event_t *rev; ngx_connection_t *pc; - ngx_quic_frame_t *frame; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -1577,22 +1585,9 @@ ngx_quic_update_flow(ngx_connection_t *c if (qc->streams.recv_max_data <= qc->streams.recv_offset + qc->streams.recv_window / 2) { - qc->streams.recv_max_data = qc->streams.recv_offset - + qc->streams.recv_window; - - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, - "quic flow update md:%uL", qc->streams.recv_max_data); - - frame = ngx_quic_alloc_frame(pc); - if 
(frame == NULL) { + if (ngx_quic_update_max_data(pc) != NGX_OK) { return NGX_ERROR; } - - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_MAX_DATA; - frame->u.max_data.max_data = qc->streams.recv_max_data; - - ngx_quic_queue_frame(qc, frame); } return NGX_OK; @@ -1637,6 +1632,41 @@ ngx_quic_update_max_stream_data(ngx_conn } +static ngx_int_t +ngx_quic_update_max_data(ngx_connection_t *c) +{ + uint64_t recv_max_data; + ngx_quic_frame_t *frame; + ngx_quic_connection_t *qc; + + qc = ngx_quic_get_connection(c); + + recv_max_data = qc->streams.recv_offset + qc->streams.recv_window; + + if (qc->streams.recv_max_data == recv_max_data) { + return NGX_OK; + } + + qc->streams.recv_max_data = recv_max_data; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic flow update md:%uL", qc->streams.recv_max_data); + + frame = ngx_quic_alloc_frame(c); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_MAX_DATA; + frame->u.max_data.max_data = qc->streams.recv_max_data; + + ngx_quic_queue_frame(qc, frame); + + return NGX_OK; +} + + ngx_int_t ngx_quic_handle_read_event(ngx_event_t *rev, ngx_uint_t flags) { diff --git a/src/event/quic/ngx_event_quic_streams.h b/src/event/quic/ngx_event_quic_streams.h --- a/src/event/quic/ngx_event_quic_streams.h +++ b/src/event/quic/ngx_event_quic_streams.h @@ -20,6 +20,8 @@ ngx_int_t ngx_quic_handle_max_data_frame ngx_quic_max_data_frame_t *f); ngx_int_t ngx_quic_handle_streams_blocked_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_streams_blocked_frame_t *f); +ngx_int_t ngx_quic_handle_data_blocked_frame(ngx_connection_t *c, + ngx_quic_header_t *pkt, ngx_quic_data_blocked_frame_t *f); ngx_int_t ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f); ngx_int_t ngx_quic_handle_max_stream_data_frame(ngx_connection_t *c, From arut at nginx.com Wed Nov 17 08:17:27 2021 From: 
arut at nginx.com (Roman Arutyunyan) Date: Wed, 17 Nov 2021 11:17:27 +0300 Subject: [PATCH 1 of 2] QUIC: update stream flow control credit on STREAM_DATA_BLOCKED In-Reply-To: <4e3a7fc0533192f51a01.1637134261@arut-laptop> References: <4e3a7fc0533192f51a01.1637134261@arut-laptop> Message-ID: <20211117081727.bpmho2oiwhljv5h3@Romans-MacBook-Pro.local> On Wed, Nov 17, 2021 at 10:31:01AM +0300, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1637133234 -10800 > # Wed Nov 17 10:13:54 2021 +0300 > # Branch quic > # Node ID 4e3a7fc0533192f51a01042a1e9dd2b595881420 > # Parent 4ad8fc79cb33257c928a9098a87324b350576551 > QUIC: update stream flow control credit on STREAM_DATA_BLOCKED. > > Previously, after receiving STREAM_DATA_BLOCKED, current flow control limit > was sent to client. Now, if the limit can be updated to the full window size, > it is updated and the new value is sent to client, otherwise nothing is sent. > > The change lets client update flow control credit on demand. Also, it saves > traffic by not sending MAX_STREAM_DATA with the same value twice. 
> > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > --- a/src/event/quic/ngx_event_quic_streams.c > +++ b/src/event/quic/ngx_event_quic_streams.c > @@ -31,6 +31,7 @@ static size_t ngx_quic_max_stream_flow(n > static void ngx_quic_stream_cleanup_handler(void *data); > static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); > static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); > +static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); > > > ngx_connection_t * > @@ -1190,8 +1191,6 @@ ngx_int_t > ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, > ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f) > { > - uint64_t limit; > - ngx_quic_frame_t *frame; > ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > @@ -1217,29 +1216,10 @@ ngx_quic_handle_stream_data_blocked_fram > return NGX_OK; > } > > - limit = qs->recv_max_data; > - > - if (ngx_quic_init_stream(qs) != NGX_OK) { > - return NGX_ERROR; > - } > - > - } else { > - limit = qs->recv_max_data; > + return ngx_quic_init_stream(qs); > } > > - frame = ngx_quic_alloc_frame(c); > - if (frame == NULL) { > - return NGX_ERROR; > - } > - > - frame->level = pkt->level; > - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; > - frame->u.max_stream_data.id = f->id; > - frame->u.max_stream_data.limit = limit; > - > - ngx_quic_queue_frame(qc, frame); > - > - return NGX_OK; > + return ngx_quic_update_max_stream_data(qs->connection); > } > > > @@ -1587,22 +1567,9 @@ ngx_quic_update_flow(ngx_connection_t *c > if (!rev->pending_eof && !rev->error > && qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) > { > - qs->recv_max_data = qs->recv_offset + qs->recv_window; > - > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > - "quic flow update msd:%uL", qs->recv_max_data); > - > - frame = ngx_quic_alloc_frame(pc); > - if (frame == NULL) { > + if (ngx_quic_update_max_stream_data(c) != NGX_OK) { > return 
NGX_ERROR; > } > - > - frame->level = ssl_encryption_application; > - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; > - frame->u.max_stream_data.id = qs->id; > - frame->u.max_stream_data.limit = qs->recv_max_data; > - > - ngx_quic_queue_frame(qc, frame); > } > > qc->streams.recv_offset += len; > @@ -1632,6 +1599,44 @@ ngx_quic_update_flow(ngx_connection_t *c > } > > > +static ngx_int_t > +ngx_quic_update_max_stream_data(ngx_connection_t *c) > +{ > + uint64_t recv_max_data; > + ngx_quic_frame_t *frame; > + ngx_quic_stream_t *qs; > + ngx_quic_connection_t *qc; > + > + qs = c->quic; > + qc = ngx_quic_get_connection(qs->parent); > + > + recv_max_data = qs->recv_offset + qs->recv_window; > + > + if (qs->recv_max_data == recv_max_data) { > + return NGX_OK; > + } > + > + qs->recv_max_data = recv_max_data; > + > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > + "quic flow update msd:%uL", qs->recv_max_data); > + > + frame = ngx_quic_alloc_frame(c); The argument should be "pc": frame = ngx_quic_alloc_frame(pc); > + if (frame == NULL) { > + return NGX_ERROR; > + } > + > + frame->level = ssl_encryption_application; > + frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; > + frame->u.max_stream_data.id = qs->id; > + frame->u.max_stream_data.limit = qs->recv_max_data; > + > + ngx_quic_queue_frame(qc, frame); > + > + return NGX_OK; > +} > + > + > ngx_int_t > ngx_quic_handle_read_event(ngx_event_t *rev, ngx_uint_t flags) > { > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From vl at nginx.com Wed Nov 17 08:23:19 2021 From: vl at nginx.com (Vladimir Homutov) Date: Wed, 17 Nov 2021 11:23:19 +0300 Subject: [PATCH 2 of 3] HTTP/3: allowed QUIC stream connection reuse In-Reply-To: <20211117071257.cjaxjx3fkxlayaxq@Romans-MacBook-Pro.local> References: <8ae53c592c719af4f3ba.1634561309@arut-laptop> <20211110214218.wq7bozp3ppy72kgf@Romans-MacBook-Pro.local> 
<31C3455F-41FD-4474-8F3C-0CCEAE8D13CE@nginx.com> <20211115123325.6azp4uve7qfeedna@Romans-MacBook-Pro.local> <20211117071257.cjaxjx3fkxlayaxq@Romans-MacBook-Pro.local> Message-ID: 17.11.2021 10:12, Roman Arutyunyan пишет: > On Tue, Nov 16, 2021 at 12:18:47PM +0300, Vladimir Homutov wrote: >> On Mon, Nov 15, 2021 at 03:33:25PM +0300, Roman Arutyunyan wrote: >>> # HG changeset patch >>> # User Roman Arutyunyan >>> # Date 1636646820 -10800 >>> # Thu Nov 11 19:07:00 2021 +0300 >>> # Branch quic >>> # Node ID 801103b7645d93d0d06f63019e54d9e76f1baa6c >>> # Parent d2c193aa84800da00314f1af72ae722d964445a4 >>> QUIC: reject streams which we could not create. >>> >>> The reasons why a stream may not be created by server currently include hitting >>> worker_connections limit and memory allocation error. Previously in these >>> cases the entire QUIC connection was closed and all its streams were shut down. >>> Now the new stream is rejected and existing streams continue working. >>> >>> To reject an HTTP/3 request stream, RESET_STREAM and STOP_SENDING with >>> H3_REQUEST_REJECTED error code are sent to client. HTTP/3 uni streams and >>> Stream streams are not rejected. >>> >>> diff --git a/src/event/quic/ngx_event_quic.h b/src/event/quic/ngx_event_quic.h >>> --- a/src/event/quic/ngx_event_quic.h >>> +++ b/src/event/quic/ngx_event_quic.h >>> @@ -61,6 +61,9 @@ typedef struct { >>> ngx_flag_t retry; >>> ngx_flag_t gso_enabled; >>> ngx_str_t host_key; >>> + ngx_int_t close_stream_code; >>> + ngx_int_t reject_uni_stream_code; >>> + ngx_int_t reject_bidi_stream_code; >> >> i would prefer stream_close_code and stream_reject_code_uni|bidi, >> a bit similar to transport parameter naming like >> 'initial_max_stream_data_bidi_local', YMMV > > OK, let's do this.
> >>> u_char av_token_key[NGX_QUIC_AV_KEY_LEN]; >>> u_char sr_token_key[NGX_QUIC_SR_KEY_LEN]; >>> } ngx_quic_conf_t; >>> diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c >>> --- a/src/event/quic/ngx_event_quic_streams.c >>> +++ b/src/event/quic/ngx_event_quic_streams.c >>> @@ -15,6 +15,7 @@ >>> >>> static ngx_quic_stream_t *ngx_quic_create_client_stream(ngx_connection_t *c, >>> uint64_t id); >>> +static ngx_int_t ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id); >>> static ngx_int_t ngx_quic_init_stream(ngx_quic_stream_t *qs); >>> static void ngx_quic_init_streams_handler(ngx_connection_t *c); >>> static ngx_quic_stream_t *ngx_quic_create_stream(ngx_connection_t *c, >>> @@ -377,8 +378,13 @@ ngx_quic_create_client_stream(ngx_connec >>> for ( /* void */ ; min_id < id; min_id += 0x04) { >>> >>> qs = ngx_quic_create_stream(c, min_id); >>> + >>> if (qs == NULL) { >>> - return NULL; >>> + if (ngx_quic_reject_stream(c, min_id) != NGX_OK) { >>> + return NULL; >>> + } >>> + >>> + continue; >>> } >>> >>> if (ngx_quic_init_stream(qs) != NGX_OK) { >>> @@ -390,7 +396,66 @@ ngx_quic_create_client_stream(ngx_connec >>> } >>> } >>> >>> - return ngx_quic_create_stream(c, id); >>> + qs = ngx_quic_create_stream(c, id); >>> + >>> + if (qs == NULL) { >>> + if (ngx_quic_reject_stream(c, id) != NGX_OK) { >>> + return NULL; >>> + } >>> + >>> + return NGX_QUIC_STREAM_GONE; >>> + } >>> + >>> + return qs; >>> +} >>> + >>> + >>> +static ngx_int_t >>> +ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id) >>> +{ >>> + uint64_t code; >>> + ngx_quic_frame_t *frame; >>> + ngx_quic_connection_t *qc; >>> + >>> + qc = ngx_quic_get_connection(c); >>> + >>> + code = (id & NGX_QUIC_STREAM_UNIDIRECTIONAL) >>> + ? 
qc->conf->reject_uni_stream_code >>> + : qc->conf->reject_bidi_stream_code; >>> + >>> + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, >>> + "quic stream id:0x%xL reject err:0x%xL", id, code); >> >> Here we may decline stream rejection, but have already logged it. >> I suggest putting debug below 'code == 0' test. > > Zero code still carries some information. If it looks misleading, then yes, > let's move it below. > not really necessary, let it stay as is; we have many places where we debug just fact that function was entered. >>> + if (code == 0) { >>> + return NGX_DECLINED; >>> + } >>> + >>> + frame = ngx_quic_alloc_frame(c); >>> + if (frame == NULL) { >>> + return NGX_ERROR; >>> + } >>> + >>> + frame->level = ssl_encryption_application; >>> + frame->type = NGX_QUIC_FT_RESET_STREAM; >>> + frame->u.reset_stream.id = id; >>> + frame->u.reset_stream.error_code = code; >>> + frame->u.reset_stream.final_size = 0; >>> + >>> + ngx_quic_queue_frame(qc, frame); >>> + >>> + frame = ngx_quic_alloc_frame(c); >>> + if (frame == NULL) { >>> + return NGX_ERROR; >>> + } >>> + >>> + frame->level = ssl_encryption_application; >>> + frame->type = NGX_QUIC_FT_STOP_SENDING; >>> + frame->u.stop_sending.id = id; >>> + frame->u.stop_sending.error_code = code; >>> + >>> + ngx_quic_queue_frame(qc, frame); >>> + >>> + return NGX_OK; >>> } >>> >>> >>> @@ -866,7 +931,9 @@ ngx_quic_stream_cleanup_handler(void *da >>> if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0 >>> || (qs->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) == 0) >>> { >>> - if (!c->read->pending_eof && !c->read->error) { >>> + if (!c->read->pending_eof && !c->read->error >>> + && qc->conf->close_stream_code) >>> + { >>> frame = ngx_quic_alloc_frame(pc); >>> if (frame == NULL) { >>> goto done; >>> @@ -875,7 +942,7 @@ ngx_quic_stream_cleanup_handler(void *da >>> frame->level = ssl_encryption_application; >>> frame->type = NGX_QUIC_FT_STOP_SENDING; >>> frame->u.stop_sending.id = qs->id; >>> - frame->u.stop_sending.error_code = 0x100; 
/* HTTP/3 no error */ >>> + frame->u.stop_sending.error_code = qc->conf->close_stream_code; >>> >>> ngx_quic_queue_frame(qc, frame); >>> } >>> diff --git a/src/http/modules/ngx_http_quic_module.c b/src/http/modules/ngx_http_quic_module.c >>> --- a/src/http/modules/ngx_http_quic_module.c >>> +++ b/src/http/modules/ngx_http_quic_module.c >>> @@ -314,6 +314,7 @@ ngx_http_quic_create_srv_conf(ngx_conf_t >>> * conf->tp.sr_enabled = 0 >>> * conf->tp.preferred_address = NULL >>> * conf->host_key = { 0, NULL } >>> + * cong->reject_uni_stream_code = 0; >>> */ >>> >>> conf->tp.max_idle_timeout = NGX_CONF_UNSET_MSEC; >>> @@ -331,6 +332,8 @@ ngx_http_quic_create_srv_conf(ngx_conf_t >>> >>> conf->retry = NGX_CONF_UNSET; >>> conf->gso_enabled = NGX_CONF_UNSET; >>> + conf->close_stream_code = NGX_HTTP_V3_ERR_NO_ERROR; >>> + conf->reject_bidi_stream_code = NGX_HTTP_V3_ERR_REQUEST_REJECTED; >>> >>> return conf; >>> } >>> diff --git a/src/stream/ngx_stream_quic_module.c b/src/stream/ngx_stream_quic_module.c >>> --- a/src/stream/ngx_stream_quic_module.c >>> +++ b/src/stream/ngx_stream_quic_module.c >>> @@ -241,6 +241,9 @@ ngx_stream_quic_create_srv_conf(ngx_conf >>> * conf->tp.retry_scid = { 0, NULL }; >>> * conf->tp.preferred_address = NULL >>> * conf->host_key = { 0, NULL } >>> + * conf->close_stream_code = 0; >>> + * conf->reject_uni_stream_code = 0; >>> + * conf->reject_bidi_stream_code = 0; >>> */ >>> >>> conf->tp.max_idle_timeout = NGX_CONF_UNSET_MSEC; >> >> >> Overal patch looks good to me >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > From xeioex at nginx.com Wed Nov 17 14:12:26 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 17 Nov 2021 14:12:26 +0000 Subject: [njs] Fixed WebCrypto 
sign() and verify() methods with OpenSSL 3.0. Message-ID: details: https://hg.nginx.org/njs/rev/fd40eb687bc7 branches: changeset: 1746:fd40eb687bc7 user: Dmitry Volyntsev date: Wed Nov 17 14:11:28 2021 +0000 description: Fixed WebCrypto sign() and verify() methods with OpenSSL 3.0. diffstat: external/njs_webcrypto.c | 36 ++++++++++++++++++++++++------------ 1 files changed, 24 insertions(+), 12 deletions(-) diffs (57 lines): diff -r 728c3741f556 -r fd40eb687bc7 external/njs_webcrypto.c --- a/external/njs_webcrypto.c Thu Nov 11 14:27:15 2021 +0000 +++ b/external/njs_webcrypto.c Wed Nov 17 14:11:28 2021 +0000 @@ -2006,22 +2006,22 @@ njs_ext_sign(njs_vm_t *vm, njs_value_t * md = njs_algorithm_hash_digest(hash); - ret = EVP_DigestSignInit(mctx, NULL, md, NULL, key->pkey); - if (njs_slow_path(ret <= 0)) { - njs_webcrypto_error(vm, "EVP_DigestSignInit() failed"); - goto fail; - } - - ret = EVP_DigestSignUpdate(mctx, data.start, data.length); - if (njs_slow_path(ret <= 0)) { - njs_webcrypto_error(vm, "EVP_DigestSignUpdate() failed"); - goto fail; - } - outlen = 0; switch (alg->type) { case NJS_ALGORITHM_HMAC: + ret = EVP_DigestSignInit(mctx, NULL, md, NULL, key->pkey); + if (njs_slow_path(ret <= 0)) { + njs_webcrypto_error(vm, "EVP_DigestSignInit() failed"); + goto fail; + } + + ret = EVP_DigestSignUpdate(mctx, data.start, data.length); + if (njs_slow_path(ret <= 0)) { + njs_webcrypto_error(vm, "EVP_DigestSignUpdate() failed"); + goto fail; + } + olen = EVP_MD_size(md); if (!verify) { @@ -2051,6 +2051,18 @@ njs_ext_sign(njs_vm_t *vm, njs_value_t * case NJS_ALGORITHM_RSA_PSS: case NJS_ALGORITHM_ECDSA: default: + ret = EVP_DigestInit_ex(mctx, md, NULL); + if (njs_slow_path(ret <= 0)) { + njs_webcrypto_error(vm, "EVP_DigestInit_ex() failed"); + goto fail; + } + + ret = EVP_DigestUpdate(mctx, data.start, data.length); + if (njs_slow_path(ret <= 0)) { + njs_webcrypto_error(vm, "EVP_DigestUpdate() failed"); + goto fail; + } + ret = EVP_DigestFinal_ex(mctx, m, &m_len); if 
(njs_slow_path(ret <= 0)) { njs_webcrypto_error(vm, "EVP_DigestFinal_ex() failed"); From xeioex at nginx.com Wed Nov 17 17:01:50 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 17 Nov 2021 17:01:50 +0000 Subject: [njs] SSL: fixed reporting of the detected library version. Message-ID: details: https://hg.nginx.org/njs/rev/a2d200d79c58 branches: changeset: 1747:a2d200d79c58 user: Dmitry Volyntsev date: Wed Nov 17 17:01:07 2021 +0000 description: SSL: fixed reporting of the detected library version. Previously, `openssl version` command was used to report the OpenSSL version. Whereas, when provided with custom CFLAGS and LDFLAGS the used library may differ from the system one. The fix is to report OpenSSL version using the provided library. diffstat: auto/openssl | 11 ++++++++++- 1 files changed, 10 insertions(+), 1 deletions(-) diffs (21 lines): diff -r fd40eb687bc7 -r a2d200d79c58 auto/openssl --- a/auto/openssl Wed Nov 17 14:11:28 2021 +0000 +++ b/auto/openssl Wed Nov 17 17:01:07 2021 +0000 @@ -25,7 +25,16 @@ njs_feature_test="#include Hi, If anyone is interested, I have rebased the DTLS patch: https://github.com/vjardin/nginx/commit/57131a20281ce9942fa8e08c223d914651b79501 best regards, Vincent From pluknet at nginx.com Thu Nov 18 09:08:57 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 18 Nov 2021 09:08:57 +0000 Subject: [njs] Fixed build with OpenSSL 3.0 built with no-deprecated. Message-ID: details: https://hg.nginx.org/njs/rev/315969946708 branches: changeset: 1748:315969946708 user: Sergey Kandaurov date: Wed Nov 17 19:14:19 2021 +0300 description: Fixed build with OpenSSL 3.0 built with no-deprecated. This covers deprecated OpenSSL_add_all_algorithms() and RSA/EC_KEY types. 
diffstat: auto/openssl | 2 +- external/njs_webcrypto.c | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletions(-) diffs (104 lines): diff -r a2d200d79c58 -r 315969946708 auto/openssl --- a/auto/openssl Wed Nov 17 17:01:07 2021 +0000 +++ b/auto/openssl Wed Nov 17 19:14:19 2021 +0300 @@ -18,7 +18,7 @@ njs_feature_libs="-lcrypto" njs_feature_test="#include int main() { - OpenSSL_add_all_algorithms(); + EVP_CIPHER_CTX_new(); return 0; }" . auto/feature diff -r a2d200d79c58 -r 315969946708 external/njs_webcrypto.c --- a/external/njs_webcrypto.c Wed Nov 17 17:01:07 2021 +0000 +++ b/external/njs_webcrypto.c Wed Nov 17 19:14:19 2021 +0300 @@ -1653,15 +1653,21 @@ njs_ext_import_key(njs_vm_t *vm, njs_val { int nid; BIO *bio; +#if (OPENSSL_VERSION_NUMBER < 0x30000000L) RSA *rsa; EC_KEY *ec; +#else + char gname[80]; +#endif unsigned usage; EVP_PKEY *pkey; njs_int_t ret; njs_str_t key_data, format; njs_value_t value, *options; const u_char *start; +#if (OPENSSL_VERSION_NUMBER < 0x30000000L) const EC_GROUP *group; +#endif njs_mp_cleanup_t *cln; njs_webcrypto_key_t *key; PKCS8_PRIV_KEY_INFO *pkcs8; @@ -1770,6 +1776,9 @@ njs_ext_import_key(njs_vm_t *vm, njs_val case NJS_ALGORITHM_RSA_OAEP: case NJS_ALGORITHM_RSA_PSS: case NJS_ALGORITHM_RSASSA_PKCS1_v1_5: + +#if (OPENSSL_VERSION_NUMBER < 0x30000000L) + rsa = EVP_PKEY_get1_RSA(pkey); if (njs_slow_path(rsa == NULL)) { njs_webcrypto_error(vm, "RSA key is not found"); @@ -1778,6 +1787,13 @@ njs_ext_import_key(njs_vm_t *vm, njs_val RSA_free(rsa); +#else + if (!EVP_PKEY_is_a(pkey, "RSA")) { + njs_webcrypto_error(vm, "RSA key is not found"); + goto fail; + } +#endif + ret = njs_algorithm_hash(vm, options, &key->hash); if (njs_slow_path(ret == NJS_ERROR)) { goto fail; @@ -1789,6 +1805,9 @@ njs_ext_import_key(njs_vm_t *vm, njs_val case NJS_ALGORITHM_ECDSA: case NJS_ALGORITHM_ECDH: + +#if (OPENSSL_VERSION_NUMBER < 0x30000000L) + ec = EVP_PKEY_get1_EC_KEY(pkey); if (njs_slow_path(ec == NULL)) { 
njs_webcrypto_error(vm, "EC key is not found"); @@ -1799,6 +1818,22 @@ njs_ext_import_key(njs_vm_t *vm, njs_val nid = EC_GROUP_get_curve_name(group); EC_KEY_free(ec); +#else + + if (!EVP_PKEY_is_a(pkey, "EC")) { + njs_webcrypto_error(vm, "EC key is not found"); + goto fail; + } + + if (EVP_PKEY_get_group_name(pkey, gname, sizeof(gname), NULL) != 1) { + njs_webcrypto_error(vm, "EVP_PKEY_get_group_name() failed"); + goto fail; + } + + nid = OBJ_txt2nid(gname); + +#endif + ret = njs_algorithm_curve(vm, options, &key->curve); if (njs_slow_path(ret == NJS_ERROR)) { goto fail; @@ -2624,7 +2659,9 @@ njs_external_webcrypto_init(njs_vm_t *vm njs_str_t name; njs_opaque_value_t value; +#if (OPENSSL_VERSION_NUMBER < 0x10100003L) OpenSSL_add_all_algorithms(); +#endif njs_webcrypto_crypto_key_proto_id = njs_vm_external_prototype(vm, njs_ext_webcrypto_crypto_key, From arut at nginx.com Thu Nov 18 09:52:13 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 18 Nov 2021 12:52:13 +0300 Subject: [PATCH 0 of 2] HTTP/3 Insert Count Increment delay In-Reply-To: <20211116124522.xejjgxn455udjlf6@Romans-MacBook-Pro.local> References: <20211116124522.xejjgxn455udjlf6@Romans-MacBook-Pro.local> Message-ID: After an internal discussion, here's a new patchset. The first patch is now different. It makes sure that all functions that operate on a stream receive stream connection, and all functions that operate QUIC connection-wise receive QUIC connection. As a side effect, logging seems to be more consistent now. Connection-wise operations are logged in parent connection log. 
From arut at nginx.com Thu Nov 18 09:52:14 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 18 Nov 2021 12:52:14 +0300 Subject: [PATCH 1 of 2] HTTP/3: use parent QUIC connection as argument when possible In-Reply-To: References: Message-ID: # HG changeset patch # User Roman Arutyunyan # Date 1637160358 -10800 # Wed Nov 17 17:45:58 2021 +0300 # Branch quic # Node ID b844c77ff22218a4863d1d926bcaaa0b043c8af5 # Parent 41caf541011045612975b7bb8423a18fd424df77 HTTP/3: use parent QUIC connection as argument when possible. Functions in ngx_http_v3.c, ngx_http_v3_streams.c and ngx_http_v3_tables.c now receive parent QUIC connection as the first argument instead of QUIC stream connection. It makes sense since they are not related to a QUIC stream and operate connection-wise. Also, ngx_quic_open_stream() now receives parent QUIC connection instead of QUIC stream connection for the same reason. Also, ngx_http_v3_finalize_connection() and ngx_http_v3_shutdown_connection() macros are eliminated. Instead, ngx_quic_finalize_connection() and ngx_quic_shutdown_connection() are called directly with the parent QUIC connection. 
diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -37,11 +37,10 @@ ngx_connection_t * ngx_quic_open_stream(ngx_connection_t *c, ngx_uint_t bidi) { uint64_t id; - ngx_quic_stream_t *qs, *nqs; + ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - qs = c->quic; - qc = ngx_quic_get_connection(qs->parent); + qc = ngx_quic_get_connection(c); if (bidi) { if (qc->streams.server_streams_bidi @@ -87,12 +86,12 @@ ngx_quic_open_stream(ngx_connection_t *c qc->streams.server_streams_uni++; } - nqs = ngx_quic_create_stream(qs->parent, id); - if (nqs == NULL) { + qs = ngx_quic_create_stream(c, id); + if (qs == NULL) { return NULL; } - return nqs->connection; + return qs->connection; } diff --git a/src/http/modules/ngx_http_quic_module.h b/src/http/modules/ngx_http_quic_module.h --- a/src/http/modules/ngx_http_quic_module.h +++ b/src/http/modules/ngx_http_quic_module.h @@ -19,7 +19,8 @@ #define ngx_http_quic_get_connection(c) \ - ((ngx_http_connection_t *) (c)->quic->parent->data) + ((ngx_http_connection_t *) ((c)->quic ? 
(c)->quic->parent->data \ + : (c)->data)) ngx_int_t ngx_http_quic_init(ngx_connection_t *c); diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c --- a/src/http/v3/ngx_http_v3.c +++ b/src/http/v3/ngx_http_v3.c @@ -17,13 +17,11 @@ static void ngx_http_v3_cleanup_session( ngx_int_t ngx_http_v3_init_session(ngx_connection_t *c) { - ngx_connection_t *pc; ngx_pool_cleanup_t *cln; ngx_http_connection_t *hc; ngx_http_v3_session_t *h3c; - pc = c->quic->parent; - hc = pc->data; + hc = c->data; if (hc->v3_session) { return NGX_OK; @@ -31,7 +29,7 @@ ngx_http_v3_init_session(ngx_connection_ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 init session"); - h3c = ngx_pcalloc(pc->pool, sizeof(ngx_http_v3_session_t)); + h3c = ngx_pcalloc(c->pool, sizeof(ngx_http_v3_session_t)); if (h3c == NULL) { goto failed; } @@ -42,12 +40,12 @@ ngx_http_v3_init_session(ngx_connection_ ngx_queue_init(&h3c->blocked); ngx_queue_init(&h3c->pushing); - h3c->keepalive.log = pc->log; - h3c->keepalive.data = pc; + h3c->keepalive.log = c->log; + h3c->keepalive.data = c; h3c->keepalive.handler = ngx_http_v3_keepalive_handler; h3c->keepalive.cancelable = 1; - cln = ngx_pool_cleanup_add(pc->pool, 0); + cln = ngx_pool_cleanup_add(c->pool, 0); if (cln == NULL) { goto failed; } @@ -63,8 +61,8 @@ failed: ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create http3 session"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, - "failed to create http3 session"); + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, + "failed to create http3 session"); return NGX_ERROR; } @@ -106,8 +104,8 @@ ngx_http_v3_check_flood(ngx_connection_t if (h3c->total_bytes / 8 > h3c->payload_bytes + 1048576) { ngx_log_error(NGX_LOG_INFO, c->log, 0, "http3 flood detected"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, - "HTTP/3 flood detected"); + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, + "HTTP/3 flood detected"); return NGX_ERROR; } diff --git 
a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h --- a/src/http/v3/ngx_http_v3.h +++ b/src/http/v3/ngx_http_v3.h @@ -84,12 +84,6 @@ ngx_http_get_module_srv_conf(ngx_http_quic_get_connection(c)->conf_ctx, \ module) -#define ngx_http_v3_finalize_connection(c, code, reason) \ - ngx_quic_finalize_connection(c->quic->parent, code, reason) - -#define ngx_http_v3_shutdown_connection(c, code, reason) \ - ngx_quic_shutdown_connection(c->quic->parent, code, reason) - #define ngx_http_v3_connection(c) \ ((c)->quic ? ngx_http_quic_get_connection(c)->addr_conf->http3 : 0) diff --git a/src/http/v3/ngx_http_v3_filter_module.c b/src/http/v3/ngx_http_v3_filter_module.c --- a/src/http/v3/ngx_http_v3_filter_module.c +++ b/src/http/v3/ngx_http_v3_filter_module.c @@ -907,7 +907,7 @@ ngx_http_v3_create_push_request(ngx_http ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, "http3 create push request id:%uL", push_id); - c = ngx_http_v3_create_push_stream(pc, push_id); + c = ngx_http_v3_create_push_stream(pc->quic->parent, push_id); if (c == NULL) { return NGX_ABORT; } diff --git a/src/http/v3/ngx_http_v3_parse.c b/src/http/v3/ngx_http_v3_parse.c --- a/src/http/v3/ngx_http_v3_parse.c +++ b/src/http/v3/ngx_http_v3_parse.c @@ -353,7 +353,8 @@ ngx_http_v3_parse_headers(ngx_connection case sw_verify: - rc = ngx_http_v3_check_insert_count(c, st->prefix.insert_count); + rc = ngx_http_v3_check_insert_count(c->quic->parent, + st->prefix.insert_count); if (rc != NGX_OK) { return rc; } @@ -392,7 +393,9 @@ done: ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 parse headers done"); if (st->prefix.insert_count > 0) { - if (ngx_http_v3_send_ack_section(c, c->quic->id) != NGX_OK) { + if (ngx_http_v3_send_ack_section(c->quic->parent, c->quic->id) + != NGX_OK) + { return NGX_ERROR; } } @@ -466,7 +469,7 @@ ngx_http_v3_parse_field_section_prefix(n done: - rc = ngx_http_v3_decode_insert_count(c, &st->insert_count); + rc = ngx_http_v3_decode_insert_count(c->quic->parent, &st->insert_count); if (rc 
!= NGX_OK) { return rc; } @@ -1102,14 +1105,16 @@ ngx_http_v3_parse_lookup(ngx_connection_ u_char *p; if (!dynamic) { - if (ngx_http_v3_lookup_static(c, index, name, value) != NGX_OK) { + if (ngx_http_v3_lookup_static(c->quic->parent, index, name, value) + != NGX_OK) + { return NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED; } return NGX_OK; } - if (ngx_http_v3_lookup(c, index, name, value) != NGX_OK) { + if (ngx_http_v3_lookup(c->quic->parent, index, name, value) != NGX_OK) { return NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED; } @@ -1264,7 +1269,7 @@ ngx_http_v3_parse_control(ngx_connection return rc; } - rc = ngx_http_v3_cancel_push(c, st->vlint.value); + rc = ngx_http_v3_cancel_push(c->quic->parent, st->vlint.value); if (rc != NGX_OK) { return rc; } @@ -1310,7 +1315,7 @@ ngx_http_v3_parse_control(ngx_connection return rc; } - rc = ngx_http_v3_set_max_push_id(c, st->vlint.value); + rc = ngx_http_v3_set_max_push_id(c->quic->parent, st->vlint.value); if (rc != NGX_OK) { return rc; } @@ -1334,7 +1339,7 @@ ngx_http_v3_parse_control(ngx_connection return rc; } - rc = ngx_http_v3_goaway(c, st->vlint.value); + rc = ngx_http_v3_goaway(c->quic->parent, st->vlint.value); if (rc != NGX_OK) { return rc; } @@ -1398,7 +1403,9 @@ ngx_http_v3_parse_settings(ngx_connectio return rc; } - if (ngx_http_v3_set_param(c, st->id, st->vlint.value) != NGX_OK) { + if (ngx_http_v3_set_param(c->quic->parent, st->id, st->vlint.value) + != NGX_OK) + { return NGX_HTTP_V3_ERR_SETTINGS_ERROR; } @@ -1493,7 +1500,7 @@ ngx_http_v3_parse_encoder(ngx_connection return rc; } - rc = ngx_http_v3_set_capacity(c, st->pint.value); + rc = ngx_http_v3_set_capacity(c->quic->parent, st->pint.value); if (rc != NGX_OK) { return rc; } @@ -1508,7 +1515,7 @@ ngx_http_v3_parse_encoder(ngx_connection return rc; } - rc = ngx_http_v3_duplicate(c, st->pint.value); + rc = ngx_http_v3_duplicate(c->quic->parent, st->pint.value); if (rc != NGX_OK) { return rc; } @@ -1613,7 +1620,8 @@ done: st->dynamic ? 
"dynamic" : "static", st->index, &st->value); - rc = ngx_http_v3_ref_insert(c, st->dynamic, st->index, &st->value); + rc = ngx_http_v3_ref_insert(c->quic->parent, st->dynamic, st->index, + &st->value); if (rc != NGX_OK) { return rc; } @@ -1731,7 +1739,7 @@ done: "http3 parse field iln done \"%V\":\"%V\"", &st->name, &st->value); - rc = ngx_http_v3_insert(c, &st->name, &st->value); + rc = ngx_http_v3_insert(c->quic->parent, &st->name, &st->value); if (rc != NGX_OK) { return rc; } @@ -1793,7 +1801,7 @@ ngx_http_v3_parse_decoder(ngx_connection return rc; } - rc = ngx_http_v3_ack_section(c, st->pint.value); + rc = ngx_http_v3_ack_section(c->quic->parent, st->pint.value); if (rc != NGX_OK) { return rc; } @@ -1808,7 +1816,7 @@ ngx_http_v3_parse_decoder(ngx_connection return rc; } - rc = ngx_http_v3_cancel_stream(c, st->pint.value); + rc = ngx_http_v3_cancel_stream(c->quic->parent, st->pint.value); if (rc != NGX_OK) { return rc; } @@ -1823,7 +1831,7 @@ ngx_http_v3_parse_decoder(ngx_connection return rc; } - rc = ngx_http_v3_inc_insert_count(c, st->pint.value); + rc = ngx_http_v3_inc_insert_count(c->quic->parent, st->pint.value); if (rc != NGX_OK) { return rc; } diff --git a/src/http/v3/ngx_http_v3_request.c b/src/http/v3/ngx_http_v3_request.c --- a/src/http/v3/ngx_http_v3_request.c +++ b/src/http/v3/ngx_http_v3_request.c @@ -61,7 +61,7 @@ ngx_http_v3_init(ngx_connection_t *c) ngx_http_core_loc_conf_t *clcf; ngx_http_core_srv_conf_t *cscf; - if (ngx_http_v3_init_session(c) != NGX_OK) { + if (ngx_http_v3_init_session(c->quic->parent) != NGX_OK) { ngx_http_close_connection(c); return; } @@ -84,8 +84,9 @@ ngx_http_v3_init(ngx_connection_t *c) n = c->quic->id >> 2; if (n >= clcf->keepalive_requests * 2) { - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, - "too many requests per connection"); + ngx_quic_finalize_connection(c->quic->parent, + NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, + "too many requests per connection"); ngx_http_close_connection(c); return; } @@ 
-104,13 +105,13 @@ ngx_http_v3_init(ngx_connection_t *c) { h3c->goaway = 1; - if (ngx_http_v3_send_goaway(c, (n + 1) << 2) != NGX_OK) { + if (ngx_http_v3_send_goaway(c->quic->parent, (n + 1) << 2) != NGX_OK) { ngx_http_close_connection(c); return; } - ngx_http_v3_shutdown_connection(c, NGX_HTTP_V3_ERR_NO_ERROR, - "reached maximum number of requests"); + ngx_quic_shutdown_connection(c->quic->parent, NGX_HTTP_V3_ERR_NO_ERROR, + "reached maximum number of requests"); } rev = c->read; @@ -284,7 +285,7 @@ ngx_http_v3_reset_connection(ngx_connect h3scf = ngx_http_v3_get_module_srv_conf(c, ngx_http_v3_module); if (h3scf->max_table_capacity > 0 && !c->read->eof) { - (void) ngx_http_v3_send_cancel_stream(c, c->quic->id); + (void) ngx_http_v3_send_cancel_stream(c->quic->parent, c->quic->id); } if (c->timedout) { @@ -416,7 +417,7 @@ ngx_http_v3_process_request(ngx_event_t r->request_length += b->pos - p; h3c->total_bytes += b->pos - p; - if (ngx_http_v3_check_flood(c) != NGX_OK) { + if (ngx_http_v3_check_flood(c->quic->parent) != NGX_OK) { ngx_http_close_request(r, NGX_HTTP_CLOSE); break; } @@ -1266,7 +1267,9 @@ ngx_http_v3_request_body_filter(ngx_http r->request_length += cl->buf->pos - p; h3c->total_bytes += cl->buf->pos - p; - if (ngx_http_v3_check_flood(r->connection) != NGX_OK) { + if (ngx_http_v3_check_flood(r->connection->quic->parent) + != NGX_OK) + { return NGX_HTTP_CLOSE; } diff --git a/src/http/v3/ngx_http_v3_streams.c b/src/http/v3/ngx_http_v3_streams.c --- a/src/http/v3/ngx_http_v3_streams.c +++ b/src/http/v3/ngx_http_v3_streams.c @@ -46,9 +46,9 @@ ngx_http_v3_init_uni_stream(ngx_connecti n = c->quic->id >> 2; if (n >= h3scf->max_uni_streams) { - ngx_http_v3_finalize_connection(c, - NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, - "reached maximum number of uni streams"); + ngx_quic_finalize_connection(c->quic->parent, + NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, + "reached maximum number of uni streams"); c->data = NULL; ngx_http_v3_close_uni_stream(c); return; @@ -58,9 
+58,9 @@ ngx_http_v3_init_uni_stream(ngx_connecti us = ngx_pcalloc(c->pool, sizeof(ngx_http_v3_uni_stream_t)); if (us == NULL) { - ngx_http_v3_finalize_connection(c, - NGX_HTTP_V3_ERR_INTERNAL_ERROR, - "memory allocation error"); + ngx_quic_finalize_connection(c->quic->parent, + NGX_HTTP_V3_ERR_INTERNAL_ERROR, + "memory allocation error"); c->data = NULL; ngx_http_v3_close_uni_stream(c); return; @@ -216,7 +216,7 @@ ngx_http_v3_uni_read_handler(ngx_event_t h3c = ngx_http_v3_get_session(c); h3c->total_bytes += n; - if (ngx_http_v3_check_flood(c) != NGX_OK) { + if (ngx_http_v3_check_flood(c->quic->parent) != NGX_OK) { ngx_http_v3_close_uni_stream(c); return; } @@ -249,7 +249,7 @@ ngx_http_v3_uni_read_handler(ngx_event_t failed: - ngx_http_v3_finalize_connection(c, rc, "stream error"); + ngx_quic_finalize_connection(c->quic->parent, rc, "stream error"); ngx_http_v3_close_uni_stream(c); } @@ -264,8 +264,8 @@ ngx_http_v3_dummy_write_handler(ngx_even ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 dummy write handler"); if (ngx_handle_write_event(wev, 0) != NGX_OK) { - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_INTERNAL_ERROR, - NULL); + ngx_quic_finalize_connection(c->quic->parent, + NGX_HTTP_V3_ERR_INTERNAL_ERROR, NULL); ngx_http_v3_close_uni_stream(c); } } @@ -325,8 +325,8 @@ failed: ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create push stream"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, - "failed to create push stream"); + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, + "failed to create push stream"); if (sc) { ngx_http_v3_close_uni_stream(sc); } @@ -418,8 +418,8 @@ failed: ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create server stream"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, - "failed to create server stream"); + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_STREAM_CREATION_ERROR, + "failed to create server stream"); if (sc) { 
ngx_http_v3_close_uni_stream(sc); } @@ -476,8 +476,8 @@ failed: ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send settings"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, - "failed to send settings"); + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, + "failed to send settings"); ngx_http_v3_close_uni_stream(cc); return NGX_ERROR; @@ -518,8 +518,8 @@ failed: ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send goaway"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, - "failed to send goaway"); + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, + "failed to send goaway"); ngx_http_v3_close_uni_stream(cc); return NGX_ERROR; @@ -559,8 +559,8 @@ failed: ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send section acknowledgement"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, - "failed to send section acknowledgement"); + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, + "failed to send section acknowledgement"); ngx_http_v3_close_uni_stream(dc); return NGX_ERROR; @@ -599,8 +599,8 @@ failed: ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send stream cancellation"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, - "failed to send stream cancellation"); + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, + "failed to send stream cancellation"); ngx_http_v3_close_uni_stream(dc); return NGX_ERROR; @@ -640,8 +640,8 @@ failed: ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to send insert count increment"); - ngx_http_v3_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, - "failed to send insert count increment"); + ngx_quic_finalize_connection(c, NGX_HTTP_V3_ERR_EXCESSIVE_LOAD, + "failed to send insert count increment"); ngx_http_v3_close_uni_stream(dc); return NGX_ERROR; diff --git a/src/http/v3/ngx_http_v3_tables.c b/src/http/v3/ngx_http_v3_tables.c --- a/src/http/v3/ngx_http_v3_tables.c +++ 
b/src/http/v3/ngx_http_v3_tables.c @@ -590,9 +590,9 @@ ngx_http_v3_check_insert_count(ngx_conne ngx_log_error(NGX_LOG_INFO, c->log, 0, "client exceeded http3_max_blocked_streams limit"); - ngx_http_v3_finalize_connection(c, - NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED, - "too many blocked streams"); + ngx_quic_finalize_connection(c, + NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED, + "too many blocked streams"); return NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED; } From arut at nginx.com Thu Nov 18 09:52:15 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 18 Nov 2021 12:52:15 +0300 Subject: [PATCH 2 of 2] HTTP/3: delayed Insert Count Increment instruction In-Reply-To: References: Message-ID: <77e14add92c2e3a9c83e.1637229135@arut-laptop> # HG changeset patch # User Roman Arutyunyan # Date 1637160353 -10800 # Wed Nov 17 17:45:53 2021 +0300 # Branch quic # Node ID 77e14add92c2e3a9c83eae5d4a8e260bb3e39daf # Parent b844c77ff22218a4863d1d926bcaaa0b043c8af5 HTTP/3: delayed Insert Count Increment instruction. Sending the instruction is delayed until the end of the current event cycle. Delaying the instruction is allowed by quic-qpack-21, section 2.2.2.3. The goal is to reduce the amount of data sent back to client by accumulating several inserts in one instruction and sometimes not sending the instruction at all, if Section Acknowledgement was sent just before it. 
diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c --- a/src/http/v3/ngx_http_v3.c +++ b/src/http/v3/ngx_http_v3.c @@ -45,6 +45,10 @@ ngx_http_v3_init_session(ngx_connection_ h3c->keepalive.handler = ngx_http_v3_keepalive_handler; h3c->keepalive.cancelable = 1; + h3c->table.send_insert_count.log = c->log; + h3c->table.send_insert_count.data = c; + h3c->table.send_insert_count.handler = ngx_http_v3_inc_insert_count_handler; + cln = ngx_pool_cleanup_add(c->pool, 0); if (cln == NULL) { goto failed; @@ -91,6 +95,10 @@ ngx_http_v3_cleanup_session(void *data) if (h3c->keepalive.timer_set) { ngx_del_timer(&h3c->keepalive); } + + if (h3c->table.send_insert_count.posted) { + ngx_delete_posted_event(&h3c->table.send_insert_count); + } } diff --git a/src/http/v3/ngx_http_v3_parse.c b/src/http/v3/ngx_http_v3_parse.c --- a/src/http/v3/ngx_http_v3_parse.c +++ b/src/http/v3/ngx_http_v3_parse.c @@ -398,6 +398,8 @@ done: { return NGX_ERROR; } + + ngx_http_v3_ack_insert_count(c, st->prefix.insert_count); } st->state = sw_start; diff --git a/src/http/v3/ngx_http_v3_tables.c b/src/http/v3/ngx_http_v3_tables.c --- a/src/http/v3/ngx_http_v3_tables.c +++ b/src/http/v3/ngx_http_v3_tables.c @@ -232,11 +232,9 @@ ngx_http_v3_insert(ngx_connection_t *c, dt->elts[dt->nelts++] = field; dt->size += size; - /* TODO increment can be sent less often */ + dt->insert_count++; - if (ngx_http_v3_send_inc_insert_count(c, 1) != NGX_OK) { - return NGX_ERROR; - } + ngx_post_event(&dt->send_insert_count, &ngx_posted_events); if (ngx_http_v3_new_entry(c) != NGX_OK) { return NGX_ERROR; @@ -246,6 +244,34 @@ ngx_http_v3_insert(ngx_connection_t *c, } +void +ngx_http_v3_inc_insert_count_handler(ngx_event_t *ev) +{ + ngx_connection_t *c; + ngx_http_v3_session_t *h3c; + ngx_http_v3_dynamic_table_t *dt; + + c = ev->data; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, + "http3 inc insert count handler"); + + h3c = ngx_http_v3_get_session(c); + dt = &h3c->table; + + if (dt->insert_count > 
dt->ack_insert_count) { + if (ngx_http_v3_send_inc_insert_count(c, + dt->insert_count - dt->ack_insert_count) + != NGX_OK) + { + return; + } + + dt->ack_insert_count = dt->insert_count; + } +} + + ngx_int_t ngx_http_v3_set_capacity(ngx_connection_t *c, ngx_uint_t capacity) { @@ -607,6 +633,21 @@ ngx_http_v3_check_insert_count(ngx_conne } +void +ngx_http_v3_ack_insert_count(ngx_connection_t *c, uint64_t insert_count) +{ + ngx_http_v3_session_t *h3c; + ngx_http_v3_dynamic_table_t *dt; + + h3c = ngx_http_v3_get_session(c); + dt = &h3c->table; + + if (dt->ack_insert_count < insert_count) { + dt->ack_insert_count = insert_count; + } +} + + static void ngx_http_v3_unblock(void *data) { diff --git a/src/http/v3/ngx_http_v3_tables.h b/src/http/v3/ngx_http_v3_tables.h --- a/src/http/v3/ngx_http_v3_tables.h +++ b/src/http/v3/ngx_http_v3_tables.h @@ -26,9 +26,13 @@ typedef struct { ngx_uint_t base; size_t size; size_t capacity; + uint64_t insert_count; + uint64_t ack_insert_count; + ngx_event_t send_insert_count; } ngx_http_v3_dynamic_table_t; +void ngx_http_v3_inc_insert_count_handler(ngx_event_t *ev); void ngx_http_v3_cleanup_table(ngx_http_v3_session_t *h3c); ngx_int_t ngx_http_v3_ref_insert(ngx_connection_t *c, ngx_uint_t dynamic, ngx_uint_t index, ngx_str_t *value); @@ -46,6 +50,7 @@ ngx_int_t ngx_http_v3_decode_insert_coun ngx_uint_t *insert_count); ngx_int_t ngx_http_v3_check_insert_count(ngx_connection_t *c, ngx_uint_t insert_count); +void ngx_http_v3_ack_insert_count(ngx_connection_t *c, uint64_t insert_count); ngx_int_t ngx_http_v3_set_param(ngx_connection_t *c, uint64_t id, uint64_t value); From xeioex at nginx.com Thu Nov 18 13:55:02 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 18 Nov 2021 13:55:02 +0000 Subject: [njs] Updated README with the reference to njs-examples repo. 
Message-ID: details: https://hg.nginx.org/njs/rev/a57d9a17f702 branches: changeset: 1749:a57d9a17f702 user: Dmitry Volyntsev date: Thu Nov 18 13:48:55 2021 +0000 description: Updated README with the reference to njs-examples repo. diffstat: README | 12 ++++++++---- 1 files changed, 8 insertions(+), 4 deletions(-) diffs (35 lines): diff -r 315969946708 -r a57d9a17f702 README --- a/README Wed Nov 17 19:14:19 2021 +0300 +++ b/README Thu Nov 18 13:48:55 2021 +0000 @@ -1,6 +1,6 @@ NGINX JavaScript (njs) ----------- +---------------------- njs is a subset of the JavaScript language that allows extending nginx functionality. njs is created in compliance with ECMAScript 5.1 (strict mode) @@ -8,16 +8,20 @@ with some ECMAScript 6 and later extensi The documentation is available online: - http://nginx.org/en/docs/njs/ + https://nginx.org/en/docs/njs/ + +Additional examples and howtos can be found here: + + https://github.com/nginx/njs-examples Please ask questions, report issues, and send patches to the mailing list: - nginx-devel at nginx.org (http://mailman.nginx.org/mailman/listinfo/nginx-devel) + nginx-devel at nginx.org (https://mailman.nginx.org/mailman/listinfo/nginx-devel) or via Github: https://github.com/nginx/njs -- -NGINX, Inc., http://nginx.com +NGINX, Inc., https://nginx.com From pluknet at nginx.com Thu Nov 18 16:46:48 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 18 Nov 2021 19:46:48 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> Message-ID: <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> > On 16 Nov 2021, at 17:41, Maxim Dounin wrote: > > Hello! 
> > On Tue, Nov 16, 2021 at 02:59:46PM +0300, Sergey Kandaurov wrote: > >> >>> On 11 Nov 2021, at 06:10, Maxim Dounin wrote: >>> >>> # HG changeset patch >>> # User Maxim Dounin >>> # Date 1636599377 -10800 >>> # Thu Nov 11 05:56:17 2021 +0300 >>> # Node ID 76e072a6947a221868705c13973de15319c0d921 >>> # Parent 82b750b20c5205d685e59031247fe898f011394e >>> HTTP/2: fixed sendfile() aio handling. >>> >>> With sendfile() in threads ("aio threads; sendfile on;"), client connection >>> can block on writing, waiting for sendfile() to complete. In HTTP/2 this >>> might result in the request hang, since an attempt to continue processig >> >> processing > > Fixed, thnx. > >>> in thread event handler will call request's write event handler, which >>> is usually stopped by ngx_http_v2_send_chain(): it does nothing if there >>> are no additional data and stream->queued is set. Further, HTTP/2 resets >>> stream's c->write->ready to 0 if writing blocks, so just fixing >>> ngx_http_v2_send_chain() is not enough. >>> >>> Can be reproduced with test suite on Linux with: >>> >>> TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t >>> >>> The following tests currently fail: h2_keepalive.t, h2_priority.t, >>> h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. >>> >>> Similarly, sendfile() with AIO preloading on FreeBSD can block as well, >>> with similar results. This is, however, harder to reproduce, especially >>> on modern FreeBSD systems, since sendfile() usually do not return EBUSY. >> >> does not > > Fixed, thnx. > >>> Fix is to post a write event on HTTP/2 connection in the thread event >>> handler (and aio preload handler). This ensures that sendfile() will be >>> completed and stream processing will be resumed by HTTP/2 code. 
>>> >>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c >>> --- a/src/http/ngx_http_copy_filter_module.c >>> +++ b/src/http/ngx_http_copy_filter_module.c >>> @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler >>> r->aio = 0; >>> ev->complete = 0; >>> >>> +#if (NGX_HTTP_V2) >>> + >>> + if (r->stream) { >>> + /* >>> + * for HTTP/2, trigger a write event on the main connection >>> + * to handle sendfile() preload >>> + */ >>> + >>> + ngx_post_event(r->stream->connection->connection->write, >>> + &ngx_posted_events); >>> + return; >>> + } >>> + >>> +#endif >>> + >>> r->connection->write->handler(r->connection->write); >>> } >>> >>> @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e >>> r->main->blocked--; >>> r->aio = 0; >>> >>> +#if (NGX_HTTP_V2) >>> + >>> + if (r->stream) { >>> + /* >>> + * for HTTP/2, trigger a write event on the main connection >>> + * to handle sendfile() in threads >>> + */ >>> + >>> + ngx_post_event(r->stream->connection->connection->write, >>> + &ngx_posted_events); >>> + } >>> + >>> +#endif >>> + >> >> This thread event handler is used not only for sendfile() completion, >> but also to complete reading in threads a buffered upstream response. >> In this case, posting a write event on HTTP/2 connection looks >> unnecessary, since there is no sendfile() in action, it will do nothing. >> On the other hand, if it is indeed used to complete a sendfile() task, >> which needs to invoke http2 write handler, calling write_event_handler() >> directly from thread event handler seems to be redundant: it could be >> optimized away since http2 write handler will normally end up in posting >> a write event on the main connection, anyway, see the call sequence >> ngx_http_v2_write_handler() -> ngx_http_v2_send_output_queue() >> -> ngx_http_v2_data_frame_handler() -> ngx_http_v2_handle_stream(). > > [...] 
> >> So, it could be narrowed down, something like the aio preload handler: >> >> diff -r 76e072a6947a -r 5f48b9a797d1 src/http/ngx_http_copy_filter_module.c >> --- a/src/http/ngx_http_copy_filter_module.c Thu Nov 11 05:56:17 2021 +0300 >> +++ b/src/http/ngx_http_copy_filter_module.c Mon Nov 15 21:04:26 2021 +0000 >> @@ -340,7 +340,7 @@ >> >> #if (NGX_HTTP_V2) >> >> - if (r->stream) { >> + if (r->stream && r->stream->connection->connection->sendfile_task) { >> /* >> * for HTTP/2, trigger a write event on the main connection >> * to handle sendfile() in threads >> @@ -348,6 +348,7 @@ >> >> ngx_post_event(r->stream->connection->connection->write, >> &ngx_posted_events); >> + return; >> } >> >> #endif > > This "return" won't work, since even with sendfile() enabled and > being used, the handler can be called for non-sendfile operations > as well. > > That is, both posting an event to the main connection _and_ > calling request write handler are required. This might be > redundant in some cases, but there is no reasonable way to avoid > this with sendfile() enabled. > > Checking sendfile_task might be used to avoid extra posted event > with sendfile disabled, but it looks overcomplicated to me and I > don't think it worth the effort. It's at most a minor > optimization. > Fair enough. 
>>> if (r->done) { >>> /* >>> * trigger connection event handler if the subrequest was >>> diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c >>> --- a/src/http/ngx_http_upstream.c >>> +++ b/src/http/ngx_http_upstream.c >>> @@ -3905,6 +3905,20 @@ ngx_http_upstream_thread_event_handler(n >>> r->main->blocked--; >>> r->aio = 0; >>> >>> +#if (NGX_HTTP_V2) >>> + >>> + if (r->stream) { >>> + /* >>> + * for HTTP/2, trigger a write event on the main connection >>> + * to handle sendfile() in threads >>> + */ >>> + >>> + ngx_post_event(r->stream->connection->connection->write, >>> + &ngx_posted_events); >>> + } >>> + >>> +#endif >>> + >>> if (r->done) { >>> /* >>> * trigger connection event handler if the subrequest was >>> >> >> I could not figure out, how this part is related, since upstream >> thread handler is only enabled with "aio_write on;" to write down >> a buffered upstream response to disk. It doesn't seem to be used >> with sendfile(). > > Thread handlers are set on per-file basis. As a result, if > aio_write is enabled, the ngx_http_upstream_thread_event_handler() > handler can be used for sendfile() as well. Indeed, it is the case. I was able to reproduce it with a variant of slice.t, updated to "listen .. http2". The patch fixes it. > > Also note the following "trigger connection event handler..." > part: it is also only needed for sendfile(), yet present in the > ngx_http_upstream_thread_event_handler(). Yep, I've triggered the original problem fixed with the existing condition, though I had to back out sendfile_max_chunk rework with posted next events to catch it, as otherwise the write event was observed to be always posted. 
-- Sergey Kandaurov From vl at nginx.com Mon Nov 22 08:21:37 2021 From: vl at nginx.com (Vladimir Homutov) Date: Mon, 22 Nov 2021 11:21:37 +0300 Subject: [PATCH 1 of 2] QUIC: update stream flow control credit on STREAM_DATA_BLOCKED In-Reply-To: <20211117081727.bpmho2oiwhljv5h3@Romans-MacBook-Pro.local> References: <4e3a7fc0533192f51a01.1637134261@arut-laptop> <20211117081727.bpmho2oiwhljv5h3@Romans-MacBook-Pro.local> Message-ID: On Wed, Nov 17, 2021 at 11:17:27AM +0300, Roman Arutyunyan wrote: > On Wed, Nov 17, 2021 at 10:31:01AM +0300, Roman Arutyunyan wrote: > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1637133234 -10800 > > # Wed Nov 17 10:13:54 2021 +0300 > > # Branch quic > > # Node ID 4e3a7fc0533192f51a01042a1e9dd2b595881420 > > # Parent 4ad8fc79cb33257c928a9098a87324b350576551 > > QUIC: update stream flow control credit on STREAM_DATA_BLOCKED. > > > > Previously, after receiving STREAM_DATA_BLOCKED, current flow control limit > > was sent to client. Now, if the limit can be updated to the full window size, > > it is updated and the new value is sent to client, otherwise nothing is sent. > > > > The change lets client update flow control credit on demand. Also, it saves > > traffic by not sending MAX_STREAM_DATA with the same value twice. 
> > > > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > > --- a/src/event/quic/ngx_event_quic_streams.c > > +++ b/src/event/quic/ngx_event_quic_streams.c > > @@ -31,6 +31,7 @@ static size_t ngx_quic_max_stream_flow(n > > static void ngx_quic_stream_cleanup_handler(void *data); > > static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); > > static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); > > +static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); > > > > > > ngx_connection_t * > > @@ -1190,8 +1191,6 @@ ngx_int_t > > ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, > > ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f) > > { > > - uint64_t limit; > > - ngx_quic_frame_t *frame; > > ngx_quic_stream_t *qs; > > ngx_quic_connection_t *qc; > > > > @@ -1217,29 +1216,10 @@ ngx_quic_handle_stream_data_blocked_fram > > return NGX_OK; > > } > > > > - limit = qs->recv_max_data; > > - > > - if (ngx_quic_init_stream(qs) != NGX_OK) { > > - return NGX_ERROR; > > - } > > - > > - } else { > > - limit = qs->recv_max_data; > > + return ngx_quic_init_stream(qs); > > } > > > > - frame = ngx_quic_alloc_frame(c); > > - if (frame == NULL) { > > - return NGX_ERROR; > > - } > > - > > - frame->level = pkt->level; > > - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; > > - frame->u.max_stream_data.id = f->id; > > - frame->u.max_stream_data.limit = limit; > > - > > - ngx_quic_queue_frame(qc, frame); > > - > > - return NGX_OK; > > + return ngx_quic_update_max_stream_data(qs->connection); > > } > > > > > > @@ -1587,22 +1567,9 @@ ngx_quic_update_flow(ngx_connection_t *c > > if (!rev->pending_eof && !rev->error > > && qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) > > { > > - qs->recv_max_data = qs->recv_offset + qs->recv_window; > > - > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > - "quic flow update msd:%uL", qs->recv_max_data); > > - > > - 
frame = ngx_quic_alloc_frame(pc); > > - if (frame == NULL) { > > + if (ngx_quic_update_max_stream_data(c) != NGX_OK) { > > return NGX_ERROR; > > } > > - > > - frame->level = ssl_encryption_application; > > - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; > > - frame->u.max_stream_data.id = qs->id; > > - frame->u.max_stream_data.limit = qs->recv_max_data; > > - > > - ngx_quic_queue_frame(qc, frame); > > } > > > > qc->streams.recv_offset += len; > > @@ -1632,6 +1599,44 @@ ngx_quic_update_flow(ngx_connection_t *c > > } > > > > > > +static ngx_int_t > > +ngx_quic_update_max_stream_data(ngx_connection_t *c) > > +{ > > + uint64_t recv_max_data; > > + ngx_quic_frame_t *frame; > > + ngx_quic_stream_t *qs; > > + ngx_quic_connection_t *qc; > > + > > + qs = c->quic; > > + qc = ngx_quic_get_connection(qs->parent); > > + > > + recv_max_data = qs->recv_offset + qs->recv_window; > > + > > + if (qs->recv_max_data == recv_max_data) { shouldn't it be >= ? (i.e. we want to avoid sending frame if current window doesn't extend recv_max_data; could qs->recv_window change ?) 
> > + return NGX_OK; > > + } > > + > > + qs->recv_max_data = recv_max_data; > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > + "quic flow update msd:%uL", qs->recv_max_data); > > + > > + frame = ngx_quic_alloc_frame(c); > > The argument should be "pc": > > frame = ngx_quic_alloc_frame(pc); also, it need to be declared/initialized, similar to other places > > > + if (frame == NULL) { > > + return NGX_ERROR; > > + } > > + > > + frame->level = ssl_encryption_application; > > + frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; > > + frame->u.max_stream_data.id = qs->id; > > + frame->u.max_stream_data.limit = qs->recv_max_data; > > + > > + ngx_quic_queue_frame(qc, frame); > > + > > + return NGX_OK; > > +} > > + > > + > > ngx_int_t > > ngx_quic_handle_read_event(ngx_event_t *rev, ngx_uint_t flags) > > { > > _______________________________________________ From vl at nginx.com Mon Nov 22 08:26:10 2021 From: vl at nginx.com (Vladimir Homutov) Date: Mon, 22 Nov 2021 11:26:10 +0300 Subject: [PATCH 2 of 2] QUIC: handle DATA_BLOCKED frame from client In-Reply-To: <0fb2613594f6bd8dd8f0.1637134262@arut-laptop> References: <0fb2613594f6bd8dd8f0.1637134262@arut-laptop> Message-ID: On Wed, Nov 17, 2021 at 10:31:02AM +0300, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1637086755 -10800 > # Tue Nov 16 21:19:15 2021 +0300 > # Branch quic > # Node ID 0fb2613594f6bd8dd8f07a30c69900866b573158 > # Parent 4e3a7fc0533192f51a01042a1e9dd2b595881420 > QUIC: handle DATA_BLOCKED frame from client. > > Previously the frame was not handled and connection was closed with an error. > Now, after receiving this frame, global flow control is updated and new > flow control credit is sent to client. 
> > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > --- a/src/event/quic/ngx_event_quic.c > +++ b/src/event/quic/ngx_event_quic.c > @@ -1252,6 +1252,17 @@ ngx_quic_handle_frames(ngx_connection_t > > break; > > + case NGX_QUIC_FT_DATA_BLOCKED: > + > + if (ngx_quic_handle_data_blocked_frame(c, pkt, > + &frame.u.data_blocked) > + != NGX_OK) > + { > + return NGX_ERROR; > + } > + > + break; > + > case NGX_QUIC_FT_STREAM_DATA_BLOCKED: > > if (ngx_quic_handle_stream_data_blocked_frame(c, pkt, > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > --- a/src/event/quic/ngx_event_quic_streams.c > +++ b/src/event/quic/ngx_event_quic_streams.c > @@ -32,6 +32,7 @@ static void ngx_quic_stream_cleanup_hand > static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); > static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); > static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); > +static ngx_int_t ngx_quic_update_max_data(ngx_connection_t *c); > > > ngx_connection_t * > @@ -1188,6 +1189,14 @@ ngx_quic_handle_streams_blocked_frame(ng > > > ngx_int_t > +ngx_quic_handle_data_blocked_frame(ngx_connection_t *c, > + ngx_quic_header_t *pkt, ngx_quic_data_blocked_frame_t *f) > +{ > + return ngx_quic_update_max_data(c); > +} > + > + > +ngx_int_t > ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, > ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f) > { > @@ -1544,7 +1553,6 @@ ngx_quic_update_flow(ngx_connection_t *c > uint64_t len; > ngx_event_t *rev; > ngx_connection_t *pc; > - ngx_quic_frame_t *frame; > ngx_quic_stream_t *qs; > ngx_quic_connection_t *qc; > > @@ -1577,22 +1585,9 @@ ngx_quic_update_flow(ngx_connection_t *c > if (qc->streams.recv_max_data > <= qc->streams.recv_offset + qc->streams.recv_window / 2) > { > - qc->streams.recv_max_data = qc->streams.recv_offset > - + qc->streams.recv_window; > - > - 
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, > - "quic flow update md:%uL", qc->streams.recv_max_data); > - > - frame = ngx_quic_alloc_frame(pc); > - if (frame == NULL) { > + if (ngx_quic_update_max_data(pc) != NGX_OK) { > return NGX_ERROR; > } > - > - frame->level = ssl_encryption_application; > - frame->type = NGX_QUIC_FT_MAX_DATA; > - frame->u.max_data.max_data = qc->streams.recv_max_data; > - > - ngx_quic_queue_frame(qc, frame); > } > > return NGX_OK; > @@ -1637,6 +1632,41 @@ ngx_quic_update_max_stream_data(ngx_conn > } > > > +static ngx_int_t > +ngx_quic_update_max_data(ngx_connection_t *c) > +{ > + uint64_t recv_max_data; > + ngx_quic_frame_t *frame; > + ngx_quic_connection_t *qc; > + > + qc = ngx_quic_get_connection(c); > + > + recv_max_data = qc->streams.recv_offset + qc->streams.recv_window; > + > + if (qc->streams.recv_max_data == recv_max_data) { > + return NGX_OK; > + } same question as in previous patch; logic is the same; > + > + qc->streams.recv_max_data = recv_max_data; > + > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > + "quic flow update md:%uL", qc->streams.recv_max_data); > + > + frame = ngx_quic_alloc_frame(c); looks like the same issue as in the previous patch - should be pc here > + if (frame == NULL) { > + return NGX_ERROR; > + } > + > + frame->level = ssl_encryption_application; > + frame->type = NGX_QUIC_FT_MAX_DATA; > + frame->u.max_data.max_data = qc->streams.recv_max_data; > + > + ngx_quic_queue_frame(qc, frame); > + > + return NGX_OK; > +} > + > + > ngx_int_t > ngx_quic_handle_read_event(ngx_event_t *rev, ngx_uint_t flags) > { > diff --git a/src/event/quic/ngx_event_quic_streams.h b/src/event/quic/ngx_event_quic_streams.h > --- a/src/event/quic/ngx_event_quic_streams.h > +++ b/src/event/quic/ngx_event_quic_streams.h > @@ -20,6 +20,8 @@ ngx_int_t ngx_quic_handle_max_data_frame > ngx_quic_max_data_frame_t *f); > ngx_int_t ngx_quic_handle_streams_blocked_frame(ngx_connection_t *c, > ngx_quic_header_t *pkt, 
ngx_quic_streams_blocked_frame_t *f); > +ngx_int_t ngx_quic_handle_data_blocked_frame(ngx_connection_t *c, > + ngx_quic_header_t *pkt, ngx_quic_data_blocked_frame_t *f); > ngx_int_t ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, > ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f); > ngx_int_t ngx_quic_handle_max_stream_data_frame(ngx_connection_t *c, From arut at nginx.com Mon Nov 22 11:50:34 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 22 Nov 2021 14:50:34 +0300 Subject: [PATCH 1 of 2] QUIC: update stream flow control credit on STREAM_DATA_BLOCKED In-Reply-To: References: <4e3a7fc0533192f51a01.1637134261@arut-laptop> <20211117081727.bpmho2oiwhljv5h3@Romans-MacBook-Pro.local> Message-ID: <20211122115034.ixltle2nhgszg3lu@Romans-MacBook-Pro.local> On Mon, Nov 22, 2021 at 11:21:37AM +0300, Vladimir Homutov wrote: > On Wed, Nov 17, 2021 at 11:17:27AM +0300, Roman Arutyunyan wrote: > > On Wed, Nov 17, 2021 at 10:31:01AM +0300, Roman Arutyunyan wrote: > > > # HG changeset patch > > > # User Roman Arutyunyan > > > # Date 1637133234 -10800 > > > # Wed Nov 17 10:13:54 2021 +0300 > > > # Branch quic > > > # Node ID 4e3a7fc0533192f51a01042a1e9dd2b595881420 > > > # Parent 4ad8fc79cb33257c928a9098a87324b350576551 > > > QUIC: update stream flow control credit on STREAM_DATA_BLOCKED. > > > > > > Previously, after receiving STREAM_DATA_BLOCKED, current flow control limit > > > was sent to client. Now, if the limit can be updated to the full window size, > > > it is updated and the new value is sent to client, otherwise nothing is sent. > > > > > > The change lets client update flow control credit on demand. Also, it saves > > > traffic by not sending MAX_STREAM_DATA with the same value twice. 
> > > > > > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > > > --- a/src/event/quic/ngx_event_quic_streams.c > > > +++ b/src/event/quic/ngx_event_quic_streams.c > > > @@ -31,6 +31,7 @@ static size_t ngx_quic_max_stream_flow(n > > > static void ngx_quic_stream_cleanup_handler(void *data); > > > static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); > > > static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); > > > +static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); > > > > > > > > > ngx_connection_t * > > > @@ -1190,8 +1191,6 @@ ngx_int_t > > > ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, > > > ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f) > > > { > > > - uint64_t limit; > > > - ngx_quic_frame_t *frame; > > > ngx_quic_stream_t *qs; > > > ngx_quic_connection_t *qc; > > > > > > @@ -1217,29 +1216,10 @@ ngx_quic_handle_stream_data_blocked_fram > > > return NGX_OK; > > > } > > > > > > - limit = qs->recv_max_data; > > > - > > > - if (ngx_quic_init_stream(qs) != NGX_OK) { > > > - return NGX_ERROR; > > > - } > > > - > > > - } else { > > > - limit = qs->recv_max_data; > > > + return ngx_quic_init_stream(qs); > > > } > > > > > > - frame = ngx_quic_alloc_frame(c); > > > - if (frame == NULL) { > > > - return NGX_ERROR; > > > - } > > > - > > > - frame->level = pkt->level; > > > - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; > > > - frame->u.max_stream_data.id = f->id; > > > - frame->u.max_stream_data.limit = limit; > > > - > > > - ngx_quic_queue_frame(qc, frame); > > > - > > > - return NGX_OK; > > > + return ngx_quic_update_max_stream_data(qs->connection); > > > } > > > > > > > > > @@ -1587,22 +1567,9 @@ ngx_quic_update_flow(ngx_connection_t *c > > > if (!rev->pending_eof && !rev->error > > > && qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) > > > { > > > - qs->recv_max_data = qs->recv_offset + qs->recv_window; > > > - > > 
> - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > > - "quic flow update msd:%uL", qs->recv_max_data); > > > - > > > - frame = ngx_quic_alloc_frame(pc); > > > - if (frame == NULL) { > > > + if (ngx_quic_update_max_stream_data(c) != NGX_OK) { > > > return NGX_ERROR; > > > } > > > - > > > - frame->level = ssl_encryption_application; > > > - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; > > > - frame->u.max_stream_data.id = qs->id; > > > - frame->u.max_stream_data.limit = qs->recv_max_data; > > > - > > > - ngx_quic_queue_frame(qc, frame); > > > } > > > > > > qc->streams.recv_offset += len; > > > @@ -1632,6 +1599,44 @@ ngx_quic_update_flow(ngx_connection_t *c > > > } > > > > > > > > > +static ngx_int_t > > > +ngx_quic_update_max_stream_data(ngx_connection_t *c) > > > +{ > > > + uint64_t recv_max_data; > > > + ngx_quic_frame_t *frame; > > > + ngx_quic_stream_t *qs; > > > + ngx_quic_connection_t *qc; > > > + > > > + qs = c->quic; > > > + qc = ngx_quic_get_connection(qs->parent); > > > + > > > + recv_max_data = qs->recv_offset + qs->recv_window; > > > + > > > + if (qs->recv_max_data == recv_max_data) { > > shouldn't it be >= ? (i.e. we want to avoid sending frame if current > window doesn't extend recv_max_data; could qs->recv_window change ?) It can't be larger than that. Initially qs->recv_max_data equals qs->recv_window and then qs->recv_offset grows. And no, qs->recv_window never changes. > > > + return NGX_OK; > > > + } > > > + > > > + qs->recv_max_data = recv_max_data; > > > + > > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > > + "quic flow update msd:%uL", qs->recv_max_data); > > > + > > > + frame = ngx_quic_alloc_frame(c); > > > > The argument should be "pc": > > > > frame = ngx_quic_alloc_frame(pc); > > also, it need to be declared/initialized, similar to other places Sure, my bad. 
> > > + if (frame == NULL) { > > > + return NGX_ERROR; > > > + } > > > + > > > + frame->level = ssl_encryption_application; > > > + frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; > > > + frame->u.max_stream_data.id = qs->id; > > > + frame->u.max_stream_data.limit = qs->recv_max_data; > > > + > > > + ngx_quic_queue_frame(qc, frame); > > > + > > > + return NGX_OK; > > > +} > > > + > > > + > > > ngx_int_t > > > ngx_quic_handle_read_event(ngx_event_t *rev, ngx_uint_t flags) > > > { > > > _______________________________________________ > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1637179658 -10800 # Wed Nov 17 23:07:38 2021 +0300 # Branch quic # Node ID a316d316a3356de408f77e8c63b42fdc315e926e # Parent 7a14f995dcf71ade5b896d19a9db3098fb5bceac QUIC: update stream flow control credit on STREAM_DATA_BLOCKED. Previously, after receiving STREAM_DATA_BLOCKED, current flow control limit was sent to client. Now, if the limit can be updated to the full window size, it is updated and the new value is sent to client, otherwise nothing is sent. The change lets client update flow control credit on demand. Also, it saves traffic by not sending MAX_STREAM_DATA with the same value twice. 
diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -31,6 +31,7 @@ static size_t ngx_quic_max_stream_flow(n static void ngx_quic_stream_cleanup_handler(void *data); static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); +static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); ngx_connection_t * @@ -1189,8 +1190,6 @@ ngx_int_t ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f) { - uint64_t limit; - ngx_quic_frame_t *frame; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -1216,29 +1215,10 @@ ngx_quic_handle_stream_data_blocked_fram return NGX_OK; } - limit = qs->recv_max_data; - - if (ngx_quic_init_stream(qs) != NGX_OK) { - return NGX_ERROR; - } - - } else { - limit = qs->recv_max_data; + return ngx_quic_init_stream(qs); } - frame = ngx_quic_alloc_frame(c); - if (frame == NULL) { - return NGX_ERROR; - } - - frame->level = pkt->level; - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; - frame->u.max_stream_data.id = f->id; - frame->u.max_stream_data.limit = limit; - - ngx_quic_queue_frame(qc, frame); - - return NGX_OK; + return ngx_quic_update_max_stream_data(qs->connection); } @@ -1586,22 +1566,9 @@ ngx_quic_update_flow(ngx_connection_t *c if (!rev->pending_eof && !rev->error && qs->recv_max_data <= qs->recv_offset + qs->recv_window / 2) { - qs->recv_max_data = qs->recv_offset + qs->recv_window; - - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic flow update msd:%uL", qs->recv_max_data); - - frame = ngx_quic_alloc_frame(pc); - if (frame == NULL) { + if (ngx_quic_update_max_stream_data(c) != NGX_OK) { return NGX_ERROR; } - - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; - frame->u.max_stream_data.id = 
qs->id; - frame->u.max_stream_data.limit = qs->recv_max_data; - - ngx_quic_queue_frame(qc, frame); } qc->streams.recv_offset += len; @@ -1631,6 +1598,46 @@ ngx_quic_update_flow(ngx_connection_t *c } +static ngx_int_t +ngx_quic_update_max_stream_data(ngx_connection_t *c) +{ + uint64_t recv_max_data; + ngx_connection_t *pc; + ngx_quic_frame_t *frame; + ngx_quic_stream_t *qs; + ngx_quic_connection_t *qc; + + qs = c->quic; + pc = qs->parent; + qc = ngx_quic_get_connection(pc); + + recv_max_data = qs->recv_offset + qs->recv_window; + + if (qs->recv_max_data == recv_max_data) { + return NGX_OK; + } + + qs->recv_max_data = recv_max_data; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic flow update msd:%uL", qs->recv_max_data); + + frame = ngx_quic_alloc_frame(pc); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_MAX_STREAM_DATA; + frame->u.max_stream_data.id = qs->id; + frame->u.max_stream_data.limit = qs->recv_max_data; + + ngx_quic_queue_frame(qc, frame); + + return NGX_OK; +} + + ngx_int_t ngx_quic_handle_read_event(ngx_event_t *rev, ngx_uint_t flags) { From arut at nginx.com Mon Nov 22 11:54:20 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 22 Nov 2021 14:54:20 +0300 Subject: [PATCH 2 of 2] QUIC: handle DATA_BLOCKED frame from client In-Reply-To: References: <0fb2613594f6bd8dd8f0.1637134262@arut-laptop> Message-ID: <20211122115420.fzbgiq4po2ppa53b@Romans-MacBook-Pro.local> On Mon, Nov 22, 2021 at 11:26:10AM +0300, Vladimir Homutov wrote: > On Wed, Nov 17, 2021 at 10:31:02AM +0300, Roman Arutyunyan wrote: > > # HG changeset patch > > # User Roman Arutyunyan > > # Date 1637086755 -10800 > > # Tue Nov 16 21:19:15 2021 +0300 > > # Branch quic > > # Node ID 0fb2613594f6bd8dd8f07a30c69900866b573158 > > # Parent 4e3a7fc0533192f51a01042a1e9dd2b595881420 > > QUIC: handle DATA_BLOCKED frame from client. 
> > > > Previously the frame was not handled and connection was closed with an error. > > Now, after receiving this frame, global flow control is updated and new > > flow control credit is sent to client. > > > > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > > --- a/src/event/quic/ngx_event_quic.c > > +++ b/src/event/quic/ngx_event_quic.c > > @@ -1252,6 +1252,17 @@ ngx_quic_handle_frames(ngx_connection_t > > > > break; > > > > + case NGX_QUIC_FT_DATA_BLOCKED: > > + > > + if (ngx_quic_handle_data_blocked_frame(c, pkt, > > + &frame.u.data_blocked) > > + != NGX_OK) > > + { > > + return NGX_ERROR; > > + } > > + > > + break; > > + > > case NGX_QUIC_FT_STREAM_DATA_BLOCKED: > > > > if (ngx_quic_handle_stream_data_blocked_frame(c, pkt, > > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > > --- a/src/event/quic/ngx_event_quic_streams.c > > +++ b/src/event/quic/ngx_event_quic_streams.c > > @@ -32,6 +32,7 @@ static void ngx_quic_stream_cleanup_hand > > static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); > > static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); > > static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); > > +static ngx_int_t ngx_quic_update_max_data(ngx_connection_t *c); > > > > > > ngx_connection_t * > > @@ -1188,6 +1189,14 @@ ngx_quic_handle_streams_blocked_frame(ng > > > > > > ngx_int_t > > +ngx_quic_handle_data_blocked_frame(ngx_connection_t *c, > > + ngx_quic_header_t *pkt, ngx_quic_data_blocked_frame_t *f) > > +{ > > + return ngx_quic_update_max_data(c); > > +} > > + > > + > > +ngx_int_t > > ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, > > ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f) > > { > > @@ -1544,7 +1553,6 @@ ngx_quic_update_flow(ngx_connection_t *c > > uint64_t len; > > ngx_event_t *rev; > > ngx_connection_t *pc; > > - ngx_quic_frame_t *frame; > > ngx_quic_stream_t *qs; 
> > ngx_quic_connection_t *qc; > > > > @@ -1577,22 +1585,9 @@ ngx_quic_update_flow(ngx_connection_t *c > > if (qc->streams.recv_max_data > > <= qc->streams.recv_offset + qc->streams.recv_window / 2) > > { > > - qc->streams.recv_max_data = qc->streams.recv_offset > > - + qc->streams.recv_window; > > - > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > - "quic flow update md:%uL", qc->streams.recv_max_data); > > - > > - frame = ngx_quic_alloc_frame(pc); > > - if (frame == NULL) { > > + if (ngx_quic_update_max_data(pc) != NGX_OK) { > > return NGX_ERROR; > > } > > - > > - frame->level = ssl_encryption_application; > > - frame->type = NGX_QUIC_FT_MAX_DATA; > > - frame->u.max_data.max_data = qc->streams.recv_max_data; > > - > > - ngx_quic_queue_frame(qc, frame); > > } > > > > return NGX_OK; > > @@ -1637,6 +1632,41 @@ ngx_quic_update_max_stream_data(ngx_conn > > } > > > > > > +static ngx_int_t > > +ngx_quic_update_max_data(ngx_connection_t *c) > > +{ > > + uint64_t recv_max_data; > > + ngx_quic_frame_t *frame; > > + ngx_quic_connection_t *qc; > > + > > + qc = ngx_quic_get_connection(c); > > + > > + recv_max_data = qc->streams.recv_offset + qc->streams.recv_window; > > + > > + if (qc->streams.recv_max_data == recv_max_data) { > > + return NGX_OK; > > + } > > same question as in previous patch; logic is the same; Same answer. > > + qc->streams.recv_max_data = recv_max_data; > > + > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > + "quic flow update md:%uL", qc->streams.recv_max_data); > > + > > + frame = ngx_quic_alloc_frame(c); > > looks like the same issue as in the previous patch - should be pc here No. Here we already have the parent connection, not a stream connection. Maybe we need to rename 'c' to 'pc' everywhere for parent connection to make it clear. 
> > + if (frame == NULL) { > > + return NGX_ERROR; > > + } > > + > > + frame->level = ssl_encryption_application; > > + frame->type = NGX_QUIC_FT_MAX_DATA; > > + frame->u.max_data.max_data = qc->streams.recv_max_data; > > + > > + ngx_quic_queue_frame(qc, frame); > > + > > + return NGX_OK; > > +} > > + > > + > > ngx_int_t > > ngx_quic_handle_read_event(ngx_event_t *rev, ngx_uint_t flags) > > { > > diff --git a/src/event/quic/ngx_event_quic_streams.h b/src/event/quic/ngx_event_quic_streams.h > > --- a/src/event/quic/ngx_event_quic_streams.h > > +++ b/src/event/quic/ngx_event_quic_streams.h > > @@ -20,6 +20,8 @@ ngx_int_t ngx_quic_handle_max_data_frame > > ngx_quic_max_data_frame_t *f); > > ngx_int_t ngx_quic_handle_streams_blocked_frame(ngx_connection_t *c, > > ngx_quic_header_t *pkt, ngx_quic_streams_blocked_frame_t *f); > > +ngx_int_t ngx_quic_handle_data_blocked_frame(ngx_connection_t *c, > > + ngx_quic_header_t *pkt, ngx_quic_data_blocked_frame_t *f); > > ngx_int_t ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, > > ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f); > > ngx_int_t ngx_quic_handle_max_stream_data_frame(ngx_connection_t *c, > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From vl at nginx.com Mon Nov 22 12:00:20 2021 From: vl at nginx.com (Vladimir Homutov) Date: Mon, 22 Nov 2021 15:00:20 +0300 Subject: [PATCH 2 of 2] QUIC: handle DATA_BLOCKED frame from client In-Reply-To: <20211122115420.fzbgiq4po2ppa53b@Romans-MacBook-Pro.local> References: <0fb2613594f6bd8dd8f0.1637134262@arut-laptop> <20211122115420.fzbgiq4po2ppa53b@Romans-MacBook-Pro.local> Message-ID: On Mon, Nov 22, 2021 at 02:54:20PM +0300, Roman Arutyunyan wrote: > On Mon, Nov 22, 2021 at 11:26:10AM +0300, Vladimir Homutov wrote: > > On Wed, Nov 17, 2021 at 10:31:02AM +0300, Roman Arutyunyan wrote: > > > # HG changeset patch > 
> > # User Roman Arutyunyan > > > # Date 1637086755 -10800 > > > # Tue Nov 16 21:19:15 2021 +0300 > > > # Branch quic > > > # Node ID 0fb2613594f6bd8dd8f07a30c69900866b573158 > > > # Parent 4e3a7fc0533192f51a01042a1e9dd2b595881420 > > > QUIC: handle DATA_BLOCKED frame from client. > > > > > > Previously the frame was not handled and connection was closed with an error. > > > Now, after receiving this frame, global flow control is updated and new > > > flow control credit is sent to client. > > > > > > diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c > > > --- a/src/event/quic/ngx_event_quic.c > > > +++ b/src/event/quic/ngx_event_quic.c > > > @@ -1252,6 +1252,17 @@ ngx_quic_handle_frames(ngx_connection_t > > > > > > break; > > > > > > + case NGX_QUIC_FT_DATA_BLOCKED: > > > + > > > + if (ngx_quic_handle_data_blocked_frame(c, pkt, > > > + &frame.u.data_blocked) > > > + != NGX_OK) > > > + { > > > + return NGX_ERROR; > > > + } > > > + > > > + break; > > > + > > > case NGX_QUIC_FT_STREAM_DATA_BLOCKED: > > > > > > if (ngx_quic_handle_stream_data_blocked_frame(c, pkt, > > > diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c > > > --- a/src/event/quic/ngx_event_quic_streams.c > > > +++ b/src/event/quic/ngx_event_quic_streams.c > > > @@ -32,6 +32,7 @@ static void ngx_quic_stream_cleanup_hand > > > static ngx_int_t ngx_quic_control_flow(ngx_connection_t *c, uint64_t last); > > > static ngx_int_t ngx_quic_update_flow(ngx_connection_t *c, uint64_t last); > > > static ngx_int_t ngx_quic_update_max_stream_data(ngx_connection_t *c); > > > +static ngx_int_t ngx_quic_update_max_data(ngx_connection_t *c); > > > > > > > > > ngx_connection_t * > > > @@ -1188,6 +1189,14 @@ ngx_quic_handle_streams_blocked_frame(ng > > > > > > > > > ngx_int_t > > > +ngx_quic_handle_data_blocked_frame(ngx_connection_t *c, > > > + ngx_quic_header_t *pkt, ngx_quic_data_blocked_frame_t *f) > > > +{ > > > + return 
ngx_quic_update_max_data(c); > > > +} > > > + > > > + > > > +ngx_int_t > > > ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, > > > ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f) > > > { > > > @@ -1544,7 +1553,6 @@ ngx_quic_update_flow(ngx_connection_t *c > > > uint64_t len; > > > ngx_event_t *rev; > > > ngx_connection_t *pc; > > > - ngx_quic_frame_t *frame; > > > ngx_quic_stream_t *qs; > > > ngx_quic_connection_t *qc; > > > > > > @@ -1577,22 +1585,9 @@ ngx_quic_update_flow(ngx_connection_t *c > > > if (qc->streams.recv_max_data > > > <= qc->streams.recv_offset + qc->streams.recv_window / 2) > > > { > > > - qc->streams.recv_max_data = qc->streams.recv_offset > > > - + qc->streams.recv_window; > > > - > > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, > > > - "quic flow update md:%uL", qc->streams.recv_max_data); > > > - > > > - frame = ngx_quic_alloc_frame(pc); > > > - if (frame == NULL) { > > > + if (ngx_quic_update_max_data(pc) != NGX_OK) { > > > return NGX_ERROR; > > > } > > > - > > > - frame->level = ssl_encryption_application; > > > - frame->type = NGX_QUIC_FT_MAX_DATA; > > > - frame->u.max_data.max_data = qc->streams.recv_max_data; > > > - > > > - ngx_quic_queue_frame(qc, frame); > > > } > > > > > > return NGX_OK; > > > @@ -1637,6 +1632,41 @@ ngx_quic_update_max_stream_data(ngx_conn > > > } > > > > > > > > > +static ngx_int_t > > > +ngx_quic_update_max_data(ngx_connection_t *c) > > > +{ > > > + uint64_t recv_max_data; > > > + ngx_quic_frame_t *frame; > > > + ngx_quic_connection_t *qc; > > > + > > > + qc = ngx_quic_get_connection(c); > > > + > > > + recv_max_data = qc->streams.recv_offset + qc->streams.recv_window; > > > + > > > + if (qc->streams.recv_max_data == recv_max_data) { > > > + return NGX_OK; > > > + } > > > > same question as in previous patch; logic is the same; > > Same answer. ok, got it now. 
> > > > + qc->streams.recv_max_data = recv_max_data; > > > + > > > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > > > + "quic flow update md:%uL", qc->streams.recv_max_data); > > > + > > > + frame = ngx_quic_alloc_frame(c); > > > > looks like the same issue as in the previous patch - should be pc here > > No. Here we already have the parent connection, not a stream connection. > Maybe we need to rename 'c' to 'pc' everywhere for parent connection to make it > clear. I don't think it's worth it, since we get 'c' as argument and no other connection is supposed. Both patches look good for me. > > > > + if (frame == NULL) { > > > + return NGX_ERROR; > > > + } > > > + > > > + frame->level = ssl_encryption_application; > > > + frame->type = NGX_QUIC_FT_MAX_DATA; > > > + frame->u.max_data.max_data = qc->streams.recv_max_data; > > > + > > > + ngx_quic_queue_frame(qc, frame); > > > + > > > + return NGX_OK; > > > +} > > > + > > > + > > > ngx_int_t > > > ngx_quic_handle_read_event(ngx_event_t *rev, ngx_uint_t flags) > > > { > > > diff --git a/src/event/quic/ngx_event_quic_streams.h b/src/event/quic/ngx_event_quic_streams.h > > > --- a/src/event/quic/ngx_event_quic_streams.h > > > +++ b/src/event/quic/ngx_event_quic_streams.h > > > @@ -20,6 +20,8 @@ ngx_int_t ngx_quic_handle_max_data_frame > > > ngx_quic_max_data_frame_t *f); > > > ngx_int_t ngx_quic_handle_streams_blocked_frame(ngx_connection_t *c, > > > ngx_quic_header_t *pkt, ngx_quic_streams_blocked_frame_t *f); > > > +ngx_int_t ngx_quic_handle_data_blocked_frame(ngx_connection_t *c, > > > + ngx_quic_header_t *pkt, ngx_quic_data_blocked_frame_t *f); > > > ngx_int_t ngx_quic_handle_stream_data_blocked_frame(ngx_connection_t *c, > > > ngx_quic_header_t *pkt, ngx_quic_stream_data_blocked_frame_t *f); > > > ngx_int_t ngx_quic_handle_max_stream_data_frame(ngx_connection_t *c, > > > > > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > 
http://mailman.nginx.org/mailman/listinfo/nginx-devel > > -- > Roman Arutyunyan > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From xeioex at nginx.com Mon Nov 22 13:40:55 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 22 Nov 2021 13:40:55 +0000 Subject: [njs] Fixed exception throwing when RegExp match fails. Message-ID: details: https://hg.nginx.org/njs/rev/7f72930cf1ac branches: changeset: 1750:7f72930cf1ac user: Dmitry Volyntsev date: Mon Nov 22 13:37:11 2021 +0000 description: Fixed exception throwing when RegExp match fails. The issue was introduced in a83775113025 (0.1.15). This closes #439 issue on Github. diffstat: src/njs_regexp.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r a57d9a17f702 -r 7f72930cf1ac src/njs_regexp.c --- a/src/njs_regexp.c Thu Nov 18 13:48:55 2021 +0000 +++ b/src/njs_regexp.c Mon Nov 22 13:37:11 2021 +0000 @@ -446,7 +446,7 @@ njs_regexp_match_trace_handler(njs_trace trace = trace->next; p = trace->handler(trace, td, start); - njs_internal_error(vm, (const char *) start); + njs_internal_error(vm, "%*s", p - start, start); return p; } From mdounin at mdounin.ru Tue Nov 23 03:12:04 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 23 Nov 2021 06:12:04 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> Message-ID: Hello! 
On Thu, Nov 18, 2021 at 07:46:48PM +0300, Sergey Kandaurov wrote: > > On 16 Nov 2021, at 17:41, Maxim Dounin wrote: > > > > On Tue, Nov 16, 2021 at 02:59:46PM +0300, Sergey Kandaurov wrote: > > > >>> On 11 Nov 2021, at 06:10, Maxim Dounin wrote: > >>> > >>> # HG changeset patch > >>> # User Maxim Dounin > >>> # Date 1636599377 -10800 > >>> # Thu Nov 11 05:56:17 2021 +0300 > >>> # Node ID 76e072a6947a221868705c13973de15319c0d921 > >>> # Parent 82b750b20c5205d685e59031247fe898f011394e > >>> HTTP/2: fixed sendfile() aio handling. > >>> > >>> With sendfile() in threads ("aio threads; sendfile on;"), client connection > >>> can block on writing, waiting for sendfile() to complete. In HTTP/2 this > >>> might result in the request hang, since an attempt to continue processig > >> > >> processing > > > > Fixed, thnx. > > > >>> in thread event handler will call request's write event handler, which > >>> is usually stopped by ngx_http_v2_send_chain(): it does nothing if there > >>> are no additional data and stream->queued is set. Further, HTTP/2 resets > >>> stream's c->write->ready to 0 if writing blocks, so just fixing > >>> ngx_http_v2_send_chain() is not enough. > >>> > >>> Can be reproduced with test suite on Linux with: > >>> > >>> TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t > >>> > >>> The following tests currently fail: h2_keepalive.t, h2_priority.t, > >>> h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. > >>> > >>> Similarly, sendfile() with AIO preloading on FreeBSD can block as well, > >>> with similar results. This is, however, harder to reproduce, especially > >>> on modern FreeBSD systems, since sendfile() usually do not return EBUSY. > >> > >> does not > > > > Fixed, thnx. > > > >>> Fix is to post a write event on HTTP/2 connection in the thread event > >>> handler (and aio preload handler). This ensures that sendfile() will be > >>> completed and stream processing will be resumed by HTTP/2 code. 
> >>> > >>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > >>> --- a/src/http/ngx_http_copy_filter_module.c > >>> +++ b/src/http/ngx_http_copy_filter_module.c > >>> @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler > >>> r->aio = 0; > >>> ev->complete = 0; > >>> > >>> +#if (NGX_HTTP_V2) > >>> + > >>> + if (r->stream) { > >>> + /* > >>> + * for HTTP/2, trigger a write event on the main connection > >>> + * to handle sendfile() preload > >>> + */ > >>> + > >>> + ngx_post_event(r->stream->connection->connection->write, > >>> + &ngx_posted_events); > >>> + return; > >>> + } > >>> + > >>> +#endif > >>> + > >>> r->connection->write->handler(r->connection->write); > >>> } > >>> > >>> @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e > >>> r->main->blocked--; > >>> r->aio = 0; > >>> > >>> +#if (NGX_HTTP_V2) > >>> + > >>> + if (r->stream) { > >>> + /* > >>> + * for HTTP/2, trigger a write event on the main connection > >>> + * to handle sendfile() in threads > >>> + */ > >>> + > >>> + ngx_post_event(r->stream->connection->connection->write, > >>> + &ngx_posted_events); > >>> + } > >>> + > >>> +#endif > >>> + [...] For the record, while testing this patch Sergey found another issue with sendfile() in threads and HTTP/2: since HTTP/2 might call sendfile() within the main connection, bypassing request filter chain, normal r->aio flag checking to prevent multiple operations do not work, and this eventually results in "task already active" alerts due to duplicate operations being posted. With the above patch this issue is much more likely to happen, since it intentionally triggers write events on the main HTTP/2 connection. Below are two patches: the first one addresses the issue with duplicate operations by additionally checking file->thread_task before sendfile(), and the second one is a better alternative to the above patch which doesn't post additional events on the main connection. 
# HG changeset patch # User Maxim Dounin # Date 1637635671 -10800 # Tue Nov 23 05:47:51 2021 +0300 # Node ID 8a18b0bff1266db221fe35dc08f4483044ea0f86 # Parent 82b750b20c5205d685e59031247fe898f011394e HTTP/2: fixed "task already active" with sendfile in threads. With sendfile in threads, "task already active" alerts might appear in logs if a write event happens on the main HTTP/2 connection, triggering a sendfile in threads while another thread operation is already running. Observed with "aio threads; aio_write on; sendfile on;" and with thread event handlers modified to post a write event to the main HTTP/2 connection (though can happen without any modifications). Fix is to avoid starting a sendfile operation if file->thread_task indicates that another thread operation is active. diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c --- a/src/os/unix/ngx_linux_sendfile_chain.c +++ b/src/os/unix/ngx_linux_sendfile_chain.c @@ -324,6 +324,18 @@ ngx_linux_sendfile_thread(ngx_connection "linux sendfile thread: %d, %uz, %O", file->file->fd, size, file->file_pos); + task = file->file->thread_task; + + if (task && task->event.active) { + /* + * with HTTP/2, another thread operation might be already running + * if sendfile() is called as a result of a write event on the main + * connection + */ + + return NGX_DONE; + } + task = c->sendfile_task; if (task == NULL) { # HG changeset patch # User Maxim Dounin # Date 1637637006 -10800 # Tue Nov 23 06:10:06 2021 +0300 # Node ID 45c857a5c64cd99309b0d585f2186d219fa357ed # Parent 8a18b0bff1266db221fe35dc08f4483044ea0f86 HTTP/2: fixed sendfile() aio handling. With sendfile() in threads ("aio threads; sendfile on;"), client connection can block on writing, waiting for sendfile() to complete. 
In HTTP/2 this might result in the request hang, since an attempt to continue processing in thread event handler will call request's write event handler, which is usually stopped by ngx_http_v2_send_chain(): it does nothing if there are no additional data and stream->queued is set. Further, HTTP/2 resets stream's c->write->ready to 0 if writing blocks, so just fixing ngx_http_v2_send_chain() is not enough. Can be reproduced with test suite on Linux with: TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t The following tests currently fail: h2_keepalive.t, h2_priority.t, h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. Similarly, sendfile() with AIO preloading on FreeBSD can block as well, with similar results. This is, however, harder to reproduce, especially on modern FreeBSD systems, since sendfile() usually does not return EBUSY. Fix is to modify ngx_http_v2_send_chain() so it actually tries to send data to the main connection when called, and to make sure that c->write->ready is set by the relevant event handlers. 
diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c +++ b/src/http/ngx_http_copy_filter_module.c @@ -241,16 +241,32 @@ static void ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev) { ngx_event_aio_t *aio; + ngx_connection_t *c; ngx_http_request_t *r; aio = ev->data; r = aio->data; + c = r->connection; r->main->blocked--; r->aio = 0; ev->complete = 0; - r->connection->write->handler(r->connection->write); +#if (NGX_HTTP_V2) + + if (r->stream) { + /* + * for HTTP/2, update write event to make sure processing will + * reach the main connection to handle sendfile() preload + */ + + c->write->ready = 1; + c->write->active = 0; + } + +#endif + + c->write->handler(c->write); } #endif @@ -323,6 +339,20 @@ ngx_http_copy_thread_event_handler(ngx_e r->main->blocked--; r->aio = 0; +#if (NGX_HTTP_V2) + + if (r->stream) { + /* + * for HTTP/2, update write event to make sure processing will + * reach the main connection to handle sendfile() in threads + */ + + c->write->ready = 1; + c->write->active = 0; + } + +#endif + if (r->done) { /* * trigger connection event handler if the subrequest was diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -3905,6 +3905,20 @@ ngx_http_upstream_thread_event_handler(n r->main->blocked--; r->aio = 0; +#if (NGX_HTTP_V2) + + if (r->stream) { + /* + * for HTTP/2, update write event to make sure processing will + * reach the main connection to handle sendfile() in threads + */ + + c->write->ready = 1; + c->write->active = 0; + } + +#endif + if (r->done) { /* * trigger connection event handler if the subrequest was diff --git a/src/http/v2/ngx_http_v2_filter_module.c b/src/http/v2/ngx_http_v2_filter_module.c --- a/src/http/v2/ngx_http_v2_filter_module.c +++ b/src/http/v2/ngx_http_v2_filter_module.c @@ -1432,6 +1432,9 @@ ngx_http_v2_send_chain(ngx_connection_t size 
= 0; #endif + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, + "http2 send chain: %p", in); + while (in) { size = ngx_buf_size(in->buf); @@ -1450,12 +1453,8 @@ ngx_http_v2_send_chain(ngx_connection_t return NGX_CHAIN_ERROR; } - if (stream->queued) { - fc->write->active = 1; - fc->write->ready = 0; - - } else { - fc->buffered &= ~NGX_HTTP_V2_BUFFERED; + if (ngx_http_v2_filter_send(fc, stream) == NGX_ERROR) { + return NGX_CHAIN_ERROR; } return NULL; @@ -1464,9 +1463,16 @@ ngx_http_v2_send_chain(ngx_connection_t h2c = stream->connection; if (size && ngx_http_v2_flow_control(h2c, stream) == NGX_DECLINED) { - fc->write->active = 1; - fc->write->ready = 0; - return in; + + if (ngx_http_v2_filter_send(fc, stream) == NGX_ERROR) { + return NGX_CHAIN_ERROR; + } + + if (ngx_http_v2_flow_control(h2c, stream) == NGX_DECLINED) { + fc->write->active = 1; + fc->write->ready = 0; + return in; + } } if (in->buf->tag == (ngx_buf_tag_t) &ngx_http_v2_filter_get_shadow) { @@ -1809,6 +1815,11 @@ ngx_http_v2_waiting_queue(ngx_http_v2_co static ngx_inline ngx_int_t ngx_http_v2_filter_send(ngx_connection_t *fc, ngx_http_v2_stream_t *stream) { + if (stream->queued == 0) { + fc->buffered &= ~NGX_HTTP_V2_BUFFERED; + return NGX_OK; + } + stream->blocked = 1; if (ngx_http_v2_send_output_queue(stream->connection) == NGX_ERROR) { -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Tue Nov 23 07:44:00 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 23 Nov 2021 10:44:00 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> Message-ID: <5F9378C5-CB2A-4466-B203-D9AB0F776997@nginx.com> > On 23 Nov 2021, at 06:12, Maxim Dounin wrote: > > Hello! 
> > On Thu, Nov 18, 2021 at 07:46:48PM +0300, Sergey Kandaurov wrote: > >>> On 16 Nov 2021, at 17:41, Maxim Dounin wrote: >>> >>> On Tue, Nov 16, 2021 at 02:59:46PM +0300, Sergey Kandaurov wrote: >>> >>>>> On 11 Nov 2021, at 06:10, Maxim Dounin wrote: >>>>> >>>>> # HG changeset patch >>>>> # User Maxim Dounin >>>>> # Date 1636599377 -10800 >>>>> # Thu Nov 11 05:56:17 2021 +0300 >>>>> # Node ID 76e072a6947a221868705c13973de15319c0d921 >>>>> # Parent 82b750b20c5205d685e59031247fe898f011394e >>>>> HTTP/2: fixed sendfile() aio handling. >>>>> >>>>> With sendfile() in threads ("aio threads; sendfile on;"), client connection >>>>> can block on writing, waiting for sendfile() to complete. In HTTP/2 this >>>>> might result in the request hang, since an attempt to continue processig >>>> >>>> processing >>> >>> Fixed, thnx. >>> >>>>> in thread event handler will call request's write event handler, which >>>>> is usually stopped by ngx_http_v2_send_chain(): it does nothing if there >>>>> are no additional data and stream->queued is set. Further, HTTP/2 resets >>>>> stream's c->write->ready to 0 if writing blocks, so just fixing >>>>> ngx_http_v2_send_chain() is not enough. >>>>> >>>>> Can be reproduced with test suite on Linux with: >>>>> >>>>> TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t >>>>> >>>>> The following tests currently fail: h2_keepalive.t, h2_priority.t, >>>>> h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. >>>>> >>>>> Similarly, sendfile() with AIO preloading on FreeBSD can block as well, >>>>> with similar results. This is, however, harder to reproduce, especially >>>>> on modern FreeBSD systems, since sendfile() usually do not return EBUSY. >>>> >>>> does not >>> >>> Fixed, thnx. >>> >>>>> Fix is to post a write event on HTTP/2 connection in the thread event >>>>> handler (and aio preload handler). This ensures that sendfile() will be >>>>> completed and stream processing will be resumed by HTTP/2 code. 
>>>>> >>>>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c >>>>> --- a/src/http/ngx_http_copy_filter_module.c >>>>> +++ b/src/http/ngx_http_copy_filter_module.c >>>>> @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler >>>>> r->aio = 0; >>>>> ev->complete = 0; >>>>> >>>>> +#if (NGX_HTTP_V2) >>>>> + >>>>> + if (r->stream) { >>>>> + /* >>>>> + * for HTTP/2, trigger a write event on the main connection >>>>> + * to handle sendfile() preload >>>>> + */ >>>>> + >>>>> + ngx_post_event(r->stream->connection->connection->write, >>>>> + &ngx_posted_events); >>>>> + return; >>>>> + } >>>>> + >>>>> +#endif >>>>> + >>>>> r->connection->write->handler(r->connection->write); >>>>> } >>>>> >>>>> @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e >>>>> r->main->blocked--; >>>>> r->aio = 0; >>>>> >>>>> +#if (NGX_HTTP_V2) >>>>> + >>>>> + if (r->stream) { >>>>> + /* >>>>> + * for HTTP/2, trigger a write event on the main connection >>>>> + * to handle sendfile() in threads >>>>> + */ >>>>> + >>>>> + ngx_post_event(r->stream->connection->connection->write, >>>>> + &ngx_posted_events); >>>>> + } >>>>> + >>>>> +#endif >>>>> + > > [...] > > For the record, while testing this patch Sergey found another > issue with sendfile() in threads and HTTP/2: since HTTP/2 might > call sendfile() within the main connection, bypassing request > filter chain, normal r->aio flag checking to prevent multiple > operations do not work, and this eventually results in "task > already active" alerts due to duplicate operations being posted. > With the above patch this issue is much more likely to happen, > since it intentionally triggers write events on the main HTTP/2 > connection. 
> > Below are two patches: the first one addresses the issue with > duplicate operations by additionally checking file->thread_task > before sendfile(), and the second one is a better alternative to > the above patch which doesn't post additional events on the main > connection. > > # HG changeset patch > # User Maxim Dounin > # Date 1637635671 -10800 > # Tue Nov 23 05:47:51 2021 +0300 > # Node ID 8a18b0bff1266db221fe35dc08f4483044ea0f86 > # Parent 82b750b20c5205d685e59031247fe898f011394e > HTTP/2: fixed "task already active" with sendfile in threads. > > With sendfile in threads, "task already active" alerts might appear in logs > if a write event happens on the main HTTP/2 connection, triggering a sendfile > in threads while another thread operation is already running. Observed > with "aio threads; aio_write on; sendfile on;" and with thread event handlers > modified to post a write event to the main HTTP/2 connection (though can > happen without any modifications). > > Fix is to avoid starting a sendfile operation if file->thread_task indicates > that another thread operation is active. 
> > diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c > --- a/src/os/unix/ngx_linux_sendfile_chain.c > +++ b/src/os/unix/ngx_linux_sendfile_chain.c > @@ -324,6 +324,18 @@ ngx_linux_sendfile_thread(ngx_connection > "linux sendfile thread: %d, %uz, %O", > file->file->fd, size, file->file_pos); > > + task = file->file->thread_task; > + > + if (task && task->event.active) { > + /* > + * with HTTP/2, another thread operation might be already running > + * if sendfile() is called as a result of a write event on the main > + * connection > + */ > + > + return NGX_DONE; > + } > + > task = c->sendfile_task; > > if (task == NULL) { > # HG changeset patch > # User Maxim Dounin > # Date 1637637006 -10800 > # Tue Nov 23 06:10:06 2021 +0300 > # Node ID 45c857a5c64cd99309b0d585f2186d219fa357ed > # Parent 8a18b0bff1266db221fe35dc08f4483044ea0f86 > HTTP/2: fixed sendfile() aio handling. > > With sendfile() in threads ("aio threads; sendfile on;"), client connection > can block on writing, waiting for sendfile() to complete. In HTTP/2 this > might result in the request hang, since an attempt to continue processing > in thread event handler will call request's write event handler, which > is usually stopped by ngx_http_v2_send_chain(): it does nothing if there > are no additional data and stream->queued is set. Further, HTTP/2 resets > stream's c->write->ready to 0 if writing blocks, so just fixing > ngx_http_v2_send_chain() is not enough. > > Can be reproduced with test suite on Linux with: > > TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t > > The following tests currently fail: h2_keepalive.t, h2_priority.t, > h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. > > Similarly, sendfile() with AIO preloading on FreeBSD can block as well, > with similar results. This is, however, harder to reproduce, especially > on modern FreeBSD systems, since sendfile() usually does not return EBUSY. 
> > Fix is to modify ngx_http_v2_send_chain() so it actually tries to send > data to the main connection when called, and to make sure that > c->write->ready is set by the relevant event handlers. > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c > +++ b/src/http/ngx_http_copy_filter_module.c > @@ -241,16 +241,32 @@ static void > ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev) > { > ngx_event_aio_t *aio; > + ngx_connection_t *c; > ngx_http_request_t *r; > > aio = ev->data; > r = aio->data; > + c = r->connection; > > r->main->blocked--; > r->aio = 0; > ev->complete = 0; > > - r->connection->write->handler(r->connection->write); > +#if (NGX_HTTP_V2) > + > + if (r->stream) { > + /* > + * for HTTP/2, update write event to make sure processing will > + * reach the main connection to handle sendfile() preload > + */ > + > + c->write->ready = 1; > + c->write->active = 0; > + } > + > +#endif > + > + c->write->handler(c->write); > } > > #endif > @@ -323,6 +339,20 @@ ngx_http_copy_thread_event_handler(ngx_e > r->main->blocked--; > r->aio = 0; > > +#if (NGX_HTTP_V2) > + > + if (r->stream) { > + /* > + * for HTTP/2, update write event to make sure processing will > + * reach the main connection to handle sendfile() in threads > + */ > + > + c->write->ready = 1; > + c->write->active = 0; > + } > + > +#endif > + > if (r->done) { > /* > * trigger connection event handler if the subrequest was > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -3905,6 +3905,20 @@ ngx_http_upstream_thread_event_handler(n > r->main->blocked--; > r->aio = 0; > > +#if (NGX_HTTP_V2) > + > + if (r->stream) { > + /* > + * for HTTP/2, update write event to make sure processing will > + * reach the main connection to handle sendfile() in threads > + */ > + > + c->write->ready = 1; > + c->write->active = 
0; > + } > + > +#endif > + > if (r->done) { > /* > * trigger connection event handler if the subrequest was > diff --git a/src/http/v2/ngx_http_v2_filter_module.c b/src/http/v2/ngx_http_v2_filter_module.c > --- a/src/http/v2/ngx_http_v2_filter_module.c > +++ b/src/http/v2/ngx_http_v2_filter_module.c > @@ -1432,6 +1432,9 @@ ngx_http_v2_send_chain(ngx_connection_t > size = 0; > #endif > > + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, > + "http2 send chain: %p", in); > + > while (in) { > size = ngx_buf_size(in->buf); > > @@ -1450,12 +1453,8 @@ ngx_http_v2_send_chain(ngx_connection_t > return NGX_CHAIN_ERROR; > } > > - if (stream->queued) { > - fc->write->active = 1; > - fc->write->ready = 0; > - > - } else { > - fc->buffered &= ~NGX_HTTP_V2_BUFFERED; > + if (ngx_http_v2_filter_send(fc, stream) == NGX_ERROR) { > + return NGX_CHAIN_ERROR; > } > > return NULL; > @@ -1464,9 +1463,16 @@ ngx_http_v2_send_chain(ngx_connection_t > h2c = stream->connection; > > if (size && ngx_http_v2_flow_control(h2c, stream) == NGX_DECLINED) { > - fc->write->active = 1; > - fc->write->ready = 0; > - return in; > + > + if (ngx_http_v2_filter_send(fc, stream) == NGX_ERROR) { > + return NGX_CHAIN_ERROR; > + } > + > + if (ngx_http_v2_flow_control(h2c, stream) == NGX_DECLINED) { > + fc->write->active = 1; > + fc->write->ready = 0; > + return in; > + } > } > > if (in->buf->tag == (ngx_buf_tag_t) &ngx_http_v2_filter_get_shadow) { > @@ -1809,6 +1815,11 @@ ngx_http_v2_waiting_queue(ngx_http_v2_co > static ngx_inline ngx_int_t > ngx_http_v2_filter_send(ngx_connection_t *fc, ngx_http_v2_stream_t *stream) > { > + if (stream->queued == 0) { > + fc->buffered &= ~NGX_HTTP_V2_BUFFERED; > + return NGX_OK; > + } > + > stream->blocked = 1; > > if (ngx_http_v2_send_output_queue(stream->connection) == NGX_ERROR) { > Looks fine. JFTR: all known issues should be fixed now. 
-- Sergey Kandaurov From pluknet at nginx.com Tue Nov 23 11:20:02 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 23 Nov 2021 11:20:02 +0000 Subject: [nginx] Version bump. Message-ID: details: https://hg.nginx.org/nginx/rev/284f03d6f154 branches: changeset: 7972:284f03d6f154 user: Sergey Kandaurov date: Tue Nov 23 12:52:43 2021 +0300 description: Version bump. diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r 82b750b20c52 -r 284f03d6f154 src/core/nginx.h --- a/src/core/nginx.h Tue Nov 02 17:49:22 2021 +0300 +++ b/src/core/nginx.h Tue Nov 23 12:52:43 2021 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1021004 -#define NGINX_VERSION "1.21.4" +#define nginx_version 1021005 +#define NGINX_VERSION "1.21.5" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From pluknet at nginx.com Tue Nov 23 11:20:05 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 23 Nov 2021 11:20:05 +0000 Subject: [nginx] SSL: $ssl_curve (ticket #2135). Message-ID: details: https://hg.nginx.org/nginx/rev/3443c02ca1d1 branches: changeset: 7973:3443c02ca1d1 user: Sergey Kandaurov date: Mon Nov 01 18:09:34 2021 +0300 description: SSL: $ssl_curve (ticket #2135). The variable contains a negotiated curve used for the handshake key exchange process. Known curves are listed by their names, unknown ones are shown in hex. Note that for resumed sessions in TLSv1.2 and older protocols, $ssl_curve contains the curve used during the initial handshake, while in TLSv1.3 it contains the curve used during the session resumption (see the SSL_get_negotiated_group manual page for details). The variable is only meaningful when using OpenSSL 3.0 and above. With older versions the variable is empty. 
diffstat: src/event/ngx_event_openssl.c | 36 ++++++++++++++++++++++++++++++++++ src/event/ngx_event_openssl.h | 2 + src/http/modules/ngx_http_ssl_module.c | 3 ++ src/stream/ngx_stream_ssl_module.c | 3 ++ 4 files changed, 44 insertions(+), 0 deletions(-) diffs (84 lines): diff -r 284f03d6f154 -r 3443c02ca1d1 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Nov 23 12:52:43 2021 +0300 +++ b/src/event/ngx_event_openssl.c Mon Nov 01 18:09:34 2021 +0300 @@ -4734,6 +4734,42 @@ ngx_ssl_get_ciphers(ngx_connection_t *c, ngx_int_t +ngx_ssl_get_curve(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s) +{ +#ifdef SSL_get_negotiated_group + + int nid; + + nid = SSL_get_negotiated_group(c->ssl->connection); + + if (nid != NID_undef) { + + if ((nid & TLSEXT_nid_unknown) == 0) { + s->len = ngx_strlen(OBJ_nid2sn(nid)); + s->data = (u_char *) OBJ_nid2sn(nid); + return NGX_OK; + } + + s->len = sizeof("0x0000") - 1; + + s->data = ngx_pnalloc(pool, s->len); + if (s->data == NULL) { + return NGX_ERROR; + } + + ngx_sprintf(s->data, "0x%04xd", nid & 0xffff); + + return NGX_OK; + } + +#endif + + s->len = 0; + return NGX_OK; +} + + +ngx_int_t ngx_ssl_get_curves(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s) { #ifdef SSL_CTRL_GET_CURVES diff -r 284f03d6f154 -r 3443c02ca1d1 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Nov 23 12:52:43 2021 +0300 +++ b/src/event/ngx_event_openssl.h Mon Nov 01 18:09:34 2021 +0300 @@ -256,6 +256,8 @@ ngx_int_t ngx_ssl_get_cipher_name(ngx_co ngx_str_t *s); ngx_int_t ngx_ssl_get_ciphers(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s); +ngx_int_t ngx_ssl_get_curve(ngx_connection_t *c, ngx_pool_t *pool, + ngx_str_t *s); ngx_int_t ngx_ssl_get_curves(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s); ngx_int_t ngx_ssl_get_session_id(ngx_connection_t *c, ngx_pool_t *pool, diff -r 284f03d6f154 -r 3443c02ca1d1 src/http/modules/ngx_http_ssl_module.c --- a/src/http/modules/ngx_http_ssl_module.c Tue Nov 23 12:52:43 
2021 +0300 +++ b/src/http/modules/ngx_http_ssl_module.c Mon Nov 01 18:09:34 2021 +0300 @@ -342,6 +342,9 @@ static ngx_http_variable_t ngx_http_ssl { ngx_string("ssl_ciphers"), NULL, ngx_http_ssl_variable, (uintptr_t) ngx_ssl_get_ciphers, NGX_HTTP_VAR_CHANGEABLE, 0 }, + { ngx_string("ssl_curve"), NULL, ngx_http_ssl_variable, + (uintptr_t) ngx_ssl_get_curve, NGX_HTTP_VAR_CHANGEABLE, 0 }, + { ngx_string("ssl_curves"), NULL, ngx_http_ssl_variable, (uintptr_t) ngx_ssl_get_curves, NGX_HTTP_VAR_CHANGEABLE, 0 }, diff -r 284f03d6f154 -r 3443c02ca1d1 src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c Tue Nov 23 12:52:43 2021 +0300 +++ b/src/stream/ngx_stream_ssl_module.c Mon Nov 01 18:09:34 2021 +0300 @@ -269,6 +269,9 @@ static ngx_stream_variable_t ngx_stream { ngx_string("ssl_ciphers"), NULL, ngx_stream_ssl_variable, (uintptr_t) ngx_ssl_get_ciphers, NGX_STREAM_VAR_CHANGEABLE, 0 }, + { ngx_string("ssl_curve"), NULL, ngx_stream_ssl_variable, + (uintptr_t) ngx_ssl_get_curve, NGX_STREAM_VAR_CHANGEABLE, 0 }, + { ngx_string("ssl_curves"), NULL, ngx_stream_ssl_variable, (uintptr_t) ngx_ssl_get_curves, NGX_STREAM_VAR_CHANGEABLE, 0 }, From underverse2009 at gmail.com Tue Nov 23 12:02:12 2021 From: underverse2009 at gmail.com (Underverse Underverse) Date: Tue, 23 Nov 2021 15:02:12 +0300 Subject: nginx-quic :authority header not passed to $http_host Message-ID: <37be8a05-e5ed-2a6d-dda9-efc95b264462@gmail.com> When using http3 there's no $http_host variable, as there's no Host: header but :authority pseudo-header which is not passed to any $http_ variable. How I can get contents of :authority pseudo-header to pass it to my backend? 
From mdounin at mdounin.ru Wed Nov 24 03:46:44 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 24 Nov 2021 06:46:44 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: <5F9378C5-CB2A-4466-B203-D9AB0F776997@nginx.com> References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> <5F9378C5-CB2A-4466-B203-D9AB0F776997@nginx.com> Message-ID: Hello! On Tue, Nov 23, 2021 at 10:44:00AM +0300, Sergey Kandaurov wrote: > > On 23 Nov 2021, at 06:12, Maxim Dounin wrote: > > > > On Thu, Nov 18, 2021 at 07:46:48PM +0300, Sergey Kandaurov wrote: > > > >>> On 16 Nov 2021, at 17:41, Maxim Dounin wrote: > >>> > >>> On Tue, Nov 16, 2021 at 02:59:46PM +0300, Sergey Kandaurov wrote: > >>> > >>>>> On 11 Nov 2021, at 06:10, Maxim Dounin wrote: > >>>>> > >>>>> # HG changeset patch > >>>>> # User Maxim Dounin > >>>>> # Date 1636599377 -10800 > >>>>> # Thu Nov 11 05:56:17 2021 +0300 > >>>>> # Node ID 76e072a6947a221868705c13973de15319c0d921 > >>>>> # Parent 82b750b20c5205d685e59031247fe898f011394e > >>>>> HTTP/2: fixed sendfile() aio handling. > >>>>> > >>>>> With sendfile() in threads ("aio threads; sendfile on;"), client connection > >>>>> can block on writing, waiting for sendfile() to complete. In HTTP/2 this > >>>>> might result in the request hang, since an attempt to continue processig > >>>> > >>>> processing > >>> > >>> Fixed, thnx. > >>> > >>>>> in thread event handler will call request's write event handler, which > >>>>> is usually stopped by ngx_http_v2_send_chain(): it does nothing if there > >>>>> are no additional data and stream->queued is set. Further, HTTP/2 resets > >>>>> stream's c->write->ready to 0 if writing blocks, so just fixing > >>>>> ngx_http_v2_send_chain() is not enough. 
> >>>>> > >>>>> Can be reproduced with test suite on Linux with: > >>>>> > >>>>> TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t > >>>>> > >>>>> The following tests currently fail: h2_keepalive.t, h2_priority.t, > >>>>> h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. > >>>>> > >>>>> Similarly, sendfile() with AIO preloading on FreeBSD can block as well, > >>>>> with similar results. This is, however, harder to reproduce, especially > >>>>> on modern FreeBSD systems, since sendfile() usually do not return EBUSY. > >>>> > >>>> does not > >>> > >>> Fixed, thnx. > >>> > >>>>> Fix is to post a write event on HTTP/2 connection in the thread event > >>>>> handler (and aio preload handler). This ensures that sendfile() will be > >>>>> completed and stream processing will be resumed by HTTP/2 code. > >>>>> > >>>>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > >>>>> --- a/src/http/ngx_http_copy_filter_module.c > >>>>> +++ b/src/http/ngx_http_copy_filter_module.c > >>>>> @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler > >>>>> r->aio = 0; > >>>>> ev->complete = 0; > >>>>> > >>>>> +#if (NGX_HTTP_V2) > >>>>> + > >>>>> + if (r->stream) { > >>>>> + /* > >>>>> + * for HTTP/2, trigger a write event on the main connection > >>>>> + * to handle sendfile() preload > >>>>> + */ > >>>>> + > >>>>> + ngx_post_event(r->stream->connection->connection->write, > >>>>> + &ngx_posted_events); > >>>>> + return; > >>>>> + } > >>>>> + > >>>>> +#endif > >>>>> + > >>>>> r->connection->write->handler(r->connection->write); > >>>>> } > >>>>> > >>>>> @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e > >>>>> r->main->blocked--; > >>>>> r->aio = 0; > >>>>> > >>>>> +#if (NGX_HTTP_V2) > >>>>> + > >>>>> + if (r->stream) { > >>>>> + /* > >>>>> + * for HTTP/2, trigger a write event on the main connection > >>>>> + * to handle sendfile() in threads > >>>>> + */ > >>>>> + > >>>>> + 
ngx_post_event(r->stream->connection->connection->write, > >>>>> + &ngx_posted_events); > >>>>> + } > >>>>> + > >>>>> +#endif > >>>>> + > > > > [...] > > > > For the record, while testing this patch Sergey found another > > issue with sendfile() in threads and HTTP/2: since HTTP/2 might > > call sendfile() within the main connection, bypassing request > > filter chain, normal r->aio flag checking to prevent multiple > > operations do not work, and this eventually results in "task > > already active" alerts due to duplicate operations being posted. > > With the above patch this issue is much more likely to happen, > > since it intentionally triggers write events on the main HTTP/2 > > connection. > > > > Below are two patches: the first one addresses the issue with > > duplicate operations by additionally checking file->thread_task > > before sendfile(), and the second one is a better alternative to > > the above patch which doesn't post additional events on the main > > connection. > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1637635671 -10800 > > # Tue Nov 23 05:47:51 2021 +0300 > > # Node ID 8a18b0bff1266db221fe35dc08f4483044ea0f86 > > # Parent 82b750b20c5205d685e59031247fe898f011394e > > HTTP/2: fixed "task already active" with sendfile in threads. > > > > With sendfile in threads, "task already active" alerts might appear in logs > > if a write event happens on the main HTTP/2 connection, triggering a sendfile > > in threads while another thread operation is already running. Observed > > with "aio threads; aio_write on; sendfile on;" and with thread event handlers > > modified to post a write event to the main HTTP/2 connection (though can > > happen without any modifications). > > > > Fix is to avoid starting a sendfile operation if file->thread_task indicates > > that another thread operation is active. 
> > > > diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c > > --- a/src/os/unix/ngx_linux_sendfile_chain.c > > +++ b/src/os/unix/ngx_linux_sendfile_chain.c > > @@ -324,6 +324,18 @@ ngx_linux_sendfile_thread(ngx_connection > > "linux sendfile thread: %d, %uz, %O", > > file->file->fd, size, file->file_pos); > > > > + task = file->file->thread_task; > > + > > + if (task && task->event.active) { > > + /* > > + * with HTTP/2, another thread operation might be already running > > + * if sendfile() is called as a result of a write event on the main > > + * connection > > + */ > > + > > + return NGX_DONE; > > + } > > + > > task = c->sendfile_task; > > > > if (task == NULL) { After looking once again into it, I tend to think this patch is incomplete. In particular, the particular check won't stop additional sendfile() if there are multiple files and different files use different thread tasks. While this is not something possible with standard modules, but nevertheless. Further, the same problem seems to apply to aio preloading (though unlikely to happen in practice). The following patch checks r->aio in relevant handlers to prevent sendfile() when another operation is running: # HG changeset patch # User Maxim Dounin # Date 1637723745 -10800 # Wed Nov 24 06:15:45 2021 +0300 # Node ID 3450841798597536d17ced29b35d1d90ce06ce0d # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 HTTP/2: fixed "task already active" with sendfile in threads. With sendfile in threads, "task already active" alerts might appear in logs if a write event happens on the main HTTP/2 connection, triggering a sendfile in threads while another thread operation is already running. Observed with "aio threads; aio_write on; sendfile on;" and with thread event handlers modified to post a write event to the main HTTP/2 connection (though can happen without any modifications). 
Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate aio operation, resulting in "second aio post" alerts. This is, however, harder to reproduce, especially on modern FreeBSD systems, since sendfile() usually does not return EBUSY. Fix is to avoid starting a sendfile operation if other thread operation is active by checking r->aio in the thread handler (and, similarly, in aio preload handler). diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c +++ b/src/http/ngx_http_copy_filter_module.c @@ -219,6 +219,22 @@ ngx_http_copy_aio_sendfile_preload(ngx_b ngx_http_request_t *r; ngx_output_chain_ctx_t *ctx; +#if (NGX_HTTP_V2) + + r = file->file->aio->data; + + if (r->aio) { + /* + * with HTTP/2, another thread operation might be already running + * if sendfile() is called as a result of a write event on the main + * connection + */ + + return NGX_OK; + } + +#endif + n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); if (n == NGX_AGAIN) { @@ -270,6 +286,23 @@ ngx_http_copy_thread_handler(ngx_thread_ r = file->thread_ctx; +#if (NGX_HTTP_V2) + + if (r->aio + && r->stream + && r->stream->connection->connection->sendfile_task == task) + { + /* + * with HTTP/2, another thread operation might be already running + * if sendfile() is called as a result of a write event on the main + * connection + */ + + return NGX_OK; + } + +#endif + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); tp = clcf->thread_pool; diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -3854,6 +3854,23 @@ ngx_http_upstream_thread_handler(ngx_thr r = file->thread_ctx; p = r->upstream->pipe; +#if (NGX_HTTP_V2) + + if (r->aio + && r->stream + && r->stream->connection->connection->sendfile_task == task) + { + /* + * with HTTP/2, another thread operation might be already running + * if 
sendfile() is called as a result of a write event on the main + * connection + */ + + return NGX_OK; + } + +#endif + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); tp = clcf->thread_pool; [...] No changes in the second patch. -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Wed Nov 24 15:50:10 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 24 Nov 2021 18:50:10 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> <5F9378C5-CB2A-4466-B203-D9AB0F776997@nginx.com> Message-ID: <82D01D9B-BE9A-4619-95BA-D63948429FCC@nginx.com> > On 24 Nov 2021, at 06:46, Maxim Dounin wrote: > > Hello! > > On Tue, Nov 23, 2021 at 10:44:00AM +0300, Sergey Kandaurov wrote: > >>> On 23 Nov 2021, at 06:12, Maxim Dounin wrote: >>> >>> On Thu, Nov 18, 2021 at 07:46:48PM +0300, Sergey Kandaurov wrote: >>> >>>>> On 16 Nov 2021, at 17:41, Maxim Dounin wrote: >>>>> >>>>> On Tue, Nov 16, 2021 at 02:59:46PM +0300, Sergey Kandaurov wrote: >>>>> >>>>>>> On 11 Nov 2021, at 06:10, Maxim Dounin wrote: >>>>>>> >>>>>>> # HG changeset patch >>>>>>> # User Maxim Dounin >>>>>>> # Date 1636599377 -10800 >>>>>>> # Thu Nov 11 05:56:17 2021 +0300 >>>>>>> # Node ID 76e072a6947a221868705c13973de15319c0d921 >>>>>>> # Parent 82b750b20c5205d685e59031247fe898f011394e >>>>>>> HTTP/2: fixed sendfile() aio handling. >>>>>>> >>>>>>> With sendfile() in threads ("aio threads; sendfile on;"), client connection >>>>>>> can block on writing, waiting for sendfile() to complete. In HTTP/2 this >>>>>>> might result in the request hang, since an attempt to continue processig >>>>>> >>>>>> processing >>>>> >>>>> Fixed, thnx. 
>>>>> >>>>>>> in thread event handler will call request's write event handler, which >>>>>>> is usually stopped by ngx_http_v2_send_chain(): it does nothing if there >>>>>>> are no additional data and stream->queued is set. Further, HTTP/2 resets >>>>>>> stream's c->write->ready to 0 if writing blocks, so just fixing >>>>>>> ngx_http_v2_send_chain() is not enough. >>>>>>> >>>>>>> Can be reproduced with test suite on Linux with: >>>>>>> >>>>>>> TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t >>>>>>> >>>>>>> The following tests currently fail: h2_keepalive.t, h2_priority.t, >>>>>>> h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. >>>>>>> >>>>>>> Similarly, sendfile() with AIO preloading on FreeBSD can block as well, >>>>>>> with similar results. This is, however, harder to reproduce, especially >>>>>>> on modern FreeBSD systems, since sendfile() usually do not return EBUSY. >>>>>> >>>>>> does not >>>>> >>>>> Fixed, thnx. >>>>> >>>>>>> Fix is to post a write event on HTTP/2 connection in the thread event >>>>>>> handler (and aio preload handler). This ensures that sendfile() will be >>>>>>> completed and stream processing will be resumed by HTTP/2 code. 
>>>>>>> >>>>>>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c >>>>>>> --- a/src/http/ngx_http_copy_filter_module.c >>>>>>> +++ b/src/http/ngx_http_copy_filter_module.c >>>>>>> @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler >>>>>>> r->aio = 0; >>>>>>> ev->complete = 0; >>>>>>> >>>>>>> +#if (NGX_HTTP_V2) >>>>>>> + >>>>>>> + if (r->stream) { >>>>>>> + /* >>>>>>> + * for HTTP/2, trigger a write event on the main connection >>>>>>> + * to handle sendfile() preload >>>>>>> + */ >>>>>>> + >>>>>>> + ngx_post_event(r->stream->connection->connection->write, >>>>>>> + &ngx_posted_events); >>>>>>> + return; >>>>>>> + } >>>>>>> + >>>>>>> +#endif >>>>>>> + >>>>>>> r->connection->write->handler(r->connection->write); >>>>>>> } >>>>>>> >>>>>>> @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e >>>>>>> r->main->blocked--; >>>>>>> r->aio = 0; >>>>>>> >>>>>>> +#if (NGX_HTTP_V2) >>>>>>> + >>>>>>> + if (r->stream) { >>>>>>> + /* >>>>>>> + * for HTTP/2, trigger a write event on the main connection >>>>>>> + * to handle sendfile() in threads >>>>>>> + */ >>>>>>> + >>>>>>> + ngx_post_event(r->stream->connection->connection->write, >>>>>>> + &ngx_posted_events); >>>>>>> + } >>>>>>> + >>>>>>> +#endif >>>>>>> + >>> >>> [...] >>> >>> For the record, while testing this patch Sergey found another >>> issue with sendfile() in threads and HTTP/2: since HTTP/2 might >>> call sendfile() within the main connection, bypassing request >>> filter chain, normal r->aio flag checking to prevent multiple >>> operations do not work, and this eventually results in "task >>> already active" alerts due to duplicate operations being posted. >>> With the above patch this issue is much more likely to happen, >>> since it intentionally triggers write events on the main HTTP/2 >>> connection. 
>>> >>> Below are two patches: the first one addresses the issue with >>> duplicate operations by additionally checking file->thread_task >>> before sendfile(), and the second one is a better alternative to >>> the above patch which doesn't post additional events on the main >>> connection. >>> >>> # HG changeset patch >>> # User Maxim Dounin >>> # Date 1637635671 -10800 >>> # Tue Nov 23 05:47:51 2021 +0300 >>> # Node ID 8a18b0bff1266db221fe35dc08f4483044ea0f86 >>> # Parent 82b750b20c5205d685e59031247fe898f011394e >>> HTTP/2: fixed "task already active" with sendfile in threads. >>> >>> With sendfile in threads, "task already active" alerts might appear in logs >>> if a write event happens on the main HTTP/2 connection, triggering a sendfile >>> in threads while another thread operation is already running. Observed >>> with "aio threads; aio_write on; sendfile on;" and with thread event handlers >>> modified to post a write event to the main HTTP/2 connection (though can >>> happen without any modifications). >>> >>> Fix is to avoid starting a sendfile operation if file->thread_task indicates >>> that another thread operation is active. >>> >>> diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c >>> --- a/src/os/unix/ngx_linux_sendfile_chain.c >>> +++ b/src/os/unix/ngx_linux_sendfile_chain.c >>> @@ -324,6 +324,18 @@ ngx_linux_sendfile_thread(ngx_connection >>> "linux sendfile thread: %d, %uz, %O", >>> file->file->fd, size, file->file_pos); >>> >>> + task = file->file->thread_task; >>> + >>> + if (task && task->event.active) { >>> + /* >>> + * with HTTP/2, another thread operation might be already running >>> + * if sendfile() is called as a result of a write event on the main >>> + * connection >>> + */ >>> + >>> + return NGX_DONE; >>> + } >>> + >>> task = c->sendfile_task; >>> >>> if (task == NULL) { > > After looking once again into it, I tend to think this patch is > incomplete. 
In particular, the particular check won't stop > additional sendfile() if there are multiple files and different > files use different thread tasks. While this is not something > possible with standard modules, but nevertheless. Could you please clarify? Is it something rather theoretical (and irrelevant to subrequests)? Because there is a room for only one task in c->sendfile_task. > > Further, the same problem seems to apply to aio preloading (though > unlikely to happen in practice). > > The following patch checks r->aio in relevant handlers to prevent > sendfile() when another operation is running: As a positive effect, moving the check down to thread handlers allows to complete a sendfile operation, within http2 write handler, after thread task completion, since now the check is performed after the "if (task->event.complete) {" logic in ngx_linux_sendfile_thread(). E.g.: 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 write handler 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 frame out: 000055BA978A5E28 sid:1 bl:0 len:8192 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 frame out: 000055BA978A5D20 sid:1 bl:1 len:8192 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: 15, 8192, 8794 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: complete:1 err:0 sent:8192 2021/11/24 15:16:43 [debug] 474666#474666: *1 no tcp_nodelay 2021/11/24 15:16:43 [debug] 474666#474666: *1 tcp_nopush 2021/11/24 15:16:43 [debug] 474666#474666: *1 writev: 9 of 9 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: 15, 8192, 16986 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: complete:0 > > # HG changeset patch > # User Maxim Dounin > # Date 1637723745 -10800 > # Wed Nov 24 06:15:45 2021 +0300 > # Node ID 3450841798597536d17ced29b35d1d90ce06ce0d > # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 > HTTP/2: fixed "task already active" with sendfile in threads. 
> > With sendfile in threads, "task already active" alerts might appear in logs > if a write event happens on the main HTTP/2 connection, triggering a sendfile > in threads while another thread operation is already running. Observed > with "aio threads; aio_write on; sendfile on;" and with thread event handlers > modified to post a write event to the main HTTP/2 connection (though can > happen without any modifications). > > Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate > aio operation, resulting in "second aio post" alerts. This is, however, > harder to reproduce, especially on modern FreeBSD systems, since sendfile() > usually does not return EBUSY. > > Fix is to avoid starting a sendfile operation if other thread operation > is active by checking r->aio in the thread handler (and, similarly, in > aio preload handler). > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c > +++ b/src/http/ngx_http_copy_filter_module.c > @@ -219,6 +219,22 @@ ngx_http_copy_aio_sendfile_preload(ngx_b > ngx_http_request_t *r; > ngx_output_chain_ctx_t *ctx; > > +#if (NGX_HTTP_V2) > + > + r = file->file->aio->data; > + > + if (r->aio) { > + /* > + * with HTTP/2, another thread operation might be already running > + * if sendfile() is called as a result of a write event on the main > + * connection > + */ > + > + return NGX_OK; > + } > + > +#endif > + Shouldn't this also check for r->stream to narrow it down to HTTP/2 ? At least, it looks inconsistent taking the code is under NGX_HTTP_V2. On the contrary, shouldn't this expand to non-HTTP/2 protocols as well? 
> n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); > > if (n == NGX_AGAIN) { > @@ -270,6 +286,23 @@ ngx_http_copy_thread_handler(ngx_thread_ > > r = file->thread_ctx; > > +#if (NGX_HTTP_V2) > + > + if (r->aio > + && r->stream > + && r->stream->connection->connection->sendfile_task == task) > + { I'm not quite sure how the last part of condition is related (if at all) to stop additional sendfile() for different files using different tasks, as outlined above. Also, looking at ngx_linux_sendfile_thread(), where c->sendfile_task is initially allocated, I cannot imagine how the tasks won't match there. I guess it is enough to check for a non-NULL sendfile_task. > + /* > + * with HTTP/2, another thread operation might be already running > + * if sendfile() is called as a result of a write event on the main > + * connection > + */ > + > + return NGX_OK; > + } > + > +#endif > + > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > tp = clcf->thread_pool; > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -3854,6 +3854,23 @@ ngx_http_upstream_thread_handler(ngx_thr > r = file->thread_ctx; > p = r->upstream->pipe; > > +#if (NGX_HTTP_V2) > + > + if (r->aio > + && r->stream > + && r->stream->connection->connection->sendfile_task == task) > + { > + /* > + * with HTTP/2, another thread operation might be already running > + * if sendfile() is called as a result of a write event on the main > + * connection > + */ > + > + return NGX_OK; > + } > + > +#endif > + > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > tp = clcf->thread_pool; > > > [...] > > No changes in the second patch. 
-- Sergey Kandaurov From mdounin at mdounin.ru Wed Nov 24 19:06:07 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 24 Nov 2021 22:06:07 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: <82D01D9B-BE9A-4619-95BA-D63948429FCC@nginx.com> References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> <5F9378C5-CB2A-4466-B203-D9AB0F776997@nginx.com> <82D01D9B-BE9A-4619-95BA-D63948429FCC@nginx.com> Message-ID: Hello! On Wed, Nov 24, 2021 at 06:50:10PM +0300, Sergey Kandaurov wrote: > > On 24 Nov 2021, at 06:46, Maxim Dounin wrote: > > > > On Tue, Nov 23, 2021 at 10:44:00AM +0300, Sergey Kandaurov wrote: > > > >>> On 23 Nov 2021, at 06:12, Maxim Dounin wrote: > >>> > >>> On Thu, Nov 18, 2021 at 07:46:48PM +0300, Sergey Kandaurov wrote: > >>> > >>>>> On 16 Nov 2021, at 17:41, Maxim Dounin wrote: > >>>>> > >>>>> On Tue, Nov 16, 2021 at 02:59:46PM +0300, Sergey Kandaurov wrote: > >>>>> > >>>>>>> On 11 Nov 2021, at 06:10, Maxim Dounin wrote: > >>>>>>> > >>>>>>> # HG changeset patch > >>>>>>> # User Maxim Dounin > >>>>>>> # Date 1636599377 -10800 > >>>>>>> # Thu Nov 11 05:56:17 2021 +0300 > >>>>>>> # Node ID 76e072a6947a221868705c13973de15319c0d921 > >>>>>>> # Parent 82b750b20c5205d685e59031247fe898f011394e > >>>>>>> HTTP/2: fixed sendfile() aio handling. > >>>>>>> > >>>>>>> With sendfile() in threads ("aio threads; sendfile on;"), client connection > >>>>>>> can block on writing, waiting for sendfile() to complete. In HTTP/2 this > >>>>>>> might result in the request hang, since an attempt to continue processig > >>>>>> > >>>>>> processing > >>>>> > >>>>> Fixed, thnx. > >>>>> > >>>>>>> in thread event handler will call request's write event handler, which > >>>>>>> is usually stopped by ngx_http_v2_send_chain(): it does nothing if there > >>>>>>> are no additional data and stream->queued is set. 
Further, HTTP/2 resets > >>>>>>> stream's c->write->ready to 0 if writing blocks, so just fixing > >>>>>>> ngx_http_v2_send_chain() is not enough. > >>>>>>> > >>>>>>> Can be reproduced with test suite on Linux with: > >>>>>>> > >>>>>>> TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t > >>>>>>> > >>>>>>> The following tests currently fail: h2_keepalive.t, h2_priority.t, > >>>>>>> h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. > >>>>>>> > >>>>>>> Similarly, sendfile() with AIO preloading on FreeBSD can block as well, > >>>>>>> with similar results. This is, however, harder to reproduce, especially > >>>>>>> on modern FreeBSD systems, since sendfile() usually do not return EBUSY. > >>>>>> > >>>>>> does not > >>>>> > >>>>> Fixed, thnx. > >>>>> > >>>>>>> Fix is to post a write event on HTTP/2 connection in the thread event > >>>>>>> handler (and aio preload handler). This ensures that sendfile() will be > >>>>>>> completed and stream processing will be resumed by HTTP/2 code. 
> >>>>>>> > >>>>>>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > >>>>>>> --- a/src/http/ngx_http_copy_filter_module.c > >>>>>>> +++ b/src/http/ngx_http_copy_filter_module.c > >>>>>>> @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler > >>>>>>> r->aio = 0; > >>>>>>> ev->complete = 0; > >>>>>>> > >>>>>>> +#if (NGX_HTTP_V2) > >>>>>>> + > >>>>>>> + if (r->stream) { > >>>>>>> + /* > >>>>>>> + * for HTTP/2, trigger a write event on the main connection > >>>>>>> + * to handle sendfile() preload > >>>>>>> + */ > >>>>>>> + > >>>>>>> + ngx_post_event(r->stream->connection->connection->write, > >>>>>>> + &ngx_posted_events); > >>>>>>> + return; > >>>>>>> + } > >>>>>>> + > >>>>>>> +#endif > >>>>>>> + > >>>>>>> r->connection->write->handler(r->connection->write); > >>>>>>> } > >>>>>>> > >>>>>>> @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e > >>>>>>> r->main->blocked--; > >>>>>>> r->aio = 0; > >>>>>>> > >>>>>>> +#if (NGX_HTTP_V2) > >>>>>>> + > >>>>>>> + if (r->stream) { > >>>>>>> + /* > >>>>>>> + * for HTTP/2, trigger a write event on the main connection > >>>>>>> + * to handle sendfile() in threads > >>>>>>> + */ > >>>>>>> + > >>>>>>> + ngx_post_event(r->stream->connection->connection->write, > >>>>>>> + &ngx_posted_events); > >>>>>>> + } > >>>>>>> + > >>>>>>> +#endif > >>>>>>> + > >>> > >>> [...] > >>> > >>> For the record, while testing this patch Sergey found another > >>> issue with sendfile() in threads and HTTP/2: since HTTP/2 might > >>> call sendfile() within the main connection, bypassing request > >>> filter chain, normal r->aio flag checking to prevent multiple > >>> operations do not work, and this eventually results in "task > >>> already active" alerts due to duplicate operations being posted. > >>> With the above patch this issue is much more likely to happen, > >>> since it intentionally triggers write events on the main HTTP/2 > >>> connection. 
> >>> > >>> Below are two patches: the first one addresses the issue with > >>> duplicate operations by additionally checking file->thread_task > >>> before sendfile(), and the second one is a better alternative to > >>> the above patch which doesn't post additional events on the main > >>> connection. > >>> > >>> # HG changeset patch > >>> # User Maxim Dounin > >>> # Date 1637635671 -10800 > >>> # Tue Nov 23 05:47:51 2021 +0300 > >>> # Node ID 8a18b0bff1266db221fe35dc08f4483044ea0f86 > >>> # Parent 82b750b20c5205d685e59031247fe898f011394e > >>> HTTP/2: fixed "task already active" with sendfile in threads. > >>> > >>> With sendfile in threads, "task already active" alerts might appear in logs > >>> if a write event happens on the main HTTP/2 connection, triggering a sendfile > >>> in threads while another thread operation is already running. Observed > >>> with "aio threads; aio_write on; sendfile on;" and with thread event handlers > >>> modified to post a write event to the main HTTP/2 connection (though can > >>> happen without any modifications). > >>> > >>> Fix is to avoid starting a sendfile operation if file->thread_task indicates > >>> that another thread operation is active. 
> >>> > >>> diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c > >>> --- a/src/os/unix/ngx_linux_sendfile_chain.c > >>> +++ b/src/os/unix/ngx_linux_sendfile_chain.c > >>> @@ -324,6 +324,18 @@ ngx_linux_sendfile_thread(ngx_connection > >>> "linux sendfile thread: %d, %uz, %O", > >>> file->file->fd, size, file->file_pos); > >>> > >>> + task = file->file->thread_task; > >>> + > >>> + if (task && task->event.active) { > >>> + /* > >>> + * with HTTP/2, another thread operation might be already running > >>> + * if sendfile() is called as a result of a write event on the main > >>> + * connection > >>> + */ > >>> + > >>> + return NGX_DONE; > >>> + } > >>> + > >>> task = c->sendfile_task; > >>> > >>> if (task == NULL) { > > > > After looking once again into it, I tend to think this patch is > > incomplete. In particular, the particular check won't stop > > additional sendfile() if there are multiple files and different > > files use different thread tasks. While this is not something > > possible with standard modules, but nevertheless. > > Could you please clarify? > Is it something rather theoretical (and irrelevant to subrequests)? > Because there is a room for only one task in c->sendfile_task. The code above checks file->file->thread_task, not c->sendfile_task, so there can be many different tasks. This is not something about subrequests: multiple thread/aio operations are allowed with subrequests as long as they are within different subrequests (the r->aio flag is within a particular subrequest). The case I'm talking about can happen if a module returns multiple buffers with different files, and these files use different thread tasks (for non-sendfile thread operations). This is not something which can happen with the standard modules, but might happen in theory. > > Further, the same problem seems to apply to aio preloading (though > > unlikely to happen in practice). 
> > > > The following patch checks r->aio in relevant handlers to prevent > > sendfile() when another operation is running: > > As a positive effect, moving the check down to thread handlers allows > to complete a sendfile operation, within http2 write handler, after > thread task completion, since now the check is performed after the > "if (task->event.complete) {" logic in ngx_linux_sendfile_thread(). > E.g.: > 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 write handler > 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 frame out: 000055BA978A5E28 sid:1 bl:0 len:8192 > 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 frame out: 000055BA978A5D20 sid:1 bl:1 len:8192 > 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: 15, 8192, 8794 > 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: complete:1 err:0 sent:8192 > 2021/11/24 15:16:43 [debug] 474666#474666: *1 no tcp_nodelay > 2021/11/24 15:16:43 [debug] 474666#474666: *1 tcp_nopush > 2021/11/24 15:16:43 [debug] 474666#474666: *1 writev: 9 of 9 > 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: 15, 8192, 16986 > 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: complete:0 While the side effect is expected, I don't think this is not something important. As far as I understand, you are looking at what happens with the previous version of the initial patch with ngx_post_event(), but in real world such a write event is unlikely. (Not to mention that this won't happen with HTTP/1.x, and only applies to HTTP/2 without SSL.) > > # HG changeset patch > > # User Maxim Dounin > > # Date 1637723745 -10800 > > # Wed Nov 24 06:15:45 2021 +0300 > > # Node ID 3450841798597536d17ced29b35d1d90ce06ce0d > > # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 > > HTTP/2: fixed "task already active" with sendfile in threads. 
> > > > With sendfile in threads, "task already active" alerts might appear in logs > > if a write event happens on the main HTTP/2 connection, triggering a sendfile > > in threads while another thread operation is already running. Observed > > with "aio threads; aio_write on; sendfile on;" and with thread event handlers > > modified to post a write event to the main HTTP/2 connection (though can > > happen without any modifications). > > > > Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate > > aio operation, resulting in "second aio post" alerts. This is, however, > > harder to reproduce, especially on modern FreeBSD systems, since sendfile() > > usually does not return EBUSY. > > > > Fix is to avoid starting a sendfile operation if other thread operation > > is active by checking r->aio in the thread handler (and, similarly, in > > aio preload handler). > > > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > > --- a/src/http/ngx_http_copy_filter_module.c > > +++ b/src/http/ngx_http_copy_filter_module.c > > @@ -219,6 +219,22 @@ ngx_http_copy_aio_sendfile_preload(ngx_b > > ngx_http_request_t *r; > > ngx_output_chain_ctx_t *ctx; > > > > +#if (NGX_HTTP_V2) > > + > > + r = file->file->aio->data; > > + > > + if (r->aio) { > > + /* > > + * with HTTP/2, another thread operation might be already running > > + * if sendfile() is called as a result of a write event on the main > > + * connection > > + */ > > + > > + return NGX_OK; > > + } > > + > > +#endif > > + > > Shouldn't this also check for r->stream to narrow it down to HTTP/2 ? > At least, it looks inconsistent taking the code is under NGX_HTTP_V2. > On the contrary, shouldn't this expand to non-HTTP/2 protocols as well? I tend to think that correct approach would be to expand this to non-HTTP/2 as well. 
Without the HTTP/2 check, it will also catch duplicate sendfile calls due to subrequests (currently not handled by the aio preloading, and handled separately in sendfile in threads). Updated patch below. > > n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); > > > > if (n == NGX_AGAIN) { > > @@ -270,6 +286,23 @@ ngx_http_copy_thread_handler(ngx_thread_ > > > > r = file->thread_ctx; > > > > +#if (NGX_HTTP_V2) > > + > > + if (r->aio > > + && r->stream > > + && r->stream->connection->connection->sendfile_task == task) > > + { > > I'm not quite sure how the last part of condition is related (if at all) > to stop additional sendfile() for different files using different tasks, > as outlined above. > > Also, looking at ngx_linux_sendfile_thread(), where c->sendfile_task is > initially allocated, I cannot imagine how the tasks won't match there. > I guess it is enough to check for a non-NULL sendfile_task. The same thread handler can be used for different thread operations, notably sendfile(), read(), or write(). The idea is to check if the operation we are called for is actually sendfile() in threads, and not thread read or write (which will use different task). With sendfile(), calls with r->aio set are expected, and we can simply return NGX_OK to properly handle them. With thread read or write, simply returning NGX_OK likely will silently break things, so it's better to let it instead fail in ngx_thread_task_post(). 
> > + /* > > + * with HTTP/2, another thread operation might be already running > > + * if sendfile() is called as a result of a write event on the main > > + * connection > > + */ > > + > > + return NGX_OK; > > + } > > + > > +#endif > > + > > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > > tp = clcf->thread_pool; > > > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > > --- a/src/http/ngx_http_upstream.c > > +++ b/src/http/ngx_http_upstream.c > > @@ -3854,6 +3854,23 @@ ngx_http_upstream_thread_handler(ngx_thr > > r = file->thread_ctx; > > p = r->upstream->pipe; > > > > +#if (NGX_HTTP_V2) > > + > > + if (r->aio > > + && r->stream > > + && r->stream->connection->connection->sendfile_task == task) > > + { > > + /* > > + * with HTTP/2, another thread operation might be already running > > + * if sendfile() is called as a result of a write event on the main > > + * connection > > + */ > > + > > + return NGX_OK; > > + } > > + > > +#endif > > + > > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > > tp = clcf->thread_pool; > > Updated patch: # HG changeset patch # User Maxim Dounin # Date 1637774456 -10800 # Wed Nov 24 20:20:56 2021 +0300 # Node ID 80c8892153bef7edec591cd1af37237b22d6d0c5 # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 HTTP/2: fixed "task already active" with sendfile in threads. With sendfile in threads, "task already active" alerts might appear in logs if a write event happens on the main HTTP/2 connection, triggering a sendfile in threads while another thread operation is already running. Observed with "aio threads; aio_write on; sendfile on;" and with thread event handlers modified to post a write event to the main HTTP/2 connection (though can happen without any modifications). Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate aio operation, resulting in "second aio post" alerts. 
This is, however, harder to reproduce, especially on modern FreeBSD systems, since sendfile() usually does not return EBUSY. Fix is to avoid starting a sendfile operation if other thread operation is active by checking r->aio in the thread handler (and, similarly, in aio preload handler). The added check also makes duplicate calls protection in sendfile() in threads redundant, so it is removed. Further, now there is a corresponding duplicate calls protection in AIO preloading. diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c +++ b/src/http/ngx_http_copy_filter_module.c @@ -219,13 +219,25 @@ ngx_http_copy_aio_sendfile_preload(ngx_b ngx_http_request_t *r; ngx_output_chain_ctx_t *ctx; + aio = file->file->aio; + r = aio->data; + + if (r->aio) { + /* + * tolerate sendfile() calls if another operation is already + * running; this can happen due to subrequests, multiple calls + * of the next body filter from a filter, or in HTTP/2 due to + * a write event on the main connection + */ + + return NGX_OK; + } + n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); if (n == NGX_AGAIN) { - aio = file->file->aio; aio->handler = ngx_http_copy_aio_sendfile_event_handler; - r = aio->data; r->main->blocked++; r->aio = 1; @@ -263,6 +275,7 @@ static ngx_int_t ngx_http_copy_thread_handler(ngx_thread_task_t *task, ngx_file_t *file) { ngx_str_t name; + ngx_connection_t *c; ngx_thread_pool_t *tp; ngx_http_request_t *r; ngx_output_chain_ctx_t *ctx; @@ -270,6 +283,27 @@ ngx_http_copy_thread_handler(ngx_thread_ r = file->thread_ctx; + if (r->aio) { + /* + * tolerate sendfile() calls if another operation is already + * running; this can happen due to subrequests, multiple calls + * of the next body filter from a filter, or in HTTP/2 due to + * a write event on the main connection + */ + + c = r->connection; + +#if (NGX_HTTP_V2) + if (r->stream) { + c = r->stream->connection->connection; + } 
+#endif + + if (task == c->sendfile_task) { + return NGX_OK; + } + } + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); tp = clcf->thread_pool; diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -3847,6 +3847,7 @@ ngx_http_upstream_thread_handler(ngx_thr { ngx_str_t name; ngx_event_pipe_t *p; + ngx_connection_t *c; ngx_thread_pool_t *tp; ngx_http_request_t *r; ngx_http_core_loc_conf_t *clcf; @@ -3854,6 +3855,27 @@ ngx_http_upstream_thread_handler(ngx_thr r = file->thread_ctx; p = r->upstream->pipe; + if (r->aio) { + /* + * tolerate sendfile() calls if another operation is already + * running; this can happen due to subrequests, multiple calls + * of the next body filter from a filter, or in HTTP/2 due to + * a write event on the main connection + */ + + c = r->connection; + +#if (NGX_HTTP_V2) + if (r->stream) { + c = r->stream->connection->connection; + } +#endif + + if (task == c->sendfile_task) { + return NGX_OK; + } + } + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); tp = clcf->thread_pool; diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c --- a/src/os/unix/ngx_linux_sendfile_chain.c +++ b/src/os/unix/ngx_linux_sendfile_chain.c @@ -379,15 +379,6 @@ ngx_linux_sendfile_thread(ngx_connection return ctx->sent; } - if (task->event.active && ctx->file == file) { - /* - * tolerate duplicate calls; they can happen due to subrequests - * or multiple calls of the next body filter from a filter - */ - - return NGX_DONE; - } - ctx->file = file; ctx->socket = c->fd; ctx->size = size; -- Maxim Dounin http://mdounin.ru/ From pluknet at nginx.com Thu Nov 25 13:15:09 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 25 Nov 2021 16:15:09 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> 
<688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> <5F9378C5-CB2A-4466-B203-D9AB0F776997@nginx.com> <82D01D9B-BE9A-4619-95BA-D63948429FCC@nginx.com> Message-ID: > On 24 Nov 2021, at 22:06, Maxim Dounin wrote: > > Hello! > > On Wed, Nov 24, 2021 at 06:50:10PM +0300, Sergey Kandaurov wrote: > >>> On 24 Nov 2021, at 06:46, Maxim Dounin wrote: >>> >>> On Tue, Nov 23, 2021 at 10:44:00AM +0300, Sergey Kandaurov wrote: >>> >>>>> On 23 Nov 2021, at 06:12, Maxim Dounin wrote: >>>>> >>>>> On Thu, Nov 18, 2021 at 07:46:48PM +0300, Sergey Kandaurov wrote: >>>>> >>>>>>> On 16 Nov 2021, at 17:41, Maxim Dounin wrote: >>>>>>> >>>>>>> On Tue, Nov 16, 2021 at 02:59:46PM +0300, Sergey Kandaurov wrote: >>>>>>> >>>>>>>>> On 11 Nov 2021, at 06:10, Maxim Dounin wrote: >>>>>>>>> >>>>>>>>> # HG changeset patch >>>>>>>>> # User Maxim Dounin >>>>>>>>> # Date 1636599377 -10800 >>>>>>>>> # Thu Nov 11 05:56:17 2021 +0300 >>>>>>>>> # Node ID 76e072a6947a221868705c13973de15319c0d921 >>>>>>>>> # Parent 82b750b20c5205d685e59031247fe898f011394e >>>>>>>>> HTTP/2: fixed sendfile() aio handling. >>>>>>>>> >>>>>>>>> With sendfile() in threads ("aio threads; sendfile on;"), client connection >>>>>>>>> can block on writing, waiting for sendfile() to complete. In HTTP/2 this >>>>>>>>> might result in the request hang, since an attempt to continue processig >>>>>>>> >>>>>>>> processing >>>>>>> >>>>>>> Fixed, thnx. >>>>>>> >>>>>>>>> in thread event handler will call request's write event handler, which >>>>>>>>> is usually stopped by ngx_http_v2_send_chain(): it does nothing if there >>>>>>>>> are no additional data and stream->queued is set. Further, HTTP/2 resets >>>>>>>>> stream's c->write->ready to 0 if writing blocks, so just fixing >>>>>>>>> ngx_http_v2_send_chain() is not enough. 
>>>>>>>>> >>>>>>>>> Can be reproduced with test suite on Linux with: >>>>>>>>> >>>>>>>>> TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t >>>>>>>>> >>>>>>>>> The following tests currently fail: h2_keepalive.t, h2_priority.t, >>>>>>>>> h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. >>>>>>>>> >>>>>>>>> Similarly, sendfile() with AIO preloading on FreeBSD can block as well, >>>>>>>>> with similar results. This is, however, harder to reproduce, especially >>>>>>>>> on modern FreeBSD systems, since sendfile() usually do not return EBUSY. >>>>>>>> >>>>>>>> does not >>>>>>> >>>>>>> Fixed, thnx. >>>>>>> >>>>>>>>> Fix is to post a write event on HTTP/2 connection in the thread event >>>>>>>>> handler (and aio preload handler). This ensures that sendfile() will be >>>>>>>>> completed and stream processing will be resumed by HTTP/2 code. >>>>>>>>> >>>>>>>>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c >>>>>>>>> --- a/src/http/ngx_http_copy_filter_module.c >>>>>>>>> +++ b/src/http/ngx_http_copy_filter_module.c >>>>>>>>> @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler >>>>>>>>> r->aio = 0; >>>>>>>>> ev->complete = 0; >>>>>>>>> >>>>>>>>> +#if (NGX_HTTP_V2) >>>>>>>>> + >>>>>>>>> + if (r->stream) { >>>>>>>>> + /* >>>>>>>>> + * for HTTP/2, trigger a write event on the main connection >>>>>>>>> + * to handle sendfile() preload >>>>>>>>> + */ >>>>>>>>> + >>>>>>>>> + ngx_post_event(r->stream->connection->connection->write, >>>>>>>>> + &ngx_posted_events); >>>>>>>>> + return; >>>>>>>>> + } >>>>>>>>> + >>>>>>>>> +#endif >>>>>>>>> + >>>>>>>>> r->connection->write->handler(r->connection->write); >>>>>>>>> } >>>>>>>>> >>>>>>>>> @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e >>>>>>>>> r->main->blocked--; >>>>>>>>> r->aio = 0; >>>>>>>>> >>>>>>>>> +#if (NGX_HTTP_V2) >>>>>>>>> + >>>>>>>>> + if (r->stream) { >>>>>>>>> + /* >>>>>>>>> + * for HTTP/2, trigger a write event on the main connection 
>>>>>>>>> + * to handle sendfile() in threads >>>>>>>>> + */ >>>>>>>>> + >>>>>>>>> + ngx_post_event(r->stream->connection->connection->write, >>>>>>>>> + &ngx_posted_events); >>>>>>>>> + } >>>>>>>>> + >>>>>>>>> +#endif >>>>>>>>> + >>>>> >>>>> [...] >>>>> >>>>> For the record, while testing this patch Sergey found another >>>>> issue with sendfile() in threads and HTTP/2: since HTTP/2 might >>>>> call sendfile() within the main connection, bypassing request >>>>> filter chain, normal r->aio flag checking to prevent multiple >>>>> operations do not work, and this eventually results in "task >>>>> already active" alerts due to duplicate operations being posted. >>>>> With the above patch this issue is much more likely to happen, >>>>> since it intentionally triggers write events on the main HTTP/2 >>>>> connection. >>>>> >>>>> Below are two patches: the first one addresses the issue with >>>>> duplicate operations by additionally checking file->thread_task >>>>> before sendfile(), and the second one is a better alternative to >>>>> the above patch which doesn't post additional events on the main >>>>> connection. >>>>> >>>>> # HG changeset patch >>>>> # User Maxim Dounin >>>>> # Date 1637635671 -10800 >>>>> # Tue Nov 23 05:47:51 2021 +0300 >>>>> # Node ID 8a18b0bff1266db221fe35dc08f4483044ea0f86 >>>>> # Parent 82b750b20c5205d685e59031247fe898f011394e >>>>> HTTP/2: fixed "task already active" with sendfile in threads. >>>>> >>>>> With sendfile in threads, "task already active" alerts might appear in logs >>>>> if a write event happens on the main HTTP/2 connection, triggering a sendfile >>>>> in threads while another thread operation is already running. Observed >>>>> with "aio threads; aio_write on; sendfile on;" and with thread event handlers >>>>> modified to post a write event to the main HTTP/2 connection (though can >>>>> happen without any modifications). 
>>>>> >>>>> Fix is to avoid starting a sendfile operation if file->thread_task indicates >>>>> that another thread operation is active. >>>>> >>>>> diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c >>>>> --- a/src/os/unix/ngx_linux_sendfile_chain.c >>>>> +++ b/src/os/unix/ngx_linux_sendfile_chain.c >>>>> @@ -324,6 +324,18 @@ ngx_linux_sendfile_thread(ngx_connection >>>>> "linux sendfile thread: %d, %uz, %O", >>>>> file->file->fd, size, file->file_pos); >>>>> >>>>> + task = file->file->thread_task; >>>>> + >>>>> + if (task && task->event.active) { >>>>> + /* >>>>> + * with HTTP/2, another thread operation might be already running >>>>> + * if sendfile() is called as a result of a write event on the main >>>>> + * connection >>>>> + */ >>>>> + >>>>> + return NGX_DONE; >>>>> + } >>>>> + >>>>> task = c->sendfile_task; >>>>> >>>>> if (task == NULL) { >>> >>> After looking once again into it, I tend to think this patch is >>> incomplete. In particular, the particular check won't stop >>> additional sendfile() if there are multiple files and different >>> files use different thread tasks. While this is not something >>> possible with standard modules, but nevertheless. >> >> Could you please clarify? >> Is it something rather theoretical (and irrelevant to subrequests)? >> Because there is a room for only one task in c->sendfile_task. > > The code above checks file->file->thread_task, not > c->sendfile_task, so there can be many different tasks. > Got it. > This is not something about subrequests: multiple thread/aio > operations are allowed with subrequests as long as they are within > different subrequests (the r->aio flag is within a particular > subrequest). > > The case I'm talking about can happen if a module returns multiple > buffers with different files, and these files use different > thread tasks (for non-sendfile thread operations). 
This is not > something which can happen with the standard modules, but might > happen in theory. Agreed. > >>> Further, the same problem seems to apply to aio preloading (though >>> unlikely to happen in practice). >>> >>> The following patch checks r->aio in relevant handlers to prevent >>> sendfile() when another operation is running: >> >> As a positive effect, moving the check down to thread handlers allows >> to complete a sendfile operation, within http2 write handler, after >> thread task completion, since now the check is performed after the >> "if (task->event.complete) {" logic in ngx_linux_sendfile_thread(). >> E.g.: >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 write handler >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 frame out: 000055BA978A5E28 sid:1 bl:0 len:8192 >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 frame out: 000055BA978A5D20 sid:1 bl:1 len:8192 >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: 15, 8192, 8794 >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: complete:1 err:0 sent:8192 >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 no tcp_nodelay >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 tcp_nopush >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 writev: 9 of 9 >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: 15, 8192, 16986 >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: complete:0 > > While the side effect is expected, I don't think this is not > something important. As far as I understand, you are looking at > what happens with the previous version of the initial patch with > ngx_post_event(), but in real world such a write event is > unlikely. (Not to mention that this won't happen with HTTP/1.x, > and only applies to HTTP/2 without SSL.) Correct. I was not able to see any difference with the current series. 
> >>> # HG changeset patch >>> # User Maxim Dounin >>> # Date 1637723745 -10800 >>> # Wed Nov 24 06:15:45 2021 +0300 >>> # Node ID 3450841798597536d17ced29b35d1d90ce06ce0d >>> # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 >>> HTTP/2: fixed "task already active" with sendfile in threads. >>> >>> With sendfile in threads, "task already active" alerts might appear in logs >>> if a write event happens on the main HTTP/2 connection, triggering a sendfile >>> in threads while another thread operation is already running. Observed >>> with "aio threads; aio_write on; sendfile on;" and with thread event handlers >>> modified to post a write event to the main HTTP/2 connection (though can >>> happen without any modifications). >>> >>> Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate >>> aio operation, resulting in "second aio post" alerts. This is, however, >>> harder to reproduce, especially on modern FreeBSD systems, since sendfile() >>> usually does not return EBUSY. >>> >>> Fix is to avoid starting a sendfile operation if other thread operation >>> is active by checking r->aio in the thread handler (and, similarly, in >>> aio preload handler). >>> >>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c >>> --- a/src/http/ngx_http_copy_filter_module.c >>> +++ b/src/http/ngx_http_copy_filter_module.c >>> @@ -219,6 +219,22 @@ ngx_http_copy_aio_sendfile_preload(ngx_b >>> ngx_http_request_t *r; >>> ngx_output_chain_ctx_t *ctx; >>> >>> +#if (NGX_HTTP_V2) >>> + >>> + r = file->file->aio->data; >>> + >>> + if (r->aio) { >>> + /* >>> + * with HTTP/2, another thread operation might be already running >>> + * if sendfile() is called as a result of a write event on the main >>> + * connection >>> + */ >>> + >>> + return NGX_OK; >>> + } >>> + >>> +#endif >>> + >> >> Shouldn't this also check for r->stream to narrow it down to HTTP/2 ? >> At least, it looks inconsistent taking the code is under NGX_HTTP_V2. 
>> On the contrary, shouldn't this expand to non-HTTP/2 protocols as well? > > I tend to think that correct approach would be to expand this to > non-HTTP/2 as well. Without the HTTP/2 check, it will also catch > duplicate sendfile calls due to subrequests (currently not handled > by the aio preloading, and handled separately in sendfile in > threads). > > Updated patch below. > >>> n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); >>> >>> if (n == NGX_AGAIN) { >>> @@ -270,6 +286,23 @@ ngx_http_copy_thread_handler(ngx_thread_ >>> >>> r = file->thread_ctx; >>> >>> +#if (NGX_HTTP_V2) >>> + >>> + if (r->aio >>> + && r->stream >>> + && r->stream->connection->connection->sendfile_task == task) >>> + { >> >> I'm not quite sure how the last part of condition is related (if at all) >> to stop additional sendfile() for different files using different tasks, >> as outlined above. >> >> Also, looking at ngx_linux_sendfile_thread(), where c->sendfile_task is >> initially allocated, I cannot imagine how the tasks won't match there. >> I guess it is enough to check for a non-NULL sendfile_task. > > The same thread handler can be used for different thread > operations, notably sendfile(), read(), or write(). The idea is > to check if the operation we are called for is actually sendfile() > in threads, and not thread read or write (which will use different > task). With sendfile(), calls with r->aio set are expected, and > we can simply return NGX_OK to properly handle them. With thread > read or write, simply returning NGX_OK likely will silently broke > things, so it's better to let it instead fail in > ngx_thread_task_post(). Ok. 
> >>> + /* >>> + * with HTTP/2, another thread operation might be already running >>> + * if sendfile() is called as a result of a write event on the main >>> + * connection >>> + */ >>> + >>> + return NGX_OK; >>> + } >>> + >>> +#endif >>> + >>> clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); >>> tp = clcf->thread_pool; >>> >>> diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c >>> --- a/src/http/ngx_http_upstream.c >>> +++ b/src/http/ngx_http_upstream.c >>> @@ -3854,6 +3854,23 @@ ngx_http_upstream_thread_handler(ngx_thr >>> r = file->thread_ctx; >>> p = r->upstream->pipe; >>> >>> +#if (NGX_HTTP_V2) >>> + >>> + if (r->aio >>> + && r->stream >>> + && r->stream->connection->connection->sendfile_task == task) >>> + { >>> + /* >>> + * with HTTP/2, another thread operation might be already running >>> + * if sendfile() is called as a result of a write event on the main >>> + * connection >>> + */ >>> + >>> + return NGX_OK; >>> + } >>> + >>> +#endif >>> + >>> clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); >>> tp = clcf->thread_pool; >>> > > Updated patch: > > # HG changeset patch > # User Maxim Dounin > # Date 1637774456 -10800 > # Wed Nov 24 20:20:56 2021 +0300 > # Node ID 80c8892153bef7edec591cd1af37237b22d6d0c5 > # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 > HTTP/2: fixed "task already active" with sendfile in threads. > > With sendfile in threads, "task already active" alerts might appear in logs > if a write event happens on the main HTTP/2 connection, triggering a sendfile > in threads while another thread operation is already running. Observed > with "aio threads; aio_write on; sendfile on;" and with thread event handlers > modified to post a write event to the main HTTP/2 connection (though can > happen without any modifications). > > Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate > aio operation, resulting in "second aio post" alerts. 
This is, however, > harder to reproduce, especially on modern FreeBSD systems, since sendfile() > usually does not return EBUSY. > > Fix is to avoid starting a sendfile operation if other thread operation > is active by checking r->aio in the thread handler (and, similarly, in > aio preload handler). > > The added check also makes duplicate calls protection in sendfile() in > threads redundant, so it is removed. Further, now there is a corresponding > duplicate calls protection in AIO preloading. > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c > +++ b/src/http/ngx_http_copy_filter_module.c > @@ -219,13 +219,25 @@ ngx_http_copy_aio_sendfile_preload(ngx_b > ngx_http_request_t *r; > ngx_output_chain_ctx_t *ctx; > > + aio = file->file->aio; > + r = aio->data; > + > + if (r->aio) { > + /* > + * tolerate sendfile() calls if another operation is already > + * running; this can happen due to subrequests, multiple calls > + * of the next body filter from a filter, or in HTTP/2 due to > + * a write event on the main connection > + */ > + > + return NGX_OK; > + } > + > n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); > > if (n == NGX_AGAIN) { > - aio = file->file->aio; > aio->handler = ngx_http_copy_aio_sendfile_event_handler; > > - r = aio->data; > r->main->blocked++; > r->aio = 1; > > @@ -263,6 +275,7 @@ static ngx_int_t > ngx_http_copy_thread_handler(ngx_thread_task_t *task, ngx_file_t *file) > { > ngx_str_t name; > + ngx_connection_t *c; > ngx_thread_pool_t *tp; > ngx_http_request_t *r; > ngx_output_chain_ctx_t *ctx; > @@ -270,6 +283,27 @@ ngx_http_copy_thread_handler(ngx_thread_ > > r = file->thread_ctx; > > + if (r->aio) { > + /* > + * tolerate sendfile() calls if another operation is already > + * running; this can happen due to subrequests, multiple calls > + * of the next body filter from a filter, or in HTTP/2 due to > + * a write event on the main 
connection > + */ > + > + c = r->connection; > + > +#if (NGX_HTTP_V2) > + if (r->stream) { > + c = r->stream->connection->connection; > + } > +#endif > + > + if (task == c->sendfile_task) { > + return NGX_OK; > + } > + } > + > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > tp = clcf->thread_pool; > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -3847,6 +3847,7 @@ ngx_http_upstream_thread_handler(ngx_thr > { > ngx_str_t name; > ngx_event_pipe_t *p; > + ngx_connection_t *c; > ngx_thread_pool_t *tp; > ngx_http_request_t *r; > ngx_http_core_loc_conf_t *clcf; > @@ -3854,6 +3855,27 @@ ngx_http_upstream_thread_handler(ngx_thr > r = file->thread_ctx; > p = r->upstream->pipe; > > + if (r->aio) { > + /* > + * tolerate sendfile() calls if another operation is already > + * running; this can happen due to subrequests, multiple calls > + * of the next body filter from a filter, or in HTTP/2 due to > + * a write event on the main connection > + */ > + > + c = r->connection; > + > +#if (NGX_HTTP_V2) > + if (r->stream) { > + c = r->stream->connection->connection; > + } > +#endif > + > + if (task == c->sendfile_task) { > + return NGX_OK; > + } > + } > + > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > tp = clcf->thread_pool; > > diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c > --- a/src/os/unix/ngx_linux_sendfile_chain.c > +++ b/src/os/unix/ngx_linux_sendfile_chain.c > @@ -379,15 +379,6 @@ ngx_linux_sendfile_thread(ngx_connection > return ctx->sent; > } > > - if (task->event.active && ctx->file == file) { > - /* > - * tolerate duplicate calls; they can happen due to subrequests > - * or multiple calls of the next body filter from a filter > - */ > - > - return NGX_DONE; > - } > - > ctx->file = file; > ctx->socket = c->fd; > ctx->size = size; > It's hard to say about preload handler, generally it 
looks good. Also, I can confirm that the check in ngx_linux_sendfile_thread() is now redundant and can be removed, a test case for 6422:768e287a6f36 shows that duplicate calls in subrequests are now handled in thread handlers (both in HTTP/1.x and HTTP/2), also can be tested with proxy_store.t. BTW, what about a similar check in ngx_freebsd_sendfile_chain() used to catch duplicate calls? Both were added in 6422:768e287a6f36. Anyway, it is removed in the sendfile(SF_NODISKIO) rework. -- Sergey Kandaurov From mdounin at mdounin.ru Thu Nov 25 13:54:07 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 25 Nov 2021 16:54:07 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> <5F9378C5-CB2A-4466-B203-D9AB0F776997@nginx.com> <82D01D9B-BE9A-4619-95BA-D63948429FCC@nginx.com> Message-ID: Hello! On Thu, Nov 25, 2021 at 04:15:09PM +0300, Sergey Kandaurov wrote: > > > On 24 Nov 2021, at 22:06, Maxim Dounin wrote: > > > > Hello! 
> > > > On Wed, Nov 24, 2021 at 06:50:10PM +0300, Sergey Kandaurov wrote: > > > >>> On 24 Nov 2021, at 06:46, Maxim Dounin wrote: > >>> > >>> On Tue, Nov 23, 2021 at 10:44:00AM +0300, Sergey Kandaurov wrote: > >>> > >>>>> On 23 Nov 2021, at 06:12, Maxim Dounin wrote: > >>>>> > >>>>> On Thu, Nov 18, 2021 at 07:46:48PM +0300, Sergey Kandaurov wrote: > >>>>> > >>>>>>> On 16 Nov 2021, at 17:41, Maxim Dounin wrote: > >>>>>>> > >>>>>>> On Tue, Nov 16, 2021 at 02:59:46PM +0300, Sergey Kandaurov wrote: > >>>>>>> > >>>>>>>>> On 11 Nov 2021, at 06:10, Maxim Dounin wrote: > >>>>>>>>> > >>>>>>>>> # HG changeset patch > >>>>>>>>> # User Maxim Dounin > >>>>>>>>> # Date 1636599377 -10800 > >>>>>>>>> # Thu Nov 11 05:56:17 2021 +0300 > >>>>>>>>> # Node ID 76e072a6947a221868705c13973de15319c0d921 > >>>>>>>>> # Parent 82b750b20c5205d685e59031247fe898f011394e > >>>>>>>>> HTTP/2: fixed sendfile() aio handling. > >>>>>>>>> > >>>>>>>>> With sendfile() in threads ("aio threads; sendfile on;"), client connection > >>>>>>>>> can block on writing, waiting for sendfile() to complete. In HTTP/2 this > >>>>>>>>> might result in the request hang, since an attempt to continue processig > >>>>>>>> > >>>>>>>> processing > >>>>>>> > >>>>>>> Fixed, thnx. > >>>>>>> > >>>>>>>>> in thread event handler will call request's write event handler, which > >>>>>>>>> is usually stopped by ngx_http_v2_send_chain(): it does nothing if there > >>>>>>>>> are no additional data and stream->queued is set. Further, HTTP/2 resets > >>>>>>>>> stream's c->write->ready to 0 if writing blocks, so just fixing > >>>>>>>>> ngx_http_v2_send_chain() is not enough. > >>>>>>>>> > >>>>>>>>> Can be reproduced with test suite on Linux with: > >>>>>>>>> > >>>>>>>>> TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t > >>>>>>>>> > >>>>>>>>> The following tests currently fail: h2_keepalive.t, h2_priority.t, > >>>>>>>>> h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. 
> >>>>>>>>> > >>>>>>>>> Similarly, sendfile() with AIO preloading on FreeBSD can block as well, > >>>>>>>>> with similar results. This is, however, harder to reproduce, especially > >>>>>>>>> on modern FreeBSD systems, since sendfile() usually do not return EBUSY. > >>>>>>>> > >>>>>>>> does not > >>>>>>> > >>>>>>> Fixed, thnx. > >>>>>>> > >>>>>>>>> Fix is to post a write event on HTTP/2 connection in the thread event > >>>>>>>>> handler (and aio preload handler). This ensures that sendfile() will be > >>>>>>>>> completed and stream processing will be resumed by HTTP/2 code. > >>>>>>>>> > >>>>>>>>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > >>>>>>>>> --- a/src/http/ngx_http_copy_filter_module.c > >>>>>>>>> +++ b/src/http/ngx_http_copy_filter_module.c > >>>>>>>>> @@ -250,6 +250,21 @@ ngx_http_copy_aio_sendfile_event_handler > >>>>>>>>> r->aio = 0; > >>>>>>>>> ev->complete = 0; > >>>>>>>>> > >>>>>>>>> +#if (NGX_HTTP_V2) > >>>>>>>>> + > >>>>>>>>> + if (r->stream) { > >>>>>>>>> + /* > >>>>>>>>> + * for HTTP/2, trigger a write event on the main connection > >>>>>>>>> + * to handle sendfile() preload > >>>>>>>>> + */ > >>>>>>>>> + > >>>>>>>>> + ngx_post_event(r->stream->connection->connection->write, > >>>>>>>>> + &ngx_posted_events); > >>>>>>>>> + return; > >>>>>>>>> + } > >>>>>>>>> + > >>>>>>>>> +#endif > >>>>>>>>> + > >>>>>>>>> r->connection->write->handler(r->connection->write); > >>>>>>>>> } > >>>>>>>>> > >>>>>>>>> @@ -323,6 +338,20 @@ ngx_http_copy_thread_event_handler(ngx_e > >>>>>>>>> r->main->blocked--; > >>>>>>>>> r->aio = 0; > >>>>>>>>> > >>>>>>>>> +#if (NGX_HTTP_V2) > >>>>>>>>> + > >>>>>>>>> + if (r->stream) { > >>>>>>>>> + /* > >>>>>>>>> + * for HTTP/2, trigger a write event on the main connection > >>>>>>>>> + * to handle sendfile() in threads > >>>>>>>>> + */ > >>>>>>>>> + > >>>>>>>>> + ngx_post_event(r->stream->connection->connection->write, > >>>>>>>>> + &ngx_posted_events); > >>>>>>>>> + } > >>>>>>>>> + 
> >>>>>>>>> +#endif > >>>>>>>>> + > >>>>> > >>>>> [...] > >>>>> > >>>>> For the record, while testing this patch Sergey found another > >>>>> issue with sendfile() in threads and HTTP/2: since HTTP/2 might > >>>>> call sendfile() within the main connection, bypassing request > >>>>> filter chain, normal r->aio flag checking to prevent multiple > >>>>> operations do not work, and this eventually results in "task > >>>>> already active" alerts due to duplicate operations being posted. > >>>>> With the above patch this issue is much more likely to happen, > >>>>> since it intentionally triggers write events on the main HTTP/2 > >>>>> connection. > >>>>> > >>>>> Below are two patches: the first one addresses the issue with > >>>>> duplicate operations by additionally checking file->thread_task > >>>>> before sendfile(), and the second one is a better alternative to > >>>>> the above patch which doesn't post additional events on the main > >>>>> connection. > >>>>> > >>>>> # HG changeset patch > >>>>> # User Maxim Dounin > >>>>> # Date 1637635671 -10800 > >>>>> # Tue Nov 23 05:47:51 2021 +0300 > >>>>> # Node ID 8a18b0bff1266db221fe35dc08f4483044ea0f86 > >>>>> # Parent 82b750b20c5205d685e59031247fe898f011394e > >>>>> HTTP/2: fixed "task already active" with sendfile in threads. > >>>>> > >>>>> With sendfile in threads, "task already active" alerts might appear in logs > >>>>> if a write event happens on the main HTTP/2 connection, triggering a sendfile > >>>>> in threads while another thread operation is already running. Observed > >>>>> with "aio threads; aio_write on; sendfile on;" and with thread event handlers > >>>>> modified to post a write event to the main HTTP/2 connection (though can > >>>>> happen without any modifications). > >>>>> > >>>>> Fix is to avoid starting a sendfile operation if file->thread_task indicates > >>>>> that another thread operation is active. 
> >>>>> > >>>>> diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c > >>>>> --- a/src/os/unix/ngx_linux_sendfile_chain.c > >>>>> +++ b/src/os/unix/ngx_linux_sendfile_chain.c > >>>>> @@ -324,6 +324,18 @@ ngx_linux_sendfile_thread(ngx_connection > >>>>> "linux sendfile thread: %d, %uz, %O", > >>>>> file->file->fd, size, file->file_pos); > >>>>> > >>>>> + task = file->file->thread_task; > >>>>> + > >>>>> + if (task && task->event.active) { > >>>>> + /* > >>>>> + * with HTTP/2, another thread operation might be already running > >>>>> + * if sendfile() is called as a result of a write event on the main > >>>>> + * connection > >>>>> + */ > >>>>> + > >>>>> + return NGX_DONE; > >>>>> + } > >>>>> + > >>>>> task = c->sendfile_task; > >>>>> > >>>>> if (task == NULL) { > >>> > >>> After looking once again into it, I tend to think this patch is > >>> incomplete. In particular, the particular check won't stop > >>> additional sendfile() if there are multiple files and different > >>> files use different thread tasks. While this is not something > >>> possible with standard modules, but nevertheless. > >> > >> Could you please clarify? > >> Is it something rather theoretical (and irrelevant to subrequests)? > >> Because there is a room for only one task in c->sendfile_task. > > > > The code above checks file->file->thread_task, not > > c->sendfile_task, so there can be many different tasks. > > > > Got it. > > > This is not something about subrequests: multiple thread/aio > > operations are allowed with subrequests as long as they are within > > different subrequests (the r->aio flag is within a particular > > subrequest). > > > > The case I'm talking about can happen if a module returns multiple > > buffers with different files, and these files use different > > thread tasks (for non-sendfile thread operations). This is not > > something which can happen with the standard modules, but might > > happen in theory. > > Agreed. 
> > > > >>> Further, the same problem seems to apply to aio preloading (though > >>> unlikely to happen in practice). > >>> > >>> The following patch checks r->aio in relevant handlers to prevent > >>> sendfile() when another operation is running: > >> > >> As a positive effect, moving the check down to thread handlers allows > >> to complete a sendfile operation, within http2 write handler, after > >> thread task completion, since now the check is performed after the > >> "if (task->event.complete) {" logic in ngx_linux_sendfile_thread(). > >> E.g.: > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 write handler > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 frame out: 000055BA978A5E28 sid:1 bl:0 len:8192 > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 http2 frame out: 000055BA978A5D20 sid:1 bl:1 len:8192 > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: 15, 8192, 8794 > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: complete:1 err:0 sent:8192 > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 no tcp_nodelay > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 tcp_nopush > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 writev: 9 of 9 > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: 15, 8192, 16986 > >> 2021/11/24 15:16:43 [debug] 474666#474666: *1 linux sendfile thread: complete:0 > > > > While the side effect is expected, I don't think this is not > > something important. As far as I understand, you are looking at > > what happens with the previous version of the initial patch with > > ngx_post_event(), but in real world such a write event is > > unlikely. (Not to mention that this won't happen with HTTP/1.x, > > and only applies to HTTP/2 without SSL.) > > Correct. > I was not able to see any difference with the current series. 
> > > > >>> # HG changeset patch > >>> # User Maxim Dounin > >>> # Date 1637723745 -10800 > >>> # Wed Nov 24 06:15:45 2021 +0300 > >>> # Node ID 3450841798597536d17ced29b35d1d90ce06ce0d > >>> # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 > >>> HTTP/2: fixed "task already active" with sendfile in threads. > >>> > >>> With sendfile in threads, "task already active" alerts might appear in logs > >>> if a write event happens on the main HTTP/2 connection, triggering a sendfile > >>> in threads while another thread operation is already running. Observed > >>> with "aio threads; aio_write on; sendfile on;" and with thread event handlers > >>> modified to post a write event to the main HTTP/2 connection (though can > >>> happen without any modifications). > >>> > >>> Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate > >>> aio operation, resulting in "second aio post" alerts. This is, however, > >>> harder to reproduce, especially on modern FreeBSD systems, since sendfile() > >>> usually does not return EBUSY. > >>> > >>> Fix is to avoid starting a sendfile operation if other thread operation > >>> is active by checking r->aio in the thread handler (and, similarly, in > >>> aio preload handler). 
> >>> > >>> diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > >>> --- a/src/http/ngx_http_copy_filter_module.c > >>> +++ b/src/http/ngx_http_copy_filter_module.c > >>> @@ -219,6 +219,22 @@ ngx_http_copy_aio_sendfile_preload(ngx_b > >>> ngx_http_request_t *r; > >>> ngx_output_chain_ctx_t *ctx; > >>> > >>> +#if (NGX_HTTP_V2) > >>> + > >>> + r = file->file->aio->data; > >>> + > >>> + if (r->aio) { > >>> + /* > >>> + * with HTTP/2, another thread operation might be already running > >>> + * if sendfile() is called as a result of a write event on the main > >>> + * connection > >>> + */ > >>> + > >>> + return NGX_OK; > >>> + } > >>> + > >>> +#endif > >>> + > >> > >> Shouldn't this also check for r->stream to narrow it down to HTTP/2 ? > >> At least, it looks inconsistent taking the code is under NGX_HTTP_V2. > >> On the contrary, shouldn't this expand to non-HTTP/2 protocols as well? > > > > I tend to think that correct approach would be to expand this to > > non-HTTP/2 as well. Without the HTTP/2 check, it will also catch > > duplicate sendfile calls due to subrequests (currently not handled > > by the aio preloading, and handled separately in sendfile in > > threads). > > > > Updated patch below. > > > >>> n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); > >>> > >>> if (n == NGX_AGAIN) { > >>> @@ -270,6 +286,23 @@ ngx_http_copy_thread_handler(ngx_thread_ > >>> > >>> r = file->thread_ctx; > >>> > >>> +#if (NGX_HTTP_V2) > >>> + > >>> + if (r->aio > >>> + && r->stream > >>> + && r->stream->connection->connection->sendfile_task == task) > >>> + { > >> > >> I'm not quite sure how the last part of condition is related (if at all) > >> to stop additional sendfile() for different files using different tasks, > >> as outlined above. > >> > >> Also, looking at ngx_linux_sendfile_thread(), where c->sendfile_task is > >> initially allocated, I cannot imagine how the tasks won't match there. 
> >> I guess it is enough to check for a non-NULL sendfile_task. > > > > The same thread handler can be used for different thread > > operations, notably sendfile(), read(), or write(). The idea is > > to check if the operation we are called for is actually sendfile() > > in threads, and not thread read or write (which will use different > > task). With sendfile(), calls with r->aio set are expected, and > > we can simply return NGX_OK to properly handle them. With thread > > read or write, simply returning NGX_OK likely will silently broke > > things, so it's better to let it instead fail in > > ngx_thread_task_post(). > > Ok. > > > > >>> + /* > >>> + * with HTTP/2, another thread operation might be already running > >>> + * if sendfile() is called as a result of a write event on the main > >>> + * connection > >>> + */ > >>> + > >>> + return NGX_OK; > >>> + } > >>> + > >>> +#endif > >>> + > >>> clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > >>> tp = clcf->thread_pool; > >>> > >>> diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > >>> --- a/src/http/ngx_http_upstream.c > >>> +++ b/src/http/ngx_http_upstream.c > >>> @@ -3854,6 +3854,23 @@ ngx_http_upstream_thread_handler(ngx_thr > >>> r = file->thread_ctx; > >>> p = r->upstream->pipe; > >>> > >>> +#if (NGX_HTTP_V2) > >>> + > >>> + if (r->aio > >>> + && r->stream > >>> + && r->stream->connection->connection->sendfile_task == task) > >>> + { > >>> + /* > >>> + * with HTTP/2, another thread operation might be already running > >>> + * if sendfile() is called as a result of a write event on the main > >>> + * connection > >>> + */ > >>> + > >>> + return NGX_OK; > >>> + } > >>> + > >>> +#endif > >>> + > >>> clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > >>> tp = clcf->thread_pool; > >>> > > > > Updated patch: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1637774456 -10800 > > # Wed Nov 24 20:20:56 2021 +0300 > > # Node ID 
80c8892153bef7edec591cd1af37237b22d6d0c5 > > # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 > > HTTP/2: fixed "task already active" with sendfile in threads. > > > > With sendfile in threads, "task already active" alerts might appear in logs > > if a write event happens on the main HTTP/2 connection, triggering a sendfile > > in threads while another thread operation is already running. Observed > > with "aio threads; aio_write on; sendfile on;" and with thread event handlers > > modified to post a write event to the main HTTP/2 connection (though can > > happen without any modifications). > > > > Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate > > aio operation, resulting in "second aio post" alerts. This is, however, > > harder to reproduce, especially on modern FreeBSD systems, since sendfile() > > usually does not return EBUSY. > > > > Fix is to avoid starting a sendfile operation if other thread operation > > is active by checking r->aio in the thread handler (and, similarly, in > > aio preload handler). > > > > The added check also makes duplicate calls protection in sendfile() in > > threads redundant, so it is removed. Further, now there is a corresponding > > duplicate calls protection in AIO preloading. 
> > > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > > --- a/src/http/ngx_http_copy_filter_module.c > > +++ b/src/http/ngx_http_copy_filter_module.c > > @@ -219,13 +219,25 @@ ngx_http_copy_aio_sendfile_preload(ngx_b > > ngx_http_request_t *r; > > ngx_output_chain_ctx_t *ctx; > > > > + aio = file->file->aio; > > + r = aio->data; > > + > > + if (r->aio) { > > + /* > > + * tolerate sendfile() calls if another operation is already > > + * running; this can happen due to subrequests, multiple calls > > + * of the next body filter from a filter, or in HTTP/2 due to > > + * a write event on the main connection > > + */ > > + > > + return NGX_OK; > > + } > > + > > n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); > > > > if (n == NGX_AGAIN) { > > - aio = file->file->aio; > > aio->handler = ngx_http_copy_aio_sendfile_event_handler; > > > > - r = aio->data; > > r->main->blocked++; > > r->aio = 1; > > > > @@ -263,6 +275,7 @@ static ngx_int_t > > ngx_http_copy_thread_handler(ngx_thread_task_t *task, ngx_file_t *file) > > { > > ngx_str_t name; > > + ngx_connection_t *c; > > ngx_thread_pool_t *tp; > > ngx_http_request_t *r; > > ngx_output_chain_ctx_t *ctx; > > @@ -270,6 +283,27 @@ ngx_http_copy_thread_handler(ngx_thread_ > > > > r = file->thread_ctx; > > > > + if (r->aio) { > > + /* > > + * tolerate sendfile() calls if another operation is already > > + * running; this can happen due to subrequests, multiple calls > > + * of the next body filter from a filter, or in HTTP/2 due to > > + * a write event on the main connection > > + */ > > + > > + c = r->connection; > > + > > +#if (NGX_HTTP_V2) > > + if (r->stream) { > > + c = r->stream->connection->connection; > > + } > > +#endif > > + > > + if (task == c->sendfile_task) { > > + return NGX_OK; > > + } > > + } > > + > > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > > tp = clcf->thread_pool; > > > > diff --git a/src/http/ngx_http_upstream.c 
b/src/http/ngx_http_upstream.c > > --- a/src/http/ngx_http_upstream.c > > +++ b/src/http/ngx_http_upstream.c > > @@ -3847,6 +3847,7 @@ ngx_http_upstream_thread_handler(ngx_thr > > { > > ngx_str_t name; > > ngx_event_pipe_t *p; > > + ngx_connection_t *c; > > ngx_thread_pool_t *tp; > > ngx_http_request_t *r; > > ngx_http_core_loc_conf_t *clcf; > > @@ -3854,6 +3855,27 @@ ngx_http_upstream_thread_handler(ngx_thr > > r = file->thread_ctx; > > p = r->upstream->pipe; > > > > + if (r->aio) { > > + /* > > + * tolerate sendfile() calls if another operation is already > > + * running; this can happen due to subrequests, multiple calls > > + * of the next body filter from a filter, or in HTTP/2 due to > > + * a write event on the main connection > > + */ > > + > > + c = r->connection; > > + > > +#if (NGX_HTTP_V2) > > + if (r->stream) { > > + c = r->stream->connection->connection; > > + } > > +#endif > > + > > + if (task == c->sendfile_task) { > > + return NGX_OK; > > + } > > + } > > + > > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > > tp = clcf->thread_pool; > > > > diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c > > --- a/src/os/unix/ngx_linux_sendfile_chain.c > > +++ b/src/os/unix/ngx_linux_sendfile_chain.c > > @@ -379,15 +379,6 @@ ngx_linux_sendfile_thread(ngx_connection > > return ctx->sent; > > } > > > > - if (task->event.active && ctx->file == file) { > > - /* > > - * tolerate duplicate calls; they can happen due to subrequests > > - * or multiple calls of the next body filter from a filter > > - */ > > - > > - return NGX_DONE; > > - } > > - > > ctx->file = file; > > ctx->socket = c->fd; > > ctx->size = size; > > > > It's hard to say about preload handler, generally it looks good. 
> Also, I can confirm that the check in ngx_linux_sendfile_thread() is now > redundant and can be removed, a test case for 6422:768e287a6f36 shows > that duplicate calls in subrequests are now handled in thread handlers > (both in HTTP/1.x and HTTP/2), also can be tested with proxy_store.t. > BTW, what about a similar check in ngx_freebsd_sendfile_chain() > used to catch duplicate calls? Both were added in 6422:768e287a6f36. > Anyway, it is removed in the sendfile(SF_NODISKIO) rework. Yes, thanks, missed this, fixed. Also fixed wrong return value in the preload handler (it is expected to be ssize_t and match ngx_file_aio_read() result, so using NGX_OK is misleading, replaced with NGX_AGAIN). Diff: diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c +++ b/src/http/ngx_http_copy_filter_module.c @@ -230,7 +230,7 @@ ngx_http_copy_aio_sendfile_preload(ngx_b * a write event on the main connection */ - return NGX_OK; + return NGX_AGAIN; } n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); diff --git a/src/os/unix/ngx_freebsd_sendfile_chain.c b/src/os/unix/ngx_freebsd_sendfile_chain.c --- a/src/os/unix/ngx_freebsd_sendfile_chain.c +++ b/src/os/unix/ngx_freebsd_sendfile_chain.c @@ -255,19 +255,6 @@ ngx_freebsd_sendfile_chain(ngx_connectio #if (NGX_HAVE_AIO_SENDFILE) if (ebusy) { - if (aio->event.active) { - /* - * tolerate duplicate calls; they can happen due to subrequests - * or multiple calls of the next body filter from a filter - */ - - if (sent) { - c->busy_count = 0; - } - - return in; - } - if (sent == 0) { c->busy_count++; Full patch: # HG changeset patch # User Maxim Dounin # Date 1637847703 -10800 # Thu Nov 25 16:41:43 2021 +0300 # Node ID c960e182900a8d0b7f3041731ba416f2c7e69d14 # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 HTTP/2: fixed "task already active" with sendfile in threads. 
With sendfile in threads, "task already active" alerts might appear in logs if a write event happens on the main HTTP/2 connection, triggering a sendfile in threads while another thread operation is already running. Observed with "aio threads; aio_write on; sendfile on;" and with thread event handlers modified to post a write event to the main HTTP/2 connection (though can happen without any modifications). Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate aio operation, resulting in "second aio post" alerts. This is, however, harder to reproduce, especially on modern FreeBSD systems, since sendfile() usually does not return EBUSY. Fix is to avoid starting a sendfile operation if other thread operation is active by checking r->aio in the thread handler (and, similarly, in aio preload handler). The added check also makes duplicate calls protection redundant, so it is removed. diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c +++ b/src/http/ngx_http_copy_filter_module.c @@ -219,13 +219,25 @@ ngx_http_copy_aio_sendfile_preload(ngx_b ngx_http_request_t *r; ngx_output_chain_ctx_t *ctx; + aio = file->file->aio; + r = aio->data; + + if (r->aio) { + /* + * tolerate sendfile() calls if another operation is already + * running; this can happen due to subrequests, multiple calls + * of the next body filter from a filter, or in HTTP/2 due to + * a write event on the main connection + */ + + return NGX_AGAIN; + } + n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); if (n == NGX_AGAIN) { - aio = file->file->aio; aio->handler = ngx_http_copy_aio_sendfile_event_handler; - r = aio->data; r->main->blocked++; r->aio = 1; @@ -263,6 +275,7 @@ static ngx_int_t ngx_http_copy_thread_handler(ngx_thread_task_t *task, ngx_file_t *file) { ngx_str_t name; + ngx_connection_t *c; ngx_thread_pool_t *tp; ngx_http_request_t *r; ngx_output_chain_ctx_t *ctx; @@ -270,6 +283,27 
@@ ngx_http_copy_thread_handler(ngx_thread_ r = file->thread_ctx; + if (r->aio) { + /* + * tolerate sendfile() calls if another operation is already + * running; this can happen due to subrequests, multiple calls + * of the next body filter from a filter, or in HTTP/2 due to + * a write event on the main connection + */ + + c = r->connection; + +#if (NGX_HTTP_V2) + if (r->stream) { + c = r->stream->connection->connection; + } +#endif + + if (task == c->sendfile_task) { + return NGX_OK; + } + } + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); tp = clcf->thread_pool; diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -3847,6 +3847,7 @@ ngx_http_upstream_thread_handler(ngx_thr { ngx_str_t name; ngx_event_pipe_t *p; + ngx_connection_t *c; ngx_thread_pool_t *tp; ngx_http_request_t *r; ngx_http_core_loc_conf_t *clcf; @@ -3854,6 +3855,27 @@ ngx_http_upstream_thread_handler(ngx_thr r = file->thread_ctx; p = r->upstream->pipe; + if (r->aio) { + /* + * tolerate sendfile() calls if another operation is already + * running; this can happen due to subrequests, multiple calls + * of the next body filter from a filter, or in HTTP/2 due to + * a write event on the main connection + */ + + c = r->connection; + +#if (NGX_HTTP_V2) + if (r->stream) { + c = r->stream->connection->connection; + } +#endif + + if (task == c->sendfile_task) { + return NGX_OK; + } + } + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); tp = clcf->thread_pool; diff --git a/src/os/unix/ngx_freebsd_sendfile_chain.c b/src/os/unix/ngx_freebsd_sendfile_chain.c --- a/src/os/unix/ngx_freebsd_sendfile_chain.c +++ b/src/os/unix/ngx_freebsd_sendfile_chain.c @@ -255,19 +255,6 @@ ngx_freebsd_sendfile_chain(ngx_connectio #if (NGX_HAVE_AIO_SENDFILE) if (ebusy) { - if (aio->event.active) { - /* - * tolerate duplicate calls; they can happen due to subrequests - * or multiple calls of the next body filter 
from a filter - */ - - if (sent) { - c->busy_count = 0; - } - - return in; - } - if (sent == 0) { c->busy_count++; diff --git a/src/os/unix/ngx_linux_sendfile_chain.c b/src/os/unix/ngx_linux_sendfile_chain.c --- a/src/os/unix/ngx_linux_sendfile_chain.c +++ b/src/os/unix/ngx_linux_sendfile_chain.c @@ -379,15 +379,6 @@ ngx_linux_sendfile_thread(ngx_connection return ctx->sent; } - if (task->event.active && ctx->file == file) { - /* - * tolerate duplicate calls; they can happen due to subrequests - * or multiple calls of the next body filter from a filter - */ - - return NGX_DONE; - } - ctx->file = file; ctx->socket = c->fd; ctx->size = size; -- Maxim Dounin http://mdounin.ru/ From arut at nginx.com Thu Nov 25 14:20:48 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 25 Nov 2021 17:20:48 +0300 Subject: [PATCH 0 of 3] Misc QUIC stream patches Message-ID: Misc QUIC stream patches. From arut at nginx.com Thu Nov 25 14:20:49 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 25 Nov 2021 17:20:49 +0300 Subject: [PATCH 1 of 3] QUIC: post stream events instead of calling their handlers In-Reply-To: References: Message-ID: <5b03ffd757804542daec.1637850049@arut-laptop> # HG changeset patch # User Roman Arutyunyan # Date 1637692791 -10800 # Tue Nov 23 21:39:51 2021 +0300 # Branch quic # Node ID 5b03ffd757804542daec73188a509b02e6b2c596 # Parent d041b8d6ab0b2dea150536531345fa47c696b303 QUIC: post stream events instead of calling their handlers. This potentially reduces the number of handler calls. 
diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -1122,7 +1122,7 @@ ngx_quic_handle_stream_frame(ngx_connect rev->ready = 1; if (rev->active) { - rev->handler(rev); + ngx_post_event(rev, &ngx_posted_events); } } @@ -1369,7 +1369,7 @@ ngx_quic_handle_reset_stream_frame(ngx_c } if (rev->active) { - rev->handler(rev); + ngx_post_event(rev, &ngx_posted_events); } return NGX_OK; @@ -1438,7 +1438,7 @@ ngx_quic_handle_stop_sending_frame(ngx_c wev = qs->connection->write; if (wev->active) { - wev->handler(wev); + ngx_post_event(wev, &ngx_posted_events); } return NGX_OK; From arut at nginx.com Thu Nov 25 14:20:50 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 25 Nov 2021 17:20:50 +0300 Subject: [PATCH 2 of 3] QUIC: simplified stream initialization In-Reply-To: References: Message-ID: <3d2354bfa1a2a257b9f7.1637850050@arut-laptop> # HG changeset patch # User Roman Arutyunyan # Date 1637693300 -10800 # Tue Nov 23 21:48:20 2021 +0300 # Branch quic # Node ID 3d2354bfa1a2a257b9f73772ad0836585be85a6c # Parent 5b03ffd757804542daec73188a509b02e6b2c596 QUIC: simplified stream initialization. After creation, a client stream is added to qc->streams.uninitialized queue. After initialization it's removed from the queue. If a stream is never initialized, it is freed in ngx_quic_close_streams(). Stream initializer is now set as read event handler in stream connection. Previously qc->streams.uninitialized was used for delayed stream initialization. The change makes is possible not to handle separately the case of a new stream in stream-related frame handlers. It makes these handlers simpler since new streams and existing streams are now handled by the same code. 
diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -13,10 +13,9 @@ #define NGX_QUIC_STREAM_GONE (void *) -1 -static ngx_quic_stream_t *ngx_quic_create_client_stream(ngx_connection_t *c, - uint64_t id); +static ngx_quic_stream_t *ngx_quic_get_stream(ngx_connection_t *c, uint64_t id); static ngx_int_t ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id); -static ngx_int_t ngx_quic_init_stream(ngx_quic_stream_t *qs); +static void ngx_quic_init_stream_handler(ngx_event_t *ev); static void ngx_quic_init_streams_handler(ngx_connection_t *c); static ngx_quic_stream_t *ngx_quic_create_stream(ngx_connection_t *c, uint64_t id); @@ -306,21 +305,28 @@ ngx_quic_shutdown_stream(ngx_connection_ static ngx_quic_stream_t * -ngx_quic_create_client_stream(ngx_connection_t *c, uint64_t id) +ngx_quic_get_stream(ngx_connection_t *c, uint64_t id) { uint64_t min_id; + ngx_event_t *rev; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic stream id:0x%xL is new", id); + qc = ngx_quic_get_connection(c); + + qs = ngx_quic_find_stream(&qc->streams.tree, id); - qc = ngx_quic_get_connection(c); + if (qs) { + return qs; + } if (qc->shutdown || qc->closing) { return NGX_QUIC_STREAM_GONE; } + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic stream id:0x%xL is missing", id); + if (id & NGX_QUIC_STREAM_UNIDIRECTIONAL) { if (id & NGX_QUIC_STREAM_SERVER_INITIATED) { @@ -377,7 +383,11 @@ ngx_quic_create_client_stream(ngx_connec * streams of that type with lower-numbered stream IDs also being opened. 
*/ - for ( /* void */ ; min_id < id; min_id += 0x04) { +#if (NGX_SUPPRESS_WARN) + qs = NULL; +#endif + + for ( /* void */ ; min_id <= id; min_id += 0x04) { qs = ngx_quic_create_stream(c, min_id); @@ -389,22 +399,17 @@ ngx_quic_create_client_stream(ngx_connec continue; } - if (ngx_quic_init_stream(qs) != NGX_OK) { - return NULL; - } + ngx_queue_insert_tail(&qc->streams.uninitialized, &qs->queue); - if (qc->shutdown || qc->closing) { - return NGX_QUIC_STREAM_GONE; + rev = qs->connection->read; + rev->handler = ngx_quic_init_stream_handler; + + if (qc->streams.initialized) { + ngx_post_event(rev, &ngx_posted_events); } } - qs = ngx_quic_create_stream(c, id); - if (qs == NULL) { - if (ngx_quic_reject_stream(c, id) != NGX_OK) { - return NULL; - } - return NGX_QUIC_STREAM_GONE; } @@ -461,29 +466,20 @@ ngx_quic_reject_stream(ngx_connection_t } -static ngx_int_t -ngx_quic_init_stream(ngx_quic_stream_t *qs) +static void +ngx_quic_init_stream_handler(ngx_event_t *ev) { - ngx_connection_t *c; - ngx_quic_connection_t *qc; - - qc = ngx_quic_get_connection(qs->parent); + ngx_connection_t *c; + ngx_quic_stream_t *qs; - c = qs->connection; - - if (!qc->streams.initialized) { - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic postpone stream init"); - - ngx_queue_insert_tail(&qc->streams.uninitialized, &qs->queue); - return NGX_OK; - } + c = ev->data; + qs = c->quic; ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic init stream"); - c->listening->handler(c); + ngx_queue_remove(&qs->queue); - return NGX_OK; + c->listening->handler(c); } @@ -527,16 +523,12 @@ ngx_quic_init_streams_handler(ngx_connec qc = ngx_quic_get_connection(c); - while (!ngx_queue_empty(&qc->streams.uninitialized)) { - q = ngx_queue_head(&qc->streams.uninitialized); - ngx_queue_remove(q); - + for (q = ngx_queue_head(&qc->streams.uninitialized); + q != ngx_queue_sentinel(&qc->streams.uninitialized); + q = ngx_queue_next(q)) + { qs = ngx_queue_data(q, ngx_quic_stream_t, queue); - - 
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, qs->connection->log, 0, - "quic init postponed stream"); - - qs->connection->listening->handler(qs->connection); + ngx_post_event(qs->connection->read, &ngx_posted_events); } qc->streams.initialized = 1; @@ -1015,7 +1007,6 @@ ngx_quic_handle_stream_frame(ngx_connect ngx_quic_frame_t *frame) { uint64_t last; - ngx_pool_t *pool; ngx_event_t *rev; ngx_connection_t *sc; ngx_quic_stream_t *qs; @@ -1035,39 +1026,14 @@ ngx_quic_handle_stream_frame(ngx_connect /* no overflow since both values are 62-bit */ last = f->offset + f->length; - qs = ngx_quic_find_stream(&qc->streams.tree, f->stream_id); + qs = ngx_quic_get_stream(c, f->stream_id); if (qs == NULL) { - qs = ngx_quic_create_client_stream(c, f->stream_id); - - if (qs == NULL) { - return NGX_ERROR; - } - - if (qs == NGX_QUIC_STREAM_GONE) { - return NGX_OK; - } - - sc = qs->connection; + return NGX_ERROR; + } - if (ngx_quic_control_flow(sc, last) != NGX_OK) { - goto cleanup; - } - - if (f->fin) { - sc->read->pending_eof = 1; - qs->final_size = last; - } - - if (f->offset == 0) { - sc->read->ready = 1; - } - - if (ngx_quic_order_bufs(c, &qs->in, frame->data, f->offset) != NGX_OK) { - goto cleanup; - } - - return ngx_quic_init_stream(qs); + if (qs == NGX_QUIC_STREAM_GONE) { + return NGX_OK; } sc = qs->connection; @@ -1127,15 +1093,6 @@ ngx_quic_handle_stream_frame(ngx_connect } return NGX_OK; - -cleanup: - - pool = sc->pool; - - ngx_close_connection(sc); - ngx_destroy_pool(pool); - - return NGX_ERROR; } @@ -1212,20 +1169,14 @@ ngx_quic_handle_stream_data_blocked_fram return NGX_ERROR; } - qs = ngx_quic_find_stream(&qc->streams.tree, f->id); + qs = ngx_quic_get_stream(c, f->id); if (qs == NULL) { - qs = ngx_quic_create_client_stream(c, f->id); - - if (qs == NULL) { - return NGX_ERROR; - } + return NGX_ERROR; + } - if (qs == NGX_QUIC_STREAM_GONE) { - return NGX_OK; - } - - return ngx_quic_init_stream(qs); + if (qs == NGX_QUIC_STREAM_GONE) { + return NGX_OK; } return 
ngx_quic_update_max_stream_data(qs->connection); @@ -1250,24 +1201,14 @@ ngx_quic_handle_max_stream_data_frame(ng return NGX_ERROR; } - qs = ngx_quic_find_stream(&qc->streams.tree, f->id); + qs = ngx_quic_get_stream(c, f->id); if (qs == NULL) { - qs = ngx_quic_create_client_stream(c, f->id); - - if (qs == NULL) { - return NGX_ERROR; - } + return NGX_ERROR; + } - if (qs == NGX_QUIC_STREAM_GONE) { - return NGX_OK; - } - - if (f->limit > qs->send_max_data) { - qs->send_max_data = f->limit; - } - - return ngx_quic_init_stream(qs); + if (qs == NGX_QUIC_STREAM_GONE) { + return NGX_OK; } if (f->limit <= qs->send_max_data) { @@ -1295,7 +1236,6 @@ ngx_int_t ngx_quic_handle_reset_stream_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_reset_stream_frame_t *f) { - ngx_pool_t *pool; ngx_event_t *rev; ngx_connection_t *sc; ngx_quic_stream_t *qs; @@ -1310,36 +1250,14 @@ ngx_quic_handle_reset_stream_frame(ngx_c return NGX_ERROR; } - qs = ngx_quic_find_stream(&qc->streams.tree, f->id); + qs = ngx_quic_get_stream(c, f->id); if (qs == NULL) { - qs = ngx_quic_create_client_stream(c, f->id); - - if (qs == NULL) { - return NGX_ERROR; - } - - if (qs == NGX_QUIC_STREAM_GONE) { - return NGX_OK; - } - - sc = qs->connection; + return NGX_ERROR; + } - rev = sc->read; - rev->error = 1; - rev->ready = 1; - - if (ngx_quic_control_flow(sc, f->final_size) != NGX_OK) { - goto cleanup; - } - - qs->final_size = f->final_size; - - if (ngx_quic_update_flow(sc, qs->final_size) != NGX_OK) { - goto cleanup; - } - - return ngx_quic_init_stream(qs); + if (qs == NGX_QUIC_STREAM_GONE) { + return NGX_OK; } sc = qs->connection; @@ -1373,15 +1291,6 @@ ngx_quic_handle_reset_stream_frame(ngx_c } return NGX_OK; - -cleanup: - - pool = sc->pool; - - ngx_close_connection(sc); - ngx_destroy_pool(pool); - - return NGX_ERROR; } @@ -1389,9 +1298,7 @@ ngx_int_t ngx_quic_handle_stop_sending_frame(ngx_connection_t *c, ngx_quic_header_t *pkt, ngx_quic_stop_sending_frame_t *f) { - ngx_pool_t *pool; ngx_event_t 
*wev; - ngx_connection_t *sc; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; @@ -1404,31 +1311,14 @@ ngx_quic_handle_stop_sending_frame(ngx_c return NGX_ERROR; } - qs = ngx_quic_find_stream(&qc->streams.tree, f->id); + qs = ngx_quic_get_stream(c, f->id); if (qs == NULL) { - qs = ngx_quic_create_client_stream(c, f->id); - - if (qs == NULL) { - return NGX_ERROR; - } - - if (qs == NGX_QUIC_STREAM_GONE) { - return NGX_OK; - } + return NGX_ERROR; + } - sc = qs->connection; - - if (ngx_quic_reset_stream(sc, f->error_code) != NGX_OK) { - pool = sc->pool; - - ngx_close_connection(sc); - ngx_destroy_pool(pool); - - return NGX_ERROR; - } - - return ngx_quic_init_stream(qs); + if (qs == NGX_QUIC_STREAM_GONE) { + return NGX_OK; } if (ngx_quic_reset_stream(qs->connection, f->error_code) != NGX_OK) { From arut at nginx.com Thu Nov 25 14:20:51 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 25 Nov 2021 17:20:51 +0300 Subject: [PATCH 3 of 3] QUIC: stream recv shutdown support In-Reply-To: References: Message-ID: # HG changeset patch # User Roman Arutyunyan # Date 1637695967 -10800 # Tue Nov 23 22:32:47 2021 +0300 # Branch quic # Node ID e1de02d829f7f85b1e2e6b289ec4c20318712321 # Parent 3d2354bfa1a2a257b9f73772ad0836585be85a6c QUIC: stream recv shutdown support. Recv shutdown sends STOP_SENDING to client. Both send and recv shutdown functions are now called from stream cleanup handler. While here, setting c->read->pending_eof is moved down to fix recv shutdown in the cleanup handler. 
diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -13,6 +13,8 @@ #define NGX_QUIC_STREAM_GONE (void *) -1 +static ngx_int_t ngx_quic_shutdown_stream_send(ngx_connection_t *c); +static ngx_int_t ngx_quic_shutdown_stream_recv(ngx_connection_t *c); static ngx_quic_stream_t *ngx_quic_get_stream(ngx_connection_t *c, uint64_t id); static ngx_int_t ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id); static void ngx_quic_init_stream_handler(ngx_event_t *ev); @@ -257,16 +259,24 @@ ngx_quic_reset_stream(ngx_connection_t * ngx_int_t ngx_quic_shutdown_stream(ngx_connection_t *c, int how) { + if (how == NGX_WRITE_SHUTDOWN) { + return ngx_quic_shutdown_stream_send(c); + + } else { + return ngx_quic_shutdown_stream_recv(c); + } +} + + +static ngx_int_t +ngx_quic_shutdown_stream_send(ngx_connection_t *c) +{ ngx_event_t *wev; ngx_connection_t *pc; ngx_quic_frame_t *frame; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - if (how != NGX_WRITE_SHUTDOWN) { - return NGX_OK; - } - wev = c->write; if (wev->error) { @@ -283,7 +293,7 @@ ngx_quic_shutdown_stream(ngx_connection_ } ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic stream id:0x%xL shutdown", qs->id); + "quic stream id:0x%xL send shutdown", qs->id); frame->level = ssl_encryption_application; frame->type = NGX_QUIC_FT_STREAM; @@ -304,6 +314,48 @@ ngx_quic_shutdown_stream(ngx_connection_ } +static ngx_int_t +ngx_quic_shutdown_stream_recv(ngx_connection_t *c) +{ + ngx_event_t *rev; + ngx_connection_t *pc; + ngx_quic_frame_t *frame; + ngx_quic_stream_t *qs; + ngx_quic_connection_t *qc; + + rev = c->read; + + if (rev->pending_eof || rev->error) { + return NGX_OK; + } + + qs = c->quic; + pc = qs->parent; + qc = ngx_quic_get_connection(pc); + + if (qc->conf->stream_close_code == 0) { + return NGX_OK; + } + + frame = ngx_quic_alloc_frame(pc); + if (frame == NULL) { + return NGX_ERROR; 
+ } + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic stream id:0x%xL recv shutdown", qs->id); + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_STOP_SENDING; + frame->u.stop_sending.id = qs->id; + frame->u.stop_sending.error_code = qc->conf->stream_close_code; + + ngx_quic_queue_frame(qc, frame); + + return NGX_OK; +} + + static ngx_quic_stream_t * ngx_quic_get_stream(ngx_connection_t *c, uint64_t id) { @@ -918,30 +970,18 @@ ngx_quic_stream_cleanup_handler(void *da goto done; } - c->read->pending_eof = 1; - - (void) ngx_quic_update_flow(c, qs->recv_last); - if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0 || (qs->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) == 0) { - if (!c->read->pending_eof && !c->read->error - && qc->conf->stream_close_code) - { - frame = ngx_quic_alloc_frame(pc); - if (frame == NULL) { - goto done; - } - - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_STOP_SENDING; - frame->u.stop_sending.id = qs->id; - frame->u.stop_sending.error_code = qc->conf->stream_close_code; - - ngx_quic_queue_frame(qc, frame); + if (ngx_quic_shutdown_stream_recv(c) != NGX_OK) { + goto done; } } + c->read->pending_eof = 1; + + (void) ngx_quic_update_flow(c, qs->recv_last); + if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0) { frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { @@ -968,29 +1008,7 @@ ngx_quic_stream_cleanup_handler(void *da } } - if (c->write->error) { - goto done; - } - - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic stream id:0x%xL send fin", qs->id); - - frame = ngx_quic_alloc_frame(pc); - if (frame == NULL) { - goto done; - } - - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_STREAM; - frame->u.stream.off = 1; - frame->u.stream.len = 1; - frame->u.stream.fin = 1; - - frame->u.stream.stream_id = qs->id; - frame->u.stream.offset = c->sent; - frame->u.stream.length = 0; - - ngx_quic_queue_frame(qc, frame); + (void) ngx_quic_shutdown_stream_send(c); done: From 
pluknet at nginx.com Thu Nov 25 17:42:59 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 25 Nov 2021 20:42:59 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: References: <76e072a6947a22186870.1636600221@vm-bsd.mdounin.ru> <688CF0B6-383D-469F-86CF-737F0ECE26EF@nginx.com> <22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> <5F9378C5-CB2A-4466-B203-D9AB0F776997@nginx.com> <82D01D9B-BE9A-4619-95BA-D63948429FCC@nginx.com> Message-ID: > On 25 Nov 2021, at 16:54, Maxim Dounin wrote: > > Hello! > > On Thu, Nov 25, 2021 at 04:15:09PM +0300, Sergey Kandaurov wrote: > [ trim ] >> BTW, what about a similar check in ngx_freebsd_sendfile_chain() >> used to catch duplicate calls? Both were added in 6422:768e287a6f36. >> Anyway, it is removed in the sendfile(SF_NODISKIO) rework. > > Yes, thanks, missed this, fixed. Also fixed wrong return value in > the preload handler (it is expected to be ssize_t and match > ngx_file_aio_read() result, so using NGX_OK is misleading, replaced > with NGX_AGAIN). > > Diff: > > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c > +++ b/src/http/ngx_http_copy_filter_module.c > @@ -230,7 +230,7 @@ ngx_http_copy_aio_sendfile_preload(ngx_b > * a write event on the main connection > */ > > - return NGX_OK; > + return NGX_AGAIN; > } > Indeed, nice catch. 
> n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); > diff --git a/src/os/unix/ngx_freebsd_sendfile_chain.c b/src/os/unix/ngx_freebsd_sendfile_chain.c > --- a/src/os/unix/ngx_freebsd_sendfile_chain.c > +++ b/src/os/unix/ngx_freebsd_sendfile_chain.c > @@ -255,19 +255,6 @@ ngx_freebsd_sendfile_chain(ngx_connectio > #if (NGX_HAVE_AIO_SENDFILE) > > if (ebusy) { > - if (aio->event.active) { > - /* > - * tolerate duplicate calls; they can happen due to subrequests > - * or multiple calls of the next body filter from a filter > - */ > - > - if (sent) { > - c->busy_count = 0; > - } > - > - return in; > - } > - > if (sent == 0) { > c->busy_count++; > > With the above adjustment, this part looks good. > Full patch: > > # HG changeset patch > # User Maxim Dounin > # Date 1637847703 -10800 > # Thu Nov 25 16:41:43 2021 +0300 > # Node ID c960e182900a8d0b7f3041731ba416f2c7e69d14 > # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 > HTTP/2: fixed "task already active" with sendfile in threads. > > With sendfile in threads, "task already active" alerts might appear in logs > if a write event happens on the main HTTP/2 connection, triggering a sendfile > in threads while another thread operation is already running. Observed > with "aio threads; aio_write on; sendfile on;" and with thread event handlers > modified to post a write event to the main HTTP/2 connection (though can > happen without any modifications). > > Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate > aio operation, resulting in "second aio post" alerts. This is, however, > harder to reproduce, especially on modern FreeBSD systems, since sendfile() > usually does not return EBUSY. > > Fix is to avoid starting a sendfile operation if other thread operation > is active by checking r->aio in the thread handler (and, similarly, in > aio preload handler). The added check also makes duplicate calls protection > redundant, so it is removed. > > [..] Overall, it looks good. 
-- Sergey Kandaurov From mdounin at mdounin.ru Thu Nov 25 19:03:46 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 25 Nov 2021 19:03:46 +0000 Subject: [nginx] HTTP/2: fixed "task already active" with sendfile in threads. Message-ID: details: https://hg.nginx.org/nginx/rev/555533169506 branches: changeset: 7974:555533169506 user: Maxim Dounin date: Thu Nov 25 22:02:05 2021 +0300 description: HTTP/2: fixed "task already active" with sendfile in threads. With sendfile in threads, "task already active" alerts might appear in logs if a write event happens on the main HTTP/2 connection, triggering a sendfile in threads while another thread operation is already running. Observed with "aio threads; aio_write on; sendfile on;" and with thread event handlers modified to post a write event to the main HTTP/2 connection (though can happen without any modifications). Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate aio operation, resulting in "second aio post" alerts. This is, however, harder to reproduce, especially on modern FreeBSD systems, since sendfile() usually does not return EBUSY. Fix is to avoid starting a sendfile operation if other thread operation is active by checking r->aio in the thread handler (and, similarly, in aio preload handler). The added check also makes duplicate calls protection redundant, so it is removed. 
diffstat: src/http/ngx_http_copy_filter_module.c | 38 ++++++++++++++++++++++++++++++- src/http/ngx_http_upstream.c | 22 ++++++++++++++++++ src/os/unix/ngx_freebsd_sendfile_chain.c | 13 ---------- src/os/unix/ngx_linux_sendfile_chain.c | 9 ------- 4 files changed, 58 insertions(+), 24 deletions(-) diffs (148 lines): diff -r 3443c02ca1d1 -r 555533169506 src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c Mon Nov 01 18:09:34 2021 +0300 +++ b/src/http/ngx_http_copy_filter_module.c Thu Nov 25 22:02:05 2021 +0300 @@ -219,13 +219,25 @@ ngx_http_copy_aio_sendfile_preload(ngx_b ngx_http_request_t *r; ngx_output_chain_ctx_t *ctx; + aio = file->file->aio; + r = aio->data; + + if (r->aio) { + /* + * tolerate sendfile() calls if another operation is already + * running; this can happen due to subrequests, multiple calls + * of the next body filter from a filter, or in HTTP/2 due to + * a write event on the main connection + */ + + return NGX_AGAIN; + } + n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); if (n == NGX_AGAIN) { - aio = file->file->aio; aio->handler = ngx_http_copy_aio_sendfile_event_handler; - r = aio->data; r->main->blocked++; r->aio = 1; @@ -263,6 +275,7 @@ static ngx_int_t ngx_http_copy_thread_handler(ngx_thread_task_t *task, ngx_file_t *file) { ngx_str_t name; + ngx_connection_t *c; ngx_thread_pool_t *tp; ngx_http_request_t *r; ngx_output_chain_ctx_t *ctx; @@ -270,6 +283,27 @@ ngx_http_copy_thread_handler(ngx_thread_ r = file->thread_ctx; + if (r->aio) { + /* + * tolerate sendfile() calls if another operation is already + * running; this can happen due to subrequests, multiple calls + * of the next body filter from a filter, or in HTTP/2 due to + * a write event on the main connection + */ + + c = r->connection; + +#if (NGX_HTTP_V2) + if (r->stream) { + c = r->stream->connection->connection; + } +#endif + + if (task == c->sendfile_task) { + return NGX_OK; + } + } + clcf = ngx_http_get_module_loc_conf(r, 
ngx_http_core_module); tp = clcf->thread_pool; diff -r 3443c02ca1d1 -r 555533169506 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Mon Nov 01 18:09:34 2021 +0300 +++ b/src/http/ngx_http_upstream.c Thu Nov 25 22:02:05 2021 +0300 @@ -3847,6 +3847,7 @@ ngx_http_upstream_thread_handler(ngx_thr { ngx_str_t name; ngx_event_pipe_t *p; + ngx_connection_t *c; ngx_thread_pool_t *tp; ngx_http_request_t *r; ngx_http_core_loc_conf_t *clcf; @@ -3854,6 +3855,27 @@ ngx_http_upstream_thread_handler(ngx_thr r = file->thread_ctx; p = r->upstream->pipe; + if (r->aio) { + /* + * tolerate sendfile() calls if another operation is already + * running; this can happen due to subrequests, multiple calls + * of the next body filter from a filter, or in HTTP/2 due to + * a write event on the main connection + */ + + c = r->connection; + +#if (NGX_HTTP_V2) + if (r->stream) { + c = r->stream->connection->connection; + } +#endif + + if (task == c->sendfile_task) { + return NGX_OK; + } + } + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); tp = clcf->thread_pool; diff -r 3443c02ca1d1 -r 555533169506 src/os/unix/ngx_freebsd_sendfile_chain.c --- a/src/os/unix/ngx_freebsd_sendfile_chain.c Mon Nov 01 18:09:34 2021 +0300 +++ b/src/os/unix/ngx_freebsd_sendfile_chain.c Thu Nov 25 22:02:05 2021 +0300 @@ -255,19 +255,6 @@ ngx_freebsd_sendfile_chain(ngx_connectio #if (NGX_HAVE_AIO_SENDFILE) if (ebusy) { - if (aio->event.active) { - /* - * tolerate duplicate calls; they can happen due to subrequests - * or multiple calls of the next body filter from a filter - */ - - if (sent) { - c->busy_count = 0; - } - - return in; - } - if (sent == 0) { c->busy_count++; diff -r 3443c02ca1d1 -r 555533169506 src/os/unix/ngx_linux_sendfile_chain.c --- a/src/os/unix/ngx_linux_sendfile_chain.c Mon Nov 01 18:09:34 2021 +0300 +++ b/src/os/unix/ngx_linux_sendfile_chain.c Thu Nov 25 22:02:05 2021 +0300 @@ -379,15 +379,6 @@ ngx_linux_sendfile_thread(ngx_connection return ctx->sent; } - if 
(task->event.active && ctx->file == file) { - /* - * tolerate duplicate calls; they can happen due to subrequests - * or multiple calls of the next body filter from a filter - */ - - return NGX_DONE; - } - ctx->file = file; ctx->socket = c->fd; ctx->size = size; From mdounin at mdounin.ru Thu Nov 25 19:03:49 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 25 Nov 2021 19:03:49 +0000 Subject: [nginx] HTTP/2: fixed sendfile() aio handling. Message-ID: details: https://hg.nginx.org/nginx/rev/a7a77549265e branches: changeset: 7975:a7a77549265e user: Maxim Dounin date: Thu Nov 25 22:02:10 2021 +0300 description: HTTP/2: fixed sendfile() aio handling. With sendfile() in threads ("aio threads; sendfile on;"), client connection can block on writing, waiting for sendfile() to complete. In HTTP/2 this might result in the request hang, since an attempt to continue processing in thread event handler will call request's write event handler, which is usually stopped by ngx_http_v2_send_chain(): it does nothing if there are no additional data and stream->queued is set. Further, HTTP/2 resets stream's c->write->ready to 0 if writing blocks, so just fixing ngx_http_v2_send_chain() is not enough. Can be reproduced with test suite on Linux with: TEST_NGINX_GLOBALS_HTTP="aio threads; sendfile on;" prove h2*.t The following tests currently fail: h2_keepalive.t, h2_priority.t, h2_proxy_max_temp_file_size.t, h2.t, h2_trailers.t. Similarly, sendfile() with AIO preloading on FreeBSD can block as well, with similar results. This is, however, harder to reproduce, especially on modern FreeBSD systems, since sendfile() usually does not return EBUSY. Fix is to modify ngx_http_v2_send_chain() so it actually tries to send data to the main connection when called, and to make sure that c->write->ready is set by the relevant event handlers. 
diffstat: src/http/ngx_http_copy_filter_module.c | 32 +++++++++++++++++++++++++++++++- src/http/ngx_http_upstream.c | 14 ++++++++++++++ src/http/v2/ngx_http_v2_filter_module.c | 29 ++++++++++++++++++++--------- 3 files changed, 65 insertions(+), 10 deletions(-) diffs (142 lines): diff -r 555533169506 -r a7a77549265e src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c Thu Nov 25 22:02:05 2021 +0300 +++ b/src/http/ngx_http_copy_filter_module.c Thu Nov 25 22:02:10 2021 +0300 @@ -253,16 +253,32 @@ static void ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev) { ngx_event_aio_t *aio; + ngx_connection_t *c; ngx_http_request_t *r; aio = ev->data; r = aio->data; + c = r->connection; r->main->blocked--; r->aio = 0; ev->complete = 0; - r->connection->write->handler(r->connection->write); +#if (NGX_HTTP_V2) + + if (r->stream) { + /* + * for HTTP/2, update write event to make sure processing will + * reach the main connection to handle sendfile() preload + */ + + c->write->ready = 1; + c->write->active = 0; + } + +#endif + + c->write->handler(c->write); } #endif @@ -357,6 +373,20 @@ ngx_http_copy_thread_event_handler(ngx_e r->main->blocked--; r->aio = 0; +#if (NGX_HTTP_V2) + + if (r->stream) { + /* + * for HTTP/2, update write event to make sure processing will + * reach the main connection to handle sendfile() in threads + */ + + c->write->ready = 1; + c->write->active = 0; + } + +#endif + if (r->done) { /* * trigger connection event handler if the subrequest was diff -r 555533169506 -r a7a77549265e src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Thu Nov 25 22:02:05 2021 +0300 +++ b/src/http/ngx_http_upstream.c Thu Nov 25 22:02:10 2021 +0300 @@ -3927,6 +3927,20 @@ ngx_http_upstream_thread_event_handler(n r->main->blocked--; r->aio = 0; +#if (NGX_HTTP_V2) + + if (r->stream) { + /* + * for HTTP/2, update write event to make sure processing will + * reach the main connection to handle sendfile() in threads + */ + + 
c->write->ready = 1; + c->write->active = 0; + } + +#endif + if (r->done) { /* * trigger connection event handler if the subrequest was diff -r 555533169506 -r a7a77549265e src/http/v2/ngx_http_v2_filter_module.c --- a/src/http/v2/ngx_http_v2_filter_module.c Thu Nov 25 22:02:05 2021 +0300 +++ b/src/http/v2/ngx_http_v2_filter_module.c Thu Nov 25 22:02:10 2021 +0300 @@ -1432,6 +1432,9 @@ ngx_http_v2_send_chain(ngx_connection_t size = 0; #endif + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, + "http2 send chain: %p", in); + while (in) { size = ngx_buf_size(in->buf); @@ -1450,12 +1453,8 @@ ngx_http_v2_send_chain(ngx_connection_t return NGX_CHAIN_ERROR; } - if (stream->queued) { - fc->write->active = 1; - fc->write->ready = 0; - - } else { - fc->buffered &= ~NGX_HTTP_V2_BUFFERED; + if (ngx_http_v2_filter_send(fc, stream) == NGX_ERROR) { + return NGX_CHAIN_ERROR; } return NULL; @@ -1464,9 +1463,16 @@ ngx_http_v2_send_chain(ngx_connection_t h2c = stream->connection; if (size && ngx_http_v2_flow_control(h2c, stream) == NGX_DECLINED) { - fc->write->active = 1; - fc->write->ready = 0; - return in; + + if (ngx_http_v2_filter_send(fc, stream) == NGX_ERROR) { + return NGX_CHAIN_ERROR; + } + + if (ngx_http_v2_flow_control(h2c, stream) == NGX_DECLINED) { + fc->write->active = 1; + fc->write->ready = 0; + return in; + } } if (in->buf->tag == (ngx_buf_tag_t) &ngx_http_v2_filter_get_shadow) { @@ -1809,6 +1815,11 @@ ngx_http_v2_waiting_queue(ngx_http_v2_co static ngx_inline ngx_int_t ngx_http_v2_filter_send(ngx_connection_t *fc, ngx_http_v2_stream_t *stream) { + if (stream->queued == 0) { + fc->buffered &= ~NGX_HTTP_V2_BUFFERED; + return NGX_OK; + } + stream->blocked = 1; if (ngx_http_v2_send_output_queue(stream->connection) == NGX_ERROR) { From mdounin at mdounin.ru Thu Nov 25 19:06:04 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 25 Nov 2021 22:06:04 +0300 Subject: [PATCH] HTTP/2: fixed sendfile() aio handling In-Reply-To: References: 
<22EEAB4A-054D-4AD2-BCFB-86B7EC679D78@nginx.com> <5F9378C5-CB2A-4466-B203-D9AB0F776997@nginx.com> <82D01D9B-BE9A-4619-95BA-D63948429FCC@nginx.com> Message-ID: Hello! On Thu, Nov 25, 2021 at 08:42:59PM +0300, Sergey Kandaurov wrote: [...] > > Full patch: > > > > # HG changeset patch > > # User Maxim Dounin > > # Date 1637847703 -10800 > > # Thu Nov 25 16:41:43 2021 +0300 > > # Node ID c960e182900a8d0b7f3041731ba416f2c7e69d14 > > # Parent 3443c02ca1d183fe52bf8af66627c94be2b2f785 > > HTTP/2: fixed "task already active" with sendfile in threads. > > > > With sendfile in threads, "task already active" alerts might appear in logs > > if a write event happens on the main HTTP/2 connection, triggering a sendfile > > in threads while another thread operation is already running. Observed > > with "aio threads; aio_write on; sendfile on;" and with thread event handlers > > modified to post a write event to the main HTTP/2 connection (though can > > happen without any modifications). > > > > Similarly, sendfile() with AIO preloading on FreeBSD can trigger duplicate > > aio operation, resulting in "second aio post" alerts. This is, however, > > harder to reproduce, especially on modern FreeBSD systems, since sendfile() > > usually does not return EBUSY. > > > > Fix is to avoid starting a sendfile operation if other thread operation > > is active by checking r->aio in the thread handler (and, similarly, in > > aio preload handler). The added check also makes duplicate calls protection > > redundant, so it is removed. > > > > [..] > > Overall, it looks good. Committed, thanks. 
-- Maxim Dounin http://mdounin.ru/ From arut at nginx.com Fri Nov 26 13:11:33 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 26 Nov 2021 16:11:33 +0300 Subject: [PATCH 3 of 3] QUIC: stream recv shutdown support In-Reply-To: References: Message-ID: <20211126131133.ew64tszf5vpjxii7@Romans-MacBook-Pro.local> On Thu, Nov 25, 2021 at 05:20:51PM +0300, Roman Arutyunyan wrote: > # HG changeset patch > # User Roman Arutyunyan > # Date 1637695967 -10800 > # Tue Nov 23 22:32:47 2021 +0300 > # Branch quic > # Node ID e1de02d829f7f85b1e2e6b289ec4c20318712321 > # Parent 3d2354bfa1a2a257b9f73772ad0836585be85a6c > QUIC: stream recv shutdown support. > > Recv shutdown sends STOP_SENDING to client. Both send and recv shutdown > functions are now called from stream cleanup handler. While here, setting > c->read->pending_eof is moved down to fix recv shutdown in the cleanup handler. This definitely needs some improvement. Now it's two patches. [..] -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1637931593 -10800 # Fri Nov 26 15:59:53 2021 +0300 # Branch quic # Node ID c2fa3e7689a4e286f45ccbac2288ade5966273b8 # Parent 3d2354bfa1a2a257b9f73772ad0836585be85a6c QUIC: do not shutdown write part of a client uni stream. 
diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -267,13 +267,20 @@ ngx_quic_shutdown_stream(ngx_connection_ return NGX_OK; } + qs = c->quic; + + if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0 + && (qs->id & NGX_QUIC_STREAM_UNIDIRECTIONAL)) + { + return NGX_OK; + } + wev = c->write; if (wev->error) { return NGX_OK; } - qs = c->quic; pc = qs->parent; qc = ngx_quic_get_connection(pc); -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1637932014 -10800 # Fri Nov 26 16:06:54 2021 +0300 # Branch quic # Node ID ed0cefd9fc434a7593f2f9e4b9a98ce65aaf05e9 # Parent c2fa3e7689a4e286f45ccbac2288ade5966273b8 QUIC: write and full stream shutdown support. Full stream shutdown is now called from stream cleanup handler instead of explicitly sending frames. The call is moved up not to be influenced by setting c->read->pending_eof, which was erroneously set too early. 
diff --git a/src/event/quic/ngx_event_quic_streams.c b/src/event/quic/ngx_event_quic_streams.c --- a/src/event/quic/ngx_event_quic_streams.c +++ b/src/event/quic/ngx_event_quic_streams.c @@ -13,6 +13,8 @@ #define NGX_QUIC_STREAM_GONE (void *) -1 +static ngx_int_t ngx_quic_shutdown_stream_send(ngx_connection_t *c); +static ngx_int_t ngx_quic_shutdown_stream_recv(ngx_connection_t *c); static ngx_quic_stream_t *ngx_quic_get_stream(ngx_connection_t *c, uint64_t id); static ngx_int_t ngx_quic_reject_stream(ngx_connection_t *c, uint64_t id); static void ngx_quic_init_stream_handler(ngx_event_t *ev); @@ -257,16 +259,31 @@ ngx_quic_reset_stream(ngx_connection_t * ngx_int_t ngx_quic_shutdown_stream(ngx_connection_t *c, int how) { + if (how == NGX_RW_SHUTDOWN || how == NGX_WRITE_SHUTDOWN) { + if (ngx_quic_shutdown_stream_send(c) != NGX_OK) { + return NGX_ERROR; + } + } + + if (how == NGX_RW_SHUTDOWN || how == NGX_READ_SHUTDOWN) { + if (ngx_quic_shutdown_stream_recv(c) != NGX_OK) { + return NGX_ERROR; + } + } + + return NGX_OK; +} + + +static ngx_int_t +ngx_quic_shutdown_stream_send(ngx_connection_t *c) +{ ngx_event_t *wev; ngx_connection_t *pc; ngx_quic_frame_t *frame; ngx_quic_stream_t *qs; ngx_quic_connection_t *qc; - if (how != NGX_WRITE_SHUTDOWN) { - return NGX_OK; - } - qs = c->quic; if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0 @@ -290,7 +307,7 @@ ngx_quic_shutdown_stream(ngx_connection_ } ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic stream id:0x%xL shutdown", qs->id); + "quic stream id:0x%xL send shutdown", qs->id); frame->level = ssl_encryption_application; frame->type = NGX_QUIC_FT_STREAM; @@ -311,6 +328,55 @@ ngx_quic_shutdown_stream(ngx_connection_ } +static ngx_int_t +ngx_quic_shutdown_stream_recv(ngx_connection_t *c) +{ + ngx_event_t *rev; + ngx_connection_t *pc; + ngx_quic_frame_t *frame; + ngx_quic_stream_t *qs; + ngx_quic_connection_t *qc; + + qs = c->quic; + + if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) + && (qs->id & 
NGX_QUIC_STREAM_UNIDIRECTIONAL)) + { + return NGX_OK; + } + + rev = c->read; + + if (rev->pending_eof || rev->error) { + return NGX_OK; + } + + pc = qs->parent; + qc = ngx_quic_get_connection(pc); + + if (qc->conf->stream_close_code == 0) { + return NGX_OK; + } + + frame = ngx_quic_alloc_frame(pc); + if (frame == NULL) { + return NGX_ERROR; + } + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "quic stream id:0x%xL recv shutdown", qs->id); + + frame->level = ssl_encryption_application; + frame->type = NGX_QUIC_FT_STOP_SENDING; + frame->u.stop_sending.id = qs->id; + frame->u.stop_sending.error_code = qc->conf->stream_close_code; + + ngx_quic_queue_frame(qc, frame); + + return NGX_OK; +} + + static ngx_quic_stream_t * ngx_quic_get_stream(ngx_connection_t *c, uint64_t id) { @@ -925,30 +991,12 @@ ngx_quic_stream_cleanup_handler(void *da goto done; } + (void) ngx_quic_shutdown_stream(c, NGX_RW_SHUTDOWN); + c->read->pending_eof = 1; (void) ngx_quic_update_flow(c, qs->recv_last); - if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0 - || (qs->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) == 0) - { - if (!c->read->pending_eof && !c->read->error - && qc->conf->stream_close_code) - { - frame = ngx_quic_alloc_frame(pc); - if (frame == NULL) { - goto done; - } - - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_STOP_SENDING; - frame->u.stop_sending.id = qs->id; - frame->u.stop_sending.error_code = qc->conf->stream_close_code; - - ngx_quic_queue_frame(qc, frame); - } - } - if ((qs->id & NGX_QUIC_STREAM_SERVER_INITIATED) == 0) { frame = ngx_quic_alloc_frame(pc); if (frame == NULL) { @@ -968,37 +1016,8 @@ ngx_quic_stream_cleanup_handler(void *da } ngx_quic_queue_frame(qc, frame); - - if (qs->id & NGX_QUIC_STREAM_UNIDIRECTIONAL) { - /* do not send fin for client unidirectional streams */ - goto done; - } } - if (c->write->error) { - goto done; - } - - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, - "quic stream id:0x%xL send fin", qs->id); - - frame = 
ngx_quic_alloc_frame(pc); - if (frame == NULL) { - goto done; - } - - frame->level = ssl_encryption_application; - frame->type = NGX_QUIC_FT_STREAM; - frame->u.stream.off = 1; - frame->u.stream.len = 1; - frame->u.stream.fin = 1; - - frame->u.stream.stream_id = qs->id; - frame->u.stream.offset = c->sent; - frame->u.stream.length = 0; - - ngx_quic_queue_frame(qc, frame); - done: (void) ngx_quic_output(pc); diff --git a/src/os/unix/ngx_socket.h b/src/os/unix/ngx_socket.h --- a/src/os/unix/ngx_socket.h +++ b/src/os/unix/ngx_socket.h @@ -13,6 +13,8 @@ #define NGX_WRITE_SHUTDOWN SHUT_WR +#define NGX_READ_SHUTDOWN SHUT_RD +#define NGX_RW_SHUTDOWN SHUT_RDWR typedef int ngx_socket_t; From xeioex at nginx.com Mon Nov 29 14:27:38 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 29 Nov 2021 14:27:38 +0000 Subject: [njs] Fixed building unit tests binaries on MacOS with custom LDFLAGS. Message-ID: details: https://hg.nginx.org/njs/rev/94e5a03702f9 branches: changeset: 1751:94e5a03702f9 user: Dmitry Volyntsev date: Mon Nov 29 14:05:33 2021 +0000 description: Fixed building unit tests binaries on MacOS with custom LDFLAGS. 
diffstat: auto/make | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (21 lines): diff -r 7f72930cf1ac -r 94e5a03702f9 auto/make --- a/auto/make Mon Nov 22 13:37:11 2021 +0000 +++ b/auto/make Mon Nov 29 14:05:33 2021 +0000 @@ -136,7 +136,7 @@ do $NJS_BUILD_DIR/$njs_bin: $njs_src \\ $NJS_BUILD_DIR/libnjs.a - \$(NJS_CC) -o $NJS_BUILD_DIR/$njs_bin \$(NJS_CFLAGS) \\ + \$(NJS_LINK) -o $NJS_BUILD_DIR/$njs_bin \$(NJS_CFLAGS) \\ \$(NJS_LIB_INCS) $njs_dep_flags \\ $njs_src $NJS_BUILD_DIR/libnjs.a \\ $njs_dep_post -lm @@ -184,7 +184,7 @@ do $NJS_BUILD_DIR/$njs_bin: $njs_src \\ $NJS_BUILD_DIR/libnjs.a \\ $NJS_BUILD_DIR/$njs_externals_obj - \$(NJS_CC) -o $NJS_BUILD_DIR/$njs_bin \$(NJS_CFLAGS) \\ + \$(NJS_LINK) -o $NJS_BUILD_DIR/$njs_bin \$(NJS_CFLAGS) \\ $NJS_LIB_AUX_CFLAGS \$(NJS_LIB_INCS) \\ $njs_dep_flags \\ $NJS_BUILD_DIR/$njs_externals_obj \\ From xeioex at nginx.com Mon Nov 29 15:30:11 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 29 Nov 2021 15:30:11 +0000 Subject: [njs] Allowing to build njs util without interactive shell support. Message-ID: details: https://hg.nginx.org/njs/rev/144e430d0ed5 branches: changeset: 1752:144e430d0ed5 user: Dmitry Volyntsev date: Mon Nov 29 14:41:09 2021 +0000 description: Allowing to build njs util without interactive shell support. diffstat: auto/make | 21 ++------------------- auto/readline | 6 ++---- auto/summary | 5 +---- src/njs_shell.c | 24 +++++++++++++++++++----- 4 files changed, 24 insertions(+), 32 deletions(-) diffs (156 lines): diff -r 94e5a03702f9 -r 144e430d0ed5 auto/make --- a/auto/make Mon Nov 29 14:05:33 2021 +0000 +++ b/auto/make Mon Nov 29 14:41:09 2021 +0000 @@ -26,7 +26,7 @@ NJS_TYPES_VER = \$(NJS_VER) NPM = npm -default: $NJS_DEFAULT_TARGET +default: njs NJS_LIB_INCS = -Isrc -I$NJS_BUILD_DIR @@ -69,8 +69,6 @@ done # njs cli. 
-if [ $NJS_HAVE_READLINE = YES ]; then - cat << END >> $NJS_MAKEFILE $NJS_BUILD_DIR/njs: \\ @@ -84,21 +82,6 @@ cat << END >> $NJS_MAKEFILE END -else - -cat << END >> $NJS_MAKEFILE - -$NJS_BUILD_DIR/njs: - @echo - @echo " error: to make njs CLI \"readline\" library is required." - @echo - @exit 1 - -END - -fi - - # njs fuzzer. cat << END >> $NJS_MAKEFILE @@ -209,7 +192,7 @@ cat << END >> $NJS_MAKEFILE @exit 1 all: $NJS_BUILD_DIR/njs_auto_config.h \\ - $NJS_DEFAULT_TARGET ts test lib_test benchmark + njs ts test lib_test benchmark njs: $NJS_BUILD_DIR/njs_auto_config.h $NJS_BUILD_DIR/njs njs_fuzzer: $NJS_BUILD_DIR/njs_auto_config.h \\ diff -r 94e5a03702f9 -r 144e430d0ed5 auto/readline --- a/auto/readline Mon Nov 29 14:05:33 2021 +0000 +++ b/auto/readline Mon Nov 29 14:41:09 2021 +0000 @@ -68,14 +68,12 @@ if [ $njs_found = no ]; then . auto/feature fi -NJS_DEFAULT_TARGET=libnjs - if [ $njs_found = yes ]; then NJS_HAVE_READLINE=YES + njs_define=NJS_HAVE_READLINE . auto/define NJS_READLINE_LIB=$njs_feature_libs - NJS_DEFAULT_TARGET="$NJS_DEFAULT_TARGET njs" else NJS_HAVE_READLINE=NO - echo " - building interactive shell is not possible" + echo " - njs CLI is built without interactive shell support" fi diff -r 94e5a03702f9 -r 144e430d0ed5 auto/summary --- a/auto/summary Mon Nov 29 14:05:33 2021 +0000 +++ b/auto/summary Mon Nov 29 14:41:09 2021 +0000 @@ -21,9 +21,6 @@ fi echo echo " njs build dir: $NJS_BUILD_DIR" - -if [ $NJS_HAVE_READLINE = YES ]; then - echo " njs CLI: $NJS_BUILD_DIR/njs" -fi +echo " njs CLI: $NJS_BUILD_DIR/njs" echo diff -r 94e5a03702f9 -r 144e430d0ed5 src/njs_shell.c --- a/src/njs_shell.c Mon Nov 29 14:05:33 2021 +0000 +++ b/src/njs_shell.c Mon Nov 29 14:41:09 2021 +0000 @@ -7,7 +7,7 @@ #include -#ifndef NJS_FUZZER_TARGET +#if (!defined NJS_FUZZER_TARGET && defined NJS_HAVE_READLINE) #include #if (NJS_HAVE_EDITLINE) @@ -101,10 +101,13 @@ static njs_int_t njs_process_script(njs_ static njs_int_t njs_options_parse(njs_opts_t *opts, int argc, char 
**argv); static void njs_options_free(njs_opts_t *opts); static njs_int_t njs_process_file(njs_opts_t *opts, njs_vm_opt_t *vm_options); + +#ifdef NJS_HAVE_READLINE static njs_int_t njs_interactive_shell(njs_opts_t *opts, njs_vm_opt_t *vm_options); static njs_int_t njs_editline_init(void); static char *njs_completion_generator(const char *text, int state); +#endif #endif @@ -282,10 +285,16 @@ main(int argc, char **argv) vm_options.ast = opts.ast; vm_options.unhandled_rejection = opts.unhandled_rejection; +#ifdef NJS_HAVE_READLINE + if (opts.interactive) { ret = njs_interactive_shell(&opts, &vm_options); - } else if (opts.command) { + } else + +#endif + + if (opts.command) { vm = njs_create_vm(&opts, &vm_options); if (vm != NULL) { command.start = (u_char *) opts.command; @@ -314,9 +323,14 @@ njs_options_parse(njs_opts_t *opts, int njs_uint_t n; static const char help[] = - "Interactive njs shell.\n" + "njs [options] [-c string | script.js | -] [script args]\n" "\n" - "njs [options] [-c string | script.js | -] [script args]" + "Interactive shell: " +#ifdef NJS_HAVE_READLINE + "enabled\n" +#else + "disabled\n" +#endif "\n" "Options:\n" " -a print AST.\n" @@ -914,7 +928,7 @@ njs_process_script(njs_opts_t *opts, njs } -#ifndef NJS_FUZZER_TARGET +#if (!defined NJS_FUZZER_TARGET && defined NJS_HAVE_READLINE) static njs_int_t njs_interactive_shell(njs_opts_t *opts, njs_vm_opt_t *vm_options) From xeioex at nginx.com Mon Nov 29 16:52:08 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 29 Nov 2021 16:52:08 +0000 Subject: [njs] Tests: refactored JavaScript tests. Message-ID: details: https://hg.nginx.org/njs/rev/3b035831f64a branches: changeset: 1753:3b035831f64a user: Dmitry Volyntsev date: Mon Nov 29 16:50:41 2021 +0000 description: Tests: refactored JavaScript tests. A generic runner test/run is introduced. It runs all available tests in test/ directory. JavaScript files are expected to be compliant with Test262. 
diffstat: auto/make | 6 +- test/finalize | 11 + test/fs/methods.js | 599 ------------------------- test/fs/methods.t.js | 610 ++++++++++++++++++++++++++ test/fs/promises_01.t.js | 42 + test/fs/promises_02.t.js | 78 +++ test/fs/promises_03.t.js | 104 ++++ test/fs/promises_04.t.js | 196 ++++++++ test/fs/promises_05.t.js | 141 ++++++ test/fs/promises_06.t.js | 105 ++++ test/fs/promises_07.t.js | 239 ++++++++++ test/fs/promises_08.t.js | 86 +++ test/fs/promises_09.t.js | 109 ++++ test/harness/assert.js | 118 +++++ test/harness/compareArray.js | 42 + test/harness/compatBuffer.js | 3 + test/harness/compatFs.js | 11 + test/harness/compatPrint.js | 3 + test/harness/compatWebcrypto.js | 8 + test/harness/doneprintHandle.js | 22 + test/harness/runTsuite.js | 54 ++ test/harness/sta.js | 27 + test/harness/webCryptoUtils.js | 13 + test/js/async_await_add.js | 7 - test/js/async_await_add.t.js | 15 + test/js/async_await_blank.js | 5 - test/js/async_await_blank.t.js | 13 + test/js/async_await_catch.js | 5 - test/js/async_await_catch.t.js | 13 + test/js/async_await_finally.js | 6 - test/js/async_await_finally.t.js | 15 + test/js/async_await_for.js | 23 - test/js/async_await_for.t.js | 31 + test/js/async_await_inline.js | 11 - test/js/async_await_inline.t.js | 18 + test/js/async_await_many_call.js | 30 - test/js/async_await_many_call.t.js | 35 + test/js/async_await_reject.js | 5 - test/js/async_await_reject.t.js | 15 + test/js/async_await_stages.js | 28 - test/js/async_await_stages.t.js | 36 + test/js/async_await_throw.js | 12 - test/js/async_await_throw.t.js | 22 + test/js/async_await_throw_async.js | 15 - test/js/async_await_throw_async.t.js | 25 + test/js/async_await_throw_catch.js | 12 - test/js/async_await_throw_catch.t.js | 20 + test/js/async_await_throw_catch_async.js | 15 - test/js/async_await_throw_catch_async.t.js | 25 + test/js/async_await_try_catch.js | 19 - test/js/async_await_try_catch.t.js | 30 + test/js/async_await_try_finally.js | 20 - 
test/js/async_await_try_finally.t.js | 32 + test/js/async_await_try_resolve.js | 15 - test/js/async_await_try_resolve.t.js | 23 + test/js/async_await_try_throw.js | 14 - test/js/async_await_try_throw.t.js | 27 + test/js/async_await_try_throw_catch.js | 17 - test/js/async_await_try_throw_catch.t.js | 27 + test/js/fs_promises_001.js | 55 -- test/js/fs_promises_002.js | 72 --- test/js/fs_promises_003.js | 108 ---- test/js/fs_promises_004.js | 200 -------- test/js/fs_promises_005.js | 143 ------ test/js/fs_promises_006.js | 104 ---- test/js/fs_promises_007.js | 241 ---------- test/js/fs_promises_008.js | 79 --- test/js/fs_promises_009.js | 106 ---- test/js/promise_all.js | 9 - test/js/promise_all.t.js | 16 + test/js/promise_allSettled.js | 20 - test/js/promise_allSettled.t.js | 26 + test/js/promise_allSettled_string.js | 10 - test/js/promise_allSettled_string.t.js | 16 + test/js/promise_all_throw.js | 9 - test/js/promise_all_throw.t.js | 15 + test/js/promise_any.js | 8 - test/js/promise_any.t.js | 14 + test/js/promise_any_all_rejected.js | 7 - test/js/promise_any_all_rejected.t.js | 13 + test/js/promise_catch_then_throw_catch.js | 5 - test/js/promise_catch_then_throw_catch.t.js | 14 + test/js/promise_catch_throw.js | 3 - test/js/promise_catch_throw.t.js | 10 + test/js/promise_finally.js | 7 - test/js/promise_finally.t.js | 14 + test/js/promise_finally_throw.js | 2 - test/js/promise_finally_throw.t.js | 9 + test/js/promise_finally_throw_catch.js | 3 - test/js/promise_finally_throw_catch.t.js | 9 + test/js/promise_race.js | 12 - test/js/promise_race.t.js | 18 + test/js/promise_race_throw.js | 12 - test/js/promise_race_throw.t.js | 18 + test/js/promise_reject_catch.js | 1 - test/js/promise_reject_catch.t.js | 8 + test/js/promise_reject_post_catch.js | 2 - test/js/promise_reject_post_catch.t.js | 9 + test/js/promise_rejection_tracker.js | 1 - test/js/promise_rejection_tracker.t.js | 9 + test/js/promise_s01.t.js | 22 + test/js/promise_s02.t.js | 23 + 
test/js/promise_s03.t.js | 21 + test/js/promise_s04.t.js | 12 + test/js/promise_s05.t.js | 18 + test/js/promise_s06.t.js | 10 + test/js/promise_s07.t.js | 15 + test/js/promise_s08.t.js | 22 + test/js/promise_s09.t.js | 15 + test/js/promise_s1.js | 15 - test/js/promise_s10.js | 11 - test/js/promise_s10.t.js | 20 + test/js/promise_s11.js | 13 - test/js/promise_s11.t.js | 22 + test/js/promise_s12.js | 10 - test/js/promise_s12.t.js | 19 + test/js/promise_s13.js | 21 - test/js/promise_s13.t.js | 23 + test/js/promise_s14.js | 9 - test/js/promise_s14.t.js | 21 + test/js/promise_s15.js | 10 - test/js/promise_s15.t.js | 16 + test/js/promise_s16.js | 10 - test/js/promise_s16.t.js | 16 + test/js/promise_s17.js | 10 - test/js/promise_s17.t.js | 18 + test/js/promise_s18.js | 23 - test/js/promise_s18.t.js | 32 + test/js/promise_s19.js | 33 - test/js/promise_s19.t.js | 34 + test/js/promise_s2.js | 14 - test/js/promise_s20.js | 23 - test/js/promise_s20.t.js | 25 + test/js/promise_s21.js | 30 - test/js/promise_s21.t.js | 32 + test/js/promise_s22.js | 32 - test/js/promise_s22.t.js | 32 + test/js/promise_s23.js | 28 - test/js/promise_s23.t.js | 25 + test/js/promise_s24.js | 13 - test/js/promise_s24.t.js | 18 + test/js/promise_s25.js | 29 - test/js/promise_s25.t.js | 24 + test/js/promise_s26.js | 144 ------ test/js/promise_s26.t.js | 211 ++++++++ test/js/promise_s3.js | 11 - test/js/promise_s4.js | 6 - test/js/promise_s5.js | 7 - test/js/promise_s6.js | 4 - test/js/promise_s7.js | 12 - test/js/promise_s8.js | 13 - test/js/promise_s9.js | 10 - test/js/promise_set_timeout.js | 17 - test/js/promise_set_timeout.t.js | 21 + test/js/promise_then_throw.js | 2 - test/js/promise_then_throw.t.js | 9 + test/js/promise_then_throw_catch.js | 3 - test/js/promise_then_throw_catch.t.js | 12 + test/js/promise_then_throw_finally_catch.js | 4 - test/js/promise_then_throw_finally_catch.t.js | 13 + test/js/promise_two_first_then_throw.js | 6 - test/js/promise_two_first_then_throw.t.js | 13 + 
test/js/promise_two_then_throw.js | 5 - test/js/promise_two_then_throw.t.js | 12 + test/njs_expect_test.exp | 402 ----------------- test/options | 61 ++ test/prepare | 36 + test/report | 18 + test/setup | 39 + test/test262 | 64 ++ test/webcrypto/aes.js | 125 ----- test/webcrypto/aes.t.js | 96 ++++ test/webcrypto/aes_decoding.js | 118 ----- test/webcrypto/aes_decoding.t.js | 81 +++ test/webcrypto/derive.js | 151 ------ test/webcrypto/derive.t.js | 100 ++++ test/webcrypto/digest.js | 90 --- test/webcrypto/digest.t.js | 60 ++ test/webcrypto/rsa.js | 108 ---- test/webcrypto/rsa.t.js | 68 ++ test/webcrypto/rsa_decoding.js | 83 --- test/webcrypto/rsa_decoding.t.js | 41 + test/webcrypto/sign.js | 290 ------------ test/webcrypto/sign.t.js | 232 +++++++++ test/webcrypto/verify.js | 215 --------- test/webcrypto/verify.t.js | 155 ++++++ 186 files changed, 4544 insertions(+), 4238 deletions(-) diffs (truncated from 9574 to 1000 lines): diff -r 144e430d0ed5 -r 3b035831f64a auto/make --- a/auto/make Mon Nov 29 14:41:09 2021 +0000 +++ b/auto/make Mon Nov 29 16:50:41 2021 +0000 @@ -209,12 +209,16 @@ lib_test: $NJS_BUILD_DIR/njs_auto_config $NJS_BUILD_DIR/lvlhsh_unit_test $NJS_BUILD_DIR/unicode_unit_test +test262: njs + + test/test262 + unit_test: $NJS_BUILD_DIR/njs_auto_config.h \\ $NJS_BUILD_DIR/njs_unit_test $NJS_BUILD_DIR/njs_unit_test -test: expect_test unit_test +test: expect_test unit_test test262 benchmark: $NJS_BUILD_DIR/njs_auto_config.h \\ $NJS_BUILD_DIR/njs_benchmark diff -r 144e430d0ed5 -r 3b035831f64a test/finalize --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/finalize Mon Nov 29 16:50:41 2021 +0000 @@ -0,0 +1,11 @@ +#!/bin/sh + +# Copyright (C) Dmitry Volyntsev +# Copyright (C) NGINX, Inc. 
+ +if [ -z "$NJS_TEST_VERBOSE" ]; then + verbose "Removing dir: $NJS_TEST_DIR\n" + verbose "\n" + + rm -fr $NJS_TEST_DIR +fi diff -r 144e430d0ed5 -r 3b035831f64a test/fs/methods.js --- a/test/fs/methods.js Mon Nov 29 14:41:09 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,599 +0,0 @@ -var fs = require('fs'); - -async function run(tlist) { - function validate(t, r, i) { - if (r.status == "fulfilled") { - return r.value === "SUCCESS"; - } - - if (r.status == "rejected" && t[i].exception) { - if (process.argv[2] === '--match-exception-text') { - /* is not compatible with node.js format */ - return r.reason.toString().startsWith(t[i].exception); - } - - return true; - } - - if (r.status == "rejected" && t[i].optional) { - return r.reason.toString().startsWith("Error: No such file or directory"); - } - - return false; - } - - for (let k = 0; k < tlist.length; k++) { - let ts = tlist[k]; - let results = await Promise.allSettled(ts.tests.map(t => ts.T(ts.prepare_args(t, ts.opts)))); - let r = results.map((r, i) => validate(ts.tests, r, i)); - - console.log(`${ts.name} ${r.every(v=>v == true) ? 
"SUCCESS" : "FAILED"}`); - - r.forEach((v, i) => { - if (!v) { - console.log(`FAILED ${i}: ${JSON.stringify(ts.tests[i])}\n with reason: ${results[i].reason}`); - } - }) - } -} - -function p(args, default_opts) { - let params = Object.assign({}, default_opts, args); - - let fname = params.args[0]; - - if (fname[0] == '@') { - let gen = `build/test/fs_test_${Math.round(Math.random() * 1000000)}`; - params.args = params.args.map(v => v); - params.args[0] = gen + fname.slice(1); - } - - return params; -} - -function promisify(f) { - return function (...args) { - return new Promise((resolve, reject) => { - function callback(err, result) { - if (err) { - return reject(err); - } else { - resolve(result); - } - } - - args.push(callback); - f.apply(this, args); - }); - }; -} - -async function method(name, params) { - let data = null; - - switch (params.type) { - case "sync": - try { - data = fs[name + "Sync"].apply(null, params.args); - - } catch (e) { - if (!params.stringify) { - throw e; - } - - data = Buffer.from(JSON.stringify(e)); - } - - break; - - case "callback": - data = await promisify(fs[name]).apply(null, params.args) - .catch(e => { - if (!params.stringify) { - throw e; - } - - return Buffer.from(JSON.stringify(e)); - }); - - break; - - case "promise": - data = await fs.promises[name].apply(null, params.args) - .catch(e => { - if (!params.stringify) { - throw e; - } - - return Buffer.from(JSON.stringify(e)); - }); - - break; - } - - return data; -} - -async function read_test(params) { - let data = await method("readFile", params); - - if (params.slice) { - data = data.slice.apply(data, params.slice); - } - - let success = true; - if (data instanceof Buffer) { - if (data.compare(params.expected) != 0) { - success = false; - } - - } else if (data != params.expected) { - success = false; - } - - if (!success) { - throw Error(`readFile unexpected data`); - } - - return 'SUCCESS'; -} - -let read_tests = [ - { args: ["test/fs/utf8"], expected: Buffer.from("??Z?") 
}, - { args: [Buffer.from("@test/fs/utf8").slice(1)], expected: Buffer.from("??Z?") }, - { args: ["test/fs/utf8", "utf8"], expected: "??Z?" }, - { args: ["test/fs/utf8", {encoding: "utf8", flags:"r+"}], expected: "??Z?" }, - { args: ["test/fs/nonexistent"], stringify: true, - expected: Buffer.from('{"errno":2,"code":"ENOENT","path":"test/fs/nonexistent","syscall":"open"}'), - exception: "Error: No such file or directory" }, - { args: ["test/fs/non_utf8", "utf8"], expected: "??" }, - { args: ["test/fs/non_utf8", {encoding: "hex"}], expected: "8080" }, - { args: ["test/fs/non_utf8", "base64"], expected: "gIA=" }, - { args: ["test/fs/ascii", "utf8"], expected: "x".repeat(600) }, - { args: ["test/fs/ascii", { encoding:"utf8", flags: "r+"}], expected: "x".repeat(600) }, - - { args: [Buffer.from([0x80, 0x80])], exception: "Error: No such file or directory" }, - { args: ['x'.repeat(8192)], exception: "TypeError: \"path\" is too long" }, - - { args: ["/proc/version"], slice:[0,5], expected: Buffer.from("Linux"), optional: true }, - { args: ["/proc/cpuinfo"], slice:[0,9], expected: Buffer.from("processor"), optional: true }, -]; - -let readFile_tsuite = { - name: "fs readFile", - T: read_test, - prepare_args: p, - opts: { type: "callback" }, - tests: read_tests, -}; - -let readFileSync_tsuite = { - name: "fs readFileSync", - T: read_test, - prepare_args: p, - opts: { type: "sync" }, - tests: read_tests, -}; - -let readFileP_tsuite = { - name: "fsp readFile", - T: read_test, - prepare_args: p, - opts: { type: "promise" }, - tests: read_tests, -}; - -async function write_test(params) { - let fname = params.args[0]; - - try { fs.unlinkSync(fname); } catch (e) {} - - let data = await method("writeFile", params).catch(e => ({error:e})); - - if (!data) { - data = fs.readFileSync(fname); - } - - try { fs.unlinkSync(fname); } catch (e) {} - - if (params.check) { - if (!params.check(data, params)) { - throw Error(`writeFile failed check`); - } - - } else if (params.exception) { - 
throw data.error; - - } else { - if (data.compare(params.expected) != 0) { - throw Error(`writeFile unexpected data`); - } - } - - return 'SUCCESS'; -} - -let write_tests = [ - { args: ["@", Buffer.from(Buffer.alloc(4).fill(65).buffer, 1)], - expected: Buffer.from("AAA") }, - { args: ["@", Buffer.from("XYZ"), "utf8"], expected: Buffer.from("XYZ") }, - { args: ["@", Buffer.from("XYZ"), {encoding: "utf8", mode: 0o666}], - expected: Buffer.from("XYZ") }, - { args: ["@", new DataView(Buffer.alloc(3).fill(66).buffer)], - expected: Buffer.from("BBB") }, - { args: ["@", new Uint8Array(Buffer.from("ABCD"))], - expected: Buffer.from("ABCD")}, - { args: ["@", "XYZ"], expected: Buffer.from("XYZ")}, - { args: ["@", "78797a", "hex"], expected: Buffer.from("xyz") }, - { args: ["@", "eHl6", "base64"], expected: Buffer.from("xyz") }, - { args: ["@", "eHl6", {encoding: "base64url"}], expected: Buffer.from("xyz"), - optional: true }, - { args: ["@", Symbol("XYZ")], exception: "TypeError: Cannot convert a Symbol value to a string"}, - { args: ["/invalid_path", "XYZ"], - check: (err, params) => { - let e = err.error; - - if (e.syscall != 'open') { - throw Error(`${e.syscall} unexpected syscall`); - } - - if (e.code != "EACCES" && e.code != "EROFS") { - throw Error(`${e.code} unexpected code`); - } - - return true; - } }, -]; - -let writeFile_tsuite = { - name: "fs writeFile", - T: write_test, - prepare_args: p, - opts: { type: "callback" }, - tests: write_tests, -}; - -let writeFileSync_tsuite = { - name: "fs writeFileSync", - T: write_test, - prepare_args: p, - opts: { type: "sync" }, - tests: write_tests, -}; - -let writeFileP_tsuite = { - name: "fsp writeFile", - T: write_test, - prepare_args: p, - opts: { type: "promise" }, - tests: write_tests, -}; - -async function append_test(params) { - let fname = params.args[0]; - - try { fs.unlinkSync(fname); } catch (e) {} - - let data = await method("appendFile", params).catch(e => ({error:e})); - data = await method("appendFile", 
params).catch(e => ({error:e})); - - if (!data) { - data = fs.readFileSync(fname); - } - - try { fs.unlinkSync(fname); } catch (e) {} - - if (params.check) { - if (!params.check(data, params)) { - throw Error(`appendFile failed check`); - } - - } else if (params.exception) { - throw data.error; - - } else { - if (data.compare(params.expected) != 0) { - throw Error(`appendFile unexpected data`); - } - } - - return 'SUCCESS'; -} - -let append_tests = [ - { args: ["@", Buffer.from(Buffer.alloc(4).fill(65).buffer, 1)], - expected: Buffer.from("AAAAAA") }, - { args: ["@", Buffer.from("XYZ"), "utf8"], expected: Buffer.from("XYZXYZ") }, - { args: ["@", Buffer.from("XYZ"), {encoding: "utf8", mode: 0o666}], - expected: Buffer.from("XYZXYZ") }, - { args: ["@", new DataView(Buffer.alloc(3).fill(66).buffer)], - expected: Buffer.from("BBBBBB") }, - { args: ["@", new Uint8Array(Buffer.from("ABCD"))], - expected: Buffer.from("ABCDABCD")}, - { args: ["@", "XYZ"], expected: Buffer.from("XYZXYZ")}, - { args: ["@", "78797a", "hex"], expected: Buffer.from("xyzxyz") }, - { args: ["@", "eHl6", "base64"], expected: Buffer.from("xyzxyz") }, - { args: ["@", "eHl6", {encoding: "base64url"}], expected: Buffer.from("xyzxyz"), - optional: true }, - { args: ["@", Symbol("XYZ")], exception: "TypeError: Cannot convert a Symbol value to a string"}, - { args: ["/invalid_path", "XYZ"], - check: (err, params) => { - let e = err.error; - - if (e.syscall != 'open') { - throw Error(`${e.syscall} unexpected syscall`); - } - - if (e.code != "EACCES" && e.code != "EROFS") { - throw Error(`${e.code} unexpected code`); - } - - return true; - } }, -]; - -let appendFile_tsuite = { - name: "fs appendFile", - T: append_test, - prepare_args: p, - opts: { type: "callback" }, - tests: append_tests, -}; - -let appendFileSync_tsuite = { - name: "fs appendFileSync", - T: append_test, - prepare_args: p, - opts: { type: "sync" }, - tests: append_tests, -}; - -let appendFileP_tsuite = { - name: "fsp appendFile", - T: 
append_test, - prepare_args: p, - opts: { type: "promise" }, - tests: append_tests, -}; - -async function realpath_test(params) { - let data = await method("realpath", params); - - if (!params.check(data)) { - throw Error(`realpath failed check`); - } - - return 'SUCCESS'; -} - -let realpath_tests = [ - { args: ["./build/test/.."], - check: (data) => data.endsWith("build") }, - { args: ["./build/test/", {encoding:'buffer'}], - check: (data) => data instanceof Buffer }, -]; - -let realpath_tsuite = { - name: "fs realpath", - T: realpath_test, - prepare_args: p, - opts: { type: "callback" }, - tests: realpath_tests, -}; - -let realpathSync_tsuite = { - name: "fs realpathSync", - T: realpath_test, - prepare_args: p, - opts: { type: "sync" }, - tests: realpath_tests, -}; - -let realpathP_tsuite = { - name: "fsp realpath", - T: realpath_test, - prepare_args: p, - opts: { type: "promise" }, - tests: realpath_tests, -}; - -async function stat_test(params) { - if (params.init) { - params.init(params); - } - - let stat = await method(params.method, params).catch(e => ({error:e})); - - if (params.check && !params.check(stat, params)) { - throw Error(`${params.method} failed check`); - } - - return 'SUCCESS'; -} - -function contains(arr, elts) { - return elts.every(el => { - let r = arr.some(v => el == v); - - if (!r) { - throw Error(`${el} is not found`); - } - - return r; - }); -} - -let stat_tests = [ - { args: ["/invalid_path"], - check: (err, params) => { - let e = err.error; - - if (e.syscall != params.method) { - throw Error(`${e.syscall} unexpected syscall`); - } - - if (e.code != "ENOENT") { - throw Error(`${e.code} unexpected code`); - } - - return true; - } }, - - { args: ["@_link"], - init: (params) => { - let lname = params.args[0]; - let fname = lname.slice(0, -5); - - /* making symbolic link. 
*/ - - try { fs.unlinkSync(fname); fs.unlinkSync(lname); } catch (e) {} - - fs.writeFileSync(fname, fname); - - fname = fs.realpathSync(fname); - fs.symlinkSync(fname, lname); - }, - - check: (st, params) => { - switch (params.method) { - case "stat": - if (!st.isFile()) { - throw Error(`${params.args[0]} is not a file`); - } - - break; - - case "lstat": - if (!st.isSymbolicLink()) { - throw Error(`${params.args[0]} is not a link`); - } - - break; - } - - return true; - } }, - - { args: ["./build/"], - check: (st) => contains(Object.keys(st), - [ "atime", "atimeMs", "birthtime", "birthtimeMs", - "blksize", "blocks", "ctime", "ctimeMs", "dev", - "gid", "ino", "mode", "mtime", "mtimeMs","nlink", - "rdev", "size", "uid" ]) }, - - { args: ["./build/"], - check: (st) => Object.keys(st).every(p => { - let v = st[p]; - if (p == 'atime' || p == 'ctime' || p == 'mtime' || p == 'birthtime') { - if (!(v instanceof Date)) { - throw Error(`${p} is not an instance of Date`); - } - - return true; - } - - if ((typeof v) != 'number') { - throw Error(`${p} is not an instance of Number`); - } - - return true; - }) }, - - { args: ["./build/"], - check: (st) => ['atime', 'birthtime', 'ctime', 'mtime'].every(p => { - let date = st[p].valueOf(); - let num = st[p + 'Ms']; - - if (Math.abs(date - num) > 1) { - throw Error(`${p}:${date} != ${p+'Ms'}:${num}`); - } - - return true; - }) }, - - { args: ["./build/"], - check: (st) => ['isBlockDevice', - 'isCharacterDevice', - 'isDirectory', - 'isFIFO', - 'isFile', - 'isSocket', - 'isSymbolicLink'].every(m => { - - let r = st[m](); - if (!(r == (m == 'isDirectory'))) { - throw Error(`${m} is ${r}`); - } - - return true; - }) }, -]; - -let stat_tsuite = { - name: "fs stat", - T: stat_test, - prepare_args: p, - opts: { type: "callback", method: "stat" }, - tests: stat_tests, -}; - -let statSync_tsuite = { - name: "fs statSync", - T: stat_test, - prepare_args: p, - opts: { type: "sync", method: "stat" }, - tests: stat_tests, -}; - -let statP_tsuite 
= { - name: "fsp stat", - T: stat_test, - prepare_args: p, - opts: { type: "promise", method: "stat" }, - tests: stat_tests, -}; - -let lstat_tsuite = { - name: "fs lstat", - T: stat_test, - prepare_args: p, - opts: { type: "callback", method: "lstat" }, - tests: stat_tests, -}; - -let lstatSync_tsuite = { - name: "fs lstatSync", - T: stat_test, - prepare_args: p, - opts: { type: "sync", method: "lstat" }, - tests: stat_tests, -}; - -let lstatP_tsuite = { - name: "fsp lstat", - T: stat_test, - prepare_args: p, - opts: { type: "promise", method: "lstat" }, - tests: stat_tests, -}; - -run([ - readFile_tsuite, - readFileSync_tsuite, - readFileP_tsuite, - writeFile_tsuite, - writeFileSync_tsuite, - writeFileP_tsuite, - appendFile_tsuite, - appendFileSync_tsuite, - appendFileP_tsuite, - realpath_tsuite, - realpathSync_tsuite, - realpathP_tsuite, - stat_tsuite, - statSync_tsuite, - statP_tsuite, - lstat_tsuite, - lstatSync_tsuite, - lstatP_tsuite, -]); diff -r 144e430d0ed5 -r 3b035831f64a test/fs/methods.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/fs/methods.t.js Mon Nov 29 16:50:41 2021 +0000 @@ -0,0 +1,610 @@ +/*--- +includes: [compatFs.js, compatBuffer.js, runTsuite.js] +flags: [async] +---*/ + +function p(args, default_opts) { + let params = Object.assign({}, default_opts, args); + + let fname = params.args[0]; + + if (fname[0] == '@') { + let gen = `build/test/fs_test_${Math.round(Math.random() * 1000000)}`; + params.args = params.args.map(v => v); + params.args[0] = gen + fname.slice(1); + } + + return params; +} + +function promisify(f) { + return function (...args) { + return new Promise((resolve, reject) => { + function callback(err, result) { + if (err) { + return reject(err); + } else { + resolve(result); + } + } + + args.push(callback); + f.apply(this, args); + }); + }; +} + +async function method(name, params) { + let data = null; + + switch (params.type) { + case "sync": + data = fs[name + "Sync"].apply(null, params.args); + break; + + case 
"callback": + data = await promisify(fs[name]).apply(null, params.args); + break; + + case "promise": + data = await fs.promises[name].apply(null, params.args); + break; + } + + return data; +} + +async function read_test(params) { + let data = await method("readFile", params).catch(e => ({error:e})); + + if (params.slice && !data.error) { + data = data.slice.apply(data, params.slice); + } + + if (params.check) { + if (!params.check(data, params)) { + throw Error(`readFile failed check`); + } + + } else if (params.exception) { + throw data.error; + + } else { + let success = true; + if (data instanceof Buffer) { + if (data.compare(params.expected) != 0) { + success = false; + } + + } else if (data != params.expected) { + success = false; + } + + if (!success) { + throw Error(`readFile unexpected data`); + } + } + + return 'SUCCESS'; +} + +let read_tests = () => [ + { args: ["test/fs/utf8"], expected: Buffer.from("??Z?") }, + { args: [Buffer.from("@test/fs/utf8").slice(1)], expected: Buffer.from("??Z?") }, + { args: ["test/fs/utf8", "utf8"], expected: "??Z?" }, + { args: ["test/fs/utf8", {encoding: "utf8", flags:"r+"}], expected: "??Z?" }, + { args: ["test/fs/nonexistent"], + check: (err, params) => { + let e = err.error; + + if (e.syscall != 'open') { + throw Error(`${e.syscall} unexpected syscall`); + } + + if (e.code != "ENOENT") { + throw Error(`${e.code} unexpected code`); + } + + if (e.path != "test/fs/nonexistent") { + throw Error(`${e.path} unexpected path`); + } + + return true; + } }, + + { args: ["test/fs/non_utf8", "utf8"], expected: "??" 
}, + { args: ["test/fs/non_utf8", {encoding: "hex"}], expected: "8080" }, + { args: ["test/fs/non_utf8", "base64"], expected: "gIA=" }, + { args: ["test/fs/ascii", "utf8"], expected: "x".repeat(600) }, + { args: ["test/fs/ascii", { encoding:"utf8", flags: "r+"}], expected: "x".repeat(600) }, + + { args: [Buffer.from([0x80, 0x80])], exception: "Error: No such file or directory" }, + { args: ['x'.repeat(8192)], exception: "TypeError: \"path\" is too long" }, + + { args: ["/proc/version"], slice:[0,5], expected: Buffer.from("Linux"), + check: (data, params) => { + + if (data.error) { + let e = data.error; + if (e.syscall != 'open') { + throw Error(`${e.syscall} unexpected syscall`); + } + + return true; + } + + return data.compare(params.expected) == 0; + } }, + { args: ["/proc/cpuinfo"], slice:[0,9], expected: Buffer.from("processor"), + check: (data, params) => { + + if (data.error) { + let e = data.error; + if (e.syscall != 'open') { + throw Error(`${e.syscall} unexpected syscall`); + } + + return true; + } + + return data.compare(params.expected) == 0; + } }, +]; + +let readFile_tsuite = { + name: "fs readFile", + skip: () => (!has_fs() || !has_buffer()), + T: read_test, + prepare_args: p, + opts: { type: "callback" }, + get tests() { return read_tests() }, +}; + +let readFileSync_tsuite = { + name: "fs readFileSync", + skip: () => (!has_fs() || !has_buffer()), + T: read_test, + prepare_args: p, + opts: { type: "sync" }, + get tests() { return read_tests() }, +}; + +let readFileP_tsuite = { + name: "fsp readFile", + skip: () => (!has_fs() || !has_buffer()), + T: read_test, + prepare_args: p, + opts: { type: "promise" }, + get tests() { return read_tests() }, +}; + +async function write_test(params) { + let fname = params.args[0]; + + try { fs.unlinkSync(fname); } catch (e) {} + + let data = await method("writeFile", params).catch(e => ({error:e})); + + if (!data) { + data = fs.readFileSync(fname); + } + + try { fs.unlinkSync(fname); } catch (e) {} + + if 
(params.check) { + if (!params.check(data, params)) { + throw Error(`writeFile failed check`); + } + + } else if (params.exception) { + throw data.error; + + } else { + if (data.compare(params.expected) != 0) { + throw Error(`writeFile unexpected data`); + } + } + + return 'SUCCESS'; +} + +let write_tests = () => [ + { args: ["@", Buffer.from(Buffer.alloc(4).fill(65).buffer, 1)], + expected: Buffer.from("AAA") }, + { args: ["@", Buffer.from("XYZ"), "utf8"], expected: Buffer.from("XYZ") }, + { args: ["@", Buffer.from("XYZ"), {encoding: "utf8", mode: 0o666}], + expected: Buffer.from("XYZ") }, + { args: ["@", new DataView(Buffer.alloc(3).fill(66).buffer)], + expected: Buffer.from("BBB") }, + { args: ["@", new Uint8Array(Buffer.from("ABCD"))], + expected: Buffer.from("ABCD")}, + { args: ["@", "XYZ"], expected: Buffer.from("XYZ")}, + { args: ["@", "78797a", "hex"], expected: Buffer.from("xyz") }, + { args: ["@", "eHl6", "base64"], expected: Buffer.from("xyz") }, + { args: ["@", "eHl6", {encoding: "base64url"}], expected: Buffer.from("xyz"), + optional: true }, + { args: ["@", Symbol("XYZ")], exception: "TypeError: Cannot convert a Symbol value to a string"}, + { args: ["/invalid_path", "XYZ"], + check: (err, params) => { + let e = err.error; + + if (e.syscall != 'open') { + throw Error(`${e.syscall} unexpected syscall`); + } + + if (e.code != "EACCES" && e.code != "EROFS") { + throw Error(`${e.code} unexpected code`); + } + + return true; + } }, +]; + +let writeFile_tsuite = { + name: "fs writeFile", + skip: () => (!has_fs() || !has_buffer()), + T: write_test, + prepare_args: p, + opts: { type: "callback" }, + get tests() { return write_tests() }, +}; + +let writeFileSync_tsuite = { + name: "fs writeFileSync", + skip: () => (!has_fs() || !has_buffer()), + T: write_test, + prepare_args: p, + opts: { type: "sync" }, + get tests() { return write_tests() }, +}; + +let writeFileP_tsuite = { + name: "fsp writeFile", + skip: () => (!has_fs() || !has_buffer()), + T: write_test, 
+ prepare_args: p, + opts: { type: "promise" }, + get tests() { return write_tests() }, +}; + +async function append_test(params) { + let fname = params.args[0]; + + try { fs.unlinkSync(fname); } catch (e) {} + + let data = await method("appendFile", params).catch(e => ({error:e})); + data = await method("appendFile", params).catch(e => ({error:e})); + + if (!data) { + data = fs.readFileSync(fname); + } + + try { fs.unlinkSync(fname); } catch (e) {} + + if (params.check) { + if (!params.check(data, params)) { + throw Error(`appendFile failed check`); + } + + } else if (params.exception) { + throw data.error; + + } else { + if (data.compare(params.expected) != 0) { + throw Error(`appendFile unexpected data`); + } + } + + return 'SUCCESS'; +} + +let append_tests = () => [ + { args: ["@", Buffer.from(Buffer.alloc(4).fill(65).buffer, 1)], + expected: Buffer.from("AAAAAA") }, + { args: ["@", Buffer.from("XYZ"), "utf8"], expected: Buffer.from("XYZXYZ") }, + { args: ["@", Buffer.from("XYZ"), {encoding: "utf8", mode: 0o666}], + expected: Buffer.from("XYZXYZ") }, + { args: ["@", new DataView(Buffer.alloc(3).fill(66).buffer)], + expected: Buffer.from("BBBBBB") }, + { args: ["@", new Uint8Array(Buffer.from("ABCD"))], + expected: Buffer.from("ABCDABCD")}, + { args: ["@", "XYZ"], expected: Buffer.from("XYZXYZ")}, + { args: ["@", "78797a", "hex"], expected: Buffer.from("xyzxyz") }, + { args: ["@", "eHl6", "base64"], expected: Buffer.from("xyzxyz") }, + { args: ["@", "eHl6", {encoding: "base64url"}], expected: Buffer.from("xyzxyz"), + optional: true }, + { args: ["@", Symbol("XYZ")], exception: "TypeError: Cannot convert a Symbol value to a string"}, + { args: ["/invalid_path", "XYZ"], + check: (err, params) => { + let e = err.error; + + if (e.syscall != 'open') { + throw Error(`${e.syscall} unexpected syscall`); + } + + if (e.code != "EACCES" && e.code != "EROFS") { + throw Error(`${e.code} unexpected code`); + } + + return true; + } }, +]; + +let appendFile_tsuite = { + name: 
"fs appendFile", + skip: () => (!has_fs() || !has_buffer()), + T: append_test, + prepare_args: p, + opts: { type: "callback" }, + get tests() { return append_tests() }, +}; + +let appendFileSync_tsuite = { + name: "fs appendFileSync", + skip: () => (!has_fs() || !has_buffer()), + T: append_test, + prepare_args: p, + opts: { type: "sync" }, + get tests() { return append_tests() }, +}; + +let appendFileP_tsuite = { + name: "fsp appendFile", + skip: () => (!has_fs() || !has_buffer()), + T: append_test, + prepare_args: p, + opts: { type: "promise" }, + get tests() { return append_tests() }, From pluknet at nginx.com Tue Nov 30 12:05:03 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 30 Nov 2021 15:05:03 +0300 Subject: [PATCH 2 of 4] Simplified sendfile(SF_NODISKIO) usage In-Reply-To: <4a954e89b1ae8539bbe0.1636604470@vm-bsd.mdounin.ru> References: <4a954e89b1ae8539bbe0.1636604470@vm-bsd.mdounin.ru> Message-ID: <20211130120503.552wawqq52udi3ag@MacBook-Air-Sergey.local> On Thu, Nov 11, 2021 at 07:21:10AM +0300, Maxim Dounin wrote: > # HG changeset patch > # User Maxim Dounin > # Date 1636603886 -10800 > # Thu Nov 11 07:11:26 2021 +0300 > # Node ID 4a954e89b1ae8539bbe08c5afc1d5c9828d82d6f > # Parent 0fb75ef9dbca698e5e855145cf6a12180a36d400 > Simplified sendfile(SF_NODISKIO) usage. > > Starting with FreeBSD 11, there is no need to use AIO operations to preload > data into cache for sendfile(SF_NODISKIO) to work. Instead, sendfile() > handles non-blocking loading data from disk by itself. It still can, however, > return EBUSY if a page is already being loaded (for example, by a different > process). If this happens, we now post an event for the next event loop > iteration, so sendfile() is retried "after a short period", as manpage > recommends. 
> > The limit of the number of EBUSY tolerated without any progress is preserved, > but now it does not result in an alert, since on an idle system event loop > iteration might be very short and EBUSY can happen many times in a row. > Instead, SF_NODISKIO is simply disabled for one call once the limit is > reached. > > With this change, sendfile(SF_NODISKIO) is now used automatically as long as > sendfile() is enabled, and no longer requires "aio on;". > > diff --git a/auto/os/freebsd b/auto/os/freebsd > --- a/auto/os/freebsd > +++ b/auto/os/freebsd > @@ -44,12 +44,10 @@ if [ $osreldate -gt 300007 ]; then > CORE_SRCS="$CORE_SRCS $FREEBSD_SENDFILE_SRCS" > fi > > -if [ $NGX_FILE_AIO = YES ]; then > - if [ $osreldate -gt 502103 ]; then > - echo " + sendfile()'s SF_NODISKIO found" > +if [ $osreldate -gt 1100000 ]; then > + echo " + sendfile()'s SF_NODISKIO found" > > - have=NGX_HAVE_AIO_SENDFILE . auto/have > - fi > + have=NGX_HAVE_SENDFILE_NODISKIO . auto/have > fi > > # POSIX semaphores We could check the exact __FreeBSD_version number 1100093 that was at the time the new sendfile() appeared, which is more accurate. https://cgit.freebsd.org/src/commit/?id=2bab0c553588 https://cgit.freebsd.org/src/tree/sys/sys/param.h?id=2bab0c553588#n48 Unfortunately, it was not bumped (same as with SF_NODISKIO in 5.2.1). 
> diff --git a/src/core/ngx_buf.h b/src/core/ngx_buf.h > --- a/src/core/ngx_buf.h > +++ b/src/core/ngx_buf.h > @@ -90,9 +90,6 @@ struct ngx_output_chain_ctx_s { > > #if (NGX_HAVE_FILE_AIO || NGX_COMPAT) > ngx_output_chain_aio_pt aio_handler; > -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) > - ssize_t (*aio_preload)(ngx_buf_t *file); > -#endif > #endif > > #if (NGX_THREADS || NGX_COMPAT) > diff --git a/src/core/ngx_connection.h b/src/core/ngx_connection.h > --- a/src/core/ngx_connection.h > +++ b/src/core/ngx_connection.h > @@ -185,7 +185,7 @@ struct ngx_connection_s { > > unsigned need_last_buf:1; > > -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) > +#if (NGX_HAVE_SENDFILE_NODISKIO || NGX_COMPAT) > unsigned busy_count:2; > #endif > > diff --git a/src/core/ngx_module.h b/src/core/ngx_module.h > --- a/src/core/ngx_module.h > +++ b/src/core/ngx_module.h > @@ -41,7 +41,7 @@ > #define NGX_MODULE_SIGNATURE_3 "0" > #endif > > -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) > +#if (NGX_HAVE_SENDFILE_NODISKIO || NGX_COMPAT) > #define NGX_MODULE_SIGNATURE_4 "1" > #else > #define NGX_MODULE_SIGNATURE_4 "0" > diff --git a/src/core/ngx_output_chain.c b/src/core/ngx_output_chain.c > --- a/src/core/ngx_output_chain.c > +++ b/src/core/ngx_output_chain.c > @@ -29,10 +29,6 @@ > > static ngx_inline ngx_int_t > ngx_output_chain_as_is(ngx_output_chain_ctx_t *ctx, ngx_buf_t *buf); > -#if (NGX_HAVE_AIO_SENDFILE) > -static ngx_int_t ngx_output_chain_aio_setup(ngx_output_chain_ctx_t *ctx, > - ngx_file_t *file); > -#endif > static ngx_int_t ngx_output_chain_add_copy(ngx_pool_t *pool, > ngx_chain_t **chain, ngx_chain_t *in); > static ngx_int_t ngx_output_chain_align_file_buf(ngx_output_chain_ctx_t *ctx, > @@ -283,12 +279,6 @@ ngx_output_chain_as_is(ngx_output_chain_ > buf->in_file = 0; > } > > -#if (NGX_HAVE_AIO_SENDFILE) > - if (ctx->aio_preload && buf->in_file) { > - (void) ngx_output_chain_aio_setup(ctx, buf->file); > - } > -#endif > - > if (ctx->need_in_memory && !ngx_buf_in_memory(buf)) { > return 
0; > } > @@ -301,28 +291,6 @@ ngx_output_chain_as_is(ngx_output_chain_ > } > > > -#if (NGX_HAVE_AIO_SENDFILE) > - > -static ngx_int_t > -ngx_output_chain_aio_setup(ngx_output_chain_ctx_t *ctx, ngx_file_t *file) > -{ > - ngx_event_aio_t *aio; > - > - if (file->aio == NULL && ngx_file_aio_init(file, ctx->pool) != NGX_OK) { > - return NGX_ERROR; > - } > - > - aio = file->aio; > - > - aio->data = ctx->filter_ctx; > - aio->preload_handler = ctx->aio_preload; > - > - return NGX_OK; > -} > - > -#endif > - > - > static ngx_int_t > ngx_output_chain_add_copy(ngx_pool_t *pool, ngx_chain_t **chain, > ngx_chain_t *in) After this change, ngx_file_aio_init() doesn't need to be external. It is only used in ngx_file_aio_read() in corresponding ngx_*_aio_read.c. > diff --git a/src/event/ngx_event.h b/src/event/ngx_event.h > --- a/src/event/ngx_event.h > +++ b/src/event/ngx_event.h > @@ -147,10 +147,6 @@ struct ngx_event_aio_s { > > ngx_fd_t fd; > > -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) > - ssize_t (*preload_handler)(ngx_buf_t *file); > -#endif > - > #if (NGX_HAVE_EVENTFD) > int64_t res; > #endif > diff --git a/src/http/ngx_http_copy_filter_module.c b/src/http/ngx_http_copy_filter_module.c > --- a/src/http/ngx_http_copy_filter_module.c > +++ b/src/http/ngx_http_copy_filter_module.c > @@ -19,10 +19,6 @@ typedef struct { > static void ngx_http_copy_aio_handler(ngx_output_chain_ctx_t *ctx, > ngx_file_t *file); > static void ngx_http_copy_aio_event_handler(ngx_event_t *ev); > -#if (NGX_HAVE_AIO_SENDFILE) > -static ssize_t ngx_http_copy_aio_sendfile_preload(ngx_buf_t *file); > -static void ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev); > -#endif > #endif > #if (NGX_THREADS) > static ngx_int_t ngx_http_copy_thread_handler(ngx_thread_task_t *task, > @@ -128,9 +124,6 @@ ngx_http_copy_filter(ngx_http_request_t > #if (NGX_HAVE_FILE_AIO) > if (ngx_file_aio && clcf->aio == NGX_HTTP_AIO_ON) { > ctx->aio_handler = ngx_http_copy_aio_handler; > -#if (NGX_HAVE_AIO_SENDFILE) > - 
ctx->aio_preload = ngx_http_copy_aio_sendfile_preload; > -#endif > } > #endif > > @@ -207,53 +200,6 @@ ngx_http_copy_aio_event_handler(ngx_even > ngx_http_run_posted_requests(c); > } > > - > -#if (NGX_HAVE_AIO_SENDFILE) > - > -static ssize_t > -ngx_http_copy_aio_sendfile_preload(ngx_buf_t *file) > -{ > - ssize_t n; > - static u_char buf[1]; > - ngx_event_aio_t *aio; > - ngx_http_request_t *r; > - ngx_output_chain_ctx_t *ctx; > - > - n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); > - > - if (n == NGX_AGAIN) { > - aio = file->file->aio; > - aio->handler = ngx_http_copy_aio_sendfile_event_handler; > - > - r = aio->data; > - r->main->blocked++; > - r->aio = 1; > - > - ctx = ngx_http_get_module_ctx(r, ngx_http_copy_filter_module); > - ctx->aio = 1; > - } > - > - return n; > -} > - > - > -static void > -ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev) > -{ > - ngx_event_aio_t *aio; > - ngx_http_request_t *r; > - > - aio = ev->data; > - r = aio->data; > - > - r->main->blocked--; > - r->aio = 0; > - ev->complete = 0; > - > - r->connection->write->handler(r->connection->write); > -} > - > -#endif > #endif > > > diff --git a/src/os/unix/ngx_freebsd_sendfile_chain.c b/src/os/unix/ngx_freebsd_sendfile_chain.c > --- a/src/os/unix/ngx_freebsd_sendfile_chain.c > +++ b/src/os/unix/ngx_freebsd_sendfile_chain.c > @@ -32,22 +32,21 @@ > ngx_chain_t * > ngx_freebsd_sendfile_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit) > { > - int rc, flags; > - off_t send, prev_send, sent; > - size_t file_size; > - ssize_t n; > - ngx_uint_t eintr, eagain; > - ngx_err_t err; > - ngx_buf_t *file; > - ngx_event_t *wev; > - ngx_chain_t *cl; > - ngx_iovec_t header, trailer; > - struct sf_hdtr hdtr; > - struct iovec headers[NGX_IOVS_PREALLOCATE]; > - struct iovec trailers[NGX_IOVS_PREALLOCATE]; > -#if (NGX_HAVE_AIO_SENDFILE) > - ngx_uint_t ebusy; > - ngx_event_aio_t *aio; > + int rc, flags; > + off_t send, prev_send, sent; > + size_t file_size; > + ssize_t n; > + 
ngx_uint_t eintr, eagain; > + ngx_err_t err; > + ngx_buf_t *file; > + ngx_event_t *wev; > + ngx_chain_t *cl; > + ngx_iovec_t header, trailer; > + struct sf_hdtr hdtr; > + struct iovec headers[NGX_IOVS_PREALLOCATE]; > + struct iovec trailers[NGX_IOVS_PREALLOCATE]; > +#if (NGX_HAVE_SENDFILE_NODISKIO) > + ngx_uint_t ebusy; > #endif After ngx_event_aio_t *aio variable removal, this block could be placed under "eintr, eagain" line (which by itself looks unsorted). The remaining part looks good to me. > > wev = c->write; > @@ -77,11 +76,6 @@ ngx_freebsd_sendfile_chain(ngx_connectio > eagain = 0; > flags = 0; > > -#if (NGX_HAVE_AIO_SENDFILE && NGX_SUPPRESS_WARN) > - aio = NULL; > - file = NULL; > -#endif > - > header.iovs = headers; > header.nalloc = NGX_IOVS_PREALLOCATE; > > @@ -90,7 +84,7 @@ ngx_freebsd_sendfile_chain(ngx_connectio > > for ( ;; ) { > eintr = 0; > -#if (NGX_HAVE_AIO_SENDFILE) > +#if (NGX_HAVE_SENDFILE_NODISKIO) > ebusy = 0; > #endif > prev_send = send; > @@ -179,9 +173,8 @@ ngx_freebsd_sendfile_chain(ngx_connectio > > sent = 0; > > -#if (NGX_HAVE_AIO_SENDFILE) > - aio = file->file->aio; > - flags = (aio && aio->preload_handler) ? SF_NODISKIO : 0; > +#if (NGX_HAVE_SENDFILE_NODISKIO) > + flags = (c->busy_count <= 2) ? 
SF_NODISKIO : 0; > #endif > > rc = sendfile(file->file->fd, c->fd, file->file_pos, > @@ -199,7 +192,7 @@ ngx_freebsd_sendfile_chain(ngx_connectio > eintr = 1; > break; > > -#if (NGX_HAVE_AIO_SENDFILE) > +#if (NGX_HAVE_SENDFILE_NODISKIO) > case NGX_EBUSY: > ebusy = 1; > break; > @@ -252,54 +245,30 @@ ngx_freebsd_sendfile_chain(ngx_connectio > > in = ngx_chain_update_sent(in, sent); > > -#if (NGX_HAVE_AIO_SENDFILE) > +#if (NGX_HAVE_SENDFILE_NODISKIO) > > if (ebusy) { > - if (aio->event.active) { > - /* > - * tolerate duplicate calls; they can happen due to subrequests > - * or multiple calls of the next body filter from a filter > - */ > - > - if (sent) { > - c->busy_count = 0; > - } > - > - return in; > - } > - > if (sent == 0) { > c->busy_count++; > > - if (c->busy_count > 2) { > - ngx_log_error(NGX_LOG_ALERT, c->log, 0, > - "sendfile(%V) returned busy again", > - &file->file->name); > - > - c->busy_count = 0; > - aio->preload_handler = NULL; > - > - send = prev_send; > - continue; > - } > + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, > + "sendfile() busy, count:%d", c->busy_count); > > } else { > c->busy_count = 0; > } > > - n = aio->preload_handler(file); > - > - if (n > 0) { > - send = prev_send + sent; > - continue; > + if (wev->posted) { > + ngx_delete_posted_event(wev); > } > > + ngx_post_event(wev, &ngx_posted_next_events); > + > + wev->ready = 0; > return in; > } > > - if (flags == SF_NODISKIO) { > - c->busy_count = 0; > - } > + c->busy_count = 0; > > #endif > > From pluknet at nginx.com Tue Nov 30 12:08:59 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 30 Nov 2021 15:08:59 +0300 Subject: [PATCH 2 of 4] Simplified sendfile(SF_NODISKIO) usage In-Reply-To: <4a954e89b1ae8539bbe0.1636604470@vm-bsd.mdounin.ru> References: <4a954e89b1ae8539bbe0.1636604470@vm-bsd.mdounin.ru> Message-ID: > On 11 Nov 2021, at 07:21, Maxim Dounin wrote: > > # HG changeset patch > # User Maxim Dounin > # Date 1636603886 -10800 > # Thu Nov 11 07:11:26 2021 +0300 > # 
Node ID 4a954e89b1ae8539bbe08c5afc1d5c9828d82d6f > # Parent 0fb75ef9dbca698e5e855145cf6a12180a36d400 > Simplified sendfile(SF_NODISKIO) usage. > > Starting with FreeBSD 11, there is no need to use AIO operations to preload > data into cache for sendfile(SF_NODISKIO) to work. Instead, sendfile() > handles non-blocking loading data from disk by itself. It still can, however, > return EBUSY if a page is already being loaded (for example, by a different > process). If this happens, we now post an event for the next event loop > iteration, so sendfile() is retried "after a short period", as manpage > recommends. > > The limit of the number of EBUSY tolerated without any progress is preserved, > but now it does not result in an alert, since on an idle system event loop > iteration might be very short and EBUSY can happen many times in a row. > Instead, SF_NODISKIO is simply disabled for one call once the limit is > reached. > > With this change, sendfile(SF_NODISKIO) is now used automatically as long as > sendfile() is enabled, and no longer requires "aio on;". > Updated on top of the tip (a7a77549265e). diff -r 40098a93d224 -r 1078530b0e99 auto/os/freebsd --- a/auto/os/freebsd Thu Nov 11 06:10:55 2021 +0300 +++ b/auto/os/freebsd Thu Nov 11 07:11:26 2021 +0300 @@ -44,12 +44,10 @@ CORE_SRCS="$CORE_SRCS $FREEBSD_SENDFILE_SRCS" fi -if [ $NGX_FILE_AIO = YES ]; then - if [ $osreldate -gt 502103 ]; then - echo " + sendfile()'s SF_NODISKIO found" +if [ $osreldate -gt 1100000 ]; then + echo " + sendfile()'s SF_NODISKIO found" - have=NGX_HAVE_AIO_SENDFILE . auto/have - fi + have=NGX_HAVE_SENDFILE_NODISKIO . 
auto/have fi # POSIX semaphores diff -r 40098a93d224 -r 1078530b0e99 src/core/ngx_buf.h --- a/src/core/ngx_buf.h Thu Nov 11 06:10:55 2021 +0300 +++ b/src/core/ngx_buf.h Thu Nov 11 07:11:26 2021 +0300 @@ -90,9 +90,6 @@ #if (NGX_HAVE_FILE_AIO || NGX_COMPAT) ngx_output_chain_aio_pt aio_handler; -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) - ssize_t (*aio_preload)(ngx_buf_t *file); -#endif #endif #if (NGX_THREADS || NGX_COMPAT) diff -r 40098a93d224 -r 1078530b0e99 src/core/ngx_connection.h --- a/src/core/ngx_connection.h Thu Nov 11 06:10:55 2021 +0300 +++ b/src/core/ngx_connection.h Thu Nov 11 07:11:26 2021 +0300 @@ -185,7 +185,7 @@ unsigned need_last_buf:1; -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) +#if (NGX_HAVE_SENDFILE_NODISKIO || NGX_COMPAT) unsigned busy_count:2; #endif diff -r 40098a93d224 -r 1078530b0e99 src/core/ngx_module.h --- a/src/core/ngx_module.h Thu Nov 11 06:10:55 2021 +0300 +++ b/src/core/ngx_module.h Thu Nov 11 07:11:26 2021 +0300 @@ -41,7 +41,7 @@ #define NGX_MODULE_SIGNATURE_3 "0" #endif -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) +#if (NGX_HAVE_SENDFILE_NODISKIO || NGX_COMPAT) #define NGX_MODULE_SIGNATURE_4 "1" #else #define NGX_MODULE_SIGNATURE_4 "0" diff -r 40098a93d224 -r 1078530b0e99 src/core/ngx_output_chain.c --- a/src/core/ngx_output_chain.c Thu Nov 11 06:10:55 2021 +0300 +++ b/src/core/ngx_output_chain.c Thu Nov 11 07:11:26 2021 +0300 @@ -29,10 +29,6 @@ static ngx_inline ngx_int_t ngx_output_chain_as_is(ngx_output_chain_ctx_t *ctx, ngx_buf_t *buf); -#if (NGX_HAVE_AIO_SENDFILE) -static ngx_int_t ngx_output_chain_aio_setup(ngx_output_chain_ctx_t *ctx, - ngx_file_t *file); -#endif static ngx_int_t ngx_output_chain_add_copy(ngx_pool_t *pool, ngx_chain_t **chain, ngx_chain_t *in); static ngx_int_t ngx_output_chain_align_file_buf(ngx_output_chain_ctx_t *ctx, @@ -283,12 +279,6 @@ buf->in_file = 0; } -#if (NGX_HAVE_AIO_SENDFILE) - if (ctx->aio_preload && buf->in_file) { - (void) ngx_output_chain_aio_setup(ctx, buf->file); - } -#endif - if 
(ctx->need_in_memory && !ngx_buf_in_memory(buf)) { return 0; } @@ -301,28 +291,6 @@ } -#if (NGX_HAVE_AIO_SENDFILE) - -static ngx_int_t -ngx_output_chain_aio_setup(ngx_output_chain_ctx_t *ctx, ngx_file_t *file) -{ - ngx_event_aio_t *aio; - - if (file->aio == NULL && ngx_file_aio_init(file, ctx->pool) != NGX_OK) { - return NGX_ERROR; - } - - aio = file->aio; - - aio->data = ctx->filter_ctx; - aio->preload_handler = ctx->aio_preload; - - return NGX_OK; -} - -#endif - - static ngx_int_t ngx_output_chain_add_copy(ngx_pool_t *pool, ngx_chain_t **chain, ngx_chain_t *in) diff -r 40098a93d224 -r 1078530b0e99 src/event/ngx_event.h --- a/src/event/ngx_event.h Thu Nov 11 06:10:55 2021 +0300 +++ b/src/event/ngx_event.h Thu Nov 11 07:11:26 2021 +0300 @@ -147,10 +147,6 @@ ngx_fd_t fd; -#if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) - ssize_t (*preload_handler)(ngx_buf_t *file); -#endif - #if (NGX_HAVE_EVENTFD) int64_t res; #endif diff -r 40098a93d224 -r 1078530b0e99 src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c Thu Nov 11 06:10:55 2021 +0300 +++ b/src/http/ngx_http_copy_filter_module.c Thu Nov 11 07:11:26 2021 +0300 @@ -19,10 +19,6 @@ static void ngx_http_copy_aio_handler(ngx_output_chain_ctx_t *ctx, ngx_file_t *file); static void ngx_http_copy_aio_event_handler(ngx_event_t *ev); -#if (NGX_HAVE_AIO_SENDFILE) -static ssize_t ngx_http_copy_aio_sendfile_preload(ngx_buf_t *file); -static void ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev); -#endif #endif #if (NGX_THREADS) static ngx_int_t ngx_http_copy_thread_handler(ngx_thread_task_t *task, @@ -128,9 +124,6 @@ #if (NGX_HAVE_FILE_AIO) if (ngx_file_aio && clcf->aio == NGX_HTTP_AIO_ON) { ctx->aio_handler = ngx_http_copy_aio_handler; -#if (NGX_HAVE_AIO_SENDFILE) - ctx->aio_preload = ngx_http_copy_aio_sendfile_preload; -#endif } #endif @@ -207,81 +200,6 @@ ngx_http_run_posted_requests(c); } - -#if (NGX_HAVE_AIO_SENDFILE) - -static ssize_t -ngx_http_copy_aio_sendfile_preload(ngx_buf_t *file) -{ 
- ssize_t n; - static u_char buf[1]; - ngx_event_aio_t *aio; - ngx_http_request_t *r; - ngx_output_chain_ctx_t *ctx; - - aio = file->file->aio; - r = aio->data; - - if (r->aio) { - /* - * tolerate sendfile() calls if another operation is already - * running; this can happen due to subrequests, multiple calls - * of the next body filter from a filter, or in HTTP/2 due to - * a write event on the main connection - */ - - return NGX_AGAIN; - } - - n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); - - if (n == NGX_AGAIN) { - aio->handler = ngx_http_copy_aio_sendfile_event_handler; - - r->main->blocked++; - r->aio = 1; - - ctx = ngx_http_get_module_ctx(r, ngx_http_copy_filter_module); - ctx->aio = 1; - } - - return n; -} - - -static void -ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev) -{ - ngx_event_aio_t *aio; - ngx_connection_t *c; - ngx_http_request_t *r; - - aio = ev->data; - r = aio->data; - c = r->connection; - - r->main->blocked--; - r->aio = 0; - ev->complete = 0; - -#if (NGX_HTTP_V2) - - if (r->stream) { - /* - * for HTTP/2, update write event to make sure processing will - * reach the main connection to handle sendfile() preload - */ - - c->write->ready = 1; - c->write->active = 0; - } - -#endif - - c->write->handler(c->write); -} - -#endif #endif diff -r 40098a93d224 -r 1078530b0e99 src/os/unix/ngx_freebsd_sendfile_chain.c --- a/src/os/unix/ngx_freebsd_sendfile_chain.c Thu Nov 11 06:10:55 2021 +0300 +++ b/src/os/unix/ngx_freebsd_sendfile_chain.c Thu Nov 11 07:11:26 2021 +0300 @@ -32,22 +32,21 @@ ngx_chain_t * ngx_freebsd_sendfile_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit) { - int rc, flags; - off_t send, prev_send, sent; - size_t file_size; - ssize_t n; - ngx_uint_t eintr, eagain; - ngx_err_t err; - ngx_buf_t *file; - ngx_event_t *wev; - ngx_chain_t *cl; - ngx_iovec_t header, trailer; - struct sf_hdtr hdtr; - struct iovec headers[NGX_IOVS_PREALLOCATE]; - struct iovec trailers[NGX_IOVS_PREALLOCATE]; -#if 
(NGX_HAVE_AIO_SENDFILE) - ngx_uint_t ebusy; - ngx_event_aio_t *aio; + int rc, flags; + off_t send, prev_send, sent; + size_t file_size; + ssize_t n; + ngx_uint_t eintr, eagain; + ngx_err_t err; + ngx_buf_t *file; + ngx_event_t *wev; + ngx_chain_t *cl; + ngx_iovec_t header, trailer; + struct sf_hdtr hdtr; + struct iovec headers[NGX_IOVS_PREALLOCATE]; + struct iovec trailers[NGX_IOVS_PREALLOCATE]; +#if (NGX_HAVE_SENDFILE_NODISKIO) + ngx_uint_t ebusy; #endif wev = c->write; @@ -77,11 +76,6 @@ eagain = 0; flags = 0; -#if (NGX_HAVE_AIO_SENDFILE && NGX_SUPPRESS_WARN) - aio = NULL; - file = NULL; -#endif - header.iovs = headers; header.nalloc = NGX_IOVS_PREALLOCATE; @@ -90,7 +84,7 @@ for ( ;; ) { eintr = 0; -#if (NGX_HAVE_AIO_SENDFILE) +#if (NGX_HAVE_SENDFILE_NODISKIO) ebusy = 0; #endif prev_send = send; @@ -179,9 +173,8 @@ sent = 0; -#if (NGX_HAVE_AIO_SENDFILE) - aio = file->file->aio; - flags = (aio && aio->preload_handler) ? SF_NODISKIO : 0; +#if (NGX_HAVE_SENDFILE_NODISKIO) + flags = (c->busy_count <= 2) ? 
SF_NODISKIO : 0; #endif rc = sendfile(file->file->fd, c->fd, file->file_pos, @@ -199,7 +192,7 @@ eintr = 1; break; -#if (NGX_HAVE_AIO_SENDFILE) +#if (NGX_HAVE_SENDFILE_NODISKIO) case NGX_EBUSY: ebusy = 1; break; @@ -252,41 +245,30 @@ in = ngx_chain_update_sent(in, sent); -#if (NGX_HAVE_AIO_SENDFILE) +#if (NGX_HAVE_SENDFILE_NODISKIO) if (ebusy) { if (sent == 0) { c->busy_count++; - if (c->busy_count > 2) { - ngx_log_error(NGX_LOG_ALERT, c->log, 0, - "sendfile(%V) returned busy again", - &file->file->name); - - c->busy_count = 0; - aio->preload_handler = NULL; - - send = prev_send; - continue; - } + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "sendfile() busy, count:%d", c->busy_count); } else { c->busy_count = 0; } - n = aio->preload_handler(file); - - if (n > 0) { - send = prev_send + sent; - continue; + if (wev->posted) { + ngx_delete_posted_event(wev); } + ngx_post_event(wev, &ngx_posted_next_events); + + wev->ready = 0; return in; } - if (flags == SF_NODISKIO) { - c->busy_count = 0; - } + c->busy_count = 0; #endif -- Sergey Kandaurov From pluknet at nginx.com Tue Nov 30 12:15:50 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 30 Nov 2021 15:15:50 +0300 Subject: [PATCH 4 of 4] Support for sendfile(SF_NOCACHE) In-Reply-To: <10f96e74ae73e1c53a3f.1636604472@vm-bsd.mdounin.ru> References: <10f96e74ae73e1c53a3f.1636604472@vm-bsd.mdounin.ru> Message-ID: <20211130121550.2eh5sm5aedkph2oz@MacBook-Air-Sergey.local> On Thu, Nov 11, 2021 at 07:21:12AM +0300, Maxim Dounin wrote: > # HG changeset patch > # User Maxim Dounin > # Date 1636603897 -10800 > # Thu Nov 11 07:11:37 2021 +0300 > # Node ID 10f96e74ae73e1c53a3fd08e7e1c26754c8969ed > # Parent 98d3beb63f32cbb68d1cdcec385614d32129cad0 > Support for sendfile(SF_NOCACHE). > > The SF_NOCACHE flag, introduced in FreeBSD 11 along with the new non-blocking > sendfile() implementation by glebius@, makes it possible to use sendfile() > along with the "directio" directive. 
> > diff --git a/src/core/ngx_output_chain.c b/src/core/ngx_output_chain.c > --- a/src/core/ngx_output_chain.c > +++ b/src/core/ngx_output_chain.c > @@ -256,9 +256,11 @@ ngx_output_chain_as_is(ngx_output_chain_ > } > #endif > > +#if !(NGX_HAVE_SENDFILE_NODISKIO) > if (buf->in_file && buf->file->directio) { > return 0; > } > +#endif This probably deserves a comment, why it depends on such a macro test. Though, it should be pretty clear from the commit log. > > sendfile = ctx->sendfile; > > diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c > --- a/src/event/ngx_event_openssl.c > +++ b/src/event/ngx_event_openssl.c > @@ -2955,7 +2955,13 @@ ngx_ssl_sendfile(ngx_connection_t *c, ng > ngx_set_errno(0); > > #if (NGX_HAVE_SENDFILE_NODISKIO) > + > flags = (c->busy_count <= 2) ? SF_NODISKIO : 0; > + > + if (file->file->directio) { > + flags |= SF_NOCACHE; > + } > + > #else > flags = 0; > #endif > diff --git a/src/os/unix/ngx_freebsd_sendfile_chain.c b/src/os/unix/ngx_freebsd_sendfile_chain.c > --- a/src/os/unix/ngx_freebsd_sendfile_chain.c > +++ b/src/os/unix/ngx_freebsd_sendfile_chain.c > @@ -174,7 +174,13 @@ ngx_freebsd_sendfile_chain(ngx_connectio > sent = 0; > > #if (NGX_HAVE_SENDFILE_NODISKIO) > + > flags = (c->busy_count <= 2) ? SF_NODISKIO : 0; > + > + if (file->file->directio) { > + flags |= SF_NOCACHE; > + } > + > #endif > > rc = sendfile(file->file->fd, c->fd, file->file_pos, > Otherwise, looks good. From simon at reblaze.com Tue Nov 30 15:52:59 2021 From: simon at reblaze.com (Simon Marechal) Date: Tue, 30 Nov 2021 16:52:59 +0100 Subject: Async Nginx question Message-ID: Hello, I am trying to interface an asynchronous library with nginx, as a module. I already asked Maxim Dounin (thanks!) about this, he gave me a few pointers, and asked me to look at the memcache module about how it was done. I however wonder if that is a good example of what I am trying to achieve, as in my use case it's more of a content filter module. 
That means that when I return NGX_DONE from the NGX_HTTP_CONTENT_PHASE (is that the right phase I picked? I need the headers and body), I will run this asynchronous library. Once it has finished working, it should either return a "block" response, or continue with normal processing (returning a static file, proxy, etc.). While I have a notion of how I could return the "block" answer, I am not sure how to resume the normal course of operations in the other case. I hope I am making sense ... Is there a good module that runs at this phase and is asynchronous? Best regards, Simon -------------- next part -------------- An HTML attachment was scrubbed... URL: From xeioex at nginx.com Tue Nov 30 16:36:12 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 30 Nov 2021 16:36:12 +0000 Subject: [njs] Tests: making async/await test filenames shorter. Message-ID: details: https://hg.nginx.org/njs/rev/6aaf1c14c1e7 branches: changeset: 1754:6aaf1c14c1e7 user: Dmitry Volyntsev date: Mon Nov 29 18:14:25 2021 +0000 description: Tests: making async/await test filenames shorter. 
diffstat: test/js/async_add.t.js | 15 ++++++++++++ test/js/async_await_add.t.js | 15 ------------ test/js/async_await_blank.t.js | 13 ---------- test/js/async_await_catch.t.js | 13 ---------- test/js/async_await_finally.t.js | 15 ------------ test/js/async_await_for.t.js | 31 ------------------------- test/js/async_await_inline.t.js | 18 --------------- test/js/async_await_many_call.t.js | 35 ----------------------------- test/js/async_await_reject.t.js | 15 ------------ test/js/async_await_stages.t.js | 36 ------------------------------ test/js/async_await_throw.t.js | 22 ------------------ test/js/async_await_throw_async.t.js | 25 -------------------- test/js/async_await_throw_catch.t.js | 20 ---------------- test/js/async_await_throw_catch_async.t.js | 25 -------------------- test/js/async_await_try_catch.t.js | 30 ------------------------- test/js/async_await_try_finally.t.js | 32 -------------------------- test/js/async_await_try_resolve.t.js | 23 ------------------- test/js/async_await_try_throw.t.js | 27 ---------------------- test/js/async_await_try_throw_catch.t.js | 27 ---------------------- test/js/async_blank.t.js | 13 ++++++++++ test/js/async_catch.t.js | 13 ++++++++++ test/js/async_finally.t.js | 15 ++++++++++++ test/js/async_for.t.js | 31 +++++++++++++++++++++++++ test/js/async_inline.t.js | 18 +++++++++++++++ test/js/async_many_call.t.js | 35 +++++++++++++++++++++++++++++ test/js/async_reject.t.js | 15 ++++++++++++ test/js/async_stages.t.js | 36 ++++++++++++++++++++++++++++++ test/js/async_throw.t.js | 22 ++++++++++++++++++ test/js/async_throw_async.t.js | 25 ++++++++++++++++++++ test/js/async_throw_catch.t.js | 20 ++++++++++++++++ test/js/async_throw_catch_async.t.js | 25 ++++++++++++++++++++ test/js/async_try_catch.t.js | 30 +++++++++++++++++++++++++ test/js/async_try_finally.t.js | 32 ++++++++++++++++++++++++++ test/js/async_try_resolve.t.js | 23 +++++++++++++++++++ test/js/async_try_throw.t.js | 27 ++++++++++++++++++++++ 
test/js/async_try_throw_catch.t.js | 27 ++++++++++++++++++++++ 36 files changed, 422 insertions(+), 422 deletions(-) diffs (988 lines): diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_add.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_add.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,15 @@ +/*--- +includes: [] +flags: [async] +---*/ + +async function af(x) { + const y = await new Promise(resolve => {resolve(x + 10)}); + + return x + y; +} + +af(50).then(v => { + assert.sameValue(v, 110); +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_add.t.js --- a/test/js/async_await_add.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,15 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -async function af(x) { - const y = await new Promise(resolve => {resolve(x + 10)}); - - return x + y; -} - -af(50).then(v => { - assert.sameValue(v, 110); -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_blank.t.js --- a/test/js/async_await_blank.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,13 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -async function af(x) { - return x; -} - -af(12345).then(v => { - assert.sameValue(v, 12345) -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_catch.t.js --- a/test/js/async_await_catch.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,13 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -async function add(x) { - return await new Promise((resolve, reject) => {reject(x)}).catch(v => v + 1); -} - -add(50).then(v => { - assert.sameValue(v, 51); -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_finally.t.js --- a/test/js/async_await_finally.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,15 +0,0 @@ -/*--- -includes: [] -flags: [async] 
----*/ - -let called = false; -async function add(x) { - return await new Promise((resolve, reject) => {reject(x + 1)}) - .finally(() => {called = true}); -} - -add(50).catch(e => { - assert.sameValue(e, 51); - assert.sameValue(called, true, "finally was not invoked"); -}).then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_for.t.js --- a/test/js/async_await_for.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,31 +0,0 @@ -/*--- -includes: [compareArray.js] -flags: [async] ----*/ - -let stage = []; - -async function f() { - let sum = 0; - - stage.push(2); - - for (let x = 4; x < 14; x++) { - sum += await new Promise((resolve, reject) => {resolve(x)}); - - stage.push(x); - } - - stage.push("end"); - - return sum; -} - -stage.push(1); - -f().then(v => { - assert.sameValue(v, 85); - assert.compareArray(stage, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "end"]); -}).then($DONE, $DONE); - -stage.push(3); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_inline.t.js --- a/test/js/async_await_inline.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,18 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -function pr(x) { - return new Promise(resolve => {resolve(x)}); -} - -async function add() { - const a = pr(20); - const b = pr(50); - return await a + await b; -} - -add().then(v => { - assert.sameValue(v, 70); -}).then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_many_call.t.js --- a/test/js/async_await_many_call.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,35 +0,0 @@ -/*--- -includes: [compareArray.js] -flags: [async] ----*/ - -async function test(name) { - let k1, k2; - - switch (name) { - case "First": - k1 = await Promise.resolve("SUN"); - k2 = await Promise.resolve("MOON"); - break; - - case "Second": - k1 = await Promise.resolve("CAT"); - k2 = await Promise.resolve("MOUSE"); - break; - - 
case "Third": - k1 = await Promise.resolve("MAN"); - k2 = await Promise.resolve("WOMAN"); - break; - - default: - break; - } - - return `${name}: ${k1} ${k2}`; -}; - -Promise.all(['First', 'Second', 'Third'].map(v => test(v))) -.then(results => { - assert.compareArray(results, ['First: SUN MOON','Second: CAT MOUSE','Third: MAN WOMAN']); -}).then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_reject.t.js --- a/test/js/async_await_reject.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,15 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -async function add(x) { - return await new Promise((resolve, reject) => {reject(x)}); -} - -add(50) -.then(v => $DONOTEVALUATE()) -.catch(v => { - assert.sameValue(v, 50); -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_stages.t.js --- a/test/js/async_await_stages.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,36 +0,0 @@ -/*--- -includes: [compareArray.js] -flags: [async] ----*/ - -function pr(x) { - return new Promise(resolve => {resolve(x)}) - .then(v => v).then(v => v); -} - -let stage = []; - -async function f() { - let sum = 0; - - stage.push(2); - - const a1 = await pr(10); - - stage.push(4); - - const a2 = await pr(20); - - stage.push(5); - - return a1 + a2; -} - -stage.push(1); - -f().then(v => { - stage.push(v); - assert.compareArray(stage, [1, 2, 3, 4, 5, 30]); -}).then($DONE, $DONE); - -stage.push(3); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_throw.t.js --- a/test/js/async_await_throw.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,22 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -function pr(x) { - return new Promise(resolve => {resolve(x)}).then(v => {throw v}); -} - -async function add(x) { - const a = await pr(x); - const b = await pr(x); - - return a + b; -} - -add(50) -.then(v => $DONOTEVALUATE()) -.catch(v => { - 
assert.sameValue(v, 50); -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_throw_async.t.js --- a/test/js/async_await_throw_async.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,25 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -function pr(x) { - return new Promise(resolve => {resolve(x)}); -} - -async function add(x) { - const a = await pr(x); - - throw a + 1; - - const b = await pr(x + 10); - - return a + b; -} - -add(50) -.then(v => $DONOTEVALUATE()) -.catch(v => { - assert.sameValue(v, 51); -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_throw_catch.t.js --- a/test/js/async_await_throw_catch.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,20 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -function pr(x) { - return new Promise(resolve => {resolve(x)}).then(v => {throw v}).catch(v => v); -} - -async function add(x) { - const a = await pr(x); - const b = await pr(x + 10); - - return a + b; -} - -add(50).then(v => { - assert.sameValue(v, 110); -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_throw_catch_async.t.js --- a/test/js/async_await_throw_catch_async.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,25 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -function pr(x) { - return new Promise(resolve => {resolve(x)}); -} - -async function add(x) { - const a = await pr(x); - - throw a + 1; - - const b = await pr(x + 10); - - return a + b; -} - -add(50) -.then(v => $DONOTEVALUATE()) -.catch(v => { - assert.sameValue(v, 51); -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_try_catch.t.js --- a/test/js/async_await_try_catch.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,30 +0,0 @@ -/*--- -includes: [compareArray.js] -flags: [async] ----*/ - -let stages = []; - 
-async function af() { - try { - await new Promise(function(resolve, reject) { - reject("reject"); - }); - - $DONOTEVALUATE(); - } - catch (v) { - stages.push(v); - } - finally { - stages.push('finally'); - } - - return "end"; -}; - -af().then(v => { - stages.push(v); - assert.compareArray(stages, ['reject', 'finally', 'end']); -}) -.then($DONE, $DONE) diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_try_finally.t.js --- a/test/js/async_await_try_finally.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,32 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -let stages = []; - -async function af() { - try { - await new Promise(function(resolve, reject) { - reject("reject"); - }); - - $DONOTEVALUATE(); - } - finally { - await new Promise(function(resolve, reject) { - reject("finally reject"); - }); - - $DONOTEVALUATE(); - } - - return "shouldn't happen: end"; -}; - -af() -.then(v => $DONOTEVALUATE()) -.catch(v => { - assert.sameValue(v, "finally reject"); -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_try_resolve.t.js --- a/test/js/async_await_try_resolve.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,23 +0,0 @@ -/*--- -includes: [] -flags: [async] ----*/ - -async function af() { - let key; - - try { - key = await Promise.resolve("key"); - key += ": resolve"; - - } catch (e) { - key += ": exception"; - } - - return key; -}; - -af().then(v => { - assert.sameValue(v, "key: resolve"); -}) -.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_try_throw.t.js --- a/test/js/async_await_try_throw.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,27 +0,0 @@ -/*--- -includes: [compareArray.js] -flags: [async] ----*/ - -let stages = []; - -async function af() { - try { - throw "try"; - - $DONOTEVALUATE(); - } - finally { - stages.push("finally"); - } - - return "shouldn't happen: end"; -}; - 
-af() -.then(v => $DONOTEVALUATE()) -.catch(v => { - stages.push(v); - assert.compareArray(stages, ['finally', 'try']); -}) -.then($DONE, $DONE) diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_await_try_throw_catch.t.js --- a/test/js/async_await_try_throw_catch.t.js Mon Nov 29 16:50:41 2021 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,27 +0,0 @@ -/*--- -includes: [compareArray.js] -flags: [async] ----*/ - -let stage = []; - -async function af() { - try { - throw "try"; - - $DONOTEVALUATE(); - } - catch (v) { - stage.push(v); - } - finally { - stage.push("finally"); - } - - return "end"; -}; - -af().then(v => { - stage.push(v); - assert.compareArray(stage, ['try', 'finally', 'end']) -}).then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_blank.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_blank.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,13 @@ +/*--- +includes: [] +flags: [async] +---*/ + +async function af(x) { + return x; +} + +af(12345).then(v => { + assert.sameValue(v, 12345) +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_catch.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_catch.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,13 @@ +/*--- +includes: [] +flags: [async] +---*/ + +async function add(x) { + return await new Promise((resolve, reject) => {reject(x)}).catch(v => v + 1); +} + +add(50).then(v => { + assert.sameValue(v, 51); +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_finally.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_finally.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,15 @@ +/*--- +includes: [] +flags: [async] +---*/ + +let called = false; +async function add(x) { + return await new Promise((resolve, reject) => {reject(x + 1)}) + .finally(() => {called = true}); +} + +add(50).catch(e => { + assert.sameValue(e, 51); + assert.sameValue(called, true, "finally was not invoked"); 
+}).then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_for.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_for.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,31 @@ +/*--- +includes: [compareArray.js] +flags: [async] +---*/ + +let stage = []; + +async function f() { + let sum = 0; + + stage.push(2); + + for (let x = 4; x < 14; x++) { + sum += await new Promise((resolve, reject) => {resolve(x)}); + + stage.push(x); + } + + stage.push("end"); + + return sum; +} + +stage.push(1); + +f().then(v => { + assert.sameValue(v, 85); + assert.compareArray(stage, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "end"]); +}).then($DONE, $DONE); + +stage.push(3); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_inline.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_inline.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,18 @@ +/*--- +includes: [] +flags: [async] +---*/ + +function pr(x) { + return new Promise(resolve => {resolve(x)}); +} + +async function add() { + const a = pr(20); + const b = pr(50); + return await a + await b; +} + +add().then(v => { + assert.sameValue(v, 70); +}).then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_many_call.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_many_call.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,35 @@ +/*--- +includes: [compareArray.js] +flags: [async] +---*/ + +async function test(name) { + let k1, k2; + + switch (name) { + case "First": + k1 = await Promise.resolve("SUN"); + k2 = await Promise.resolve("MOON"); + break; + + case "Second": + k1 = await Promise.resolve("CAT"); + k2 = await Promise.resolve("MOUSE"); + break; + + case "Third": + k1 = await Promise.resolve("MAN"); + k2 = await Promise.resolve("WOMAN"); + break; + + default: + break; + } + + return `${name}: ${k1} ${k2}`; +}; + +Promise.all(['First', 'Second', 'Third'].map(v => test(v))) +.then(results => { + assert.compareArray(results, ['First: SUN MOON','Second: CAT 
MOUSE','Third: MAN WOMAN']); +}).then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_reject.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_reject.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,15 @@ +/*--- +includes: [] +flags: [async] +---*/ + +async function add(x) { + return await new Promise((resolve, reject) => {reject(x)}); +} + +add(50) +.then(v => $DONOTEVALUATE()) +.catch(v => { + assert.sameValue(v, 50); +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_stages.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_stages.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,36 @@ +/*--- +includes: [compareArray.js] +flags: [async] +---*/ + +function pr(x) { + return new Promise(resolve => {resolve(x)}) + .then(v => v).then(v => v); +} + +let stage = []; + +async function f() { + let sum = 0; + + stage.push(2); + + const a1 = await pr(10); + + stage.push(4); + + const a2 = await pr(20); + + stage.push(5); + + return a1 + a2; +} + +stage.push(1); + +f().then(v => { + stage.push(v); + assert.compareArray(stage, [1, 2, 3, 4, 5, 30]); +}).then($DONE, $DONE); + +stage.push(3); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_throw.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_throw.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,22 @@ +/*--- +includes: [] +flags: [async] +---*/ + +function pr(x) { + return new Promise(resolve => {resolve(x)}).then(v => {throw v}); +} + +async function add(x) { + const a = await pr(x); + const b = await pr(x); + + return a + b; +} + +add(50) +.then(v => $DONOTEVALUATE()) +.catch(v => { + assert.sameValue(v, 50); +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_throw_async.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_throw_async.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,25 @@ +/*--- +includes: [] +flags: [async] +---*/ + +function pr(x) { + return new Promise(resolve => 
{resolve(x)}); +} + +async function add(x) { + const a = await pr(x); + + throw a + 1; + + const b = await pr(x + 10); + + return a + b; +} + +add(50) +.then(v => $DONOTEVALUATE()) +.catch(v => { + assert.sameValue(v, 51); +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_throw_catch.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_throw_catch.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,20 @@ +/*--- +includes: [] +flags: [async] +---*/ + +function pr(x) { + return new Promise(resolve => {resolve(x)}).then(v => {throw v}).catch(v => v); +} + +async function add(x) { + const a = await pr(x); + const b = await pr(x + 10); + + return a + b; +} + +add(50).then(v => { + assert.sameValue(v, 110); +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_throw_catch_async.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_throw_catch_async.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,25 @@ +/*--- +includes: [] +flags: [async] +---*/ + +function pr(x) { + return new Promise(resolve => {resolve(x)}); +} + +async function add(x) { + const a = await pr(x); + + throw a + 1; + + const b = await pr(x + 10); + + return a + b; +} + +add(50) +.then(v => $DONOTEVALUATE()) +.catch(v => { + assert.sameValue(v, 51); +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_try_catch.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_try_catch.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,30 @@ +/*--- +includes: [compareArray.js] +flags: [async] +---*/ + +let stages = []; + +async function af() { + try { + await new Promise(function(resolve, reject) { + reject("reject"); + }); + + $DONOTEVALUATE(); + } + catch (v) { + stages.push(v); + } + finally { + stages.push('finally'); + } + + return "end"; +}; + +af().then(v => { + stages.push(v); + assert.compareArray(stages, ['reject', 'finally', 'end']); +}) +.then($DONE, $DONE) diff -r 3b035831f64a -r 6aaf1c14c1e7 
test/js/async_try_finally.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_try_finally.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,32 @@ +/*--- +includes: [] +flags: [async] +---*/ + +let stages = []; + +async function af() { + try { + await new Promise(function(resolve, reject) { + reject("reject"); + }); + + $DONOTEVALUATE(); + } + finally { + await new Promise(function(resolve, reject) { + reject("finally reject"); + }); + + $DONOTEVALUATE(); + } + + return "shouldn't happen: end"; +}; + +af() +.then(v => $DONOTEVALUATE()) +.catch(v => { + assert.sameValue(v, "finally reject"); +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_try_resolve.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_try_resolve.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,23 @@ +/*--- +includes: [] +flags: [async] +---*/ + +async function af() { + let key; + + try { + key = await Promise.resolve("key"); + key += ": resolve"; + + } catch (e) { + key += ": exception"; + } + + return key; +}; + +af().then(v => { + assert.sameValue(v, "key: resolve"); +}) +.then($DONE, $DONE); diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_try_throw.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_try_throw.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,27 @@ +/*--- +includes: [compareArray.js] +flags: [async] +---*/ + +let stages = []; + +async function af() { + try { + throw "try"; + + $DONOTEVALUATE(); + } + finally { + stages.push("finally"); + } + + return "shouldn't happen: end"; +}; + +af() +.then(v => $DONOTEVALUATE()) +.catch(v => { + stages.push(v); + assert.compareArray(stages, ['finally', 'try']); +}) +.then($DONE, $DONE) diff -r 3b035831f64a -r 6aaf1c14c1e7 test/js/async_try_throw_catch.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_try_throw_catch.t.js Mon Nov 29 18:14:25 2021 +0000 @@ -0,0 +1,27 @@ +/*--- +includes: [compareArray.js] +flags: [async] +---*/ + +let stage = []; + 
+async function af() { + try { + throw "try"; + + $DONOTEVALUATE(); + } + catch (v) { + stage.push(v); + } + finally { + stage.push("finally"); + } + + return "end"; +}; + +af().then(v => { + stage.push(v); + assert.compareArray(stage, ['try', 'finally', 'end']) +}).then($DONE, $DONE); From xeioex at nginx.com Tue Nov 30 16:36:14 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 30 Nov 2021 16:36:14 +0000 Subject: [njs] Fixed catching of exception thrown in try block of async function. Message-ID: details: https://hg.nginx.org/njs/rev/748eca39acb3 branches: changeset: 1755:748eca39acb3 user: Dmitry Volyntsev date: Tue Nov 30 14:55:57 2021 +0000 description: Fixed catching of exception thrown in try block of async function. The bug was introduced in 92d10cd761e2 (0.7.0). diffstat: src/njs_async.c | 11 ++++++----- src/njs_async.h | 2 +- src/njs_function.c | 10 +++++++--- src/njs_function.h | 2 +- src/njs_vmcode.c | 4 ++-- test/js/async_throw_in_try_after_await.t.js | 25 +++++++++++++++++++++++++ 6 files changed, 42 insertions(+), 12 deletions(-) diffs (149 lines): diff -r 6aaf1c14c1e7 -r 748eca39acb3 src/njs_async.c --- a/src/njs_async.c Mon Nov 29 18:14:25 2021 +0000 +++ b/src/njs_async.c Tue Nov 30 14:55:57 2021 +0000 @@ -59,7 +59,7 @@ njs_await_fulfilled(njs_vm_t *vm, njs_va { njs_int_t ret; njs_value_t **cur_local, **cur_closures, **cur_temp, *value; - njs_frame_t *frame; + njs_frame_t *frame, *async_frame; njs_function_t *function; njs_async_ctx_t *ctx; njs_native_frame_t *top, *async; @@ -71,7 +71,8 @@ njs_await_fulfilled(njs_vm_t *vm, njs_va goto failed; } - async = ctx->await; + async_frame = ctx->await; + async = &async_frame->native; async->previous = vm->top_frame; function = async->function; @@ -87,7 +88,7 @@ njs_await_fulfilled(njs_vm_t *vm, njs_va vm->levels[NJS_LEVEL_TEMP] = async->temp; vm->top_frame = async; - vm->active_frame = (njs_frame_t *) async; + vm->active_frame = async_frame; *njs_scope_value(vm, ctx->index) = *value; vm->retval = 
*value; @@ -149,7 +150,7 @@ njs_await_rejected(njs_vm_t *vm, njs_val value = njs_arg(args, nargs, 1); - if (ctx->await->pc == ctx->pc) { + if (ctx->await->native.pc == ctx->pc) { (void) njs_function_call(vm, njs_function(&ctx->capability->reject), &njs_value_undefined, value, 1, &vm->retval); @@ -158,7 +159,7 @@ njs_await_rejected(njs_vm_t *vm, njs_val return NJS_ERROR; } - ctx->pc = ctx->await->pc; + ctx->pc = ctx->await->native.pc; return njs_await_fulfilled(vm, args, nargs, unused); } diff -r 6aaf1c14c1e7 -r 748eca39acb3 src/njs_async.h --- a/src/njs_async.h Mon Nov 29 18:14:25 2021 +0000 +++ b/src/njs_async.h Tue Nov 30 14:55:57 2021 +0000 @@ -10,7 +10,7 @@ typedef struct { njs_promise_capability_t *capability; - njs_native_frame_t *await; + njs_frame_t *await; uintptr_t index; u_char *pc; } njs_async_ctx_t; diff -r 6aaf1c14c1e7 -r 748eca39acb3 src/njs_function.c --- a/src/njs_function.c Mon Nov 29 18:14:25 2021 +0000 +++ b/src/njs_function.c Tue Nov 30 14:55:57 2021 +0000 @@ -800,12 +800,17 @@ njs_function_frame_free(njs_vm_t *vm, nj njs_int_t -njs_function_frame_save(njs_vm_t *vm, njs_native_frame_t *native, u_char *pc) +njs_function_frame_save(njs_vm_t *vm, njs_frame_t *frame, u_char *pc) { size_t value_count, n; njs_value_t *start, *end, *p, **new, *value, **local; njs_function_t *function; - njs_native_frame_t *active; + njs_native_frame_t *active, *native; + + *frame = *vm->active_frame; + frame->previous_active_frame = NULL; + + native = &frame->native; active = &vm->active_frame->native; value_count = njs_function_frame_value_count(active); @@ -816,7 +821,6 @@ njs_function_frame_save(njs_vm_t *vm, nj value = (njs_value_t *) (new + value_count + function->u.lambda->temp); - *native = *active; native->arguments = value; native->arguments_offset = value + (function->args_offset - 1); diff -r 6aaf1c14c1e7 -r 748eca39acb3 src/njs_function.h --- a/src/njs_function.h Mon Nov 29 18:14:25 2021 +0000 +++ b/src/njs_function.h Tue Nov 30 14:55:57 2021 +0000 @@ 
-116,7 +116,7 @@ njs_int_t njs_function_lambda_call(njs_v njs_int_t njs_function_native_call(njs_vm_t *vm); njs_native_frame_t *njs_function_frame_alloc(njs_vm_t *vm, size_t size); void njs_function_frame_free(njs_vm_t *vm, njs_native_frame_t *frame); -njs_int_t njs_function_frame_save(njs_vm_t *vm, njs_native_frame_t *native, +njs_int_t njs_function_frame_save(njs_vm_t *vm, njs_frame_t *native, u_char *pc); njs_object_type_t njs_function_object_type(njs_vm_t *vm, njs_function_t *function); diff -r 6aaf1c14c1e7 -r 748eca39acb3 src/njs_vmcode.c --- a/src/njs_vmcode.c Mon Nov 29 18:14:25 2021 +0000 +++ b/src/njs_vmcode.c Tue Nov 30 14:55:57 2021 +0000 @@ -1875,10 +1875,10 @@ njs_vmcode_await(njs_vm_t *vm, njs_vmcod frame = (njs_frame_t *) active; if (frame->exception.catch != NULL) { - ctx->await->pc = frame->exception.catch; + ctx->await->native.pc = frame->exception.catch; } else { - ctx->await->pc = ctx->pc; + ctx->await->native.pc = ctx->pc; } fulfilled->context = ctx; diff -r 6aaf1c14c1e7 -r 748eca39acb3 test/js/async_throw_in_try_after_await.t.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_throw_in_try_after_await.t.js Tue Nov 30 14:55:57 2021 +0000 @@ -0,0 +1,25 @@ +/*--- +includes: [] +flags: [async] +---*/ + +function pr(x) { + return new Promise(resolve => {resolve(x)}); +} + +async function add(x) { + try { + const a = await pr(x); + throw 'Oops'; + return a + b; + + } catch (e) { + return `catch: ${e.toString()}`; + } +} + +add(50) +.then( + v => assert.sameValue(v, 'catch: Oops'), + v => $DONOTEVALUATE(), +).then($DONE, $DONE);