From alexander.borisov at nginx.com Wed Sep 1 13:31:55 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Wed, 01 Sep 2021 13:31:55 +0000 Subject: [njs] Version bump. Message-ID: details: https://hg.nginx.org/njs/rev/5439e59a255e branches: changeset: 1697:5439e59a255e user: Alexander Borisov date: Wed Sep 01 16:31:08 2021 +0300 description: Version bump. diffstat: src/njs.h | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 65cd982d134f -r 5439e59a255e src/njs.h --- a/src/njs.h Tue Aug 31 14:04:23 2021 +0000 +++ b/src/njs.h Wed Sep 01 16:31:08 2021 +0300 @@ -11,7 +11,7 @@ #include -#define NJS_VERSION "0.6.2" +#define NJS_VERSION "0.7.0" #include /* STDOUT_FILENO, STDERR_FILENO */ From alexander.borisov at nginx.com Wed Sep 1 13:31:57 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Wed, 01 Sep 2021 13:31:57 +0000 Subject: [njs] Introduced Async/Await implementation. Message-ID: details: https://hg.nginx.org/njs/rev/92d10cd761e2 branches: changeset: 1698:92d10cd761e2 user: Alexander Borisov date: Wed Sep 01 16:31:08 2021 +0300 description: Introduced Async/Await implementation. This closes #419 issue on GitHub. diffstat: auto/sources | 1 + src/njs_async.c | 245 +++++++++++++++++++++++++++++++ src/njs_async.h | 30 +++ src/njs_builtin.c | 12 +- src/njs_disassembler.c | 3 + src/njs_function.c | 161 ++++++++++++++++++- src/njs_function.h | 80 +++++++-- src/njs_generator.c | 49 ++++++- src/njs_lexer.h | 3 + src/njs_main.h | 1 + src/njs_parser.c | 165 +++++++++++++++++--- src/njs_parser.h | 2 + src/njs_promise.c | 33 +--- src/njs_promise.h | 34 +++- src/njs_variable.c | 5 +- src/njs_vm.c | 4 +- src/njs_vm.h | 2 + src/njs_vmcode.c | 98 ++++++++++++- src/njs_vmcode.h | 9 + src/test/njs_unit_test.c | 76 +++++++++ test/js/async_await_add.js | 7 + test/js/async_await_blank.js | 5 + test/js/async_await_catch.js | 5 + test/js/async_await_finally.js | 6 + test/js/async_await_for.js | 23 ++ test/js/async_await_inline.js | 11 + test/js/async_await_reject.js | 5 + test/js/async_await_stages.js | 28 +++ test/js/async_await_throw.js | 12 + test/js/async_await_throw_async.js | 15 + test/js/async_await_throw_catch.js | 12 + test/js/async_await_throw_catch_async.js | 15 + test/js/async_await_try_catch.js | 19 ++ test/js/async_await_try_finally.js | 20 ++ test/js/async_await_try_throw.js | 14 + test/js/async_await_try_throw_catch.js | 17 ++ test/njs_expect_test.exp | 63 +++++++ 37 files changed, 1192 insertions(+), 98 deletions(-) diffs (truncated from 2023 to 1000 lines): diff -r 5439e59a255e -r 92d10cd761e2 auto/sources --- a/auto/sources Wed Sep 01 16:31:08 2021 +0300 +++ b/auto/sources Wed Sep 01 16:31:08 2021 +0300 @@ -60,6 +60,7 @@ NJS_LIB_SRCS=" \ src/njs_buffer.c \ src/njs_iterator.c \ src/njs_scope.c \ + src/njs_async.c \ " NJS_LIB_TEST_SRCS=" \ diff -r 5439e59a255e -r 92d10cd761e2 src/njs_async.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/njs_async.c Wed Sep 01 16:31:08 2021 +0300 @@ -0,0 +1,245 @@ + +/* + * Copyright (C) Alexander Borisov + * Copyright (C) Nginx, Inc. 
+ */ + +#include + + +static void +njs_async_context_free(njs_vm_t *vm, njs_native_frame_t *frame); + + +njs_int_t +njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval) +{ + njs_int_t ret; + njs_value_t ctor; + njs_async_ctx_t *ctx; + njs_native_frame_t *frame; + + frame = vm->top_frame; + frame->retval = retval; + + ctx = frame->function->context; + + njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]); + + ctx->capability = njs_promise_new_capability(vm, &ctor); + if (njs_slow_path(ctx->capability == NULL)) { + return NJS_ERROR; + } + + ret = njs_function_lambda_call(vm); + + if (ret == NJS_OK) { + ret = njs_function_call(vm, njs_function(&ctx->capability->resolve), + &njs_value_undefined, retval, 1, &vm->retval); + + } else if (ret == NJS_ERROR) { + if (njs_is_memory_error(vm, &vm->retval)) { + return NJS_ERROR; + } + + ret = njs_function_call(vm, njs_function(&ctx->capability->reject), + &njs_value_undefined, &vm->retval, 1, + &vm->retval); + } + + *retval = ctx->capability->promise; + + return ret; +} + + +njs_int_t +njs_await_fulfilled(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t unused) +{ + njs_int_t ret; + njs_value_t **cur_local, **cur_closures, **cur_temp, *value; + njs_frame_t *frame; + njs_async_ctx_t *ctx; + njs_native_frame_t *top, *async; + + ctx = vm->top_frame->function->context; + + value = njs_arg(args, nargs, 1); + if (njs_is_error(value)) { + goto failed; + } + + async = ctx->await; + + cur_local = vm->levels[NJS_LEVEL_LOCAL]; + cur_closures = vm->levels[NJS_LEVEL_CLOSURE]; + cur_temp = vm->levels[NJS_LEVEL_TEMP]; + top = vm->top_frame; + frame = vm->active_frame; + + vm->levels[NJS_LEVEL_LOCAL] = async->local; + vm->levels[NJS_LEVEL_CLOSURE] = njs_function_closures(async->function); + vm->levels[NJS_LEVEL_TEMP] = async->temp; + + vm->top_frame = async; + vm->active_frame = (njs_frame_t *) async; + + *njs_scope_value(vm, ctx->index) = *value; + vm->retval = *value; + + vm->top_frame->retval = &vm->retval; + + ret = njs_vmcode_interpreter(vm, async->pc); + + vm->levels[NJS_LEVEL_LOCAL] = cur_local; + vm->levels[NJS_LEVEL_CLOSURE] = cur_closures; + vm->levels[NJS_LEVEL_TEMP] = cur_temp; + + vm->top_frame = top; + vm->active_frame = frame; + + if (ret == NJS_OK) { + ret = njs_function_call(vm, njs_function(&ctx->capability->resolve), + &njs_value_undefined, &vm->retval, 1, &vm->retval); + + njs_async_context_free(vm, vm->top_frame); + + } else if (ret == NJS_ERROR) { + if (njs_is_memory_error(vm, &vm->retval)) { + return NJS_ERROR; + } + + value = &vm->retval; + + goto failed; + } + + return ret; + +failed: + + (void) njs_function_call(vm, njs_function(&ctx->capability->reject), + &njs_value_undefined, value, 1, &vm->retval); + + njs_async_context_free(vm, vm->top_frame); + + return NJS_ERROR; +} + + +njs_int_t +njs_await_rejected(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t unused) +{ + njs_value_t *value; + njs_async_ctx_t *ctx; + + ctx = vm->top_frame->function->context; + + value = njs_arg(args, nargs, 1); + + if (ctx->await->pc == ctx->pc) { + (void) njs_function_call(vm, njs_function(&ctx->capability->reject), + &njs_value_undefined, value, 1, &vm->retval); + + njs_async_context_free(vm, vm->top_frame); + + return NJS_ERROR; + } + + return njs_await_fulfilled(vm, args, nargs, unused); +} + + +static void +njs_async_context_free(njs_vm_t *vm, njs_native_frame_t *frame) +{ + njs_async_ctx_t *ctx; + + ctx = frame->function->context; + + njs_mp_free(vm->mem_pool, ctx->capability); + 
njs_mp_free(vm->mem_pool, ctx); + + frame->function->context = NULL; +} + + +static const njs_object_prop_t njs_async_constructor_properties[] = +{ + { + .type = NJS_PROPERTY, + .name = njs_string("name"), + .value = njs_string("AsyncFunction"), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("length"), + .value = njs_value(NJS_NUMBER, 1, 1.0), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("prototype"), + .value = njs_prop_handler(njs_object_prototype_create), + }, +}; + + +const njs_object_init_t njs_async_constructor_init = { + njs_async_constructor_properties, + njs_nitems(njs_async_constructor_properties), +}; + + +static const njs_object_prop_t njs_async_prototype_properties[] = +{ + { + .type = NJS_PROPERTY, + .name = njs_wellknown_symbol(NJS_SYMBOL_TO_STRING_TAG), + .value = njs_string("AsyncFunction"), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("constructor"), + .value = njs_prop_handler(njs_object_prototype_create_constructor), + .configurable = 1, + }, +}; + + +const njs_object_init_t njs_async_prototype_init = { + njs_async_prototype_properties, + njs_nitems(njs_async_prototype_properties), +}; + + +const njs_object_type_init_t njs_async_function_type_init = { + .constructor = njs_native_ctor(njs_function_constructor, 1, 1), + .constructor_props = &njs_async_constructor_init, + .prototype_props = &njs_async_prototype_init, + .prototype_value = { .object = { .type = NJS_OBJECT } }, +}; + + +const njs_object_prop_t njs_async_function_instance_properties[] = +{ + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("length"), + .value = njs_prop_handler(njs_function_instance_length), + .configurable = 1, + }, +}; + + +const njs_object_init_t njs_async_function_instance_init = { + njs_async_function_instance_properties, + njs_nitems(njs_async_function_instance_properties), +}; diff -r 5439e59a255e -r 92d10cd761e2 src/njs_async.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/njs_async.h Wed Sep 01 16:31:08 2021 +0300 @@ -0,0 +1,30 @@ + +/* + * Copyright (C) Alexander Borisov + * Copyright (C) Nginx, Inc. 
+ */ + +#ifndef _NJS_ASYNC_H_INCLUDED_ +#define _NJS_ASYNC_H_INCLUDED_ + + +typedef struct { + njs_promise_capability_t *capability; + njs_native_frame_t *await; + uintptr_t index; + u_char *pc; +} njs_async_ctx_t; + + +njs_int_t njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval); +njs_int_t njs_await_fulfilled(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t unused); +njs_int_t njs_await_rejected(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t unused); + + +extern const njs_object_type_init_t njs_async_function_type_init; +extern const njs_object_init_t njs_async_function_instance_init; + + +#endif /* _NJS_ASYNC_H_INCLUDED_ */ diff -r 5439e59a255e -r 92d10cd761e2 src/njs_builtin.c --- a/src/njs_builtin.c Wed Sep 01 16:31:08 2021 +0300 +++ b/src/njs_builtin.c Wed Sep 01 16:31:08 2021 +0300 @@ -74,6 +74,7 @@ static const njs_object_type_init_t *con &njs_symbol_type_init, &njs_string_type_init, &njs_function_type_init, + &njs_async_function_type_init, &njs_regexp_type_init, &njs_date_type_init, &njs_promise_type_init, @@ -181,6 +182,12 @@ njs_builtin_objects_create(njs_vm_t *vm) return NJS_ERROR; } + ret = njs_object_hash_init(vm, &shared->async_function_instance_hash, + &njs_async_function_instance_init); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; + } + ret = njs_object_hash_init(vm, &shared->arrow_instance_hash, &njs_arrow_instance_init); if (njs_slow_path(ret != NJS_OK)) { @@ -342,7 +349,7 @@ njs_builtin_objects_clone(njs_vm_t *vm, size_t size; njs_uint_t i; njs_object_t *object_prototype, *function_prototype, - *typed_array_prototype, *error_prototype, + *typed_array_prototype, *error_prototype, *async_prototype, *typed_array_ctor, *error_ctor; /* @@ -384,6 +391,9 @@ njs_builtin_objects_clone(njs_vm_t *vm, function_prototype = &vm->prototypes[NJS_OBJ_TYPE_FUNCTION].object; + async_prototype = &vm->prototypes[NJS_OBJ_TYPE_ASYNC_FUNCTION].object; + async_prototype->__proto__ = function_prototype; + for (i = NJS_OBJ_TYPE_OBJECT; i < NJS_OBJ_TYPE_NORMAL_MAX; i++) { vm->constructors[i].object.__proto__ = function_prototype; } diff -r 5439e59a255e -r 92d10cd761e2 src/njs_disassembler.c --- a/src/njs_disassembler.c Wed Sep 01 16:31:08 2021 +0300 +++ b/src/njs_disassembler.c Wed Sep 01 16:31:08 2021 +0300 @@ -156,6 +156,9 @@ static njs_code_name_t code_names[] = { { NJS_VMCODE_DEBUGGER, sizeof(njs_vmcode_debugger_t), njs_str("DEBUGGER ") }, + + { NJS_VMCODE_AWAIT, sizeof(njs_vmcode_await_t), + njs_str("AWAIT ") }, }; diff -r 5439e59a255e -r 92d10cd761e2 src/njs_function.c --- a/src/njs_function.c Wed Sep 01 16:31:08 2021 +0300 +++ b/src/njs_function.c Wed Sep 01 16:31:08 2021 +0300 @@ -9,9 +9,11 @@ njs_function_t * -njs_function_alloc(njs_vm_t *vm, njs_function_lambda_t *lambda) +njs_function_alloc(njs_vm_t *vm, njs_function_lambda_t *lambda, + njs_bool_t async) { size_t size; + njs_object_t *proto; njs_function_t *function; size = sizeof(njs_function_t) + lambda->nclosures * sizeof(njs_value_t *); @@ -34,12 +36,23 @@ njs_function_alloc(njs_vm_t *vm, njs_fun if (function->ctor) { function->object.shared_hash = vm->shared->function_instance_hash; + } else if (async) { + function->object.shared_hash = vm->shared->async_function_instance_hash; + } else { function->object.shared_hash = vm->shared->arrow_instance_hash; } - function->object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_FUNCTION].object; + if (async) { + proto = &vm->prototypes[NJS_OBJ_TYPE_ASYNC_FUNCTION].object; + + } else { + proto = 
&vm->prototypes[NJS_OBJ_TYPE_FUNCTION].object; + } + + function->object.__proto__ = proto; function->object.type = NJS_FUNCTION; + function->object.extensible = 1; return function; @@ -73,7 +86,8 @@ njs_vm_function_alloc(njs_vm_t *vm, njs_ njs_function_t * njs_function_value_copy(njs_vm_t *vm, njs_value_t *value) { - njs_function_t *function, *copy; + njs_function_t *function, *copy; + njs_object_type_t type; function = njs_function(value); @@ -87,9 +101,14 @@ njs_function_value_copy(njs_vm_t *vm, nj return NULL; } + type = njs_function_object_type(vm, function); + if (copy->ctor) { copy->object.shared_hash = vm->shared->function_instance_hash; + } else if (type == NJS_OBJ_TYPE_ASYNC_FUNCTION) { + copy->object.shared_hash = vm->shared->async_function_instance_hash; + } else { copy->object.shared_hash = vm->shared->arrow_instance_hash; } @@ -173,9 +192,10 @@ njs_function_name_set(njs_vm_t *vm, njs_ njs_function_t * njs_function_copy(njs_vm_t *vm, njs_function_t *function) { - size_t size, n; - njs_value_t **from, **to; - njs_function_t *copy; + size_t size, n; + njs_value_t **from, **to; + njs_function_t *copy; + njs_object_type_t type; n = (function->native) ? 0 : function->u.lambda->nclosures; @@ -187,7 +207,10 @@ njs_function_copy(njs_vm_t *vm, njs_func } *copy = *function; - copy->object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_FUNCTION].object; + + type = njs_function_object_type(vm, function); + + copy->object.__proto__ = &vm->prototypes[type].object; copy->object.shared = 0; if (n == 0) { @@ -404,6 +427,7 @@ njs_function_lambda_frame(njs_vm_t *vm, njs_value_t *value, *bound, **new, **temp; njs_frame_t *frame; njs_function_t *target; + njs_async_ctx_t *ctx; njs_native_frame_t *native_frame; njs_function_lambda_t *lambda; @@ -430,6 +454,17 @@ njs_function_lambda_frame(njs_vm_t *vm, lambda = target->u.lambda; } + if (njs_function_object_type(vm, target) == NJS_OBJ_TYPE_ASYNC_FUNCTION) { + ctx = njs_mp_alloc(vm->mem_pool, sizeof(njs_async_ctx_t)); + if (njs_slow_path(ctx == NULL)) { + njs_memory_error(vm); + return NJS_ERROR; + } + + ctx->await = NULL; + target->context = ctx; + } + args_count = function->args_offset + njs_max(nargs, lambda->nargs); value_count = args_count + njs_max(args_count, lambda->nlocal); @@ -724,6 +759,29 @@ njs_function_native_call(njs_vm_t *vm) } +njs_int_t +njs_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval) +{ + njs_native_frame_t *frame; + + frame = vm->top_frame; + frame->retval = retval; + + if (njs_function_object_type(vm, frame->function) + == NJS_OBJ_TYPE_ASYNC_FUNCTION) + { + return njs_async_function_frame_invoke(vm, retval); + } + + if (frame->native) { + return njs_function_native_call(vm); + + } else { + return njs_function_lambda_call(vm); + } +} + + void njs_function_frame_free(njs_vm_t *vm, njs_native_frame_t *native) { @@ -745,6 +803,69 @@ njs_function_frame_free(njs_vm_t *vm, nj njs_int_t +njs_function_frame_save(njs_vm_t *vm, njs_native_frame_t *native, u_char *pc) +{ + size_t value_count, n; + njs_value_t *start, *end, *p, **new, *value, **local; + njs_function_t *function; + njs_native_frame_t *active; + + active = &vm->active_frame->native; + value_count = njs_function_frame_value_count(active); + + function = active->function; + + new = (njs_value_t **) ((u_char *) native + NJS_FRAME_SIZE); + value = (njs_value_t *) (new + value_count + + function->u.lambda->temp); + + *native = *active; + + native->arguments = value; + native->arguments_offset = value + (function->args_offset - 1); + native->local = new + 
njs_function_frame_args_count(active); + native->temp = new + value_count; + native->pc = pc; + + start = njs_function_frame_values(active, &end); + p = native->arguments; + + while (start < end) { + *p = *start++; + *new++ = p++; + } + + /* Move all arguments. */ + + p = native->arguments; + local = native->local + function->args_offset; + + for (n = 0; n < function->args_count; n++) { + if (!njs_is_valid(p)) { + njs_set_undefined(p); + } + + *local++ = p++; + } + + return NJS_OK; +} + + +njs_object_type_t +njs_function_object_type(njs_vm_t *vm, njs_function_t *function) +{ + if (function->object.shared_hash.slot + == vm->shared->async_function_instance_hash.slot) + { + return NJS_OBJ_TYPE_ASYNC_FUNCTION; + } + + return NJS_OBJ_TYPE_FUNCTION; +} + + +njs_int_t njs_function_capture_closure(njs_vm_t *vm, njs_function_t *function, njs_function_lambda_t *lambda) { @@ -970,9 +1091,9 @@ njs_function_prototype_create(njs_vm_t * } -static njs_int_t +njs_int_t njs_function_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, - njs_index_t unused) + njs_index_t async) { njs_chb_t chain; njs_int_t ret; @@ -997,13 +1118,27 @@ njs_function_constructor(njs_vm_t *vm, n NJS_TOKEN_ILLEGAL }; + static const njs_token_type_t safe_ast_async[] = { + NJS_TOKEN_END, + NJS_TOKEN_ASYNC_FUNCTION_EXPRESSION, + NJS_TOKEN_STATEMENT, + NJS_TOKEN_RETURN, + NJS_TOKEN_THIS, + NJS_TOKEN_ILLEGAL + }; + if (!vm->options.unsafe && nargs != 2) { goto fail; } njs_chb_init(&chain, vm->mem_pool); - njs_chb_append_literal(&chain, "(function("); + if (async) { + njs_chb_append_literal(&chain, "(async function("); + + } else { + njs_chb_append_literal(&chain, "(function("); + } for (i = 1; i < nargs - 1; i++) { ret = njs_value_to_chain(vm, &chain, njs_argument(args, i)); @@ -1055,7 +1190,7 @@ njs_function_constructor(njs_vm_t *vm, n */ node = parser.node; - type = &safe_ast[0]; + type = (async) ? 
&safe_ast_async[0] : &safe_ast[0]; for (; *type != NJS_TOKEN_ILLEGAL; type++, node = node->right) { if (node == NULL) { @@ -1097,7 +1232,7 @@ njs_function_constructor(njs_vm_t *vm, n lambda = ((njs_vmcode_function_t *) generator.code_start)->lambda; - function = njs_function_alloc(vm, lambda); + function = njs_function_alloc(vm, lambda, (njs_bool_t) async); if (njs_slow_path(function == NULL)) { return NJS_ERROR; } @@ -1147,7 +1282,7 @@ const njs_object_init_t njs_function_co }; -static njs_int_t +njs_int_t njs_function_instance_length(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval) { diff -r 5439e59a255e -r 92d10cd761e2 src/njs_function.h --- a/src/njs_function.h Wed Sep 01 16:31:08 2021 +0300 +++ b/src/njs_function.h Wed Sep 01 16:31:08 2021 +0300 @@ -85,7 +85,8 @@ struct njs_frame_s { }; -njs_function_t *njs_function_alloc(njs_vm_t *vm, njs_function_lambda_t *lambda); +njs_function_t *njs_function_alloc(njs_vm_t *vm, njs_function_lambda_t *lambda, + njs_bool_t async); njs_function_t *njs_function_value_copy(njs_vm_t *vm, njs_value_t *value); njs_int_t njs_function_name_set(njs_vm_t *vm, njs_function_t *function, njs_value_t *name, const char *prefix); @@ -96,6 +97,10 @@ njs_int_t njs_function_rest_parameters_i njs_native_frame_t *frame); njs_int_t njs_function_prototype_create(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t *value, njs_value_t *setval, njs_value_t *retval); +njs_int_t njs_function_constructor(njs_vm_t *vm, njs_value_t *args, + njs_uint_t nargs, njs_index_t unused); +njs_int_t njs_function_instance_length(njs_vm_t *vm, njs_object_prop_t *prop, + njs_value_t *value, njs_value_t *setval, njs_value_t *retval); njs_int_t njs_eval_function(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused); njs_int_t njs_function_native_frame(njs_vm_t *vm, njs_function_t *function, @@ -111,10 +116,15 @@ njs_int_t njs_function_lambda_call(njs_v njs_int_t njs_function_native_call(njs_vm_t *vm); njs_native_frame_t *njs_function_frame_alloc(njs_vm_t *vm, size_t size); void njs_function_frame_free(njs_vm_t *vm, njs_native_frame_t *frame); +njs_int_t njs_function_frame_save(njs_vm_t *vm, njs_native_frame_t *native, + u_char *pc); +njs_object_type_t njs_function_object_type(njs_vm_t *vm, + njs_function_t *function); njs_int_t njs_function_capture_closure(njs_vm_t *vm, njs_function_t *function, njs_function_lambda_t *lambda); njs_int_t njs_function_capture_global_closures(njs_vm_t *vm, njs_function_t *function); +njs_int_t njs_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval); njs_inline njs_function_lambda_t * @@ -162,23 +172,6 @@ njs_function_previous_frame(njs_native_f njs_inline njs_int_t -njs_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval) -{ - njs_native_frame_t *frame; - - frame = vm->top_frame; - frame->retval = retval; - - if (frame->native) { - return njs_function_native_call(vm); - - } else { - return njs_function_lambda_call(vm); - } -} - - -njs_inline njs_int_t njs_function_call(njs_vm_t *vm, njs_function_t *function, const njs_value_t *this, const njs_value_t *args, njs_uint_t nargs, njs_value_t *retval) @@ -210,6 +203,57 @@ njs_function_closures(const njs_function } +njs_inline size_t +njs_function_frame_size(njs_native_frame_t *frame) +{ + size_t size; + uintptr_t start; + + start = (uintptr_t) ((u_char *) frame + NJS_FRAME_SIZE); + size = ((uintptr_t) frame->arguments - start) / sizeof(njs_value_t *); + + return NJS_FRAME_SIZE + (size * sizeof(njs_value_t *)) + + (size * sizeof(njs_value_t)); 
+} + + +njs_inline size_t +njs_function_frame_args_count(njs_native_frame_t *frame) +{ + uintptr_t start; + + start = (uintptr_t) ((u_char *) frame + NJS_FRAME_SIZE); + + return ((uintptr_t) frame->local - start) / sizeof(njs_value_t *); +} + + +njs_inline size_t +njs_function_frame_value_count(njs_native_frame_t *frame) +{ + uintptr_t start; + + start = (uintptr_t) ((u_char *) frame + NJS_FRAME_SIZE); + + return ((uintptr_t) frame->temp - start) / sizeof(njs_value_t *); +} + + +njs_inline njs_value_t * +njs_function_frame_values(njs_native_frame_t *frame, njs_value_t **end) +{ + size_t count; + uintptr_t start; + + start = (uintptr_t) ((u_char *) frame + NJS_FRAME_SIZE); + count = ((uintptr_t) frame->arguments - start) / sizeof(njs_value_t *); + + *end = frame->arguments + count; + + return frame->arguments; +} + + extern const njs_object_type_init_t njs_function_type_init; extern const njs_object_init_t njs_function_instance_init; extern const njs_object_init_t njs_arrow_instance_init; diff -r 5439e59a255e -r 92d10cd761e2 src/njs_generator.c --- a/src/njs_generator.c Wed Sep 01 16:31:08 2021 +0300 +++ b/src/njs_generator.c Wed Sep 01 16:31:08 2021 +0300 @@ -325,6 +325,10 @@ static njs_int_t njs_generate_export_sta njs_generator_t *generator, njs_parser_node_t *node); static njs_int_t njs_generate_export_statement_end(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node); +static njs_int_t njs_generate_await(njs_vm_t *vm, njs_generator_t *generator, + njs_parser_node_t *node); +static njs_int_t njs_generate_await_end(njs_vm_t *vm, + njs_generator_t *generator, njs_parser_node_t *node); static njs_int_t njs_generate_wo_dest(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node); static njs_int_t njs_generate_wo_dest_after(njs_vm_t *vm, @@ -658,6 +662,7 @@ njs_generate(njs_vm_t *vm, njs_generator return njs_generate_array(vm, generator, node); case NJS_TOKEN_FUNCTION_EXPRESSION: + case NJS_TOKEN_ASYNC_FUNCTION_EXPRESSION: return njs_generate_function_expression(vm, generator, node); case NJS_TOKEN_FUNCTION: @@ -679,6 +684,7 @@ njs_generate(njs_vm_t *vm, njs_generator return njs_generate_name(vm, generator, node); case NJS_TOKEN_FUNCTION_DECLARATION: + case NJS_TOKEN_ASYNC_FUNCTION_DECLARATION: return njs_generate_function_declaration(vm, generator, node); case NJS_TOKEN_FUNCTION_CALL: @@ -702,6 +708,9 @@ njs_generate(njs_vm_t *vm, njs_generator case NJS_TOKEN_EXPORT: return njs_generate_export_statement(vm, generator, node); + case NJS_TOKEN_AWAIT: + return njs_generate_await(vm, generator, node); + default: njs_thread_log_debug("unknown token: %d", node->token); njs_internal_error(vm, "Generator failed: unknown token"); @@ -3055,6 +3064,7 @@ njs_generate_function_expression(njs_vm_ njs_generate_code(generator, njs_vmcode_function_t, function, NJS_VMCODE_FUNCTION, 1, node); function->lambda = lambda; + function->async = (node->token_type == NJS_TOKEN_ASYNC_FUNCTION_EXPRESSION); node->index = njs_generate_object_dest_index(vm, generator, node); if (njs_slow_path(node->index == NJS_INDEX_ERROR)) { @@ -3090,6 +3100,7 @@ njs_generate_function(njs_vm_t *vm, njs_ njs_generate_code(generator, njs_vmcode_function_t, function, NJS_VMCODE_FUNCTION, 1, node); function->lambda = lambda; + function->async = 0; node->index = njs_generate_object_dest_index(vm, generator, node); if (njs_slow_path(node->index == NJS_INDEX_ERROR)) { @@ -3555,6 +3566,7 @@ njs_generate_function_declaration(njs_vm njs_parser_node_t *node) { njs_int_t ret; + njs_bool_t async; njs_variable_t *var; 
njs_function_t *function; njs_function_lambda_t *lambda; @@ -3587,7 +3599,8 @@ njs_generate_function_declaration(njs_vm return ret; } - function = njs_function_alloc(vm, lambda); + async = (node->token_type == NJS_TOKEN_ASYNC_FUNCTION_DECLARATION); + function = njs_function_alloc(vm, lambda, async); if (njs_slow_path(function == NULL)) { return NJS_ERROR; } @@ -4656,6 +4669,40 @@ njs_generate_export_statement_end(njs_vm static njs_int_t +njs_generate_await(njs_vm_t *vm, njs_generator_t *generator, + njs_parser_node_t *node) +{ + njs_generator_next(generator, njs_generate, node->right); + + return njs_generator_after(vm, generator, + njs_queue_first(&generator->stack), node, + njs_generate_await_end, NULL, 0); +} + + +static njs_int_t +njs_generate_await_end(njs_vm_t *vm, njs_generator_t *generator, + njs_parser_node_t *node) +{ + njs_index_t index; + njs_vmcode_await_t *code; + + index = node->right->index; + + if (njs_slow_path(index == NJS_INDEX_ERROR)) { + return NJS_ERROR; + } + + njs_generate_code(generator, njs_vmcode_await_t, code, + NJS_VMCODE_AWAIT, 1, node); + code->retval = index; + node->index = index; + + return njs_generator_stack_pop(vm, generator, NULL); +} + + +static njs_int_t njs_generate_wo_dest(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node) { diff -r 5439e59a255e -r 92d10cd761e2 src/njs_lexer.h --- a/src/njs_lexer.h Wed Sep 01 16:31:08 2021 +0300 +++ b/src/njs_lexer.h Wed Sep 01 16:31:08 2021 +0300 @@ -148,6 +148,9 @@ typedef enum { NJS_TOKEN_ARGUMENT, NJS_TOKEN_RETURN, + NJS_TOKEN_ASYNC_FUNCTION_DECLARATION, + NJS_TOKEN_ASYNC_FUNCTION_EXPRESSION, + NJS_TOKEN_REGEXP, NJS_TOKEN_EXTERNAL, diff -r 5439e59a255e -r 92d10cd761e2 src/njs_main.h --- a/src/njs_main.h Wed Sep 01 16:31:08 2021 +0300 +++ b/src/njs_main.h Wed Sep 01 16:31:08 2021 +0300 @@ -73,6 +73,7 @@ #include #include #include +#include #include #include diff -r 5439e59a255e -r 92d10cd761e2 src/njs_parser.c --- a/src/njs_parser.c Wed Sep 01 16:31:08 2021 +0300 +++ b/src/njs_parser.c Wed Sep 01 16:31:08 2021 +0300 @@ -132,6 +132,10 @@ static njs_int_t njs_parser_unary_expres njs_lexer_token_t *token, njs_queue_link_t *current); static njs_int_t njs_parser_unary_expression_next(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current); +static njs_int_t njs_parser_await(njs_parser_t *parser, + njs_lexer_token_t *token, njs_queue_link_t *current); +static njs_int_t njs_parser_await_after(njs_parser_t *parser, + njs_lexer_token_t *token, njs_queue_link_t *current); static njs_int_t njs_parser_exponentiation_expression(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current); @@ -733,14 +737,6 @@ njs_parser_async_generator_expression(nj static njs_int_t -njs_parser_async_function_expression(njs_parser_t *parser, - njs_lexer_token_t *token, njs_queue_link_t *current) -{ - return njs_parser_not_supported(parser, token); -} - - -static njs_int_t njs_parser_generator_declaration(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current) { @@ -757,15 +753,11 @@ njs_parser_class_declaration(njs_parser_ static njs_int_t -njs_parser_function_or_generator(njs_parser_t *parser, - njs_lexer_token_t *token, njs_queue_link_t *current) +njs_parser_function_or_generator_handler(njs_parser_t *parser, + njs_lexer_token_t *token, njs_queue_link_t *current, njs_bool_t is_async) { njs_parser_node_t *node, *cur; - if (token->type != NJS_TOKEN_FUNCTION) { - return NJS_DECLINED; - } - cur = parser->node; if (token->type == NJS_TOKEN_MULTIPLICATION) { @@ -773,7 +765,13 
@@ njs_parser_function_or_generator(njs_par njs_parser_next(parser, njs_parser_generator_declaration); } else { - node = njs_parser_node_new(parser, NJS_TOKEN_FUNCTION_DECLARATION); + if (is_async) { + node = njs_parser_node_new(parser, + NJS_TOKEN_ASYNC_FUNCTION_DECLARATION); + } else { + node = njs_parser_node_new(parser, NJS_TOKEN_FUNCTION_DECLARATION); + } + if (node == NULL) { return NJS_ERROR; } @@ -791,6 +789,18 @@ njs_parser_function_or_generator(njs_par static njs_int_t +njs_parser_function_or_generator(njs_parser_t *parser, + njs_lexer_token_t *token, njs_queue_link_t *current) +{ + if (token->type != NJS_TOKEN_FUNCTION) { + return NJS_DECLINED; + } + + return njs_parser_function_or_generator_handler(parser, token, current, 0); +} + + +static njs_int_t njs_parser_async_function_or_generator(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current) { @@ -807,13 +817,9 @@ njs_parser_async_function_or_generator(n return NJS_DECLINED; } - if (token->type == NJS_TOKEN_MULTIPLICATION) { - njs_parser_next(parser, njs_parser_generator_declaration); - } else { - njs_parser_next(parser, njs_parser_async_function_expression); - } - - return NJS_OK; + njs_lexer_consume_token(parser->lexer, 1); + + return njs_parser_function_or_generator_handler(parser, token, current, 1); } @@ -1078,6 +1084,8 @@ njs_parser_primary_expression_test(njs_p goto reference; } + njs_lexer_consume_token(parser->lexer, 1); From alexander.borisov at nginx.com Wed Sep 1 18:26:41 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Wed, 01 Sep 2021 18:26:41 +0000 Subject: [njs] Fixed order of code execution after await in try block. Message-ID: details: https://hg.nginx.org/njs/rev/4b018107386d branches: changeset: 1699:4b018107386d user: Alexander Borisov date: Wed Sep 01 21:25:10 2021 +0300 description: Fixed order of code execution after await in try block. The bug was introduced in 92d10cd761e2. 
diffstat: src/njs_async.c | 4 +++- test/js/async_await_try_resolve.js | 15 +++++++++++++++ test/njs_expect_test.exp | 2 ++ 3 files changed, 20 insertions(+), 1 deletions(-) diffs (49 lines): diff -r 92d10cd761e2 -r 4b018107386d src/njs_async.c --- a/src/njs_async.c Wed Sep 01 16:31:08 2021 +0300 +++ b/src/njs_async.c Wed Sep 01 21:25:10 2021 +0300 @@ -90,7 +90,7 @@ njs_await_fulfilled(njs_vm_t *vm, njs_va vm->top_frame->retval = &vm->retval; - ret = njs_vmcode_interpreter(vm, async->pc); + ret = njs_vmcode_interpreter(vm, ctx->pc); vm->levels[NJS_LEVEL_LOCAL] = cur_local; vm->levels[NJS_LEVEL_CLOSURE] = cur_closures; @@ -148,6 +148,8 @@ njs_await_rejected(njs_vm_t *vm, njs_val return NJS_ERROR; } + ctx->pc = ctx->await->pc; + return njs_await_fulfilled(vm, args, nargs, unused); } diff -r 92d10cd761e2 -r 4b018107386d test/js/async_await_try_resolve.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_await_try_resolve.js Wed Sep 01 21:25:10 2021 +0300 @@ -0,0 +1,15 @@ +async function af() { + let key; + + try { + key = await Promise.resolve("key"); + key += ": resolve"; + + } catch (e) { + key += ": exception"; + } + + return key; +}; + +af().then(v => console.log(v)); diff -r 92d10cd761e2 -r 4b018107386d test/njs_expect_test.exp --- a/test/njs_expect_test.exp Wed Sep 01 16:31:08 2021 +0300 +++ b/test/njs_expect_test.exp Wed Sep 01 21:25:10 2021 +0300 @@ -1171,3 +1171,5 @@ njs_run {"./test/js/async_await_try_thro finally end" +njs_run {"./test/js/async_await_try_resolve.js"} \ +"key: resolve" From noamc at qwilt.com Thu Sep 2 16:16:55 2021 From: noamc at qwilt.com (Noam Cvikel) Date: Thu, 2 Sep 2021 19:16:55 +0300 Subject: cache: move open to thread pool Message-ID: Hello, Didn't see a way to get into the thread after joining the list, but hopefully this still replies correctly. First of all thank you for the prompt response and updated patch. We implemented the updated async open patch, and started testing it in our lab. The static module part of the patch isn't updated yet, so we didn't apply it. While we're almost not using the static module, we'd still gladly test it and share a feedback, once the patch is updated. The patch itself is looking good. We've not encountered any errors or issues, and we see a significant performance improvement. We're working with spinning disks (HDD), and before applying the patch we would sometimes see the main thread get stuck on open IO anywhere between 2-15 seconds. After applying the patch, we see that an iteration of the worker's main loop doesn't take more than 10ms! Significantly reducing the penalty for other transactions. We will keep monitoring the changes and report. We would really like to see this patch integrated into the official branch. If we can do anything else to help make this happen - please let us know. Best, *Noam Cvikel* Qwilt | | noamc at qwilt.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From alexander.borisov at nginx.com Thu Sep 2 16:34:57 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Thu, 02 Sep 2021 16:34:57 +0000 Subject: [njs] Fixed async ctx erasing when a function is called multiple times. Message-ID: details: https://hg.nginx.org/njs/rev/3a7ffe641a77 branches: changeset: 1700:3a7ffe641a77 user: Alexander Borisov date: Thu Sep 02 19:32:27 2021 +0300 description: Fixed async ctx erasing when a function is called multiple times. The bug was introduced in 92d10cd761e2. 
diffstat: src/njs_async.c | 46 ++++++++++++++++++++------------------- src/njs_function.c | 12 ---------- src/njs_value.h | 1 + src/njs_vmcode.c | 14 ++++++++++- test/js/async_await_many_call.js | 30 ++++++++++++++++++++++++++ test/njs_expect_test.exp | 3 ++ 6 files changed, 70 insertions(+), 36 deletions(-) diffs (260 lines): diff -r 4b018107386d -r 3a7ffe641a77 src/njs_async.c --- a/src/njs_async.c Wed Sep 01 21:25:10 2021 +0300 +++ b/src/njs_async.c Thu Sep 02 19:32:27 2021 +0300 @@ -8,33 +8,33 @@ static void -njs_async_context_free(njs_vm_t *vm, njs_native_frame_t *frame); +njs_async_context_free(njs_vm_t *vm, njs_async_ctx_t *ctx); njs_int_t njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval) { - njs_int_t ret; - njs_value_t ctor; - njs_async_ctx_t *ctx; - njs_native_frame_t *frame; + njs_int_t ret; + njs_value_t ctor; + njs_native_frame_t *frame; + njs_promise_capability_t *capability; frame = vm->top_frame; frame->retval = retval; - ctx = frame->function->context; - njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]); - ctx->capability = njs_promise_new_capability(vm, &ctor); - if (njs_slow_path(ctx->capability == NULL)) { + capability = njs_promise_new_capability(vm, &ctor); + if (njs_slow_path(capability == NULL)) { return NJS_ERROR; } + frame->function->context = capability; + ret = njs_function_lambda_call(vm); if (ret == NJS_OK) { - ret = njs_function_call(vm, njs_function(&ctx->capability->resolve), + ret = njs_function_call(vm, njs_function(&capability->resolve), &njs_value_undefined, retval, 1, &vm->retval); } else if (ret == NJS_ERROR) { @@ -42,12 +42,12 @@ njs_async_function_frame_invoke(njs_vm_t return NJS_ERROR; } - ret = njs_function_call(vm, njs_function(&ctx->capability->reject), + ret = njs_function_call(vm, njs_function(&capability->reject), &njs_value_undefined, &vm->retval, 1, &vm->retval); } - *retval = ctx->capability->promise; + *retval = capability->promise; return ret; } @@ -60,6 +60,7 @@ njs_await_fulfilled(njs_vm_t *vm, njs_va njs_int_t ret; njs_value_t **cur_local, **cur_closures, **cur_temp, *value; njs_frame_t *frame; + njs_function_t *function; njs_async_ctx_t *ctx; njs_native_frame_t *top, *async; @@ -71,6 +72,7 @@ njs_await_fulfilled(njs_vm_t *vm, njs_va } async = ctx->await; + function = async->function; cur_local = vm->levels[NJS_LEVEL_LOCAL]; cur_closures = vm->levels[NJS_LEVEL_CLOSURE]; @@ -90,8 +92,14 @@ njs_await_fulfilled(njs_vm_t *vm, njs_va vm->top_frame->retval = &vm->retval; + function->context = ctx->capability; + function->await = ctx; + ret = njs_vmcode_interpreter(vm, ctx->pc); + function->context = NULL; + function->await = NULL; + vm->levels[NJS_LEVEL_LOCAL] = cur_local; vm->levels[NJS_LEVEL_CLOSURE] = cur_closures; vm->levels[NJS_LEVEL_TEMP] = cur_temp; @@ -103,7 +111,7 @@ njs_await_fulfilled(njs_vm_t *vm, njs_va ret = njs_function_call(vm, njs_function(&ctx->capability->resolve), &njs_value_undefined, &vm->retval, 1, &vm->retval); - njs_async_context_free(vm, vm->top_frame); + njs_async_context_free(vm, ctx); } else if (ret == NJS_ERROR) { if (njs_is_memory_error(vm, &vm->retval)) { @@ -122,7 +130,7 @@ failed: (void) njs_function_call(vm, njs_function(&ctx->capability->reject), &njs_value_undefined, value, 1, &vm->retval); - njs_async_context_free(vm, vm->top_frame); + njs_async_context_free(vm, ctx); return NJS_ERROR; } @@ -143,7 +151,7 @@ njs_await_rejected(njs_vm_t *vm, njs_val (void) njs_function_call(vm, njs_function(&ctx->capability->reject), &njs_value_undefined, value, 1, &vm->retval); - 
njs_async_context_free(vm, vm->top_frame); + njs_async_context_free(vm, ctx); return NJS_ERROR; } @@ -155,16 +163,10 @@ njs_await_rejected(njs_vm_t *vm, njs_val static void -njs_async_context_free(njs_vm_t *vm, njs_native_frame_t *frame) +njs_async_context_free(njs_vm_t *vm, njs_async_ctx_t *ctx) { - njs_async_ctx_t *ctx; - - ctx = frame->function->context; - njs_mp_free(vm->mem_pool, ctx->capability); njs_mp_free(vm->mem_pool, ctx); - - frame->function->context = NULL; } diff -r 4b018107386d -r 3a7ffe641a77 src/njs_function.c --- a/src/njs_function.c Wed Sep 01 21:25:10 2021 +0300 +++ b/src/njs_function.c Thu Sep 02 19:32:27 2021 +0300 @@ -427,7 +427,6 @@ njs_function_lambda_frame(njs_vm_t *vm, njs_value_t *value, *bound, **new, **temp; njs_frame_t *frame; njs_function_t *target; - njs_async_ctx_t *ctx; njs_native_frame_t *native_frame; njs_function_lambda_t *lambda; @@ -454,17 +453,6 @@ njs_function_lambda_frame(njs_vm_t *vm, lambda = target->u.lambda; } - if (njs_function_object_type(vm, target) == NJS_OBJ_TYPE_ASYNC_FUNCTION) { - ctx = njs_mp_alloc(vm->mem_pool, sizeof(njs_async_ctx_t)); - if (njs_slow_path(ctx == NULL)) { - njs_memory_error(vm); - return NJS_ERROR; - } - - ctx->await = NULL; - target->context = ctx; - } - args_count = function->args_offset + njs_max(nargs, lambda->nargs); value_count = args_count + njs_max(args_count, lambda->nlocal); diff -r 4b018107386d -r 3a7ffe641a77 src/njs_value.h --- a/src/njs_value.h Wed Sep 01 21:25:10 2021 +0300 +++ b/src/njs_value.h Thu Sep 02 19:32:27 2021 +0300 @@ -284,6 +284,7 @@ struct njs_function_s { } u; void *context; + void *await; njs_value_t *bound; }; diff -r 4b018107386d -r 3a7ffe641a77 src/njs_vmcode.c --- a/src/njs_vmcode.c Wed Sep 01 21:25:10 2021 +0300 +++ b/src/njs_vmcode.c Thu Sep 02 19:32:27 2021 +0300 @@ -1827,7 +1827,6 @@ njs_vmcode_await(njs_vm_t *vm, njs_vmcod njs_native_frame_t *active; active = &vm->active_frame->native; - ctx = active->function->context; value = njs_scope_valid_value(vm, await->retval); if (njs_slow_path(value == NULL)) { @@ -1841,7 +1840,15 @@ njs_vmcode_await(njs_vm_t *vm, njs_vmcod return NJS_ERROR; } - if (ctx->await == NULL) { + ctx = active->function->await; + + if (ctx == NULL) { + ctx = njs_mp_alloc(vm->mem_pool, sizeof(njs_async_ctx_t)); + if (njs_slow_path(ctx == NULL)) { + njs_memory_error(vm); + return NJS_ERROR; + } + size = njs_function_frame_size(active); fulfilled = njs_promise_create_function(vm, size); @@ -1850,6 +1857,9 @@ njs_vmcode_await(njs_vm_t *vm, njs_vmcod } ctx->await = fulfilled->context; + ctx->capability = active->function->context; + + active->function->context = NULL; ret = njs_function_frame_save(vm, ctx->await, NULL); if (njs_slow_path(ret != NJS_OK)) { diff -r 4b018107386d -r 3a7ffe641a77 test/js/async_await_many_call.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/async_await_many_call.js Thu Sep 02 19:32:27 2021 +0300 @@ -0,0 +1,30 @@ +async function test(name) { + let k1, k2; + + switch (name) { + case "First": + k1 = await Promise.resolve("SUN"); + k2 = await Promise.resolve("MOON"); + break; + + case "Second": + k1 = await Promise.resolve("CAT"); + k2 = await Promise.resolve("MOUSE"); + break; + + case "Third": + k1 = await Promise.resolve("MAN"); + k2 = await Promise.resolve("WOMAN"); + break; + + default: + break; + } + + return `${name}: ${k1} ${k2}`; +}; + +Promise.all(['First', 'Second', 'Third'].map(v => test(v))) +.then(results => { + console.log(results) +}) diff -r 4b018107386d -r 3a7ffe641a77 test/njs_expect_test.exp --- 
a/test/njs_expect_test.exp Wed Sep 01 21:25:10 2021 +0300 +++ b/test/njs_expect_test.exp Thu Sep 02 19:32:27 2021 +0300 @@ -1173,3 +1173,6 @@ end" njs_run {"./test/js/async_await_try_resolve.js"} \ "key: resolve" + +njs_run {"./test/js/async_await_many_call.js"} \ +"\\\['First: SUN MOON','Second: CAT MOUSE','Third: MAN WOMAN']" From alexander.borisov at nginx.com Thu Sep 2 16:34:59 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Thu, 02 Sep 2021 16:34:59 +0000 Subject: [njs] Parser: fixed flag setting when parsing function arguments. Message-ID: details: https://hg.nginx.org/njs/rev/cd87eaf03e4f branches: changeset: 1701:cd87eaf03e4f user: Alexander Borisov date: Thu Sep 02 19:32:34 2021 +0300 description: Parser: fixed flag setting when parsing function arguments. The bug was introduced in 92d10cd761e2. diffstat: src/njs_parser.c | 8 ++++---- src/njs_parser.h | 2 +- src/test/njs_unit_test.c | 6 ++++++ 3 files changed, 11 insertions(+), 5 deletions(-) diffs (72 lines): diff -r 3a7ffe641a77 -r cd87eaf03e4f src/njs_parser.c --- a/src/njs_parser.c Thu Sep 02 19:32:27 2021 +0300 +++ b/src/njs_parser.c Thu Sep 02 19:32:34 2021 +0300 @@ -2723,13 +2723,13 @@ njs_parser_arguments(njs_parser_t *parse * ArgumentList , ) */ - parser->in_args = 1; - if (token->type == NJS_TOKEN_CLOSE_PARENTHESIS) { njs_lexer_consume_token(parser->lexer, 1); return njs_parser_stack_pop(parser); } + parser->scope->in_args = 1; + njs_parser_next(parser, njs_parser_argument_list); return njs_parser_after(parser, current, NULL, 1, @@ -2741,7 +2741,7 @@ static njs_int_t njs_parser_parenthesis_or_comma(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current) { - parser->in_args = 0; + parser->scope->in_args = 0; if (token->type == NJS_TOKEN_CLOSE_PARENTHESIS) { njs_lexer_consume_token(parser->lexer, 1); @@ -3479,7 +3479,7 @@ njs_parser_await(njs_parser_t *parser, n node = parser->node; - if (parser->in_args) { + if (scope->in_args) { njs_parser_syntax_error(parser, "await in arguments not supported"); return NJS_ERROR; } diff -r 3a7ffe641a77 -r cd87eaf03e4f src/njs_parser.h --- a/src/njs_parser.h Thu Sep 02 19:32:27 2021 +0300 +++ b/src/njs_parser.h Thu Sep 02 19:32:34 2021 +0300 @@ -31,6 +31,7 @@ struct njs_parser_scope_s { uint8_t arrow_function; uint8_t dest_disable; uint8_t async; + uint8_t in_args; }; @@ -83,7 +84,6 @@ struct njs_parser_s { uintptr_t undefined_id; njs_bool_t strict_semicolon; uint32_t line; - njs_bool_t in_args; }; diff -r 3a7ffe641a77 -r cd87eaf03e4f src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Thu Sep 02 19:32:27 2021 +0300 +++ b/src/test/njs_unit_test.c Thu Sep 02 19:32:34 2021 +0300 @@ -20504,6 +20504,12 @@ static njs_unit_test_t njs_test[] = "(async function() {f(await 111)})"), njs_str("SyntaxError: await in arguments not supported in 1") }, + { njs_str("Promise.all([async () => [await x('X')]])"), + njs_str("[object Promise]") }, + + { njs_str("async () => [await x(1)(),]; async () => [await x(1)()]"), + njs_str("[object AsyncFunction]") }, + { njs_str("function f(a, b, c) {}" "(async function() {f(1, 'a', await 111)})"), njs_str("SyntaxError: await in arguments not supported in 1") }, From alexander.borisov at nginx.com Thu Sep 2 16:35:01 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Thu, 02 Sep 2021 16:35:01 +0000 Subject: [njs] Parser: removed dead store in await; Message-ID: details: https://hg.nginx.org/njs/rev/f2410c8b80d9 branches: changeset: 1702:f2410c8b80d9 user: Alexander Borisov date: Thu Sep 02 19:32:52 2021 +0300 
description: Parser: removed dead store in await; The bug was introduced in 92d10cd761e2. diffstat: src/njs_parser.c | 2 -- 1 files changed, 0 insertions(+), 2 deletions(-) diffs (12 lines): diff -r cd87eaf03e4f -r f2410c8b80d9 src/njs_parser.c --- a/src/njs_parser.c Thu Sep 02 19:32:34 2021 +0300 +++ b/src/njs_parser.c Thu Sep 02 19:32:52 2021 +0300 @@ -3477,8 +3477,6 @@ njs_parser_await(njs_parser_t *parser, n return NJS_ERROR; } - node = parser->node; - if (scope->in_args) { njs_parser_syntax_error(parser, "await in arguments not supported"); return NJS_ERROR; From alexander.borisov at nginx.com Thu Sep 2 16:35:03 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Thu, 02 Sep 2021 16:35:03 +0000 Subject: [njs] Parser: catching errors parsing in await expression. Message-ID: details: https://hg.nginx.org/njs/rev/502be9e80244 branches: changeset: 1703:502be9e80244 user: Alexander Borisov date: Thu Sep 02 19:33:03 2021 +0300 description: Parser: catching errors parsing in await expression. The bug was introduced in 92d10cd761e2. diffstat: src/njs_parser.c | 4 ++++ src/test/njs_unit_test.c | 3 +++ 2 files changed, 7 insertions(+), 0 deletions(-) diffs (27 lines): diff -r f2410c8b80d9 -r 502be9e80244 src/njs_parser.c --- a/src/njs_parser.c Thu Sep 02 19:32:52 2021 +0300 +++ b/src/njs_parser.c Thu Sep 02 19:33:03 2021 +0300 @@ -3504,6 +3504,10 @@ static njs_int_t njs_parser_await_after(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current) { + if (parser->ret != NJS_OK) { + return njs_parser_failed(parser); + } + parser->target->right = parser->node; parser->node = parser->target; diff -r f2410c8b80d9 -r 502be9e80244 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Thu Sep 02 19:32:52 2021 +0300 +++ b/src/test/njs_unit_test.c Thu Sep 02 19:33:03 2021 +0300 @@ -20517,6 +20517,9 @@ static njs_unit_test_t njs_test[] = { njs_str("function f(a) {}" "(async function() {f('Number: ' + await 111)})"), njs_str("SyntaxError: await in arguments not supported in 1") }, + + { njs_str("async function af() {await encrypt({},}"), + njs_str("SyntaxError: Unexpected token \"}\" in 1") }, }; From alexander.borisov at nginx.com Thu Sep 2 16:35:05 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Thu, 02 Sep 2021 16:35:05 +0000 Subject: [njs] Added async support for object initializer. Message-ID: details: https://hg.nginx.org/njs/rev/68f8f7ead0fc branches: changeset: 1704:68f8f7ead0fc user: Alexander Borisov date: Thu Sep 02 19:33:32 2021 +0300 description: Added async support for object initializer. 
diffstat: src/njs_async.c | 14 +++--- src/njs_generator.c | 3 +- src/njs_lexer.h | 1 + src/njs_parser.c | 86 ++++++++++++++++++++++++++++++++++++++++++++--- src/test/njs_unit_test.c | 6 +++ 5 files changed, 96 insertions(+), 14 deletions(-) diffs (229 lines): diff -r 502be9e80244 -r 68f8f7ead0fc src/njs_async.c --- a/src/njs_async.c Thu Sep 02 19:33:03 2021 +0300 +++ b/src/njs_async.c Thu Sep 02 19:33:32 2021 +0300 @@ -174,13 +174,6 @@ static const njs_object_prop_t njs_asyn { { .type = NJS_PROPERTY, - .name = njs_string("name"), - .value = njs_string("AsyncFunction"), - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, .name = njs_string("length"), .value = njs_value(NJS_NUMBER, 1, 1.0), .configurable = 1, @@ -204,6 +197,13 @@ static const njs_object_prop_t njs_asyn { { .type = NJS_PROPERTY, + .name = njs_string("name"), + .value = njs_string("AsyncFunction"), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, .name = njs_wellknown_symbol(NJS_SYMBOL_TO_STRING_TAG), .value = njs_string("AsyncFunction"), .configurable = 1, diff -r 502be9e80244 -r 68f8f7ead0fc src/njs_generator.c --- a/src/njs_generator.c Thu Sep 02 19:33:03 2021 +0300 +++ b/src/njs_generator.c Thu Sep 02 19:33:32 2021 +0300 @@ -666,6 +666,7 @@ njs_generate(njs_vm_t *vm, njs_generator return njs_generate_function_expression(vm, generator, node); case NJS_TOKEN_FUNCTION: + case NJS_TOKEN_ASYNC_FUNCTION: return njs_generate_function(vm, generator, node); case NJS_TOKEN_REGEXP: @@ -3100,7 +3101,7 @@ njs_generate_function(njs_vm_t *vm, njs_ njs_generate_code(generator, njs_vmcode_function_t, function, NJS_VMCODE_FUNCTION, 1, node); function->lambda = lambda; - function->async = 0; + function->async = (node->token_type == NJS_TOKEN_ASYNC_FUNCTION); node->index = njs_generate_object_dest_index(vm, generator, node); if (njs_slow_path(node->index == NJS_INDEX_ERROR)) { diff -r 502be9e80244 -r 68f8f7ead0fc src/njs_lexer.h --- a/src/njs_lexer.h Thu Sep 02 19:33:03 2021 +0300 +++ b/src/njs_lexer.h Thu Sep 02 19:33:32 2021 +0300 @@ -148,6 +148,7 @@ typedef enum { NJS_TOKEN_ARGUMENT, NJS_TOKEN_RETURN, + NJS_TOKEN_ASYNC_FUNCTION, NJS_TOKEN_ASYNC_FUNCTION_DECLARATION, NJS_TOKEN_ASYNC_FUNCTION_EXPRESSION, diff -r 502be9e80244 -r 68f8f7ead0fc src/njs_parser.c --- a/src/njs_parser.c Thu Sep 02 19:33:03 2021 +0300 +++ b/src/njs_parser.c Thu Sep 02 19:33:32 2021 +0300 @@ -58,6 +58,10 @@ static njs_int_t njs_parser_property_def njs_lexer_token_t *token, njs_queue_link_t *current); static njs_int_t njs_parser_computed_property_name_after(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current); +static njs_int_t njs_parser_computed_property_async_after(njs_parser_t *parser, + njs_lexer_token_t *token, njs_queue_link_t *current); +static njs_int_t njs_parser_computed_property_name_handler(njs_parser_t *parser, + njs_lexer_token_t *token, njs_queue_link_t *current, njs_bool_t async); static njs_int_t njs_parser_initializer(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current); @@ -1931,6 +1935,38 @@ njs_parser_property_definition(njs_parse case NJS_TOKEN_ELLIPSIS: return njs_parser_not_supported(parser, token); + case NJS_TOKEN_ASYNC: + token = njs_lexer_peek_token(parser->lexer, token, 0); + if (token == NULL) { + return NJS_ERROR; + } + + if (token->type == NJS_TOKEN_OPEN_BRACKET) { + njs_lexer_consume_token(parser->lexer, 2); + + njs_parser_next(parser, njs_parser_assignment_expression); + + return njs_parser_after(parser, current, temp, 1, + njs_parser_computed_property_async_after); + } + + if 
(!njs_lexer_token_is_identifier_name(token)) { + return njs_parser_failed(parser); + } + + next = njs_lexer_peek_token(parser->lexer, token, 0); + if (next == NULL) { + return NJS_ERROR; + } + + if (next->type == NJS_TOKEN_OPEN_PARENTHESIS) { + goto method_definition; + } + + njs_lexer_consume_token(parser->lexer, 1); + + return njs_parser_failed(parser); + default: if (!njs_lexer_token_is_identifier_name(token)) { return njs_parser_reject(parser); @@ -2044,7 +2080,24 @@ static njs_int_t njs_parser_computed_property_name_after(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current) { - njs_parser_node_t *expr; + return njs_parser_computed_property_name_handler(parser, token, current, 0); +} + + +static njs_int_t +njs_parser_computed_property_async_after(njs_parser_t *parser, + njs_lexer_token_t *token, njs_queue_link_t *current) +{ + return njs_parser_computed_property_name_handler(parser, token, current, 1); +} + + +static njs_int_t +njs_parser_computed_property_name_handler(njs_parser_t *parser, + njs_lexer_token_t *token, njs_queue_link_t *current, njs_bool_t async) +{ + njs_token_type_t type; + njs_parser_node_t *expr, *target; if (token->type != NJS_TOKEN_CLOSE_BRACKET) { return njs_parser_failed(parser); @@ -2057,20 +2110,24 @@ njs_parser_computed_property_name_after( return NJS_ERROR; } + target = parser->target; + /* * For further identification. * In njs_parser_property_definition_after() index will be reset to zero. */ parser->node->index = NJS_TOKEN_OPEN_BRACKET; - parser->target->right = parser->node; - - if (token->type == NJS_TOKEN_COLON) { + target->right = parser->node; + + if (!async && token->type == NJS_TOKEN_COLON) { return njs_parser_property_name(parser, current, 1); /* MethodDefinition */ } else if (token->type == NJS_TOKEN_OPEN_PARENTHESIS) { - expr = njs_parser_node_new(parser, NJS_TOKEN_FUNCTION); + type = (async) ? 
NJS_TOKEN_ASYNC_FUNCTION : NJS_TOKEN_FUNCTION; + + expr = njs_parser_node_new(parser, type); if (expr == NULL) { return NJS_ERROR; } @@ -7196,9 +7253,23 @@ static njs_int_t njs_parser_method_definition(njs_parser_t *parser, njs_lexer_token_t *token, njs_queue_link_t *current) { + njs_token_type_t type; njs_lexer_token_t *next; njs_parser_node_t *expr; + type = NJS_TOKEN_FUNCTION; + + if (token->type == NJS_TOKEN_ASYNC) { + njs_lexer_consume_token(parser->lexer, 1); + + token = njs_lexer_token(parser->lexer, 0); + if (token == NULL) { + return NJS_ERROR; + } + + type = NJS_TOKEN_ASYNC_FUNCTION; + } + switch (token->type) { /* PropertyName */ case NJS_TOKEN_STRING: @@ -7225,7 +7296,7 @@ njs_parser_method_definition(njs_parser_ return njs_parser_failed(parser); } - expr = njs_parser_node_new(parser, NJS_TOKEN_FUNCTION); + expr = njs_parser_node_new(parser, type); if (expr == NULL) { return NJS_ERROR; } @@ -7453,6 +7524,9 @@ njs_parser_function_lambda(njs_parser_t return NJS_ERROR; } + parser->scope->async = + (parser->node->token_type == NJS_TOKEN_ASYNC_FUNCTION); + parser->node = NULL; parser->target = expr; diff -r 502be9e80244 -r 68f8f7ead0fc src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Thu Sep 02 19:33:03 2021 +0300 +++ b/src/test/njs_unit_test.c Thu Sep 02 19:33:32 2021 +0300 @@ -20520,6 +20520,12 @@ static njs_unit_test_t njs_test[] = { njs_str("async function af() {await encrypt({},}"), njs_str("SyntaxError: Unexpected token \"}\" in 1") }, + + { njs_str("let x = {async af() {await Promise.resolve(1)}}; x.af"), + njs_str("[object AsyncFunction]") }, + + { njs_str("let name = 'af', x = {async [name]() {await Promise.resolve(1)}}; x.af"), + njs_str("[object AsyncFunction]") }, }; From luajit.io at gmail.com Fri Sep 3 04:08:32 2021 From: luajit.io at gmail.com (Jinhua Luo) Date: Fri, 3 Sep 2021 12:08:32 +0800 Subject: About the phase handlers init Message-ID: Hi, All. In the function ngx_http_init_phase_handlers, I found two places hard to understand. a) The n variable is as known as the next phase handler index. In the switch case NGX_HTTP_ACCESS_PHASE, it invokes n++, which seems to be redundant and wrong, While in switch case NGX_HTTP_POST_ACCESS_PHASE, it lacks n++. Think about that if the number of modules registers as NGX_HTTP_ACCESS_PHASE is 0, then the next index is 1 more than the correct number. Of course, the current codes have at least one NGX_HTTP_POST_ACCESS_PHASE module: ngx_http_access_module, but it contains the codes to jump to the next phase handler, then the jump target is bypass NGX_HTTP_POST_ACCESS_PHASE and to NGX_HTTP_PRECONTENT_PHASE? b) Does the handler runs in reverse order of registration order? for (j = cmcf->phases[i].handlers.nelts - 1; j >= 0; j--) Please help, thanks. From arut at nginx.com Fri Sep 3 14:26:20 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 03 Sep 2021 14:26:20 +0000 Subject: [nginx] Version bump. Message-ID: details: https://hg.nginx.org/nginx/rev/2b2607d13fe9 branches: changeset: 7920:2b2607d13fe9 user: Roman Arutyunyan date: Fri Sep 03 17:19:33 2021 +0300 description: Version bump. 
diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r b2d1a602b241 -r 2b2607d13fe9 src/core/nginx.h --- a/src/core/nginx.h Tue Aug 31 18:13:47 2021 +0300 +++ b/src/core/nginx.h Fri Sep 03 17:19:33 2021 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1021002 -#define NGINX_VERSION "1.21.2" +#define nginx_version 1021003 +#define NGINX_VERSION "1.21.3" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From arut at nginx.com Fri Sep 3 14:26:23 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 03 Sep 2021 14:26:23 +0000 Subject: [nginx] Fixed debug logging. Message-ID: details: https://hg.nginx.org/nginx/rev/2245324a507a branches: changeset: 7921:2245324a507a user: Roman Arutyunyan date: Thu Sep 02 12:25:37 2021 +0300 description: Fixed debug logging. diffstat: src/http/v2/ngx_http_v2.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r 2b2607d13fe9 -r 2245324a507a src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Fri Sep 03 17:19:33 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Thu Sep 02 12:25:37 2021 +0300 @@ -4214,8 +4214,8 @@ ngx_http_v2_process_request_body(ngx_htt /* update chains */ - ngx_log_error(NGX_LOG_DEBUG, fc->log, 0, - "http2 body update chains"); + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, fc->log, 0, + "http2 body update chains"); rc = ngx_http_v2_filter_request_body(r); From xeioex at nginx.com Fri Sep 3 14:58:09 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Fri, 03 Sep 2021 14:58:09 +0000 Subject: [njs] Fixed AsyncFunction prototype property "name". Message-ID: details: https://hg.nginx.org/njs/rev/91d9301684db branches: changeset: 1705:91d9301684db user: Dmitry Volyntsev date: Fri Sep 03 14:57:50 2021 +0000 description: Fixed AsyncFunction prototype property "name". According to the spec the AsyncFunction prototype does not have own property "name". diffstat: src/njs_async.c | 7 ------- 1 files changed, 0 insertions(+), 7 deletions(-) diffs (17 lines): diff -r 68f8f7ead0fc -r 91d9301684db src/njs_async.c --- a/src/njs_async.c Thu Sep 02 19:33:32 2021 +0300 +++ b/src/njs_async.c Fri Sep 03 14:57:50 2021 +0000 @@ -197,13 +197,6 @@ static const njs_object_prop_t njs_asyn { { .type = NJS_PROPERTY, - .name = njs_string("name"), - .value = njs_string("AsyncFunction"), - .configurable = 1, - }, - - { - .type = NJS_PROPERTY, .name = njs_wellknown_symbol(NJS_SYMBOL_TO_STRING_TAG), .value = njs_string("AsyncFunction"), .configurable = 1, From mdounin at mdounin.ru Fri Sep 3 19:34:48 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 3 Sep 2021 22:34:48 +0300 Subject: About the phase handlers init In-Reply-To: References: Message-ID: Hello! On Fri, Sep 03, 2021 at 12:08:32PM +0800, Jinhua Luo wrote: > In the function ngx_http_init_phase_handlers, I found two places hard > to understand. > > a) The n variable is as known as the next phase handler index. In the > switch case NGX_HTTP_ACCESS_PHASE, it invokes n++, which seems to be > redundant and wrong, While in switch case NGX_HTTP_POST_ACCESS_PHASE, > it lacks n++. Think about that if the number of modules registers as > NGX_HTTP_ACCESS_PHASE is 0, then the next index is 1 more than the > correct number. Of course, the current codes have at least one > NGX_HTTP_POST_ACCESS_PHASE module: ngx_http_access_module, but it > contains the codes to jump to the next phase handler, then the jump > target is bypass NGX_HTTP_POST_ACCESS_PHASE and to > NGX_HTTP_PRECONTENT_PHASE? 
The NGX_HTTP_POST_ACCESS_PHASE is a special phase which is only used when there are modules in the NGX_HTTP_ACCESS_PHASE, and only contains a single phase handler, ngx_http_core_post_access_phase(). The post-access phase does not need to be present at all when there are no modules in the access phase. Further, when access phase checker decides to skip the access phase when access is allowed by a handler, and needs to switch to the next phase - it does not need to switch to post-access phase, but rather to the phase after it. Hence n++ in the access phase case (and not in the post-access phase). > b) Does the handler runs in reverse order of registration order? > > for (j = cmcf->phases[i].handlers.nelts - 1; j >= 0; j--) Yes. In particular, this ensures that addon modules are called first, before the built-in modules. -- Maxim Dounin http://mdounin.ru/ From luajit.io at gmail.com Sat Sep 4 15:48:08 2021 From: luajit.io at gmail.com (Jinhua Luo) Date: Sat, 4 Sep 2021 23:48:08 +0800 Subject: About the phase handlers init In-Reply-To: References: Message-ID: > Further, when access phase checker decides to skip the access phase when access is allowed by a handler, and needs to switch to the next phase - it does not need to switch to post-access phase, but rather to the phase after it. Hence n++ in the access phase case (and not in the post-access phase). Why bypass post access phase? Why access phase is so special? Maxim Dounin wrote on Sat, Sep 4, 2021 at 3:35 AM: > > Hello! > > On Fri, Sep 03, 2021 at 12:08:32PM +0800, Jinhua Luo wrote: > > > In the function ngx_http_init_phase_handlers, I found two places hard > > to understand. > > > > a) The n variable is as known as the next phase handler index. In the > > switch case NGX_HTTP_ACCESS_PHASE, it invokes n++, which seems to be > > redundant and wrong, While in switch case NGX_HTTP_POST_ACCESS_PHASE, > > it lacks n++. Think about that if the number of modules registers as > > NGX_HTTP_ACCESS_PHASE is 0, then the next index is 1 more than the > > correct number. Of course, the current codes have at least one > > NGX_HTTP_POST_ACCESS_PHASE module: ngx_http_access_module, but it > > contains the codes to jump to the next phase handler, then the jump > > target is bypass NGX_HTTP_POST_ACCESS_PHASE and to > > NGX_HTTP_PRECONTENT_PHASE? > > The NGX_HTTP_POST_ACCESS_PHASE is a special phase which is only > used when there are modules in the NGX_HTTP_ACCESS_PHASE, and only > contains a single phase handler, ngx_http_core_post_access_phase(). > The post-access phase does not need to be present at all when there > are no modules in the access phase. Further, when access phase > checker decides to skip the access phase when access is allowed by > a handler, and needs to switch to the next phase - it does not > need to switch to post-access phase, but rather to the phase after > it. Hence n++ in the access phase case (and not in the > post-access phase). > > b) Does the handler runs in reverse order of registration order? > > > > for (j = cmcf->phases[i].handlers.nelts - 1; j >= 0; j--) > > Yes. In particular, this ensures that addon modules are called > first, before the built-in modules.
> > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From luajit.io at gmail.com Sat Sep 4 15:51:26 2021 From: luajit.io at gmail.com (Jinhua Luo) Date: Sat, 4 Sep 2021 23:51:26 +0800 Subject: About the phase handlers init In-Reply-To: References: Message-ID: Sorry, typo. My question is why bypass post-access phase? Jinhua Luo wrote on Sat, Sep 4, 2021 at 11:48 PM: > > > Further, when access phase checker decides to skip the access phase when access is allowed by a handler, and needs to switch to the next phase - it does not need to switch to post-access phase, but rather to the phase after it. Hence n++ in the access phase case (and not in the post-access phase). > > Why bypass post access phase? Why access phase is so special? > > Maxim Dounin wrote on Sat, Sep 4, 2021 at 3:35 AM: > > > > Hello! > > > > On Fri, Sep 03, 2021 at 12:08:32PM +0800, Jinhua Luo wrote: > > > > > In the function ngx_http_init_phase_handlers, I found two places hard > > > to understand. > > > > > > a) The n variable is as known as the next phase handler index. In the > > > switch case NGX_HTTP_ACCESS_PHASE, it invokes n++, which seems to be > > > redundant and wrong, While in switch case NGX_HTTP_POST_ACCESS_PHASE, > > > it lacks n++. Think about that if the number of modules registers as > > > NGX_HTTP_ACCESS_PHASE is 0, then the next index is 1 more than the > > > correct number. Of course, the current codes have at least one > > > NGX_HTTP_POST_ACCESS_PHASE module: ngx_http_access_module, but it > > > contains the codes to jump to the next phase handler, then the jump > > > target is bypass NGX_HTTP_POST_ACCESS_PHASE and to > > > NGX_HTTP_PRECONTENT_PHASE? > > > > The NGX_HTTP_POST_ACCESS_PHASE is a special phase which is only > > used when there are modules in the NGX_HTTP_ACCESS_PHASE, and only > > contains a single phase handler, ngx_http_core_post_access_phase(). > > The post-access phase does not need to be present at all when there > > are no modules in the access phase. Further, when access phase > > checker decides to skip the access phase when access is allowed by > > a handler, and needs to switch to the next phase - it does not > > need to switch to post-access phase, but rather to the phase after > > it. Hence n++ in the access phase case (and not in the > > post-access phase). > > > > > b) Does the handler runs in reverse order of registration order? > > > > > > for (j = cmcf->phases[i].handlers.nelts - 1; j >= 0; j--) > > > > Yes. In particular, this ensures that addon modules are called > > first, before the built-in modules. > > > > -- > > Maxim Dounin > > http://mdounin.ru/ > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Sat Sep 4 18:28:07 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 4 Sep 2021 21:28:07 +0300 Subject: About the phase handlers init In-Reply-To: References: Message-ID: Hello! On Sat, Sep 04, 2021 at 11:51:26PM +0800, Jinhua Luo wrote: > Sorry, typo. My question is why bypass post-access phase? The post-access phase does nothing as long as access is allowed, it is only needed with "satisfy any;" to reject the request with the appropriate status code if no access phase handlers allowed access. Check the code, it's pretty clear.
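For reference, ngx_http_core_post_access_phase() in src/http/ngx_http_core_module.c is tiny; a simplified sketch of it (error logging and minor details omitted, so not a verbatim copy of the source) looks like this:

    static ngx_int_t
    ngx_http_core_post_access_phase(ngx_http_request_t *r,
        ngx_http_phase_handler_t *ph)
    {
        ngx_int_t  access_code;

        access_code = r->access_code;

        if (access_code) {
            /* "satisfy any;" was configured and no access handler
             * allowed the request: reject it with the saved code */
            r->access_code = 0;
            ngx_http_finalize_request(r, access_code);
            return NGX_OK;
        }

        /* access was allowed, nothing to check here */
        r->phase_handler++;
        return NGX_AGAIN;
    }

This is also the reason for the extra n++ under NGX_HTTP_ACCESS_PHASE in ngx_http_init_phase_handlers(): ph->next of the access phase handlers is made to point past the post-access entry, so when ngx_http_core_access_phase() sees that a handler has already allowed the request under "satisfy any;", it jumps directly to the following phase and the check above is skipped.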
-- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Mon Sep 6 13:40:58 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 06 Sep 2021 13:40:58 +0000 Subject: [nginx] HTTP/2: fixed window updates when buffering in filters. Message-ID: details: https://hg.nginx.org/nginx/rev/e9f402bfe37e branches: changeset: 7922:e9f402bfe37e user: Maxim Dounin date: Mon Sep 06 14:54:47 2021 +0300 description: HTTP/2: fixed window updates when buffering in filters. In the body read handler, the window was incorrectly calculated based on the full buffer size instead of the amount of free space in the buffer. If the request body is buffered by a filter, and the buffer is not empty after the read event is generated by the filter to resume request body processing, this could result in "http2 negative window update" alerts. Further, in the body ready handler and in ngx_http_v2_state_read_data() the buffer wasn't cleared when the data were already written to disk, so the client might stuck without window updates. diffstat: src/http/v2/ngx_http_v2.c | 20 ++++++++++++++++++-- 1 files changed, 18 insertions(+), 2 deletions(-) diffs (43 lines): diff -r 2245324a507a -r e9f402bfe37e src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Mon Sep 06 14:54:47 2021 +0300 @@ -1148,10 +1148,18 @@ ngx_http_v2_state_read_data(ngx_http_v2_ ngx_http_finalize_request(r, rc); } - if (rc == NGX_AGAIN && !stream->no_flow_control) { + if (rc == NGX_AGAIN + && !stream->no_flow_control + && !r->request_body_no_buffering) + { buf = r->request_body->buf; + + if (r->request_body->busy == NULL) { + buf->pos = buf->start; + buf->last = buf->start; + } + window = buf->end - buf->last; - window -= h2c->state.length - size; if (window < stream->recv_window) { @@ -4459,10 +4467,18 @@ ngx_http_v2_read_client_request_body_han return; } + if (r->request_body->busy != NULL) { + return; + } + stream = r->stream; h2c = stream->connection; buf = r->request_body->buf; + + buf->pos = buf->start; + buf->last = buf->start; + window = buf->end - buf->start; if (h2c->state.stream == stream) { From mdounin at mdounin.ru Mon Sep 6 13:41:01 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 06 Sep 2021 13:41:01 +0000 Subject: [nginx] HTTP/2: fixed timers left after request body reading. Message-ID: details: https://hg.nginx.org/nginx/rev/4775c1d27378 branches: changeset: 7923:4775c1d27378 user: Maxim Dounin date: Mon Sep 06 14:54:48 2021 +0300 description: HTTP/2: fixed timers left after request body reading. Following rb->filter_need_buffering changes, request body reading is only finished after the filter chain is called and rb->last_saved is set. As such, with r->request_body_no_buffering, timer on fc->read is no longer removed when the last part of the body is received, potentially resulting in incorrect behaviour. The fix is to call ngx_http_v2_process_request_body() from the ngx_http_v2_read_unbuffered_request_body() function instead of directly calling ngx_http_v2_filter_request_body(), so the timer is properly removed. 
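Distilled from the hunks below (a paraphrase for readability, not a literal excerpt), the change in ngx_http_v2_read_unbuffered_request_body() is essentially:

    /* before: only the request body filter chain was invoked, so the
     * fc->read timer could stay armed after the last part of the body */
    rc = ngx_http_v2_filter_request_body(r);

    /* after: go through the common processing path, which runs the
     * filter chain and, as described above, also takes care of the
     * client_body_timeout timer on fc->read */
    rc = ngx_http_v2_process_request_body(r, NULL, 0, r->stream->in_closed, 1);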
diffstat: src/http/v2/ngx_http_v2.c | 26 +++++++++++++------------- 1 files changed, 13 insertions(+), 13 deletions(-) diffs (75 lines): diff -r e9f402bfe37e -r 4775c1d27378 src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Mon Sep 06 14:54:47 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Mon Sep 06 14:54:48 2021 +0300 @@ -4263,7 +4263,7 @@ ngx_http_v2_process_request_body(ngx_htt rb->rest = 0; } - if (r->request_body_no_buffering) { + if (r->request_body_no_buffering && !flush) { break; } @@ -4296,7 +4296,10 @@ ngx_http_v2_process_request_body(ngx_htt ngx_add_timer(fc->read, clcf->client_body_timeout); if (r->request_body_no_buffering) { - ngx_post_event(fc->read, &ngx_posted_events); + if (!flush) { + ngx_post_event(fc->read, &ngx_posted_events); + } + return NGX_AGAIN; } @@ -4309,7 +4312,10 @@ ngx_http_v2_process_request_body(ngx_htt } if (r->request_body_no_buffering) { - ngx_post_event(fc->read, &ngx_posted_events); + if (!flush) { + ngx_post_event(fc->read, &ngx_posted_events); + } + return NGX_OK; } @@ -4527,7 +4533,6 @@ ngx_http_v2_read_unbuffered_request_body ngx_connection_t *fc; ngx_http_v2_stream_t *stream; ngx_http_v2_connection_t *h2c; - ngx_http_core_loc_conf_t *clcf; stream = r->stream; fc = r->connection; @@ -4551,14 +4556,14 @@ ngx_http_v2_read_unbuffered_request_body return NGX_HTTP_BAD_REQUEST; } - rc = ngx_http_v2_filter_request_body(r); - - if (rc != NGX_OK) { + rc = ngx_http_v2_process_request_body(r, NULL, 0, r->stream->in_closed, 1); + + if (rc != NGX_OK && rc != NGX_AGAIN) { stream->skip_data = 1; return rc; } - if (r->request_body->rest == 0 && r->request_body->last_saved) { + if (rc == NGX_OK) { return NGX_OK; } @@ -4606,11 +4611,6 @@ ngx_http_v2_read_unbuffered_request_body return NGX_HTTP_INTERNAL_SERVER_ERROR; } - if (stream->recv_window == 0) { - clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); - ngx_add_timer(fc->read, clcf->client_body_timeout); - } - stream->recv_window = window; return NGX_AGAIN; From mdounin at mdounin.ru Mon Sep 6 13:41:04 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 06 Sep 2021 13:41:04 +0000 Subject: [nginx] HTTP/2: optimized processing of small DATA frames. Message-ID: details: https://hg.nginx.org/nginx/rev/d9e009b39596 branches: changeset: 7924:d9e009b39596 user: Maxim Dounin date: Mon Sep 06 14:54:50 2021 +0300 description: HTTP/2: optimized processing of small DATA frames. The request body filter chain is no longer called after processing a DATA frame. Instead, we now post a read event to do this. This ensures that multiple small DATA frames read during the same event loop iteration are coalesced together, resulting in much faster processing. Since rb->buf can now contain unprocessed data, window update is no longer sent in ngx_http_v2_state_read_data() in case of flow control being used due to filter buffering. Instead, window will be updated by ngx_http_v2_read_client_request_body_handler() in the posted read event. 
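The heart of the optimization, reduced to two fragments of ngx_http_v2_process_request_body() (both taken from the change below, shown together for clarity):

    if (flush) {
        /* the body filter chain is now run only when explicitly flushing */
        rc = ngx_http_v2_filter_request_body(r);
    }

    if (!flush) {
        /* otherwise defer: post the read event, so all small DATA frames
         * read during this event loop iteration are processed in one pass */
        ngx_post_event(fc->read, &ngx_posted_events);
    }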
diffstat: src/http/v2/ngx_http_v2.c | 72 +++++++++------------------------------------- 1 files changed, 15 insertions(+), 57 deletions(-) diffs (117 lines): diff -r 4775c1d27378 -r d9e009b39596 src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Mon Sep 06 14:54:48 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Mon Sep 06 14:54:50 2021 +0300 @@ -1092,7 +1092,7 @@ static u_char * ngx_http_v2_state_read_data(ngx_http_v2_connection_t *h2c, u_char *pos, u_char *end) { - size_t size, window; + size_t size; ngx_buf_t *buf; ngx_int_t rc; ngx_connection_t *fc; @@ -1148,40 +1148,6 @@ ngx_http_v2_state_read_data(ngx_http_v2_ ngx_http_finalize_request(r, rc); } - if (rc == NGX_AGAIN - && !stream->no_flow_control - && !r->request_body_no_buffering) - { - buf = r->request_body->buf; - - if (r->request_body->busy == NULL) { - buf->pos = buf->start; - buf->last = buf->start; - } - - window = buf->end - buf->last; - window -= h2c->state.length - size; - - if (window < stream->recv_window) { - ngx_log_error(NGX_LOG_ALERT, h2c->connection->log, 0, - "http2 negative window update"); - return ngx_http_v2_connection_error(h2c, - NGX_HTTP_V2_INTERNAL_ERROR); - } - - if (window > stream->recv_window) { - if (ngx_http_v2_send_window_update(h2c, stream->node->id, - window - stream->recv_window) - == NGX_ERROR) - { - return ngx_http_v2_connection_error(h2c, - NGX_HTTP_V2_INTERNAL_ERROR); - } - - stream->recv_window = window; - } - } - ngx_http_run_posted_requests(fc); } else if (size) { @@ -4263,22 +4229,6 @@ ngx_http_v2_process_request_body(ngx_htt rb->rest = 0; } - if (r->request_body_no_buffering && !flush) { - break; - } - - /* pass buffer to request body filter chain */ - - rc = ngx_http_v2_filter_request_body(r); - - if (rc != NGX_OK) { - return rc; - } - - if (rb->rest == 0) { - break; - } - if (size == 0) { break; } @@ -4287,6 +4237,14 @@ ngx_http_v2_process_request_body(ngx_htt ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, "http2 request body rest %O", rb->rest); + if (flush) { + rc = ngx_http_v2_filter_request_body(r); + + if (rc != NGX_OK) { + return rc; + } + } + if (rb->rest == 0 && rb->last_saved) { break; } @@ -4295,12 +4253,8 @@ ngx_http_v2_process_request_body(ngx_htt clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); ngx_add_timer(fc->read, clcf->client_body_timeout); - if (r->request_body_no_buffering) { - if (!flush) { - ngx_post_event(fc->read, &ngx_posted_events); - } - - return NGX_AGAIN; + if (!flush) { + ngx_post_event(fc->read, &ngx_posted_events); } return NGX_AGAIN; @@ -4469,6 +4423,10 @@ ngx_http_v2_read_client_request_body_han return; } + if (r->stream->no_flow_control) { + return; + } + if (r->request_body->rest == 0) { return; } From jlliudh at isoftstone.com Tue Sep 7 08:48:08 2021 From: jlliudh at isoftstone.com (Junli Liu) Date: Tue, 07 Sep 2021 16:48:08 +0800 Subject: =?UTF-8?Q?=5BPATCH=5D_OpenSSL-1=2E1=2E0=E9=94=9B=E6=AD=8Bupport_Asynchrono?= =?UTF-8?Q?us_Operations_of_SSL_with_openSSL-1=2E1=2E0?= Message-ID: <1631004460.6137272C.000.764D51BA00D0@fangwg01> # HG changeset patch # User Junli Liu # Date 1631003347 -28800 # Tue Sep 07 16:29:07 2021 +0800 # Node ID 301a837387ed63bb2e455942ef2ef79bc9aaa972 # Parent 2245324a507abc54cf0274fd1b1e81bfac7c1c73 OpenSSL-1.1.0?Support Asynchronous Operations of SSL with openSSL-1.1.0 Security is critical to the foundation of networking and Transport Layer Security (TLS) is the backbone protocol for Internet security today. 
But normally, the introduction of TLS usually leads to network performance degradation, because encryption and decryption need to consume more compute resources.OpenSSL-1.1.0 has involved features of asynchronous operations to improve the performance of network and often combined with hardware feature. This changeset make Nginx can work well with OpenSSL asynchronous operations when process http request. diff -r 2245324a507a -r 301a837387ed auto/lib/openssl/conf --- a/auto/lib/openssl/conf Thu Sep 02 12:25:37 2021 +0300 +++ b/auto/lib/openssl/conf Tue Sep 07 16:29:07 2021 +0800 @@ -139,4 +139,33 @@ exit 1 fi + OPENSSL_ASYNC= + if [ "$NGX_SSL_ASYNC" != NO ]; then + + OPENSSL_ASYNC=NO + + ngx_feature="OpenSSL library" + ngx_feature_name= + ngx_feature_run=no + ngx_feature_incs="#include " + ngx_feature_path= + ngx_feature_libs="-lssl -lcrypto" + ngx_feature_test="#ifndef SSL_MODE_ASYNC + error: not define async + #endif + " + . auto/feature + if [ $ngx_found = yes ]; then + have=NGX_SSL_ASYNC . auto/have + OPENSSL_ASYNC=YES + fi + fi + + if [ -n "$OPENSSL_ASYNC" -a "$OPENSSL_ASYNC" != YES ]; then +cat << END +$1: error: For using asynchronous mode, The OpenSSL must be version 1.1.0 or greater. + +END + exit 1 + fi fi diff -r 2245324a507a -r 301a837387ed src/core/nginx.c --- a/src/core/nginx.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/core/nginx.c Tue Sep 07 16:29:07 2021 +0800 @@ -182,6 +182,12 @@ static ngx_uint_t ngx_show_help; static ngx_uint_t ngx_show_version; static ngx_uint_t ngx_show_configure; +#if (NGX_SSL && NGX_SSL_ASYNC) +/* indicate that nginx start without ngx_ssl_init() + * which will involve OpenSSL configuration file to + * start OpenSSL engine */ +static ngx_uint_t ngx_no_ssl_init; +#endif static u_char *ngx_prefix; static u_char *ngx_error_log; static u_char *ngx_conf_file; @@ -238,7 +244,13 @@ /* STUB */ #if (NGX_OPENSSL) - ngx_ssl_init(log); +#if (NGX_SSL && NGX_SSL_ASYNC) + if (!ngx_no_ssl_init) { +#endif + ngx_ssl_init(log); +#if (NGX_SSL && NGX_SSL_ASYNC) + } +#endif #endif /* @@ -248,6 +260,9 @@ ngx_memzero(&init_cycle, sizeof(ngx_cycle_t)); init_cycle.log = log; +#if (NGX_SSL && NGX_SSL_ASYNC) + init_cycle.no_ssl_init = ngx_no_ssl_init; +#endif ngx_cycle = &init_cycle; init_cycle.pool = ngx_create_pool(1024, log); @@ -782,11 +797,17 @@ case 't': ngx_test_config = 1; +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_no_ssl_init = 1; +#endif break; case 'T': ngx_test_config = 1; ngx_dump_config = 1; +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_no_ssl_init = 1; +#endif break; case 'q': @@ -854,6 +875,9 @@ return NGX_ERROR; case 's': +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_no_ssl_init = 1; +#endif if (*p) { ngx_signal = (char *) p; @@ -879,6 +903,9 @@ default: ngx_log_stderr(0, "invalid option: \"%c\"", *(p - 1)); +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_no_ssl_init = 1; +#endif return NGX_ERROR; } } diff -r 2245324a507a -r 301a837387ed src/core/ngx_conf_file.h --- a/src/core/ngx_conf_file.h Thu Sep 02 12:25:37 2021 +0300 +++ b/src/core/ngx_conf_file.h Tue Sep 07 16:29:07 2021 +0800 @@ -129,6 +129,7 @@ ngx_conf_handler_pt handler; void *handler_conf; + ngx_flag_t no_ssl_init; }; diff -r 2245324a507a -r 301a837387ed src/core/ngx_connection.c --- a/src/core/ngx_connection.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/core/ngx_connection.c Tue Sep 07 16:29:07 2021 +0800 @@ -8,6 +8,7 @@ #include #include #include +#include ngx_os_io_t ngx_io; @@ -1048,7 +1049,14 @@ * for closed shared listening sockets unless * the events was explicitly deleted */ - +#if (NGX_SSL && NGX_SSL_ASYNC) + if 
(c->async_enable && ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } +#endif ngx_del_event(c->read, NGX_READ_EVENT, 0); } else { @@ -1098,6 +1106,9 @@ { ngx_uint_t instance; ngx_event_t *rev, *wev; +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_event_t *aev; +#endif ngx_connection_t *c; /* disable warning: Win32 SOCKET is u_int while UNIX socket is int */ @@ -1131,11 +1142,18 @@ rev = c->read; wev = c->write; +#if (NGX_SSL && NGX_SSL_ASYNC) + aev = c->async; +#endif ngx_memzero(c, sizeof(ngx_connection_t)); c->read = rev; c->write = wev; +#if (NGX_SSL && NGX_SSL_ASYNC) + c->async = aev; +#endif + c->fd = s; c->log = log; @@ -1143,17 +1161,32 @@ ngx_memzero(rev, sizeof(ngx_event_t)); ngx_memzero(wev, sizeof(ngx_event_t)); +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_memzero(aev, sizeof(ngx_event_t)); +#endif rev->instance = !instance; wev->instance = !instance; +#if (NGX_SSL && NGX_SSL_ASYNC) + aev->instance = !instance; +#endif rev->index = NGX_INVALID_INDEX; wev->index = NGX_INVALID_INDEX; +#if (NGX_SSL && NGX_SSL_ASYNC) + aev->index = NGX_INVALID_INDEX; +#endif rev->data = c; wev->data = c; +#if (NGX_SSL && NGX_SSL_ASYNC) + aev->data = c; +#endif wev->write = 1; +#if (NGX_SSL && NGX_SSL_ASYNC) + aev->async = 1; +#endif return c; } @@ -1192,11 +1225,32 @@ ngx_del_timer(c->write); } +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async->timer_set) { + ngx_del_timer(c->async); + } + + if (c->async_enable && ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } +#endif + if (!c->shared) { if (ngx_del_conn) { ngx_del_conn(c, NGX_CLOSE_EVENT); } else { +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } +#endif if (c->read->active || c->read->disabled) { ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT); } @@ -1215,8 +1269,17 @@ ngx_delete_posted_event(c->write); } +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async->posted) { + ngx_delete_posted_event(c->async); + } +#endif + c->read->closed = 1; c->write->closed = 1; +#if (NGX_SSL && NGX_SSL_ASYNC) + c->async->closed = 1; +#endif ngx_reusable_connection(c, 0); @@ -1226,6 +1289,9 @@ fd = c->fd; c->fd = (ngx_socket_t) -1; +#if (NGX_SSL && NGX_SSL_ASYNC) + c->async_fd = (ngx_socket_t) -1; +#endif if (c->shared) { return; diff -r 2245324a507a -r 301a837387ed src/core/ngx_connection.h --- a/src/core/ngx_connection.h Thu Sep 02 12:25:37 2021 +0300 +++ b/src/core/ngx_connection.h Tue Sep 07 16:29:07 2021 +0800 @@ -123,9 +123,14 @@ void *data; ngx_event_t *read; ngx_event_t *write; +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_event_t *async; +#endif ngx_socket_t fd; - +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_socket_t async_fd; +#endif ngx_recv_pt recv; ngx_send_pt send; ngx_recv_chain_pt recv_chain; @@ -149,6 +154,9 @@ #if (NGX_SSL || NGX_COMPAT) ngx_ssl_connection_t *ssl; +#if (NGX_SSL_ASYNC) + ngx_flag_t async_enable; +#endif #endif ngx_udp_connection_t *udp; @@ -184,6 +192,9 @@ unsigned tcp_nopush:2; /* ngx_connection_tcp_nopush_e */ unsigned need_last_buf:1; +#if (NGX_SSL && NGX_SSL_ASYNC) + unsigned num_async_fds:8; +#endif #if (NGX_HAVE_AIO_SENDFILE || NGX_COMPAT) unsigned busy_count:2; diff -r 2245324a507a -r 301a837387ed src/core/ngx_cycle.c --- a/src/core/ngx_cycle.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/core/ngx_cycle.c Tue Sep 07 16:29:07 2021 +0800 @@ -81,6 +81,9 @@ cycle->pool = pool; cycle->log = log; 
cycle->old_cycle = old_cycle; +#if (NGX_SSL && NGX_SSL_ASYNC) + cycle->no_ssl_init = old_cycle->no_ssl_init; +#endif cycle->conf_prefix.len = old_cycle->conf_prefix.len; cycle->conf_prefix.data = ngx_pstrdup(pool, &old_cycle->conf_prefix); @@ -270,6 +273,9 @@ conf.log = log; conf.module_type = NGX_CORE_MODULE; conf.cmd_type = NGX_MAIN_CONF; +#if (NGX_SSL && NGX_SSL_ASYNC) + conf.no_ssl_init = cycle->no_ssl_init; +#endif #if 0 log->log_level = NGX_LOG_DEBUG_ALL; diff -r 2245324a507a -r 301a837387ed src/core/ngx_cycle.h --- a/src/core/ngx_cycle.h Thu Sep 02 12:25:37 2021 +0300 +++ b/src/core/ngx_cycle.h Tue Sep 07 16:29:07 2021 +0800 @@ -73,6 +73,9 @@ ngx_connection_t *connections; ngx_event_t *read_events; ngx_event_t *write_events; +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_event_t *async_events; +#endif ngx_cycle_t *old_cycle; @@ -83,6 +86,9 @@ ngx_str_t error_log; ngx_str_t lock_file; ngx_str_t hostname; +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_flag_t no_ssl_init; +#endif }; diff -r 2245324a507a -r 301a837387ed src/event/modules/ngx_devpoll_module.c --- a/src/event/modules/ngx_devpoll_module.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/modules/ngx_devpoll_module.c Tue Sep 07 16:29:07 2021 +0800 @@ -94,6 +94,10 @@ ngx_devpoll_process_events, /* process the events */ ngx_devpoll_init, /* init the events */ ngx_devpoll_done, /* done the events */ +#if (NGX_SSL && NGX_SSL_ASYNC) + NULL, /* add an async conn */ + NULL, /* del an async conn */ +#endif } }; diff -r 2245324a507a -r 301a837387ed src/event/modules/ngx_epoll_module.c --- a/src/event/modules/ngx_epoll_module.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/modules/ngx_epoll_module.c Tue Sep 07 16:29:07 2021 +0800 @@ -122,6 +122,11 @@ #endif static ngx_int_t ngx_epoll_process_events(ngx_cycle_t *cycle, ngx_msec_t timer, ngx_uint_t flags); +#if (NGX_SSL && NGX_SSL_ASYNC) +static ngx_int_t ngx_epoll_add_async_connection(ngx_connection_t *c); +static ngx_int_t ngx_epoll_del_async_connection(ngx_connection_t *c, + ngx_uint_t flags); +#endif #if (NGX_HAVE_FILE_AIO) static void ngx_epoll_eventfd_handler(ngx_event_t *ev); @@ -196,6 +201,10 @@ ngx_epoll_process_events, /* process the events */ ngx_epoll_init, /* init the events */ ngx_epoll_done, /* done the events */ +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_epoll_add_async_connection, /* add an async conn */ + ngx_epoll_del_async_connection /* del an async conn */ +#endif } }; @@ -758,6 +767,53 @@ return NGX_OK; } +#if (NGX_SSL && NGX_SSL_ASYNC) +static ngx_int_t +ngx_epoll_add_async_connection(ngx_connection_t *c) +{ + struct epoll_event ee; + + ee.events = EPOLLIN|EPOLLOUT|EPOLLET|EPOLLRDHUP; + ee.data.ptr = (void *) ((uintptr_t) c | (c->async->async << 1) | c->async->instance); + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "epoll add async connection: fd:%d ev:%08XD", c->async_fd, ee.events); + if (epoll_ctl(ep, EPOLL_CTL_ADD, c->async_fd, &ee) == -1) { + ngx_log_error(NGX_LOG_ALERT, c->log, ngx_errno, + "async add conn epoll_ctl(EPOLL_CTL_ADD, %d) failed", c->async_fd); + return NGX_ERROR; + } + + c->async->active = 1; + + return NGX_OK; +} + + +static ngx_int_t +ngx_epoll_del_async_connection(ngx_connection_t *c, ngx_uint_t flags) +{ + int op; + struct epoll_event ee; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "epoll del async connection: fd:%d", c->async_fd); + + op = EPOLL_CTL_DEL; + ee.events = 0; + ee.data.ptr = NULL; + if (epoll_ctl(ep, op, c->async_fd, &ee) == -1) { + ngx_log_error(NGX_LOG_ALERT, c->log, ngx_errno, + "async del conn epoll_ctl(%d, %d) failed", op, 
c->async_fd); + c->async_fd = -1; + return NGX_ERROR; + } + c->async_fd = -1; + c->async->active = 0; + + return NGX_OK; +} +#endif #if (NGX_HAVE_EVENTFD) @@ -791,6 +847,10 @@ ngx_event_t *rev, *wev; ngx_queue_t *queue; ngx_connection_t *c; +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_int_t async; + ngx_event_t *aev; +#endif /* NGX_TIMER_INFINITE == INFTIM */ @@ -837,7 +897,14 @@ c = event_list[i].data.ptr; instance = (uintptr_t) c & 1; +#if (NGX_SSL) +#if (NGX_SSL_ASYNC) + async = ((uintptr_t) c & 2) >> 1; +#endif + c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~3); +#else c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1); +#endif rev = c->read; @@ -880,7 +947,11 @@ } #endif +#if (NGX_SSL && NGX_SSL_ASYNC) + if ((revents & EPOLLIN) && rev->active && !async) { +#else if ((revents & EPOLLIN) && rev->active) { +#endif #if (NGX_HAVE_EPOLLRDHUP) if (revents & EPOLLRDHUP) { @@ -904,7 +975,11 @@ wev = c->write; +#if (NGX_SSL && NGX_SSL_ASYNC) + if ((revents & EPOLLOUT) && wev->active && !async) { +#else if ((revents & EPOLLOUT) && wev->active) { +#endif if (c->fd == -1 || wev->instance != instance) { @@ -930,6 +1005,33 @@ wev->handler(wev); } } + +#if (NGX_SSL && NGX_SSL_ASYNC) + aev = c->async; + + if ((revents & EPOLLIN) && aev->active && async) { + + if (c->async_fd == -1 || aev->instance!= instance) { + /* + * the stale event from a file descriptor + * that was just closed in this iteration + */ + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, + "epoll: stale event %p", c); + continue; + } + + aev->ready = 1; + + if (flags & NGX_POST_EVENTS) { + ngx_post_event(aev, &ngx_posted_events); + + } else { + aev->handler(aev); + } + } +#endif } return NGX_OK; diff -r 2245324a507a -r 301a837387ed src/event/modules/ngx_eventport_module.c --- a/src/event/modules/ngx_eventport_module.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/modules/ngx_eventport_module.c Tue Sep 07 16:29:07 2021 +0800 @@ -187,6 +187,10 @@ ngx_eventport_process_events, /* process the events */ ngx_eventport_init, /* init the events */ ngx_eventport_done, /* done the events */ +#if (NGX_SSL && NGX_SSL_ASYNC) + NULL, /* add an async conn */ + NULL, /* del an async conn */ +#endif } }; diff -r 2245324a507a -r 301a837387ed src/event/modules/ngx_kqueue_module.c --- a/src/event/modules/ngx_kqueue_module.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/modules/ngx_kqueue_module.c Tue Sep 07 16:29:07 2021 +0800 @@ -92,7 +92,11 @@ #endif ngx_kqueue_process_events, /* process the events */ ngx_kqueue_init, /* init the events */ - ngx_kqueue_done /* done the events */ + ngx_kqueue_done, /* done the events */ +#if (NGX_SSL && NGX_SSL_ASYNC) + NULL, /* add an async conn */ + NULL, /* del an async conn */ +#endif } }; diff -r 2245324a507a -r 301a837387ed src/event/modules/ngx_poll_module.c --- a/src/event/modules/ngx_poll_module.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/modules/ngx_poll_module.c Tue Sep 07 16:29:07 2021 +0800 @@ -42,7 +42,11 @@ NULL, /* trigger a notify */ ngx_poll_process_events, /* process the events */ ngx_poll_init, /* init the events */ - ngx_poll_done /* done the events */ + ngx_poll_done, /* done the events */ +#if (NGX_SSL && NGX_SSL_ASYNC) + NULL, /* add an async conn */ + NULL, /* del an async conn */ +#endif } }; diff -r 2245324a507a -r 301a837387ed src/event/modules/ngx_select_module.c --- a/src/event/modules/ngx_select_module.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/modules/ngx_select_module.c Tue Sep 07 16:29:07 2021 +0800 @@ -50,7 +50,11 @@ NULL, /* trigger a notify */ 
ngx_select_process_events, /* process the events */ ngx_select_init, /* init the events */ - ngx_select_done /* done the events */ + ngx_select_done, /* done the events */ +#if (NGX_SSL && NGX_SSL_ASYNC) + NULL, /* add an async conn */ + NULL, /* del an async conn */ +#endif } }; diff -r 2245324a507a -r 301a837387ed src/event/modules/ngx_win32_select_module.c --- a/src/event/modules/ngx_win32_select_module.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/modules/ngx_win32_select_module.c Tue Sep 07 16:29:07 2021 +0800 @@ -52,7 +52,11 @@ NULL, /* trigger a notify */ ngx_select_process_events, /* process the events */ ngx_select_init, /* init the events */ - ngx_select_done /* done the events */ + ngx_select_done, /* done the events */ +#if (NGX_SSL && NGX_SSL_ASYNC) + NULL, /* add an async conn */ + NULL, /* del an async conn */ +#endif } }; diff -r 2245324a507a -r 301a837387ed src/event/ngx_event.c --- a/src/event/ngx_event.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/ngx_event.c Tue Sep 07 16:29:07 2021 +0800 @@ -169,8 +169,11 @@ &event_core_name, ngx_event_core_create_conf, /* create configuration */ ngx_event_core_init_conf, /* init configuration */ - +#if (NGX_SSL && NGX_SSL_ASYNC) + { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL } +#else { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL } +#endif }; @@ -237,11 +240,6 @@ } } - if (!ngx_queue_empty(&ngx_posted_next_events)) { - ngx_event_move_posted_next(cycle); - timer = 0; - } - delta = ngx_current_msec; (void) ngx_process_events(cycle, timer, flags); @@ -758,6 +756,20 @@ wev[i].closed = 1; } +#if (NGX_SSL && NGX_SSL_ASYNC) + cycle->async_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n, + cycle->log); + if (cycle->async_events == NULL) { + return NGX_ERROR; + } + + aev = cycle->async_events; + for (i = 0; i < cycle->connection_n; i++) { + aev[i].closed = 1; + aev[i].instance = 1; + } +#endif + i = cycle->connection_n; next = NULL; @@ -768,6 +780,10 @@ c[i].read = &cycle->read_events[i]; c[i].write = &cycle->write_events[i]; c[i].fd = (ngx_socket_t) -1; +#if (NGX_SSL && NGX_SSL_ASYNC) + c[i].async = &cycle->async_events[i]; + c[i].async_fd = (ngx_socket_t) -1; +#endif next = &c[i]; } while (i); diff -r 2245324a507a -r 301a837387ed src/event/ngx_event.h --- a/src/event/ngx_event.h Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/ngx_event.h Tue Sep 07 16:29:07 2021 +0800 @@ -32,6 +32,10 @@ unsigned write:1; +#if (NGX_SSL && NGX_SSL_ASYNC) + unsigned async:1; +#endif + unsigned accept:1; /* used to detect the stale events in kqueue and epoll */ @@ -101,6 +105,9 @@ int available; ngx_event_handler_pt handler; +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_event_handler_pt saved_handler; +#endif #if (NGX_HAVE_IOCP) @@ -184,6 +191,11 @@ ngx_int_t (*init)(ngx_cycle_t *cycle, ngx_msec_t timer); void (*done)(ngx_cycle_t *cycle); + +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_int_t (*add_async_conn)(ngx_connection_t *c); + ngx_int_t (*del_async_conn)(ngx_connection_t *c, ngx_uint_t flags); +#endif } ngx_event_actions_t; @@ -409,6 +421,11 @@ #define ngx_add_conn ngx_event_actions.add_conn #define ngx_del_conn ngx_event_actions.del_conn +#if (NGX_SSL && NGX_SSL_ASYNC) +#define ngx_add_async_conn ngx_event_actions.add_async_conn +#define ngx_del_async_conn ngx_event_actions.del_async_conn +#endif + #define ngx_notify ngx_event_actions.notify #define ngx_add_timer ngx_event_add_timer diff -r 2245324a507a -r 301a837387ed src/event/ngx_event_accept.c --- a/src/event/ngx_event_accept.c Thu Sep 02 12:25:37 2021 +0300 
+++ b/src/event/ngx_event_accept.c Tue Sep 07 16:29:07 2021 +0800 @@ -244,6 +244,9 @@ rev->log = log; wev->log = log; +#if (NGX_SSL && NGX_SSL_ASYNC) + c->async->log = log; +#endif /* * TODO: MT: - ngx_atomic_fetch_add() @@ -409,6 +412,15 @@ #endif +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } +#endif + if (ngx_del_event(c->read, NGX_READ_EVENT, NGX_DISABLE_EVENT) == NGX_ERROR) { diff -r 2245324a507a -r 301a837387ed src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Sep 07 16:29:07 2021 +0800 @@ -89,6 +89,12 @@ static char *ngx_openssl_engine(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static void ngx_openssl_exit(ngx_cycle_t *cycle); +#if (NGX_SSL && NGX_SSL_ASYNC) +static void ngx_ssl_handshake_async_handler(ngx_event_t * aev); +static void ngx_ssl_read_async_handler(ngx_event_t * aev); +static void ngx_ssl_write_async_handler(ngx_event_t * aev); +static void ngx_ssl_shutdown_async_handler(ngx_event_t *aev); +#endif static ngx_command_t ngx_openssl_commands[] = { @@ -137,6 +143,16 @@ int ngx_ssl_stapling_index; +#if (NGX_SSL && NGX_SSL_ASYNC) +static void +ngx_ssl_empty_handler(ngx_event_t *aev) +{ + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, aev->log, 0, "ssl empty handler"); + + return; +} +#endif + ngx_int_t ngx_ssl_init(ngx_log_t *log) { @@ -385,6 +401,12 @@ SSL_CTX_set_mode(ssl->ctx, SSL_MODE_NO_AUTO_CHAIN); #endif +#if (NGX_SSL && NGX_SSL_ASYNC) + if (ssl->async_enable) { + SSL_CTX_set_mode(ssl->ctx, SSL_MODE_ASYNC); + } +#endif + SSL_CTX_set_read_ahead(ssl->ctx, 1); SSL_CTX_set_info_callback(ssl->ctx, ngx_ssl_info_callback); @@ -1662,7 +1684,9 @@ } c->ssl = sc; - +#if (NGX_SSL && NGX_SSL_ASYNC) + c->async_enable = ssl->async_enable; +#endif return NGX_OK; } @@ -1706,6 +1730,205 @@ } +#if (NGX_SSL && NGX_SSL_ASYNC) +ngx_int_t +ngx_ssl_async_process_fds(ngx_connection_t *c) +{ + OSSL_ASYNC_FD *add_fds = NULL; + OSSL_ASYNC_FD *del_fds = NULL; + size_t num_add_fds = 0; + size_t num_del_fds = 0; + unsigned loop = 0; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "ngx_ssl_async_process_fds called"); + + if (!ngx_del_async_conn || !ngx_add_async_conn) { + ngx_ssl_error(NGX_LOG_ALERT, c->log, 0, + "Async notifications not supported"); + return NGX_ERROR; + } + + SSL_get_changed_async_fds(c->ssl->connection, NULL, &num_add_fds, + NULL, &num_del_fds); + + if (num_add_fds) { + add_fds = ngx_alloc(num_add_fds * sizeof(OSSL_ASYNC_FD), c->log); + if (add_fds == NULL) { + ngx_ssl_error(NGX_LOG_ALERT, c->log, 0, + "Memory Allocation Error"); + return NGX_ERROR; + } + } + + if (num_del_fds) { + del_fds = ngx_alloc(num_del_fds * sizeof(OSSL_ASYNC_FD), c->log); + if (del_fds == NULL) { + ngx_ssl_error(NGX_LOG_ALERT, c->log, 0, + "Memory Allocation Error"); + if (add_fds) + ngx_free(add_fds); + return NGX_ERROR; + } + } + + SSL_get_changed_async_fds(c->ssl->connection, add_fds, &num_add_fds, + del_fds, &num_del_fds); + + if (num_del_fds) { + for (loop = 0; loop < num_del_fds; loop++) { + c->async_fd = del_fds[loop]; + if (c->num_async_fds) { + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, "%s: deleting fd = %d", __func__, c->async_fd); + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } + } + if (num_add_fds) { + for (loop = 0; loop < num_add_fds; loop++) { + if (c->num_async_fds == 0) { + c->num_async_fds++; + c->async_fd = add_fds[loop]; + 
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, "%s: adding fd = %d", __func__, c->async_fd); + ngx_add_async_conn(c); + } + } + } + + if (add_fds) + ngx_free(add_fds); + if (del_fds) + ngx_free(del_fds); + + return NGX_OK; +} +#endif + +#if !defined(OPENSSL_IS_BORINGSSL) && (OPENSSL_VERSION_NUMBER >= 0x10101000L) +static ngx_int_t +ngx_ssl_read_early_data(ngx_connection_t *c, + u_char *buf, + size_t size, + size_t *readbytes) +{ + int errret; + + if (!SSL_is_server(c->ssl->connection)) { + return 0; + } + + if (c->ssl->read_early_state == SSL_READ_EARLY_DATA_FINISH) { + return 0; + } + + errret = SSL_read_early_data(c->ssl->connection, buf, + size, readbytes); + c->ssl->read_early_state = errret; + + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL_read_early_data: %d readbytes: %d " + "early_data_status: %d", + errret, *readbytes, + SSL_get_early_data_status(c->ssl->connection)); + + return errret; +} + +static ngx_int_t +ngx_ssl_handshake_early_data(ngx_connection_t *c) +{ + int sslerr, errret; + size_t size, readbytes = 0; + ngx_buf_t *b; + + if (!c->ssl->enable_early_data) { + SSL_set_max_early_data(c->ssl->connection, 0); + return 0; + } + + if (SSL_get_options(c->ssl->connection) & SSL_OP_NO_TLSv1_3) { + return 0; + } + + if (!SSL_is_server(c->ssl->connection)) { + return 0; + } + + if (c->ssl->read_early_state == SSL_READ_EARLY_DATA_FINISH) { + return 0; + } + + b = c->ssl->early_buf; +#if (TLS1_3_VERSION_DRAFT == 0x7f12) + size = SSL_get_max_early_data(c->ssl->session_ctx); +#else + size = SSL_get_max_early_data(c->ssl->connection); +#endif + if (size == 0) { + return 0; + } + + if (b == NULL) { + b = ngx_create_temp_buf(c->pool, size); + if (b == NULL) { + return 0; + } + + c->ssl->early_buf = b; + + } else if (b->start == NULL) { + + b->start = ngx_palloc(c->pool, size); + if (b->start == NULL) { + return 0; + } + + b->pos = b->start; + b->last = b->start; + b->end = b->last + size; + } + + + + errret = ngx_ssl_read_early_data(c, b->last, + b->end - b->last, &readbytes); + if (readbytes > 0) { + b->last += readbytes; + } + + if (errret != SSL_READ_EARLY_DATA_ERROR) { + if (SSL_get_early_data_status(c->ssl->connection) == + SSL_EARLY_DATA_ACCEPTED) { + switch (errret) { + case SSL_READ_EARLY_DATA_FINISH: + return 1; + case SSL_READ_EARLY_DATA_SUCCESS: + return 1; + } + } else { + return 0; + } + } + + sslerr = SSL_get_error(c->ssl->connection, 0); + switch (sslerr) { + case SSL_ERROR_WANT_WRITE: + case SSL_ERROR_WANT_ASYNC: + case SSL_ERROR_WANT_READ: + break; + default: + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL_get_error: %d while reading early data\n", sslerr); + return -2; + } + + return -1; +} +#endif + + + ngx_int_t ngx_ssl_handshake(ngx_connection_t *c) { @@ -1730,7 +1953,11 @@ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_do_handshake: %d", n); if (n == 1) { - +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif if (ngx_handle_read_event(c->read, 0) != NGX_OK) { return NGX_ERROR; } @@ -1785,7 +2012,17 @@ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_get_error: %d", sslerr); - if (sslerr == SSL_ERROR_WANT_READ) { + if (sslerr == SSL_ERROR_WANT_READ +#if !defined(OPENSSL_IS_BORINGSSL) && (OPENSSL_VERSION_NUMBER >= 0x10101000L) + || (sslerr == SSL_ERROR_NONE && + c->ssl->read_early_state == SSL_READ_EARLY_DATA_SUCCESS) +#endif + ) { +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif 
c->read->ready = 0; c->read->handler = ngx_ssl_handshake_handler; c->write->handler = ngx_ssl_handshake_handler; @@ -1802,6 +2039,12 @@ } if (sslerr == SSL_ERROR_WANT_WRITE) { + +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif c->write->ready = 0; c->read->handler = ngx_ssl_handshake_handler; c->write->handler = ngx_ssl_handshake_handler; @@ -1817,6 +2060,24 @@ return NGX_AGAIN; } +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && sslerr == SSL_ERROR_WANT_ASYNC) + { + c->async->handler = ngx_ssl_handshake_async_handler; + c->read->saved_handler = c->read->handler; + c->read->handler = ngx_ssl_empty_handler; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL ASYNC WANT recieved: \"%s\"", __func__); + + if (ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } + + return NGX_AGAIN; + } +#endif + err = (sslerr == SSL_ERROR_SYSCALL) ? ngx_errno : 0; c->ssl->no_wait_shutdown = 1; @@ -1844,6 +2105,29 @@ return NGX_ERROR; } +#if (NGX_SSL && NGX_SSL_ASYNC) +static void +ngx_ssl_handshake_async_handler(ngx_event_t *aev) +{ + ngx_connection_t *c; + + c = aev->data; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL handshake async handler"); + + aev->ready = 0; + aev->handler = ngx_ssl_empty_handler; + c->read->handler = c->read->saved_handler; + + if (ngx_ssl_handshake(c) == NGX_AGAIN) { + return; + } + + c->ssl->handler(c); +} +#endif + #ifdef SSL_READ_EARLY_DATA_SUCCESS @@ -2052,6 +2336,13 @@ return; } +#if (NGX_SSL && NGX_SSL_ASYNC) + /* + * empty the handler of async event to avoid + * going back to previous ssl handshake state + */ + c->async->handler = ngx_ssl_empty_handler; +#endif c->ssl->handler(c); } @@ -2151,7 +2442,32 @@ */ for ( ;; ) { - +#if !defined(OPENSSL_IS_BORINGSSL) && (OPENSSL_VERSION_NUMBER >= 0x10101000L) + if (c->ssl->enable_early_data && + c->ssl->early_buf && + c->ssl->early_buf->start && + (c->ssl->early_buf->last > c->ssl->early_buf->pos)) { + n = c->ssl->early_buf->last - c->ssl->early_buf->pos; + if (n > (int)size) + n = size; + ngx_memcpy(buf, c->ssl->early_buf->start, n); + c->ssl->early_buf->pos += n; + return n; + } + + if (c->ssl->enable_early_data && + !SSL_is_init_finished(c->ssl->connection) && + (c->ssl->read_early_state != SSL_READ_EARLY_DATA_FINISH) && + (SSL_get_early_data_status(c->ssl->connection) == + SSL_EARLY_DATA_ACCEPTED)) { + size_t readbytes = 0; + n = ngx_ssl_read_early_data(c, buf, size, &readbytes); + if (readbytes > 0) + n = readbytes; + else if (n == SSL_READ_EARLY_DATA_FINISH) + n = SSL_read(c->ssl->connection, buf, size); + } else +#endif n = SSL_read(c->ssl->connection, buf, size); ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_read: %d", n); @@ -2373,7 +2689,11 @@ #ifndef SSL_OP_NO_RENEGOTIATION - if (c->ssl->renegotiation) { + if (c->ssl->renegotiation +#if !defined(OPENSSL_IS_BORINGSSL) && (OPENSSL_VERSION_NUMBER >= 0x10101000L) + && (SSL_version(c->ssl->connection) != TLS1_3_VERSION) +#endif + ) { /* * disable renegotiation (CVE-2009-3555): * OpenSSL (at least up to 0.9.8l) does not handle disabled @@ -2398,7 +2718,11 @@ #endif if (n > 0) { - +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif if (c->ssl->saved_write_handler) { c->write->handler = c->ssl->saved_write_handler; @@ -2421,7 +2745,12 @@ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_get_error: %d", sslerr); - if (sslerr == SSL_ERROR_WANT_READ) { + if (sslerr == 
SSL_ERROR_WANT_READ) { +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif if (c->ssl->saved_write_handler) { @@ -2444,6 +2773,11 @@ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_read: want write"); +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif c->write->ready = 0; @@ -2463,6 +2797,22 @@ return NGX_AGAIN; } +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && sslerr == SSL_ERROR_WANT_ASYNC) { + c->async->handler = ngx_ssl_read_async_handler; + c->read->saved_handler = c->read->handler; + c->read->handler = ngx_ssl_empty_handler; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL ASYNC WANT recieved: \"%s\"", __func__); + + if (ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } + + return NGX_AGAIN; + } +#endif c->ssl->no_wait_shutdown = 1; c->ssl->no_send_shutdown = 1; @@ -2477,6 +2827,24 @@ return NGX_ERROR; } +#if (NGX_SSL && NGX_SSL_ASYNC) +static void +ngx_ssl_read_async_handler(ngx_event_t *aev) +{ + ngx_connection_t *c; + + c = aev->data; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL read async handler"); + + aev->ready = 0; + aev->handler = ngx_ssl_empty_handler; + c->read->handler = c->read->saved_handler; + + c->read->handler(c->read); +} +#endif static void ngx_ssl_write_handler(ngx_event_t *wev) @@ -2670,12 +3038,30 @@ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL to write: %uz", size); +#if !defined(OPENSSL_IS_BORINGSSL) && (OPENSSL_VERSION_NUMBER >= 0x10101000L) + if (c->ssl->enable_early_data && + !SSL_is_init_finished(c->ssl->connection) && + (SSL_get_early_data_status(c->ssl->connection) == + SSL_EARLY_DATA_ACCEPTED)) { + size_t wrttenbytes = 0; + n = SSL_write_early_data(c->ssl->connection, data, size, &wrttenbytes); + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL_write_early_data: %d written: %d", n, wrttenbytes); + if (wrttenbytes > 0) + n = wrttenbytes; + } else +#endif n = SSL_write(c->ssl->connection, data, size); ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_write: %d", n); if (n > 0) { +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif if (c->ssl->saved_read_handler) { c->read->handler = c->ssl->saved_read_handler; @@ -2713,6 +3099,11 @@ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_get_error: %d", sslerr); if (sslerr == SSL_ERROR_WANT_WRITE) { +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif if (c->ssl->saved_read_handler) { @@ -2735,7 +3126,11 @@ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_write: want read"); - +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif c->read->ready = 0; if (ngx_handle_read_event(c->read, 0) != NGX_OK) { @@ -2755,6 +3150,22 @@ return NGX_AGAIN; } +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && sslerr == SSL_ERROR_WANT_ASYNC) { + c->async->handler = ngx_ssl_write_async_handler; + c->read->saved_handler = c->read->handler; + c->read->handler = ngx_ssl_empty_handler; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL ASYNC WANT recieved: \"%s\"", __func__); + + if (ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } + + return NGX_AGAIN; + } +#endif c->ssl->no_wait_shutdown = 1; c->ssl->no_send_shutdown = 1; 
c->write->error = 1; @@ -2764,6 +3175,24 @@ return NGX_ERROR; } +#if (NGX_SSL && NGX_SSL_ASYNC) +static void +ngx_ssl_write_async_handler(ngx_event_t *aev) +{ + ngx_connection_t *c; + + c = aev->data; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL write async handler"); + + aev->ready = 0; + aev->handler = ngx_ssl_empty_handler; + c->read->handler = c->read->saved_handler; + + c->write->handler(c->write); +} +#endif #ifdef SSL_READ_EARLY_DATA_SUCCESS @@ -2903,6 +3332,13 @@ c->ssl->buf->start = NULL; } } +#if !defined(OPENSSL_IS_BORINGSSL) && (OPENSSL_VERSION_NUMBER >= 0x10101000L) + if (c->ssl->early_buf && c->ssl->early_buf->start) { + if (ngx_pfree(c->pool, c->ssl->early_buf->start) == NGX_OK) { + c->ssl->early_buf->start = NULL; + } + } +#endif } @@ -2913,6 +3349,11 @@ ngx_int_t rc; ngx_err_t err; ngx_uint_t tries; + + if (!c->ssl) { + return NGX_OK; + } + rc = NGX_OK; @@ -2924,6 +3365,31 @@ * an SSL handshake, while previous versions always return 0. * Avoid calling SSL_shutdown() if handshake wasn't completed. */ +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable) { + /* Check if there is inflight request */ + if (SSL_want_async(c->ssl->connection) && !c->timedout) { + c->async->handler = ngx_ssl_shutdown_async_handler; + ngx_ssl_async_process_fds(c); + ngx_add_timer(c->async, 300); + return NGX_AGAIN; + } + + /* Ignore errors from ngx_ssl_async_process_fds as + we want to carry on and close the SSL connection + anyway. */ + ngx_ssl_async_process_fds(c); + if (ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } + ngx_del_conn(c, NGX_DISABLE_EVENT); + } +#endif + SSL_free(c->ssl->connection); + c->ssl = NULL; goto done; } @@ -2976,12 +3442,19 @@ /* before 0.9.8m SSL_shutdown() returned 0 instead of -1 on errors */ + sslerr = SSL_get_error(c->ssl->connection, n); ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_get_error: %d", sslerr); if (sslerr == SSL_ERROR_WANT_READ || sslerr == SSL_ERROR_WANT_WRITE) { + +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_ssl_async_process_fds(c) == NGX_ERROR) { + return NGX_ERROR; + } +#endif c->read->handler = ngx_ssl_shutdown_handler; c->write->handler = ngx_ssl_shutdown_handler; @@ -3021,6 +3494,13 @@ rc = NGX_ERROR; done: +#if (NGX_SSL && NGX_SSL_ASYNC) + if (c->async_enable && n == -1) { + sslerr = SSL_get_error(c->ssl->connection, n); + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL_get_error async: %d", sslerr); + } +#endif if (c->ssl->shutdown_without_free) { c->ssl->shutdown_without_free = 0; @@ -3035,6 +3515,34 @@ return rc; } +#if (NGX_SSL && NGX_SSL_ASYNC) +static void +ngx_ssl_shutdown_async_handler(ngx_event_t *aev) +{ + ngx_connection_t *c; + ngx_connection_handler_pt handler; + + c = aev->data; + handler = c->ssl->handler; + + if (aev->timedout) { + c->timedout = 1; + } + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, aev->log, 0, + "SSL shutdown async handler"); + + aev->ready = 0; + aev->handler = ngx_ssl_empty_handler; + c->read->handler = c->read->saved_handler; + + if (ngx_ssl_shutdown(c) == NGX_AGAIN) { + return; + } + + handler(c); +} +#endif static void ngx_ssl_shutdown_handler(ngx_event_t *ev) @@ -3055,6 +3563,13 @@ return; } +#if (NGX_SSL && NGX_SSL_ASYNC) + /* + * empty the handler of async event to avoid + * going back to previous ssl shutdown state + */ + c->async->handler = ngx_ssl_empty_handler; +#endif handler(c); } diff -r 2245324a507a -r 301a837387ed src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Thu Sep 
02 12:25:37 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Sep 07 16:29:07 2021 +0800 @@ -82,6 +82,9 @@ SSL_CTX *ctx; ngx_log_t *log; size_t buffer_size; +#if (NGX_SSL && NGX_SSL_ASYNC) + ngx_flag_t async_enable; +#endif }; @@ -92,6 +95,9 @@ ngx_int_t last; ngx_buf_t *buf; size_t buffer_size; +#if !defined(OPENSSL_IS_BORINGSSL) && (OPENSSL_VERSION_NUMBER >= 0x10101000L) + ngx_buf_t *early_buf; +#endif ngx_connection_handler_pt handler; @@ -118,6 +124,10 @@ unsigned in_ocsp:1; unsigned early_preread:1; unsigned write_blocked:1; + unsigned enable_early_data:1; +#if !defined(OPENSSL_IS_BORINGSSL) && (OPENSSL_VERSION_NUMBER >= 0x10101000L) + int read_early_state; +#endif }; @@ -248,6 +258,9 @@ ngx_int_t ngx_ssl_check_host(ngx_connection_t *c, ngx_str_t *name); +#if (NGX_SSL && NGX_SSL_ASYNC) +#define ngx_ssl_waiting_for_async(c) SSL_waiting_for_async(c->ssl->connection) +#endif ngx_int_t ngx_ssl_get_protocol(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s); @@ -305,6 +318,9 @@ char *fmt, ...); void ngx_ssl_cleanup_ctx(void *data); +#if (NGX_SSL && NGX_SSL_ASYNC) +ngx_int_t ngx_ssl_async_process_fds(ngx_connection_t *c) ; +#endif extern int ngx_ssl_connection_index; extern int ngx_ssl_server_conf_index; diff -r 2245324a507a -r 301a837387ed src/http/modules/ngx_http_ssl_module.c --- a/src/http/modules/ngx_http_ssl_module.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/http/modules/ngx_http_ssl_module.c Tue Sep 07 16:29:07 2021 +0800 @@ -46,6 +46,10 @@ static char *ngx_http_ssl_enable(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) +static char *ngx_http_ssl_enable_async(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); +#endif static char *ngx_http_ssl_password_file(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static char *ngx_http_ssl_session_cache(ngx_conf_t *cf, ngx_command_t *cmd, @@ -105,6 +109,15 @@ offsetof(ngx_http_ssl_srv_conf_t, enable), &ngx_http_ssl_deprecated }, +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + { ngx_string("ssl_async"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG, + ngx_http_ssl_enable_async, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_ssl_srv_conf_t, async_enable), + NULL }, +#endif + { ngx_string("ssl_certificate"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_array_slot, @@ -619,6 +632,9 @@ */ sscf->enable = NGX_CONF_UNSET; +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + sscf->async_enable = NGX_CONF_UNSET; +#endif sscf->prefer_server_ciphers = NGX_CONF_UNSET; sscf->early_data = NGX_CONF_UNSET; sscf->reject_handshake = NGX_CONF_UNSET; @@ -637,6 +653,7 @@ sscf->ocsp_cache_zone = NGX_CONF_UNSET_PTR; sscf->stapling = NGX_CONF_UNSET; sscf->stapling_verify = NGX_CONF_UNSET; + sscf->early_data = NGX_CONF_UNSET; return sscf; } @@ -661,6 +678,18 @@ } } +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if (conf->async_enable == NGX_CONF_UNSET) { + if (prev->async_enable == NGX_CONF_UNSET) { + conf->async_enable = 0; + + } else { + conf->async_enable = prev->async_enable; + conf->file = prev->file; + conf->line = prev->line; + } + } +#endif ngx_conf_merge_value(conf->session_timeout, prev->session_timeout, 300); @@ -712,6 +741,8 @@ ngx_conf_merge_str_value(conf->stapling_responder, prev->stapling_responder, ""); + ngx_conf_merge_value(conf->early_data, prev->early_data, 1); + conf->ssl.log = cf->log; if (conf->enable) { @@ -735,6 +766,9 @@ conf->file, conf->line); return NGX_CONF_ERROR; } +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + conf->ssl.async_enable = conf->async_enable; +#endif } else if (!conf->reject_handshake) { 
ngx_log_error(NGX_LOG_EMERG, cf->log, 0, @@ -1054,6 +1088,40 @@ return NGX_CONF_OK; } +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) +static char * +ngx_http_ssl_enable_async(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_http_ssl_srv_conf_t *sscf = conf; + char *rv; + ngx_flag_t *pssl, *pssl_async; + + rv = ngx_conf_set_flag_slot(cf, cmd, conf); + + if (rv != NGX_CONF_OK) { + return rv; + } + + /* If ssl_async on is configured, then ssl on is configured by default + * This will align 'ssl_async on;' and 'listen port ssl' diretives + * */ + pssl = (ngx_flag_t *) ((char *)conf + offsetof(ngx_http_ssl_srv_conf_t, enable)); + pssl_async = (ngx_flag_t *) ((char *)conf + cmd->offset); + + if(*pssl_async) { + ngx_log_error(NGX_LOG_NOTICE, cf->log, 0, "Nginx enables async mode."); + } + + if(*pssl_async && *pssl != 1) { + *pssl = *pssl_async; + } + + sscf->file = cf->conf_file->file.name.data; + sscf->line = cf->conf_file->line; + + return NGX_CONF_OK; +} +#endif static char * ngx_http_ssl_password_file(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) diff -r 2245324a507a -r 301a837387ed src/http/modules/ngx_http_ssl_module.h --- a/src/http/modules/ngx_http_ssl_module.h Thu Sep 02 12:25:37 2021 +0300 +++ b/src/http/modules/ngx_http_ssl_module.h Tue Sep 07 16:29:07 2021 +0800 @@ -17,6 +17,10 @@ typedef struct { ngx_flag_t enable; +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + ngx_flag_t async_enable; +#endif + ngx_ssl_t ssl; ngx_flag_t prefer_server_ciphers; @@ -65,6 +69,8 @@ ngx_str_t stapling_file; ngx_str_t stapling_responder; + ngx_flag_t early_data; + u_char *file; ngx_uint_t line; } ngx_http_ssl_srv_conf_t; diff -r 2245324a507a -r 301a837387ed src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/http/ngx_http_request.c Tue Sep 07 16:29:07 2021 +0800 @@ -447,9 +447,21 @@ * We are trying to not hold c->buffer's memory for an idle connection. */ - if (ngx_pfree(c->pool, b->start) == NGX_OK) { - b->start = NULL; + /* For the Async implementation we need the same buffer to be used + * again on any async calls that have not completed. + * As such we need to turn off this optimisation if an async request + * is still in progress. + */ + +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if ((c->async_enable && !ngx_ssl_waiting_for_async(c)) || !c->async_enable) { +#endif + if (ngx_pfree(c->pool, b->start) == NGX_OK) { + b->start = NULL; + } +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) } +#endif return; } @@ -755,6 +767,8 @@ } ngx_reusable_connection(c, 0); + + c->ssl->enable_early_data = sscf->early_data; rc = ngx_ssl_handshake(c); @@ -1551,12 +1565,21 @@ return n; } - if (rev->ready) { +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if(c->async_enable) n = c->recv(c, r->header_in->last, r->header_in->end - r->header_in->last); - } else { - n = NGX_AGAIN; + else { +#endif + if (rev->ready) { + n = c->recv(c, r->header_in->last, + r->header_in->end - r->header_in->last); + } else { + n = NGX_AGAIN; + } +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) } +#endif if (n == NGX_AGAIN) { if (!rev->timer_set) { @@ -2048,7 +2071,7 @@ c = r->connection; -#if (NGX_HTTP_SSL) +#if 0 && (NGX_HTTP_SSL) if (r->http_connection->ssl) { long rc; @@ -3178,52 +3201,63 @@ * c->pool and are freed too. 
*/ - b = c->buffer; - - if (ngx_pfree(c->pool, b->start) == NGX_OK) { - - /* - * the special note for ngx_http_keepalive_handler() that - * c->buffer's memory was freed - */ - - b->pos = NULL; - - } else { - b->pos = b->start; - b->last = b->start; + /* For the Async implementation we need the same buffer to be used + * again on any async calls that have not completed. + * As such we need to turn off this optimisation if an async request + * is still in progress. + */ + +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if ((c->async_enable && !ngx_ssl_waiting_for_async(c)) || !c->async_enable) + { +#endif + b = c->buffer; + + if (ngx_pfree(c->pool, b->start) == NGX_OK) { + + /* + * the special note for ngx_http_keepalive_handler() that + * c->buffer's memory was freed + */ + + b->pos = NULL; + + } else { + b->pos = b->start; + b->last = b->start; + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, "hc free: %p", + hc->free); + + if (hc->free) { + for (cl = hc->free; cl; /* void */) { + ln = cl; + cl = cl->next; + ngx_pfree(c->pool, ln->buf->start); + ngx_free_chain(c->pool, ln); + } + + hc->free = NULL; + } + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "hc busy: %p %i", + hc->busy, hc->nbusy); + + if (hc->busy) { + for (cl = hc->busy; cl; /* void */) { + ln = cl; + cl = cl->next; + ngx_pfree(c->pool, ln->buf->start); + ngx_free_chain(c->pool, ln); + } + + hc->busy = NULL; + hc->nbusy = 0; + } +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) } - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, "hc free: %p", - hc->free); - - if (hc->free) { - for (cl = hc->free; cl; /* void */) { - ln = cl; - cl = cl->next; - ngx_pfree(c->pool, ln->buf->start); - ngx_free_chain(c->pool, ln); - } - - hc->free = NULL; - } - - ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "hc busy: %p %i", - hc->busy, hc->nbusy); - - if (hc->busy) { - for (cl = hc->busy; cl; /* void */) { - ln = cl; - cl = cl->next; - ngx_pfree(c->pool, ln->buf->start); - ngx_free_chain(c->pool, ln); - } - - hc->busy = NULL; - hc->nbusy = 0; - } - -#if (NGX_HTTP_SSL) if (c->ssl) { ngx_ssl_free_buffer(c); } @@ -3232,6 +3266,14 @@ rev->handler = ngx_http_keepalive_handler; if (wev->active && (ngx_event_flags & NGX_USE_LEVEL_EVENT)) { +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } +#endif if (ngx_del_event(wev, NGX_WRITE_EVENT, 0) != NGX_OK) { ngx_http_close_connection(c); return; @@ -3356,14 +3398,26 @@ * c->buffer's memory for a keepalive connection. */ - if (ngx_pfree(c->pool, b->start) == NGX_OK) { - - /* - * the special note that c->buffer's memory was freed - */ - - b->pos = NULL; + /* For the Asynch implementation we need the same buffer to be used + * on subsequent read requests. 
As such we need to turn off this optimisation that + * frees the buffer between invocations as may end up with a buffer that is at a + * different address */ + +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if ((c->async_enable && !ngx_ssl_waiting_for_async(c)) || !c->async_enable) + { +#endif + if (ngx_pfree(c->pool, b->start) == NGX_OK) { + + /* + * the special note that c->buffer's memory was freed + */ + + b->pos = NULL; + } +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) } +#endif return; } @@ -3453,6 +3507,14 @@ wev->handler = ngx_http_empty_handler; if (wev->active && (ngx_event_flags & NGX_USE_LEVEL_EVENT)) { +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } +#endif if (ngx_del_event(wev, NGX_WRITE_EVENT, 0) != NGX_OK) { ngx_http_close_request(r, 0); return; diff -r 2245324a507a -r 301a837387ed src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/http/ngx_http_upstream.c Tue Sep 07 16:29:07 2021 +0800 @@ -1336,7 +1336,14 @@ if ((ngx_event_flags & NGX_USE_LEVEL_EVENT) && ev->active) { event = ev->write ? NGX_WRITE_EVENT : NGX_READ_EVENT; - +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } +#endif if (ngx_del_event(ev, event, 0) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); @@ -1463,7 +1470,14 @@ if ((ngx_event_flags & NGX_USE_LEVEL_EVENT) && ev->active) { event = ev->write ? NGX_WRITE_EVENT : NGX_READ_EVENT; - +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } +#endif if (ngx_del_event(ev, event, 0) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); @@ -1907,7 +1921,12 @@ "upstream SSL server name: \"%s\"", name.data); if (SSL_set_tlsext_host_name(c->ssl->connection, - (char *) name.data) +#ifdef OPENSSL_IS_BORINGSSL + (const char *) +#else + (char *) +#endif + name.data) == 0) { ngx_ssl_error(NGX_LOG_ERR, r->connection->log, 0, diff -r 2245324a507a -r 301a837387ed src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c Thu Sep 02 12:25:37 2021 +0300 +++ b/src/os/unix/ngx_process_cycle.c Tue Sep 07 16:29:07 2021 +0800 @@ -595,6 +595,14 @@ && !ngx_terminate && !ngx_quit) { + +#if (NGX_SSL) + /* Delay added to give KAE Driver time to cleanup + * if worker exit with non-zero code. 
*/ + if(ngx_processes[i].status != 0) { + usleep(2000000); + } +#endif if (ngx_spawn_process(cycle, ngx_processes[i].proc, ngx_processes[i].data, ngx_processes[i].name, i) @@ -1031,6 +1039,14 @@ if (n == NGX_ERROR) { if (ngx_event_flags & NGX_USE_EPOLL_EVENT) { +#if (NGX_HTTP_SSL && NGX_SSL_ASYNC) + if (c->async_enable && ngx_del_async_conn) { + if (c->num_async_fds) { + ngx_del_async_conn(c, NGX_DISABLE_EVENT); + c->num_async_fds--; + } + } +#endif ngx_del_conn(c, 0); } From mdounin at mdounin.ru Tue Sep 7 15:22:03 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 07 Sep 2021 15:22:03 +0000 Subject: [nginx] nginx-1.21.3-RELEASE Message-ID: details: https://hg.nginx.org/nginx/rev/2217a9c1d0b8 branches: changeset: 7925:2217a9c1d0b8 user: Maxim Dounin date: Tue Sep 07 18:21:02 2021 +0300 description: nginx-1.21.3-RELEASE diffstat: docs/xml/nginx/changes.xml | 27 +++++++++++++++++++++++++++ 1 files changed, 27 insertions(+), 0 deletions(-) diffs (37 lines): diff -r d9e009b39596 -r 2217a9c1d0b8 docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml Mon Sep 06 14:54:50 2021 +0300 +++ b/docs/xml/nginx/changes.xml Tue Sep 07 18:21:02 2021 +0300 @@ -5,6 +5,33 @@ + + + + +??????????? ?????? ???? ??????? +??? ????????????? HTTP/2. + + +optimization of client request body reading +when using HTTP/2. + + + + + +?? ?????????? API ??? ????????? ???? ??????? +??? ????????????? HTTP/2 ? ??????????? ?????????????? ??????. + + +in request body filters internal API +when using HTTP/2 and buffering of the data being processed. + + + + + + From mdounin at mdounin.ru Tue Sep 7 15:22:06 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 07 Sep 2021 15:22:06 +0000 Subject: [nginx] release-1.21.3 tag Message-ID: details: https://hg.nginx.org/nginx/rev/a525013b8296 branches: changeset: 7926:a525013b8296 user: Maxim Dounin date: Tue Sep 07 18:21:03 2021 +0300 description: release-1.21.3 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r 2217a9c1d0b8 -r a525013b8296 .hgtags --- a/.hgtags Tue Sep 07 18:21:02 2021 +0300 +++ b/.hgtags Tue Sep 07 18:21:03 2021 +0300 @@ -463,3 +463,4 @@ ffcbb9980ee2bad27b4d7b1cd680b14ff47b29aa df34dcc9ac072ffd0945e5a1f3eb7987e8275375 release-1.21.0 a68ac0677f8553b1f84d357bc9da114731ab5f47 release-1.21.1 bfbc52374adcbf2f9060afd62de940f6fab3bba5 release-1.21.2 +2217a9c1d0b86026f22700b3c089545db1964f55 release-1.21.3 From tracey at archive.org Wed Sep 8 00:31:01 2021 From: tracey at archive.org (Tracey Jaquith) Date: Tue, 7 Sep 2021 17:31:01 -0700 Subject: [PATCH] Add optional "mp4_exact_start" nginx config off/on to show video between keyframes In-Reply-To: <20210628095320.px3ggmmoyjalyv5m@Romans-MacBook-Pro.local> References: <20210628095320.px3ggmmoyjalyv5m@Romans-MacBook-Pro.local> Message-ID: <1131DA6E-CC86-4885-AF67-D61747C4D6FB@archive.org> Hi Roman, Apologies for a long delay. I was across the country and 50% time for 2 months and took a couple weeks to catchup? Alright, your updated patch is looking good. I think the overall name change from ?mp4_exact_start? to ?mp4_seek_key_frame? sounds fine to me. I?ve compiled current head-of-master with your patch and tested on MacOSX and it?s looking to work the same as the prior patch, kudos! I want to add some temporary debug lines to make sure I understand (especially) the way you cleverly avoided an nginx alloc for extra entry :) and to test on linux. Both of those should be pretty straightforward and anticipate no issues/concerns. 
What sounds good for the next steps from your POV? If you imagine I wouldn?t be getting commit/push rights and added as a contributor, I?d love to add next to my name (thank you for adding that) somewhere something like: Tracey Jaquith, Internet Archive Tracey Jaquith tracey at archive.org Since I worked on this primarily for my job purposes and I?d love the idea that both myself and the Archive are porting upstream and idea, code, etc. Very appreciatively! -Tracey > On Jun 28, 2021, at 2:53 AM, Roman Arutyunyan wrote: > > Hi Tracey, > > On Tue, Jun 15, 2021 at 03:49:48PM -0700, Tracey Jaquith wrote: >> # HG changeset patch >> # User Tracey Jaquith >> # Date 1623797180 0 >> # Tue Jun 15 22:46:20 2021 +0000 >> # Node ID 1879d49fe0cf739f48287b5a38a83d3a1adab939 >> # Parent 5f765427c17ac8cf753967387562201cf4f78dc4 >> Add optional "mp4_exact_start" nginx config off/on to show video between keyframes. > > I've been thinking about a better name for this, but came up with nothing so > far. I feel like this name does not give the right clue to the user. > Moreover, when this feature is on, the start is not quite "exact", but shifted > a few milliseconds into the past. > >> archive.org has been using mod_h264_streaming with a similar "exact start" patch from me since 2013. >> We just moved to nginx mp4 module and are using this patch. >> The technique is to find the video keyframe just before the desired "start" time, and send >> that down the wire so video playback can start immediately. >> Next calculate how many video samples are between the keyframe and desired "start" time >> and update the STTS atom where those samples move the duration from (typically) 1001 to 1. >> This way, initial unwanted video frames play at ~1/30,000s -- so visually the >> video & audio start playing immediately. >> >> You can see an example before/after here (nginx binary built with mp4 module + patch): >> >> https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Correspondents_Dinner.mp4?start=12&end=30 >> https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Correspondents_Dinner.mp4?start=12&end=30&exact=1 >> >> Tested on linux and macosx. 
>> >> (this is me: https://github.com/traceypooh ) > > We have a few rules about patches and commit messages like 67-character limit > for the first line etc: > > http://nginx.org/en/docs/contributing_changes.html > >> diff -r 5f765427c17a -r 1879d49fe0cf src/http/modules/ngx_http_mp4_module.c >> --- a/src/http/modules/ngx_http_mp4_module.c Tue Jun 01 17:37:51 2021 +0300 >> +++ b/src/http/modules/ngx_http_mp4_module.c Tue Jun 15 22:46:20 2021 +0000 >> @@ -43,6 +43,7 @@ >> typedef struct { >> size_t buffer_size; >> size_t max_buffer_size; >> + ngx_flag_t exact_start; >> } ngx_http_mp4_conf_t; >> >> >> @@ -340,6 +341,13 @@ >> offsetof(ngx_http_mp4_conf_t, max_buffer_size), >> NULL }, >> >> + { ngx_string("mp4_exact_start"), >> + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > NGX_CONF_TAKE1 -> NGX_CONF_FLAG > >> + ngx_conf_set_flag_slot, >> + NGX_HTTP_LOC_CONF_OFFSET, >> + offsetof(ngx_http_mp4_conf_t, exact_start), >> + NULL }, >> + >> ngx_null_command >> }; >> >> @@ -2156,6 +2164,83 @@ >> >> >> static ngx_int_t >> +ngx_http_mp4_exact_start_video(ngx_http_mp4_file_t *mp4, ngx_http_mp4_trak_t *trak) >> +{ >> + uint32_t n, speedup_samples, current_count; >> + ngx_uint_t sample_keyframe, start_sample_exact; >> + ngx_mp4_stts_entry_t *entry, *entries_array; >> + ngx_buf_t *data; >> + >> + data = trak->out[NGX_HTTP_MP4_STTS_DATA].buf; >> + >> + // Find the keyframe just before the desired start time - so that we can emit an mp4 >> + // where the first frame is a keyframe. We'll "speed up" the first frames to 1000x >> + // normal speed (typically), so they won't be noticed. But this way, perceptively, >> + // playback of the _video_ track can start immediately >> + // (and not have to wait until the keyframe _after_ the desired starting time frame). >> + start_sample_exact = trak->start_sample; >> + for (n = 0; n < trak->sync_samples_entries; n++) { >> + // each element of array is the sample number of a keyframe >> + // sync samples starts from 1 -- so subtract 1 >> + sample_keyframe = ngx_mp4_get_32value(trak->stss_data_buf.pos + (n * 4)) - 1; > > This can be simplified by introducing entry/end variables like we usually do. > > Also, we don't access trak->stss_data_buf directly, but prefer > trak->out[NGX_HTTP_MP4_STSS_ATOM].buf. > > ngx_http_mp4_crop_stss_data() provides an example of iterating over stss atom. > >> + if (sample_keyframe <= trak->start_sample) { >> + start_sample_exact = sample_keyframe; >> + } >> + if (sample_keyframe >= trak->start_sample) { >> + break; >> + } >> + } >> + >> + if (start_sample_exact < trak->start_sample) { >> + // We're going to prepend an entry with duration=1 for the frames we want to "not see". >> + // MOST of the time (eg: constant video framerate), >> + // we're taking a single element entry array and making it two. >> + speedup_samples = trak->start_sample - start_sample_exact; >> + >> + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, >> + "exact trak start_sample move %l to %l (speed up %d samples)\n", >> + trak->start_sample, start_sample_exact, speedup_samples); >> + >> + entries_array = ngx_palloc(mp4->request->pool, >> + (1 + trak->time_to_sample_entries) * sizeof(ngx_mp4_stts_entry_t)); >> + if (entries_array == NULL) { >> + return NGX_ERROR; >> + } >> + entry = &(entries_array[1]); >> + ngx_memcpy(entry, (ngx_mp4_stts_entry_t *)data->pos, >> + trak->time_to_sample_entries * sizeof(ngx_mp4_stts_entry_t)); > > This reallocation can be avoided. Look at NGX_HTTP_MP4_STSC_START buffer > as an example of that. 
A new 1-element optional buffer NGX_HTTP_MP4_STTS_START > can be introduced right before the stts atom data. > >> + current_count = ngx_mp4_get_32value(entry->count); >> + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, >> + "exact split in 2 video STTS entry from count:%d", current_count); >> + >> + if (current_count <= speedup_samples) { >> + return NGX_ERROR; >> + } >> + >> + ngx_mp4_set_32value(entry->count, current_count - speedup_samples); >> + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, >> + "exact split new[1]: count:%d duration:%d", >> + ngx_mp4_get_32value(entry->count), >> + ngx_mp4_get_32value(entry->duration)); >> + entry--; >> + ngx_mp4_set_32value(entry->count, speedup_samples); >> + ngx_mp4_set_32value(entry->duration, 1); >> + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, >> + "exact split new[0]: count:%d duration:1", >> + ngx_mp4_get_32value(entry->count)); >> + >> + data->pos = (u_char *) entry; >> + trak->time_to_sample_entries++; >> + trak->start_sample = start_sample_exact; >> + data->last = (u_char *) (entry + trak->time_to_sample_entries); >> + } >> + >> + return NGX_OK; >> +} >> + >> + >> +static ngx_int_t >> ngx_http_mp4_crop_stts_data(ngx_http_mp4_file_t *mp4, >> ngx_http_mp4_trak_t *trak, ngx_uint_t start) >> { >> @@ -2164,6 +2249,8 @@ >> ngx_buf_t *data; >> ngx_uint_t start_sample, entries, start_sec; >> ngx_mp4_stts_entry_t *entry, *end; >> + ngx_http_mp4_conf_t *conf; >> + > > No need for a new empty line here. > >> if (start) { >> start_sec = mp4->start; >> @@ -2238,6 +2325,10 @@ >> "start_sample:%ui, new count:%uD", >> trak->start_sample, count - rest); >> >> + conf = ngx_http_get_module_loc_conf(mp4->request, ngx_http_mp4_module); >> + if (conf->exact_start) { >> + ngx_http_mp4_exact_start_video(mp4, trak); >> + } >> } else { >> ngx_mp4_set_32value(entry->count, rest); >> data->last = (u_char *) (entry + 1); >> @@ -3590,6 +3681,7 @@ >> >> conf->buffer_size = NGX_CONF_UNSET_SIZE; >> conf->max_buffer_size = NGX_CONF_UNSET_SIZE; >> + conf->exact_start = NGX_CONF_UNSET; > > This is not enough, a merge is needed too. > >> >> return conf; >> } >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > I've made a POC patch which incorporates the issues I've mentioned. > I didn't test is properly and the directive name is still not perfect. > > -- > Roman Arutyunyan > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -Tracey @tracey_pooh TV Architect https://archive.org/tv -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Wed Sep 8 16:20:04 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 8 Sep 2021 19:20:04 +0300 Subject: =?UTF-8?Q?Re=3A_=5BPATCH=5D_OpenSSL-1=2E1=2E0=E9=94=9B=E6=AD=8Bupport_Asyn?= =?UTF-8?Q?chronous_Operations_of_SSL_with_openSSL-1=2E1=2E0?= In-Reply-To: <1631004460.6137272C.000.764D51BA00D0@fangwg01> References: <1631004460.6137272C.000.764D51BA00D0@fangwg01> Message-ID: Hello! 
On Tue, Sep 07, 2021 at 04:48:08PM +0800, Junli Liu wrote: > # HG changeset patch > # User Junli Liu > # Date 1631003347 -28800 > # Tue Sep 07 16:29:07 2021 +0800 > # Node ID 301a837387ed63bb2e455942ef2ef79bc9aaa972 > # Parent 2245324a507abc54cf0274fd1b1e81bfac7c1c73 > OpenSSL-1.1.0?Support Asynchronous Operations of SSL with openSSL-1.1.0 > > Security is critical to the foundation of networking and Transport Layer Security (TLS) is the backbone protocol for Internet security today. But normally, the introduction of TLS usually leads to network performance degradation, because encryption and decryption need to consume more compute resources.OpenSSL-1.1.0 has involved features of asynchronous operations to improve the performance of network and often combined with hardware feature. > > This changeset make Nginx can work well with OpenSSL asynchronous operations when process http request. This patch seems to be borrowed from Intel's async work (https://github.com/intel/asynch_mode_nginx). Do you have legal rights to submit this code? Also, our previous testing of the code by Intel suggests that the code is buggy and unstable, and only beneficial when using Intel QAT with RSA certificates, but not with ECDSA. Do you have any testing results which indicate that the patch is beneficial? See below for some obvious comments about the code (not a full review). > > diff -r 2245324a507a -r 301a837387ed auto/lib/openssl/conf > --- a/auto/lib/openssl/conf Thu Sep 02 12:25:37 2021 +0300 > +++ b/auto/lib/openssl/conf Tue Sep 07 16:29:07 2021 +0800 > @@ -139,4 +139,33 @@ > exit 1 > fi > > + OPENSSL_ASYNC= > + if [ "$NGX_SSL_ASYNC" != NO ]; then > + > + OPENSSL_ASYNC=NO > + > + ngx_feature="OpenSSL library" > + ngx_feature_name= > + ngx_feature_run=no > + ngx_feature_incs="#include " > + ngx_feature_path= > + ngx_feature_libs="-lssl -lcrypto" > + ngx_feature_test="#ifndef SSL_MODE_ASYNC > + error: not define async > + #endif > + " > + . auto/feature > + if [ $ngx_found = yes ]; then > + have=NGX_SSL_ASYNC . auto/have > + OPENSSL_ASYNC=YES > + fi > + fi > + > + if [ -n "$OPENSSL_ASYNC" -a "$OPENSSL_ASYNC" != YES ]; then > +cat << END > +$1: error: For using asynchronous mode, The OpenSSL must be version 1.1.0 or greater. > + > +END > + exit 1 > + fi > fi Environment variables is not something nginx configure is using to enable or disable features. Note well that these checks are not going to work when building OpenSSL with nginx as per "./configure --with-openssl=...". Given that it tests the SSL_MODE_ASYNC macro, a better idea would be to use compile-time checks in the code instead. 
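For example, something along these lines (an illustrative sketch only, not
the exact change to make; the point is to rely on the macro at build time
rather than on a configure-time environment variable):

    #ifdef SSL_MODE_ASYNC
        /* async mode is available in the OpenSSL headers used for the build */
        SSL_CTX_set_mode(ssl->ctx, SSL_MODE_ASYNC);
    #endif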
> diff -r 2245324a507a -r 301a837387ed src/core/nginx.c > --- a/src/core/nginx.c Thu Sep 02 12:25:37 2021 +0300 > +++ b/src/core/nginx.c Tue Sep 07 16:29:07 2021 +0800 > @@ -182,6 +182,12 @@ > static ngx_uint_t ngx_show_help; > static ngx_uint_t ngx_show_version; > static ngx_uint_t ngx_show_configure; > +#if (NGX_SSL && NGX_SSL_ASYNC) > +/* indicate that nginx start without ngx_ssl_init() > + * which will involve OpenSSL configuration file to > + * start OpenSSL engine */ > +static ngx_uint_t ngx_no_ssl_init; > +#endif > static u_char *ngx_prefix; > static u_char *ngx_error_log; > static u_char *ngx_conf_file; > @@ -238,7 +244,13 @@ > > /* STUB */ > #if (NGX_OPENSSL) > - ngx_ssl_init(log); > +#if (NGX_SSL && NGX_SSL_ASYNC) > + if (!ngx_no_ssl_init) { > +#endif > + ngx_ssl_init(log); > +#if (NGX_SSL && NGX_SSL_ASYNC) > + } > +#endif > #endif > > /* > @@ -248,6 +260,9 @@ > > ngx_memzero(&init_cycle, sizeof(ngx_cycle_t)); > init_cycle.log = log; > +#if (NGX_SSL && NGX_SSL_ASYNC) > + init_cycle.no_ssl_init = ngx_no_ssl_init; > +#endif > ngx_cycle = &init_cycle; > > init_cycle.pool = ngx_create_pool(1024, log); > @@ -782,11 +797,17 @@ > > case 't': > ngx_test_config = 1; > +#if (NGX_SSL && NGX_SSL_ASYNC) > + ngx_no_ssl_init = 1; > +#endif > break; Note that this approach looks incorrect: testing nginx configuration may require properly initialized OpenSSL library, including various engines loaded, for example, to load default settings and/or private keys from hardware tokens. While skipping OpenSSL initialization might work for a proof-of-concept code such as Intel's async work, this is not the approach usable for generic-purpose server such as nginx. [...] -- Maxim Dounin http://mdounin.ru/ From arut at nginx.com Thu Sep 9 10:47:55 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 9 Sep 2021 13:47:55 +0300 Subject: cache: move open to thread pool In-Reply-To: References: Message-ID: <20210909104755.ei364dwlpoa4yafr@Romans-MacBook-Pro.local> Hi Noam, On Thu, Sep 02, 2021 at 07:16:55PM +0300, Noam Cvikel wrote: > Hello, > > Didn't see a way to get into the thread after joining the list, but > hopefully this still replies correctly. > > First of all thank you for the prompt response and updated patch. > We implemented the updated async open patch, and started testing it in our > lab. Good news, thanks. > The static module part of the patch isn't updated yet, so we didn't apply > it. > While we're almost not using the static module, we'd still gladly test it > and share a feedback, once the patch is updated. The latest patchset from me includes the static module part too, and it should work with the latest nginx source. We'd appreciate feedback from you on the static part too, if that's something you can do. > The patch itself is looking good. We've not encountered any errors or > issues, and we see a significant performance improvement. > We're working with spinning disks (HDD), and before applying the patch we > would sometimes see the main thread get stuck on open IO anywhere between > 2-15 seconds. > After applying the patch, we see that an iteration of the worker's main > loop doesn't take more than 10ms! Significantly reducing the penalty for > other transactions. Great. Can you measure the overall performance improvement? > We will keep monitoring the changes and report. We would really like to see > this patch integrated into the official branch. If we can do anything else > to help make this happen - please let us know. Thanks for you feedback. 
When you have more data or bugs to report, we'd appreciate that too. > Best, > > > > *Noam Cvikel* > Qwilt | | noamc at qwilt.com > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From a at abemassry.com Fri Sep 10 02:35:32 2021 From: a at abemassry.com (=?iso-8859-1?q?Abe_Massry?=) Date: Thu, 09 Sep 2021 22:35:32 -0400 Subject: [PATCH] Add markdown to mime.types Message-ID: <95a61e228bc19f6b9917.1631241332@localhost.localdomain> # HG changeset patch # User Abe Massry # Date 1631238770 14400 # Thu Sep 09 21:52:50 2021 -0400 # Branch update-mime-types # Node ID 95a61e228bc19f6b9917671dfd2e6ff52e3e0294 # Parent a525013b82967148e6e4b7e0eadd23e288001816 Add markdown to mime.types In the chromimum browser a warning is displayed if a markdown mime type does not appear in the list of mime types on the server. The browser attempts to download the file but gives a warning saying that this type of file is usually displayed in the browser. Files with a mime type of markdown and a file extension of `.md` should be displayed as plain text in the browser and this change adds that to the default mime types that will ship with nginx. diff -r a525013b8296 -r 95a61e228bc1 conf/mime.types --- a/conf/mime.types Tue Sep 07 18:21:03 2021 +0300 +++ b/conf/mime.types Thu Sep 09 21:52:50 2021 -0400 @@ -9,6 +9,7 @@ application/atom+xml atom; application/rss+xml rss; + text/markdown md; text/mathml mml; text/plain txt; text/vnd.sun.j2me.app-descriptor jad; From milkc24 at gmail.com Fri Sep 10 06:42:10 2021 From: milkc24 at gmail.com (=?UTF-8?B?5pqX6Iqx?=) Date: Fri, 10 Sep 2021 14:42:10 +0800 Subject: Legacy Version support or unsupported Message-ID: Dear Sir/ Madam, I'm a college student from Malaysia, I would like to develop a secure web application for my final year project, I love nginx so much. So I got a question about the nginx legacy versions(e.g nginx 1.14.0), it's still supported by the nginx team. I mean if some bug hunter found some cve or hacker attack, or vulnerability found for the legacy version, will I be able to get the security patch release from your official web? It's wise to use the legacy version? or should i choose the latest version, as i am building a secure web app. Thank you so much. I look forward to hearing from you soon. -------------- next part -------------- An HTML attachment was scrubbed... URL: From maxim at nginx.com Fri Sep 10 07:59:05 2021 From: maxim at nginx.com (Maxim Konovalov) Date: Fri, 10 Sep 2021 10:59:05 +0300 Subject: Legacy Version support or unsupported In-Reply-To: References: Message-ID: Hi, On 10.09.2021 09:42, ?? wrote: > Dear Sir/ Madam, > > I'm a college student?from Malaysia, I would like to develop a secure > web application for my final year project, I love nginx so much. So I > got a question?about the nginx legacy versions(e.g nginx 1.14.0), it's > still supported by the nginx team. > > I mean if some bug hunter found some cve or hacker attack, or > vulnerability found for the legacy version, will I be able to get the > security patch release from your official web? > > It's wise to use the legacy version? or should i choose the latest > version, as i am building a secure web app. > Any releases from the nginx-1.14 branch are not supported for sure. For nginx open source we support the mainline and stable branches, which are 1.21 and 1.20 at the moment. 
You can learn more about how the mainline and stable nginx branches work in the blog post written a couple of years ago and dedicated to nginx 1.18 and 1.19 branches. https://www.nginx.com/blog/nginx-1-18-1-19-released/ I personally see no reasons to choose 1.14 releases these days and would encourage you to use the latest releases from the current mainline or stable (which is much more more conservative comparing to the mainline) branches such as nginx-1.21.3 or nginx-1.20.1, see http://nginx.org/en/download.html Just a reminder that we have a service that exposes the latest versions of various products we develop and maintain, i.e.: http://version.nginx.com/nginx/mainline http://version.nginx.com/nginx/stable -- Maxim Konovalov From milkc24 at gmail.com Fri Sep 10 08:06:48 2021 From: milkc24 at gmail.com (=?UTF-8?B?5pqX6Iqx?=) Date: Fri, 10 Sep 2021 16:06:48 +0800 Subject: Legacy Version support or unsupported In-Reply-To: References: Message-ID: Thank you for your reply. ??? On Fri, Sep 10, 2021 at 3:59 PM Maxim Konovalov wrote: > Hi, > > On 10.09.2021 09:42, ?? wrote: > > Dear Sir/ Madam, > > > > I'm a college student from Malaysia, I would like to develop a secure > > web application for my final year project, I love nginx so much. So I > > got a question about the nginx legacy versions(e.g nginx 1.14.0), it's > > still supported by the nginx team. > > > > I mean if some bug hunter found some cve or hacker attack, or > > vulnerability found for the legacy version, will I be able to get the > > security patch release from your official web? > > > > It's wise to use the legacy version? or should i choose the latest > > version, as i am building a secure web app. > > > Any releases from the nginx-1.14 branch are not supported for sure. > > For nginx open source we support the mainline and stable branches, which > are 1.21 and 1.20 at the moment. > > You can learn more about how the mainline and stable nginx branches work > in the blog post written a couple of years ago and dedicated to nginx > 1.18 and 1.19 branches. > > https://www.nginx.com/blog/nginx-1-18-1-19-released/ > > I personally see no reasons to choose 1.14 releases these days and would > encourage you to use the latest releases from the current mainline or > stable (which is much more more conservative comparing to the mainline) > branches such as nginx-1.21.3 or nginx-1.20.1, see > > http://nginx.org/en/download.html > > Just a reminder that we have a service that exposes the latest versions > of various products we develop and maintain, i.e.: > > http://version.nginx.com/nginx/mainline > http://version.nginx.com/nginx/stable > > -- > Maxim Konovalov > -------------- next part -------------- An HTML attachment was scrubbed... URL: From arut at nginx.com Tue Sep 14 11:31:44 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 14 Sep 2021 11:31:44 +0000 Subject: [nginx] Version bump. Message-ID: details: https://hg.nginx.org/nginx/rev/7189cb2b4c5d branches: changeset: 7927:7189cb2b4c5d user: Roman Arutyunyan date: Tue Sep 14 12:12:02 2021 +0300 description: Version bump. 
diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r a525013b8296 -r 7189cb2b4c5d src/core/nginx.h --- a/src/core/nginx.h Tue Sep 07 18:21:03 2021 +0300 +++ b/src/core/nginx.h Tue Sep 14 12:12:02 2021 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1021003 -#define NGINX_VERSION "1.21.3" +#define nginx_version 1021004 +#define NGINX_VERSION "1.21.4" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From arut at nginx.com Tue Sep 14 11:31:47 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 14 Sep 2021 11:31:47 +0000 Subject: [nginx] Request body: do not create temp file if there's nothing to write. Message-ID: details: https://hg.nginx.org/nginx/rev/97cf8284fd19 branches: changeset: 7928:97cf8284fd19 user: Roman Arutyunyan date: Fri Sep 10 12:59:22 2021 +0300 description: Request body: do not create temp file if there's nothing to write. Do this only when the entire request body is empty and r->request_body_in_file_only is set. The issue manifested itself with missing warning "a client request body is buffered to a temporary file" when the entire rb->buf is full and all buffers are delayed by a filter. diffstat: src/http/ngx_http_request_body.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 7189cb2b4c5d -r 97cf8284fd19 src/http/ngx_http_request_body.c --- a/src/http/ngx_http_request_body.c Tue Sep 14 12:12:02 2021 +0300 +++ b/src/http/ngx_http_request_body.c Fri Sep 10 12:59:22 2021 +0300 @@ -1309,7 +1309,7 @@ ngx_http_request_body_save_filter(ngx_ht if (rb->rest > 0) { - if (rb->buf && rb->buf->last == rb->buf->end + if (rb->bufs && rb->buf && rb->buf->last == rb->buf->end && ngx_http_write_request_body(r) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; From mdounin at mdounin.ru Tue Sep 14 17:08:15 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 14 Sep 2021 20:08:15 +0300 Subject: [PATCH] Add markdown to mime.types In-Reply-To: <95a61e228bc19f6b9917.1631241332@localhost.localdomain> References: <95a61e228bc19f6b9917.1631241332@localhost.localdomain> Message-ID: Hello! On Thu, Sep 09, 2021 at 10:35:32PM -0400, Abe Massry wrote: > # HG changeset patch > # User Abe Massry > # Date 1631238770 14400 > # Thu Sep 09 21:52:50 2021 -0400 > # Branch update-mime-types > # Node ID 95a61e228bc19f6b9917671dfd2e6ff52e3e0294 > # Parent a525013b82967148e6e4b7e0eadd23e288001816 > Add markdown to mime.types > > In the chromimum browser a warning is displayed if a markdown > mime type does not appear in the list of mime types on the server. > The browser attempts to download the file but gives a warning > saying that this type of file is usually displayed in the > browser. > > Files with a mime type of markdown and a file extension of `.md` > should be displayed as plain text in the browser and this > change adds that to the default mime types that will ship with > nginx. > > diff -r a525013b8296 -r 95a61e228bc1 conf/mime.types > --- a/conf/mime.types Tue Sep 07 18:21:03 2021 +0300 > +++ b/conf/mime.types Thu Sep 09 21:52:50 2021 -0400 > @@ -9,6 +9,7 @@ > application/atom+xml atom; > application/rss+xml rss; > > + text/markdown md; > text/mathml mml; > text/plain txt; > text/vnd.sun.j2me.app-descriptor jad; > A side note: the "text/markdown" specification says that the charset attribute is required, and this is not something nginx provides unless the charset module is explicitly used. 
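To actually provide the charset attribute one would need something along
these lines in the configuration (a sketch only; note that charset_types,
once specified, replaces the default list of types, so any default types
still wanted have to be re-listed):

    charset utf-8;
    charset_types text/markdown;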
(see RFC 7763 and/or https://www.iana.org/assignments/media-types/text/markdown) -- Maxim Dounin http://mdounin.ru/ From jeremias.bosch at gmail.com Wed Sep 15 06:39:23 2021 From: jeremias.bosch at gmail.com (J B) Date: Wed, 15 Sep 2021 08:39:23 +0200 Subject: NGINX-QUIC, ALPN offering only Http/1.1 and h2, but not h3 Message-ID: Hello all, I played around with nginx-quic branch, following the blog post here https://www.nginx.com/blog/our-roadmap-quic-http-3-support-nginx/ I have trouble to get my browser to use http3 with the server. I checked with CURL http3 enabled - there it works when providing the http3 option, it does not when using --alt-svc option. I assume it's a configuration issue, or an issue with self-signed certificates, ... What did I do: 1. Build Docker (copy from blogpost) and generate self signed certs. ``` COPY ./nginx/csr.conf /root/csr.conf COPY ./nginx/cert.pass /etc/keys/cert.pass # generate self signed certificate RUN openssl genrsa -aes128 -passout "pass:supersecure" -out ca.key 4096 RUN openssl req -new -config csr.conf -key ca.key -out ca.csr -passin "pass:supersecure" RUN openssl x509 -req -days 365 -in ca.csr -signkey ca.key -out ca.crt -passin "pass:supersecure" # copy them to /etc/ssl/ RUN cp ca.crt /etc/ssl/certs/ RUN cp ca.key /etc/ssl/private/ RUN cp ca.csr /etc/ssl/private/ # setup ssl config COPY ./nginx/ssl.conf /etc/nginx/conf.d/ssl.conf EXPOSE 80 443 ``` 2. Run the Docker with docker run -it --rm -p 443:443/udp -p 443:443/tcp nginx_quic Testing: Using HTTP3 enabled curl ends up in: ``` curl -k -vvv --alt-svc altsvc.cache https://localhost:443 * Trying 127.0.0.1:443... * TCP_NODELAY set * Connected to localhost (127.0.0.1) port 443 (#0) * ALPN, offering h2 * ALPN, offering http/1.1 * successfully set certificate verify locations: * CAfile: /etc/ssl/certs/ca-certificates.crt CApath: /etc/ssl/certs * TLSv1.3 (OUT), TLS handshake, Client hello (1): * OpenSSL SSL_connect: SSL_ERROR_SYSCALL in connection to localhost:443 * Closing connection 0 curl: (35) OpenSSL SSL_connect: SSL_ERROR_SYSCALL in connection to localhost:443 ``` using http3 option on curl works as expected: ``` ./curl -v --http3 https://localhost:443/ * Trying 127.0.0.1:443... * Connect socket 5 over QUIC to 127.0.0.1:443 * Connected to localhost () port 443 (#0) * Using HTTP/3 Stream ID: 0 (easy handle 0x55c46567b290) > GET / HTTP/3 > Host: localhost > user-agent: curl/7.79.0-DEV > accept: */* > * ngh3_stream_recv returns 0 bytes and EAGAIN < HTTP/3 200 < server: nginx/1.21.3 < date: Tue, 14 Sep 2021 22:21:26 GMT < content-type: text/html < content-length: 615 < last-modified: Tue, 07 Sep 2021 15:21:03 GMT < etag: "6137835f-267" < alt-svc: h3=":443"; ma=2592000 < quic-status: quic < x-quic: quic < accept-ranges: bytes ```` Any Idea how to solve this? Best J. -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Fri Sep 17 11:22:28 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 17 Sep 2021 14:22:28 +0300 Subject: NGINX-QUIC, ALPN offering only Http/1.1 and h2, but not h3 In-Reply-To: References: Message-ID: > On 15 Sep 2021, at 09:39, J B wrote: > > Hello all, > > I played around with nginx-quic branch, following the blog post here https://www.nginx.com/blog/our-roadmap-quic-http-3-support-nginx/ > > I have trouble to get my browser to use http3 with the server. I checked with CURL http3 enabled - there it works when providing the http3 option, it does not when using --alt-svc option. 
> I assume it's a configuration issue, or an issue with self-signed certificates, ... > > > What did I do: > 1. Build Docker (copy from blogpost) and generate self signed certs. > > ``` > COPY ./nginx/csr.conf /root/csr.conf > COPY ./nginx/cert.pass /etc/keys/cert.pass > > # generate self signed certificate > RUN openssl genrsa -aes128 -passout "pass:supersecure" -out ca.key 4096 > RUN openssl req -new -config csr.conf -key ca.key -out ca.csr -passin "pass:supersecure" > RUN openssl x509 -req -days 365 -in ca.csr -signkey ca.key -out ca.crt -passin "pass:supersecure" > > # copy them to /etc/ssl/ > RUN cp ca.crt /etc/ssl/certs/ > RUN cp ca.key /etc/ssl/private/ > RUN cp ca.csr /etc/ssl/private/ > > # setup ssl config > COPY ./nginx/ssl.conf /etc/nginx/conf.d/ssl.conf > > EXPOSE 80 443 > ``` > > 2. Run the Docker with > docker run -it --rm -p 443:443/udp -p 443:443/tcp nginx_quic > > Testing: > > Using HTTP3 enabled curl ends up in: > ``` curl -k -vvv --alt-svc altsvc.cache https://localhost:443 > * Trying 127.0.0.1:443... > * TCP_NODELAY set > * Connected to localhost (127.0.0.1) port 443 (#0) > * ALPN, offering h2 > * ALPN, offering http/1.1 > * successfully set certificate verify locations: > * CAfile: /etc/ssl/certs/ca-certificates.crt > CApath: /etc/ssl/certs > * TLSv1.3 (OUT), TLS handshake, Client hello (1): > * OpenSSL SSL_connect: SSL_ERROR_SYSCALL in connection to localhost:443 > * Closing connection 0 > curl: (35) OpenSSL SSL_connect: SSL_ERROR_SYSCALL in connection to localhost:443 > ``` > This means you didn't proceed up to obtaining alternative services as specified in the Alt-SVC HTTP response field. Successful response would look like this: * TLSv1.3 (OUT), TLS handshake, Client hello (1): * TLSv1.3 (IN), TLS handshake, Server hello (2): * TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): * TLSv1.3 (IN), TLS handshake, Certificate (11): * TLSv1.3 (IN), TLS handshake, CERT verify (15): * TLSv1.3 (IN), TLS handshake, Finished (20): * TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): * TLSv1.3 (OUT), TLS handshake, Finished (20): * SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 ... < HTTP/1.1 200 OK < Server: nginx/1.21.3 < Date: Fri, 17 Sep 2021 11:12:26 GMT < Content-Type: text/plain * Added alt-svc: localhost:443 over h3-29 < Alt-Svc: h3-29=":443"; ma=86400 Note the last two lines. Then, on next curl command, with saved alt service: $ curl -vvv --alt-svc svc.txt ... * Alt-svc connecting from [h1]localhost:443 to [h3-29]localhost:443 > > using http3 option on curl works as expected: > ``` > ./curl -v --http3 https://localhost:443/ > * Trying 127.0.0.1:443... > * Connect socket 5 over QUIC to 127.0.0.1:443 > * Connected to localhost () port 443 (#0) > * Using HTTP/3 Stream ID: 0 (easy handle 0x55c46567b290) > > GET / HTTP/3 > > Host: localhost > > user-agent: curl/7.79.0-DEV > > accept: */* > > > * ngh3_stream_recv returns 0 bytes and EAGAIN > < HTTP/3 200 > < server: nginx/1.21.3 > < date: Tue, 14 Sep 2021 22:21:26 GMT > < content-type: text/html > < content-length: 615 > < last-modified: Tue, 07 Sep 2021 15:21:03 GMT > < etag: "6137835f-267" > < alt-svc: h3=":443"; ma=2592000 > < quic-status: quic > < x-quic: quic > < accept-ranges: bytes > ```` > > Any Idea how to solve this? > The latest curl uses quic draft-29 if connecting directly. I presume it would also expect the same version in Alt-Svc. So, I'd try to substitute it with "h3-29" as provided above. 
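Assuming the Alt-Svc header is added with add_header somewhere in your
ssl.conf (the actual configuration is not shown here), that would be
something along these lines:

    add_header Alt-Svc 'h3-29=":443"; ma=86400';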
-- Sergey Kandaurov From robin.richtsfeld at gmail.com Sat Sep 18 17:19:24 2021 From: robin.richtsfeld at gmail.com (Robin Richtsfeld) Date: Sat, 18 Sep 2021 19:19:24 +0200 Subject: Added the SCRIPT_FILENAME, PATH_INFO, and PATH_TRANSLATED parameter. Message-ID: # HG changeset patch # User Robin Richtsfeld # Date 1631984576 -7200 # Sat Sep 18 19:02:56 2021 +0200 # Node ID 16927168a7b5712422021887f4a788e93d850159 # Parent 97cf8284fd19b30169231e43831e7787baba72f2 Added the SCRIPT_FILENAME, PATH_INFO, and PATH_TRANSLATED parameter. For reference see the official PHP FastCGI Example: https://www.nginx.com/resources/wiki/start/topics/examples/phpfcgi/ *diff -r 97cf8284fd19 -r 16927168a7b5 conf/fastcgi.conf* --- a/conf/fastcgi.conf Fri Sep 10 12:59:22 2021 +0300 +++ b/conf/fastcgi.conf Sat Sep 18 19:02:56 2021 +0200 @@ -6,6 +6,8 @@ fastcgi_param CONTENT_LENGTH $content_length; fastcgi_param SCRIPT_NAME $fastcgi_script_name; +fastcgi_param PATH_INFO $fastcgi_path_info; +fastcgi_param PATH_TRANSLATED $document_root$fastcgi_path_info; fastcgi_param REQUEST_URI $request_uri; fastcgi_param DOCUMENT_URI $document_uri; fastcgi_param DOCUMENT_ROOT $document_root; *diff -r 97cf8284fd19 -r 16927168a7b5 conf/fastcgi_params* --- a/conf/fastcgi_params Fri Sep 10 12:59:22 2021 +0300 +++ b/conf/fastcgi_params Sat Sep 18 19:02:56 2021 +0200 @@ -1,10 +1,13 @@ +fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; fastcgi_param QUERY_STRING $query_string; fastcgi_param REQUEST_METHOD $request_method; fastcgi_param CONTENT_TYPE $content_type; fastcgi_param CONTENT_LENGTH $content_length; fastcgi_param SCRIPT_NAME $fastcgi_script_name; +fastcgi_param PATH_INFO $fastcgi_path_info; +fastcgi_param PATH_TRANSLATED $document_root$fastcgi_path_info; fastcgi_param REQUEST_URI $request_uri; fastcgi_param DOCUMENT_URI $document_uri; fastcgi_param DOCUMENT_ROOT $document_root; -------------- next part -------------- An HTML attachment was scrubbed... URL: From xeioex at nginx.com Mon Sep 20 13:10:54 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 20 Sep 2021 13:10:54 +0000 Subject: [njs] Style. Message-ID: details: https://hg.nginx.org/njs/rev/d1a43dc93e9d branches: changeset: 1706:d1a43dc93e9d user: Dmitry Volyntsev date: Fri Sep 17 18:29:40 2021 +0000 description: Style. diffstat: src/njs_buffer.c | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diffs (27 lines): diff -r 91d9301684db -r d1a43dc93e9d src/njs_buffer.c --- a/src/njs_buffer.c Fri Sep 03 14:57:50 2021 +0000 +++ b/src/njs_buffer.c Fri Sep 17 18:29:40 2021 +0000 @@ -663,12 +663,12 @@ njs_buffer_compare_array(njs_vm_t *vm, n njs_int_t ret; njs_typed_array_t *source, *target; - source = njs_buffer_slot(vm , val1, "source"); + source = njs_buffer_slot(vm, val1, "source"); if (njs_slow_path(source == NULL)) { return NJS_ERROR; } - target = njs_buffer_slot(vm , val2, "target"); + target = njs_buffer_slot(vm, val2, "target"); if (njs_slow_path(target == NULL)) { return NJS_ERROR; } @@ -868,7 +868,7 @@ njs_buffer_is_buffer(njs_vm_t *vm, njs_v is = 0; - array = njs_buffer_slot(vm , njs_arg(args, nargs, 1), "source"); + array = njs_buffer_slot(vm, njs_arg(args, nargs, 1), "source"); if (njs_fast_path(array != NULL && array->object.__proto__ == &vm->prototypes[NJS_OBJ_TYPE_BUFFER].object)) From xeioex at nginx.com Mon Sep 20 13:10:56 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Mon, 20 Sep 2021 13:10:56 +0000 Subject: [njs] Fixed njs_buffer_slot(). 
Message-ID: details: https://hg.nginx.org/njs/rev/6feba0e602ee branches: changeset: 1707:6feba0e602ee user: Dmitry Volyntsev date: Fri Sep 17 18:29:40 2021 +0000 description: Fixed njs_buffer_slot(). Previously, njs_buffer_slot() might return NULL value without setting corresponding exception where user code expects it. In addition the function is split into two functions. The internal one does not set anything to vm->retval. This function has to be used by property handlers, because they are expected not to modify vm->retval. diffstat: src/njs_buffer.c | 42 ++++++++++++++++++++++++------------------ src/test/njs_unit_test.c | 6 ++++++ 2 files changed, 30 insertions(+), 18 deletions(-) diffs (82 lines): diff -r d1a43dc93e9d -r 6feba0e602ee src/njs_buffer.c --- a/src/njs_buffer.c Fri Sep 17 18:29:40 2021 +0000 +++ b/src/njs_buffer.c Fri Sep 17 18:29:40 2021 +0000 @@ -572,30 +572,36 @@ njs_buffer_byte_length(njs_vm_t *vm, njs static njs_typed_array_t * +njs_buffer_slot_internal(njs_vm_t *vm, njs_value_t *value) +{ + njs_typed_array_t *array; + + if (njs_is_object(value)) { + array = njs_object_proto_lookup(njs_object(value), NJS_TYPED_ARRAY, + njs_typed_array_t); + + if (array != NULL && array->type == NJS_OBJ_TYPE_UINT8_ARRAY) { + return array; + } + } + + return NULL; +} + + +static njs_typed_array_t * njs_buffer_slot(njs_vm_t *vm, njs_value_t *value, const char *name) { njs_typed_array_t *array; - if (njs_slow_path(!njs_is_object(value))) { - goto failed; - } - - array = njs_object_proto_lookup(njs_object(value), NJS_TYPED_ARRAY, - njs_typed_array_t); - - if (njs_slow_path(array != NULL - && array->type != NJS_OBJ_TYPE_UINT8_ARRAY)) - { - goto failed; + array = njs_buffer_slot_internal(vm, value); + if (njs_slow_path(array == NULL)) { + njs_type_error(vm, "\"%s\" argument must be an instance " + "of Buffer or Uint8Array", name); + return NULL; } return array; - -failed: - - njs_type_error(vm, "\"%s\" argument must be an instance " - "of Buffer or Uint8Array", name); - return NULL; } @@ -902,7 +908,7 @@ njs_buffer_prototype_length(njs_vm_t *vm { njs_typed_array_t *array; - array = njs_buffer_slot(vm, value, "this"); + array = njs_buffer_slot_internal(vm, value); if (njs_slow_path(array == NULL)) { njs_set_undefined(retval); return NJS_DECLINED; diff -r d1a43dc93e9d -r 6feba0e602ee src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Fri Sep 17 18:29:40 2021 +0000 +++ b/src/test/njs_unit_test.c Fri Sep 17 18:29:40 2021 +0000 @@ -19538,6 +19538,12 @@ static njs_unit_test_t njs_test[] = "})"), njs_str("true") }, + { njs_str("Buffer.from([1,2]).equals(new ArrayBuffer(1))"), + njs_str("TypeError: \"target\" argument must be an instance of Buffer or Uint8Array") }, + + { njs_str("Buffer.from([1,2]).equals(1)"), + njs_str("TypeError: \"target\" argument must be an instance of Buffer or Uint8Array") }, + { njs_str("var buf = Buffer.alloc(4);" "buf.fill('ZXZpbA==', 'base64')"), njs_str("evil") }, From mdounin at mdounin.ru Mon Sep 20 15:35:16 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 20 Sep 2021 18:35:16 +0300 Subject: Added the SCRIPT_FILENAME, PATH_INFO, and PATH_TRANSLATED parameter. In-Reply-To: References: Message-ID: Hello! On Sat, Sep 18, 2021 at 07:19:24PM +0200, Robin Richtsfeld wrote: > # HG changeset patch > # User Robin Richtsfeld > # Date 1631984576 -7200 > # Sat Sep 18 19:02:56 2021 +0200 > # Node ID 16927168a7b5712422021887f4a788e93d850159 > # Parent 97cf8284fd19b30169231e43831e7787baba72f2 > Added the SCRIPT_FILENAME, PATH_INFO, and PATH_TRANSLATED parameter. 
> > For reference see the official PHP FastCGI Example: > https://www.nginx.com/resources/wiki/start/topics/examples/phpfcgi/ > > *diff -r 97cf8284fd19 -r 16927168a7b5 conf/fastcgi.conf* > --- a/conf/fastcgi.conf Fri Sep 10 12:59:22 2021 +0300 > +++ b/conf/fastcgi.conf Sat Sep 18 19:02:56 2021 +0200 > @@ -6,6 +6,8 @@ > fastcgi_param CONTENT_LENGTH $content_length; > > fastcgi_param SCRIPT_NAME $fastcgi_script_name; > +fastcgi_param PATH_INFO $fastcgi_path_info; > +fastcgi_param PATH_TRANSLATED $document_root$fastcgi_path_info; > fastcgi_param REQUEST_URI $request_uri; > fastcgi_param DOCUMENT_URI $document_uri; > fastcgi_param DOCUMENT_ROOT $document_root; > *diff -r 97cf8284fd19 -r 16927168a7b5 conf/fastcgi_params* > --- a/conf/fastcgi_params Fri Sep 10 12:59:22 2021 +0300 > +++ b/conf/fastcgi_params Sat Sep 18 19:02:56 2021 +0200 > @@ -1,10 +1,13 @@ > > +fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; > fastcgi_param QUERY_STRING $query_string; > fastcgi_param REQUEST_METHOD $request_method; > fastcgi_param CONTENT_TYPE $content_type; > fastcgi_param CONTENT_LENGTH $content_length; > > fastcgi_param SCRIPT_NAME $fastcgi_script_name; > +fastcgi_param PATH_INFO $fastcgi_path_info; > +fastcgi_param PATH_TRANSLATED $document_root$fastcgi_path_info; > fastcgi_param REQUEST_URI $request_uri; > fastcgi_param DOCUMENT_URI $document_uri; > fastcgi_param DOCUMENT_ROOT $document_root; Thanks for the patch, but no. In no particular order: - The wiki is not something official, it's user-contributed content with mostly arbitrary quality, use it with caution. Official documentation is available at nginx.org/docs/. - The fastcgi_params file lacks SCRIPT_FILENAME intentionally. If you are using PHP and want to set it to $document_root$fastcgi_script_name (which is not always the case), you can use fastcgi.conf file. In contrast, fastcgi_params can be used when you want to set SCRIPT_FILENAME to something different, for example, to use a specific directory (see nginx.conf for an example, http://hg.nginx.org/nginx/file/tip/conf/nginx.conf#l63). - PATH_INFO is, similarly, to be set explicitly when you've configured nginx to provide one, see http://nginx.org/r/fastcgi_split_path_info. - PATH_TRANSLATED is implementation-defined, and nginx does not try to define it. If you want to use it, it is up to your specific environment to set it to something. Hope this helps. -- Maxim Dounin http://mdounin.ru/ From tracey at archive.org Mon Sep 20 19:39:15 2021 From: tracey at archive.org (Tracey Jaquith) Date: Mon, 20 Sep 2021 12:39:15 -0700 Subject: [PATCH] Add optional "mp4_exact_start" nginx config off/on to show video between keyframes In-Reply-To: <20210628095320.px3ggmmoyjalyv5m@Romans-MacBook-Pro.local> References: <20210628095320.px3ggmmoyjalyv5m@Romans-MacBook-Pro.local> Message-ID: <5F32216C-A041-454C-A73C-0E1C259E434C@archive.org> Hi Roman, I had an idea for considering for the feature name / config flag / function name, in case of interest. What about ?startfast?? (or even ?faststart?, I suppose) That parallels nicely with the `qt-faststart` utility and the `ffmpeg -movflags faststart` where the moov atom is moved to the front of the mp4. Since the `mp4` module is already rewriting a smaller moov atom for the desired clip, *and* the mp4 module will move the moov atom to the front (in case the source mp4 file has moov atom at the back), It seems like ?startfast? 
might convey the moov atom approach *and* the concept that we?re going to send early visually undesired frames out at ~30,000 fps :) For your consideration, thanks, -Tracey > On Jun 28, 2021, at 2:53 AM, Roman Arutyunyan wrote: > > Hi Tracey, > > On Tue, Jun 15, 2021 at 03:49:48PM -0700, Tracey Jaquith wrote: >> # HG changeset patch >> # User Tracey Jaquith > >> # Date 1623797180 0 >> # Tue Jun 15 22:46:20 2021 +0000 >> # Node ID 1879d49fe0cf739f48287b5a38a83d3a1adab939 >> # Parent 5f765427c17ac8cf753967387562201cf4f78dc4 >> Add optional "mp4_exact_start" nginx config off/on to show video between keyframes. > > I've been thinking about a better name for this, but came up with nothing so > far. I feel like this name does not give the right clue to the user. > Moreover, when this feature is on, the start is not quite "exact", but shifted > a few milliseconds into the past. > >> archive.org has been using mod_h264_streaming with a similar "exact start" patch from me since 2013. >> We just moved to nginx mp4 module and are using this patch. >> The technique is to find the video keyframe just before the desired "start" time, and send >> that down the wire so video playback can start immediately. >> Next calculate how many video samples are between the keyframe and desired "start" time >> and update the STTS atom where those samples move the duration from (typically) 1001 to 1. >> This way, initial unwanted video frames play at ~1/30,000s -- so visually the >> video & audio start playing immediately. >> >> You can see an example before/after here (nginx binary built with mp4 module + patch): >> >> https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Correspondents_Dinner.mp4?start=12&end=30 >> https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Correspondents_Dinner.mp4?start=12&end=30&exact=1 >> >> Tested on linux and macosx. >> >> (this is me: https://github.com/traceypooh ) > > We have a few rules about patches and commit messages like 67-character limit > for the first line etc: > > http://nginx.org/en/docs/contributing_changes.html > >> diff -r 5f765427c17a -r 1879d49fe0cf src/http/modules/ngx_http_mp4_module.c >> --- a/src/http/modules/ngx_http_mp4_module.c Tue Jun 01 17:37:51 2021 +0300 >> +++ b/src/http/modules/ngx_http_mp4_module.c Tue Jun 15 22:46:20 2021 +0000 >> @@ -43,6 +43,7 @@ >> typedef struct { >> size_t buffer_size; >> size_t max_buffer_size; >> + ngx_flag_t exact_start; >> } ngx_http_mp4_conf_t; >> >> >> @@ -340,6 +341,13 @@ >> offsetof(ngx_http_mp4_conf_t, max_buffer_size), >> NULL }, >> >> + { ngx_string("mp4_exact_start"), >> + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > NGX_CONF_TAKE1 -> NGX_CONF_FLAG > >> + ngx_conf_set_flag_slot, >> + NGX_HTTP_LOC_CONF_OFFSET, >> + offsetof(ngx_http_mp4_conf_t, exact_start), >> + NULL }, >> + >> ngx_null_command >> }; >> >> @@ -2156,6 +2164,83 @@ >> >> >> static ngx_int_t >> +ngx_http_mp4_exact_start_video(ngx_http_mp4_file_t *mp4, ngx_http_mp4_trak_t *trak) >> +{ >> + uint32_t n, speedup_samples, current_count; >> + ngx_uint_t sample_keyframe, start_sample_exact; >> + ngx_mp4_stts_entry_t *entry, *entries_array; >> + ngx_buf_t *data; >> + >> + data = trak->out[NGX_HTTP_MP4_STTS_DATA].buf; >> + >> + // Find the keyframe just before the desired start time - so that we can emit an mp4 >> + // where the first frame is a keyframe. We'll "speed up" the first frames to 1000x >> + // normal speed (typically), so they won't be noticed. 
But this way, perceptively, >> + // playback of the _video_ track can start immediately >> + // (and not have to wait until the keyframe _after_ the desired starting time frame). >> + start_sample_exact = trak->start_sample; >> + for (n = 0; n < trak->sync_samples_entries; n++) { >> + // each element of array is the sample number of a keyframe >> + // sync samples starts from 1 -- so subtract 1 >> + sample_keyframe = ngx_mp4_get_32value(trak->stss_data_buf.pos + (n * 4)) - 1; > > This can be simplified by introducing entry/end variables like we usually do. > > Also, we don't access trak->stss_data_buf directly, but prefer > trak->out[NGX_HTTP_MP4_STSS_ATOM].buf. > > ngx_http_mp4_crop_stss_data() provides an example of iterating over stss atom. > >> + if (sample_keyframe <= trak->start_sample) { >> + start_sample_exact = sample_keyframe; >> + } >> + if (sample_keyframe >= trak->start_sample) { >> + break; >> + } >> + } >> + >> + if (start_sample_exact < trak->start_sample) { >> + // We're going to prepend an entry with duration=1 for the frames we want to "not see". >> + // MOST of the time (eg: constant video framerate), >> + // we're taking a single element entry array and making it two. >> + speedup_samples = trak->start_sample - start_sample_exact; >> + >> + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, >> + "exact trak start_sample move %l to %l (speed up %d samples)\n", >> + trak->start_sample, start_sample_exact, speedup_samples); >> + >> + entries_array = ngx_palloc(mp4->request->pool, >> + (1 + trak->time_to_sample_entries) * sizeof(ngx_mp4_stts_entry_t)); >> + if (entries_array == NULL) { >> + return NGX_ERROR; >> + } >> + entry = &(entries_array[1]); >> + ngx_memcpy(entry, (ngx_mp4_stts_entry_t *)data->pos, >> + trak->time_to_sample_entries * sizeof(ngx_mp4_stts_entry_t)); > > This reallocation can be avoided. Look at NGX_HTTP_MP4_STSC_START buffer > as an example of that. A new 1-element optional buffer NGX_HTTP_MP4_STTS_START > can be introduced right before the stts atom data. > >> + current_count = ngx_mp4_get_32value(entry->count); >> + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, >> + "exact split in 2 video STTS entry from count:%d", current_count); >> + >> + if (current_count <= speedup_samples) { >> + return NGX_ERROR; >> + } >> + >> + ngx_mp4_set_32value(entry->count, current_count - speedup_samples); >> + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, >> + "exact split new[1]: count:%d duration:%d", >> + ngx_mp4_get_32value(entry->count), >> + ngx_mp4_get_32value(entry->duration)); >> + entry--; >> + ngx_mp4_set_32value(entry->count, speedup_samples); >> + ngx_mp4_set_32value(entry->duration, 1); >> + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, >> + "exact split new[0]: count:%d duration:1", >> + ngx_mp4_get_32value(entry->count)); >> + >> + data->pos = (u_char *) entry; >> + trak->time_to_sample_entries++; >> + trak->start_sample = start_sample_exact; >> + data->last = (u_char *) (entry + trak->time_to_sample_entries); >> + } >> + >> + return NGX_OK; >> +} >> + >> + >> +static ngx_int_t >> ngx_http_mp4_crop_stts_data(ngx_http_mp4_file_t *mp4, >> ngx_http_mp4_trak_t *trak, ngx_uint_t start) >> { >> @@ -2164,6 +2249,8 @@ >> ngx_buf_t *data; >> ngx_uint_t start_sample, entries, start_sec; >> ngx_mp4_stts_entry_t *entry, *end; >> + ngx_http_mp4_conf_t *conf; >> + > > No need for a new empty line here. 
> >> if (start) { >> start_sec = mp4->start; >> @@ -2238,6 +2325,10 @@ >> "start_sample:%ui, new count:%uD", >> trak->start_sample, count - rest); >> >> + conf = ngx_http_get_module_loc_conf(mp4->request, ngx_http_mp4_module); >> + if (conf->exact_start) { >> + ngx_http_mp4_exact_start_video(mp4, trak); >> + } >> } else { >> ngx_mp4_set_32value(entry->count, rest); >> data->last = (u_char *) (entry + 1); >> @@ -3590,6 +3681,7 @@ >> >> conf->buffer_size = NGX_CONF_UNSET_SIZE; >> conf->max_buffer_size = NGX_CONF_UNSET_SIZE; >> + conf->exact_start = NGX_CONF_UNSET; > > This is not enough, a merge is needed too. > >> >> return conf; >> } >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > I've made a POC patch which incorporates the issues I've mentioned. > I didn't test is properly and the directive name is still not perfect. > > -- > Roman Arutyunyan > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -Tracey @tracey_pooh TV Architect https://archive.org/tv -------------- next part -------------- An HTML attachment was scrubbed... URL: From greeshma.avadhootha at gmail.com Tue Sep 21 04:56:10 2021 From: greeshma.avadhootha at gmail.com (Greeshma A) Date: Mon, 20 Sep 2021 21:56:10 -0700 Subject: Unit test for proxy_protocol Message-ID: I am trying to test the following config : For proxy_protocol I have made changes in the nginx source code to support variable input parameter as well. Im writing unit tests for this. However, Im not sure how to configure the map directive. The variable $ssl_preread_server_nameis wrong. I need to give the destination address and port. What would I have to give in the key part of the map definition? ie something like map $remote_addr:$remote_port $proxy_state { # conditions } stream { %%TEST_GLOBALS_STREAM%% map $ssl_preread_server_name $proxy_state{ default off; 127.0.0.1:8084 on; } server { listen 127.0.0.1:8080; proxy_pass 127.0.0.1:8081; proxy_protocol on; } server { listen 127.0.0.1:8082; proxy_pass 127.0.0.1:8081; proxy_protocol off; } server { listen 127.0.0.1:8083; proxy_pass 127.0.0.1:8081; proxy_protocol $proxy_state; } server { listen 127.0.0.1:8084; proxy_pass 127.0.0.1:8081; proxy_protocol $proxy_state; } } -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- #!/usr/bin/perl # Tests for stream proxy module with haproxy protocol. 
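# Test layout: the daemon on port 8081 echoes back whatever it receives,
# so each check below asserts whether nginx prepended a "PROXY TCP4 ..."
# header.  Ports 8080 and 8082 use literal on/off values, while 8083 and
# 8084 take their value from the $proxy_state map, exercising the patched
# variable form of the "proxy_protocol" directive.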
############################################################################### use warnings; use strict; use Test::More; use IO::Select; use Socket qw/ $CRLF /; BEGIN { use FindBin; chdir($FindBin::Bin); } use lib '../nginx-tests/lib'; use Test::Nginx; use Test::Nginx::Stream qw/ stream /; ############################################################################### select STDERR; $| = 1; select STDOUT; $| = 1; my $t = Test::Nginx->new()->has(qw/stream/) ->write_file_expand('nginx.conf', <<'EOF'); %%TEST_GLOBALS%% daemon off; events { } stream { %%TEST_GLOBALS_STREAM%% map $ssl_preread_server_name $proxy_state{ default off; 127.0.0.1:8084 on; } server { listen 127.0.0.1:8080; proxy_pass 127.0.0.1:8081; proxy_protocol on; } server { listen 127.0.0.1:8082; proxy_pass 127.0.0.1:8081; proxy_protocol off; } server { listen 127.0.0.1:8083; proxy_pass 127.0.0.1:8081; proxy_protocol $proxy_state; } server { listen 127.0.0.1:8084; proxy_pass 127.0.0.1:8081; proxy_protocol $proxy_state; } } EOF $t->run_daemon(\&stream_daemon); $t->run()->plan(4); $t->waitforsocket('127.0.0.1:' . port(8081)); ############################################################################### # Proxy header format # PROXY # PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n my $dp = port(8080); my $s = stream('127.0.0.1:' . $dp); my $data = $s->io('close'); my $sp = $s->sockport(); $DB::single = 1; my $dp2 = port(8083); my $s2 = stream('127.0.0.1:' . $dp2); my $data2 = $s2->io('close'); my $sp2 = $s2->sockport(); $DB::single = 1; is($data, "PROXY TCP4 127.0.0.1 127.0.0.1 $sp $dp${CRLF}close", 'protocol on'); is(stream('127.0.0.1:' . port(8082))->io('close'), 'close', 'protocol off'); is($data2, "PROXY TCP4 127.0.0.1 127.0.0.1 $sp2 $dp2${CRLF}close", 'protocol variable - on'); is(stream('127.0.0.1:' . port(8084))->io('close'), 'close', 'protocol variable - off'); ############################################################################### sub stream_daemon { my $server = IO::Socket::INET->new( Proto => 'tcp', LocalAddr => '127.0.0.1:' . port(8081), Listen => 5, Reuse => 1 ) or die "Can't create listening socket: $!\n"; my $sel = IO::Select->new($server); local $SIG{PIPE} = 'IGNORE'; while (my @ready = $sel->can_read) { foreach my $fh (@ready) { if ($server == $fh) { my $new = $fh->accept; $new->autoflush(1); $sel->add($new); } elsif (stream_handle_client($fh)) { $sel->remove($fh); $fh->close; } } } } sub stream_handle_client { my ($client) = @_; log2c("(new connection $client)"); $client->sysread(my $buffer, 65536) or return 1; log2i("$client $buffer"); log2o("$client $buffer"); $client->syswrite($buffer); return $buffer =~ /close/; } sub log2i { Test::Nginx::log_core('|| <<', @_); } sub log2o { Test::Nginx::log_core('|| >>', @_); } sub log2c { Test::Nginx::log_core('||', @_); } ############################################################################### From pluknet at nginx.com Tue Sep 21 09:43:14 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 21 Sep 2021 12:43:14 +0300 Subject: Unit test for proxy_protocol In-Reply-To: References: Message-ID: <65D7B07E-DB80-4EB9-BD0C-86F52E971549@nginx.com> > On 21 Sep 2021, at 07:56, Greeshma A wrote: > > I am trying to test the following config : > For proxy_protocol I have made changes in the nginx source code to support variable input parameter as well. > Im writing unit tests for this. However, Im not sure how to configure the map directive. The variable $ssl_preread_server_nameis wrong. I need to give the destination address and port. 
What would I have to give in the key part of the map definition? > ie something like > map $remote_addr:$remote_port $proxy_state { > # conditions > } For destination you'd need $server_addr / $server_port variables. See stream_map.t as a rough example. You might also want to look at these variables: $proxy_protocol_server_addr / $proxy_protocol_server_port. All of them described on this page: http://nginx.org/en/docs/stream/ngx_stream_core_module.html > > stream { > %%TEST_GLOBALS_STREAM%% > > map $ssl_preread_server_name $proxy_state{ > default off; > 127.0.0.1:8084 on; > } > > server { > listen 127.0.0.1:8080; > proxy_pass 127.0.0.1:8081; > proxy_protocol on; > } > > server { > listen 127.0.0.1:8082; > proxy_pass 127.0.0.1:8081; > proxy_protocol off; > } > > server { > listen 127.0.0.1:8083; > proxy_pass 127.0.0.1:8081; > proxy_protocol $proxy_state; > } > > server { > listen 127.0.0.1:8084; > proxy_pass 127.0.0.1:8081; > proxy_protocol $proxy_state; > } > } > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Sergey Kandaurov From greeshma.avadhootha at gmail.com Tue Sep 21 14:46:17 2021 From: greeshma.avadhootha at gmail.com (Greeshma A) Date: Tue, 21 Sep 2021 07:46:17 -0700 Subject: Unit test for proxy_protocol In-Reply-To: <65D7B07E-DB80-4EB9-BD0C-86F52E971549@nginx.com> References: <65D7B07E-DB80-4EB9-BD0C-86F52E971549@nginx.com> Message-ID: Thanks for pointing to the variable I can use. What variable would capture the ip address and port number together? As far as I see, it's either the ip address or port? How can I combine them in the map definition? Can I put a colon in the map definition like follows map $proxy_protocol_addr:$proxy_protocol_port $proxy_state{ } On Tue, Sep 21, 2021, 2:43 AM Sergey Kandaurov wrote: > > > On 21 Sep 2021, at 07:56, Greeshma A > wrote: > > > > I am trying to test the following config : > > For proxy_protocol I have made changes in the nginx source code to > support variable input parameter as well. > > Im writing unit tests for this. However, Im not sure how to configure > the map directive. The variable $ssl_preread_server_nameis wrong. I need > to give the destination address and port. What would I have to give in the > key part of the map definition? > > ie something like > > map $remote_addr:$remote_port $proxy_state { > > # conditions > > } > > For destination you'd need $server_addr / $server_port variables. > See stream_map.t as a rough example. > You might also want to look at these variables: > $proxy_protocol_server_addr / $proxy_protocol_server_port. 
> All of them described on this page: > http://nginx.org/en/docs/stream/ngx_stream_core_module.html > > > > > stream { > > %%TEST_GLOBALS_STREAM%% > > > > map $ssl_preread_server_name $proxy_state{ > > default off; > > 127.0.0.1:8084 on; > > } > > > > server { > > listen 127.0.0.1:8080; > > proxy_pass 127.0.0.1:8081; > > proxy_protocol on; > > } > > > > server { > > listen 127.0.0.1:8082; > > proxy_pass 127.0.0.1:8081; > > proxy_protocol off; > > } > > > > server { > > listen 127.0.0.1:8083; > > proxy_pass 127.0.0.1:8081; > > proxy_protocol $proxy_state; > > } > > > > server { > > listen 127.0.0.1:8084; > > proxy_pass 127.0.0.1:8081; > > proxy_protocol $proxy_state; > > } > > } > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > -- > Sergey Kandaurov > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Tue Sep 21 14:54:54 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 21 Sep 2021 17:54:54 +0300 Subject: Unit test for proxy_protocol In-Reply-To: References: <65D7B07E-DB80-4EB9-BD0C-86F52E971549@nginx.com> Message-ID: > On 21 Sep 2021, at 17:46, Greeshma A wrote: > > Thanks for pointing to the variable I can use. > > What variable would capture the ip address and port number together? > As far as I see, it's either the ip address or port? > How can I combine them in the map definition? > Can I put a colon in the map definition like follows > > map $proxy_protocol_addr:$proxy_protocol_port $proxy_state{ > > } > Yes, you can in modern nginx versions. See the very beginning of the reference: http://nginx.org/r/map -- Sergey Kandaurov From greeshma.avadhootha at gmail.com Tue Sep 21 15:55:20 2021 From: greeshma.avadhootha at gmail.com (Greeshma A) Date: Tue, 21 Sep 2021 08:55:20 -0700 Subject: Unit test for proxy_protocol In-Reply-To: References: <65D7B07E-DB80-4EB9-BD0C-86F52E971549@nginx.com> Message-ID: Thanks! It worked :) On Tue, Sep 21, 2021 at 7:55 AM Sergey Kandaurov wrote: > > > > On 21 Sep 2021, at 17:46, Greeshma A > wrote: > > > > Thanks for pointing to the variable I can use. > > > > What variable would capture the ip address and port number together? > > As far as I see, it's either the ip address or port? > > How can I combine them in the map definition? > > Can I put a colon in the map definition like follows > > > > map $proxy_protocol_addr:$proxy_protocol_port $proxy_state{ > > > > } > > > > Yes, you can in modern nginx versions. > See the very beginning of the reference: > http://nginx.org/r/map > > -- > Sergey Kandaurov > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From greeshma.avadhootha at gmail.com Tue Sep 21 17:14:13 2021 From: greeshma.avadhootha at gmail.com (Greeshma A) Date: Tue, 21 Sep 2021 10:14:13 -0700 Subject: Perl tests in nginx Message-ID: Are there any perl tests in nginx to test bad configuration? -Greeshma -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From pluknet at nginx.com Wed Sep 22 10:29:26 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 22 Sep 2021 13:29:26 +0300 Subject: Perl tests in nginx In-Reply-To: References: Message-ID: > On 21 Sep 2021, at 20:14, Greeshma A wrote: > > Are there any perl tests in nginx to test bad configuration? > No. -- Sergey Kandaurov From vl at nginx.com Wed Sep 22 15:19:08 2021 From: vl at nginx.com (Vladimir Homutov) Date: Wed, 22 Sep 2021 15:19:08 +0000 Subject: [nginx] Stream: added half-close support. Message-ID: details: https://hg.nginx.org/nginx/rev/bfad703459b4 branches: changeset: 7929:bfad703459b4 user: Vladimir Homutov date: Wed Sep 22 10:20:00 2021 +0300 description: Stream: added half-close support. The "proxy_half_close" directive enables handling of TCP half close. If enabled, connection to proxied server is kept open until both read ends get EOF. Write end shutdown is properly transmitted via proxy. diffstat: src/stream/ngx_stream_proxy_module.c | 36 ++++++++++++++++++++++++++++++++++++ src/stream/ngx_stream_upstream.h | 1 + 2 files changed, 37 insertions(+), 0 deletions(-) diffs (92 lines): diff -r 97cf8284fd19 -r bfad703459b4 src/stream/ngx_stream_proxy_module.c --- a/src/stream/ngx_stream_proxy_module.c Fri Sep 10 12:59:22 2021 +0300 +++ b/src/stream/ngx_stream_proxy_module.c Wed Sep 22 10:20:00 2021 +0300 @@ -31,6 +31,7 @@ typedef struct { ngx_uint_t next_upstream_tries; ngx_flag_t next_upstream; ngx_flag_t proxy_protocol; + ngx_flag_t half_close; ngx_stream_upstream_local_t *local; ngx_flag_t socket_keepalive; @@ -245,6 +246,13 @@ static ngx_command_t ngx_stream_proxy_c offsetof(ngx_stream_proxy_srv_conf_t, proxy_protocol), NULL }, + { ngx_string("proxy_half_close"), + NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_STREAM_SRV_CONF_OFFSET, + offsetof(ngx_stream_proxy_srv_conf_t, half_close), + NULL }, + #if (NGX_STREAM_SSL) { ngx_string("proxy_ssl"), @@ -1755,6 +1763,24 @@ ngx_stream_proxy_process(ngx_stream_sess } if (dst) { + + if (dst->type == SOCK_STREAM && pscf->half_close + && src->read->eof && !u->half_closed && !dst->buffered) + { + if (ngx_shutdown_socket(dst->fd, NGX_WRITE_SHUTDOWN) == -1) { + ngx_connection_error(c, ngx_socket_errno, + ngx_shutdown_socket_n " failed"); + + ngx_stream_proxy_finalize(s, NGX_STREAM_INTERNAL_SERVER_ERROR); + return; + } + + u->half_closed = 1; + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, + "stream proxy %s socket shutdown", + from_upstream ? 
"client" : "upstream"); + } + if (ngx_handle_write_event(dst->write, 0) != NGX_OK) { ngx_stream_proxy_finalize(s, NGX_STREAM_INTERNAL_SERVER_ERROR); return; @@ -1833,6 +1859,13 @@ ngx_stream_proxy_test_finalize(ngx_strea return NGX_DECLINED; } + if (pscf->half_close) { + /* avoid closing live connections until both read ends get EOF */ + if (!(c->read->eof && pc->read->eof && !c->buffered && !pc->buffered)) { + return NGX_DECLINED; + } + } + handler = c->log->handler; c->log->handler = NULL; @@ -2052,6 +2085,7 @@ ngx_stream_proxy_create_srv_conf(ngx_con conf->proxy_protocol = NGX_CONF_UNSET; conf->local = NGX_CONF_UNSET_PTR; conf->socket_keepalive = NGX_CONF_UNSET; + conf->half_close = NGX_CONF_UNSET; #if (NGX_STREAM_SSL) conf->ssl_enable = NGX_CONF_UNSET; @@ -2110,6 +2144,8 @@ ngx_stream_proxy_merge_srv_conf(ngx_conf ngx_conf_merge_value(conf->socket_keepalive, prev->socket_keepalive, 0); + ngx_conf_merge_value(conf->half_close, prev->half_close, 0); + #if (NGX_STREAM_SSL) ngx_conf_merge_value(conf->ssl_enable, prev->ssl_enable, 0); diff -r 97cf8284fd19 -r bfad703459b4 src/stream/ngx_stream_upstream.h --- a/src/stream/ngx_stream_upstream.h Fri Sep 10 12:59:22 2021 +0300 +++ b/src/stream/ngx_stream_upstream.h Wed Sep 22 10:20:00 2021 +0300 @@ -142,6 +142,7 @@ typedef struct { ngx_stream_upstream_state_t *state; unsigned connected:1; unsigned proxy_protocol:1; + unsigned half_closed:1; } ngx_stream_upstream_t; From devnexen at gmail.com Thu Sep 23 17:29:21 2021 From: devnexen at gmail.com (David CARLIER) Date: Thu, 23 Sep 2021 18:29:21 +0100 Subject: [PATCH]: Improve Haiku build Message-ID: Hi dear list, here a patch proposal for the haiku os. Kind regards. -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx.diff Type: text/x-patch Size: 2394 bytes Desc: not available URL: From mdounin at mdounin.ru Fri Sep 24 02:52:26 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 24 Sep 2021 05:52:26 +0300 Subject: [PATCH]: Improve Haiku build In-Reply-To: References: Message-ID: Hello! On Thu, Sep 23, 2021 at 06:29:21PM +0100, David CARLIER wrote: > Hi dear list, > > here a patch proposal for the haiku os. > > Kind regards. > changeset: 7930:96ffec978b80 > tag: improve_haiku_support_disable_unsupported_code_paths_adding_specific_build_ > tag: qbase > tag: qtip > tag: tip > user: David Carlier > date: Thu Sep 23 18:13:21 2021 +0000 > summary: Improve Haiku support/disable unsupported code paths, adding specific build config components. > > diff -r bfad703459b4 -r 96ffec978b80 auto/os/conf > --- a/auto/os/conf Wed Sep 22 10:20:00 2021 +0300 > +++ b/auto/os/conf Thu Sep 23 18:13:21 2021 +0000 > @@ -27,6 +27,10 @@ > . auto/os/win32 > ;; > > + Haiku:*) > + . auto/os/haiku > + ;; > + > DragonFly:*) > have=NGX_FREEBSD . 
auto/have_headers > CORE_INCS="$UNIX_INCS" > diff -r bfad703459b4 -r 96ffec978b80 auto/sources > --- a/auto/sources Wed Sep 22 10:20:00 2021 +0300 > +++ b/auto/sources Thu Sep 23 18:13:21 2021 +0000 > @@ -208,6 +208,8 @@ > DARWIN_SRCS=src/os/unix/ngx_darwin_init.c > DARWIN_SENDFILE_SRCS=src/os/unix/ngx_darwin_sendfile_chain.c > > +HAIKU_DEPS="src/os/unix/ngx_haiku_config.h" > + > > WIN32_INCS="$CORE_INCS $EVENT_INCS src/os/win32" > > diff -r bfad703459b4 -r 96ffec978b80 src/core/ngx_config.h > --- a/src/core/ngx_config.h Wed Sep 22 10:20:00 2021 +0300 > +++ b/src/core/ngx_config.h Thu Sep 23 18:13:21 2021 +0000 > @@ -37,6 +37,8 @@ > #elif (NGX_WIN32) > #include > > +#elif (NGX_HAIKU) > +#include > > #else /* POSIX */ > #include > diff -r bfad703459b4 -r 96ffec978b80 src/os/unix/ngx_process.c > --- a/src/os/unix/ngx_process.c Wed Sep 22 10:20:00 2021 +0300 > +++ b/src/os/unix/ngx_process.c Thu Sep 23 18:13:21 2021 +0000 > @@ -87,7 +87,9 @@ > ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, > char *name, ngx_int_t respawn) > { > +#if !(NGX_HAIKU) > u_long on; > +#endif > ngx_pid_t pid; > ngx_int_t s; > > @@ -142,6 +144,7 @@ > return NGX_INVALID_PID; > } > > +#if !(NGX_HAIKU) > on = 1; > if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { > ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > @@ -156,6 +159,7 @@ > ngx_close_channel(ngx_processes[s].channel, cycle->log); > return NGX_INVALID_PID; > } > +#endif > > if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { > ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > Thanks for the patch. It looks broken though, as it lacks the ngx_haiku_config.h file. Further, it might be a better idea to avoid introducing HaikuOS-specific files and defines, and keep changes you want to introduce to better support HaikuOS under generic POSIX support instead. Note well that simply disabling FIOASYNC isn't a good solution. While it might not be used now, it is certainly will be important when interprocess communications via channels will used for additional things, such as passing open file descriptors of log files from master to worker processes (https://trac.nginx.org/nginx/ticket/376). -- Maxim Dounin http://mdounin.ru/ From andre.romcke at gmail.com Mon Sep 27 08:55:26 2021 From: andre.romcke at gmail.com (=?UTF-8?B?QW5kcsOpIFLDuG1ja2U=?=) Date: Mon, 27 Sep 2021 10:55:26 +0200 Subject: [PATCH] Add image/avif to conf/mime.types In-Reply-To: References: Message-ID: Format is stable and broader AVIF support (& likely also adoption) is incoming: - About 1/2 size compared to jpeg, 2/3 of webp, and roughly 1/1 with JPEG XL* - Already supported in Chrome and Firefox: - Also in Chromium** so soon in Edge, Opera, ... - And apparently landed in Webkit*** Kind regards. * JPEG XL bitstream is frozen, but still work in progress & not supported out of the box: - https://en.wikipedia.org/wiki/JPEG_XL - https://caniuse.com/jpegxl ** https://bugs.chromium.org/p/chromium/issues/detail?id=960620 *** https://bugs.webkit.org/show_bug.cgi?id=207750 -------------- next part -------------- A non-text attachment was scrubbed... Name: avif_mime_type.diff Type: application/octet-stream Size: 528 bytes Desc: not available URL: From mdounin at mdounin.ru Mon Sep 27 13:18:06 2021 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Mon, 27 Sep 2021 16:18:06 +0300 Subject: [PATCH 0 of 2] KTLS / SSL_sendfile() support Message-ID: Hello! This patch series add kernel TLS / SSL_sendfile() support. 
Works on FreeBSD 13.0+ and Linux with kernel 4.13+ (at least 5.2 is recommended, tested with 5.11). The following questions need additional testing/attention: - What about EINTR? Looks like it simply results in SSL_ERROR_WANT_WRITE, so might need extra checking to make sure there will be another write event. - What about SSL_sendfile(), early data and write blocking? Ref. c->ssl->write_blocked, 7431:294162223c7c by pluknet at . Looks like it is not a problem with SSL_sendfile(), but needs further checking. - What about FreeBSD aio sendfile (aka SF_NODISKIO)? Might be easy enough to support. Review and testing appreciated. -- Maxim Dounin From mdounin at mdounin.ru Mon Sep 27 13:18:07 2021 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Mon, 27 Sep 2021 16:18:07 +0300 Subject: [PATCH 1 of 2] Style: added missing "static" specifiers In-Reply-To: References: Message-ID: <8f0fd60c33c106fba5f1.1632748687@vm-bsd.mdounin.ru> # HG changeset patch # User Maxim Dounin # Date 1632717373 -10800 # Mon Sep 27 07:36:13 2021 +0300 # Node ID 8f0fd60c33c106fba5f1ce3cafe990f15fcccc0c # Parent bfad703459b4e2416548ac66f548e96c2197d9cc Style: added missing "static" specifiers. Mostly found by gcc -Wtraditional, per "non-static declaration of ... follows static declaration [-Wtraditional]" warnings. diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -2767,7 +2767,7 @@ ngx_ssl_write(ngx_connection_t *c, u_cha #ifdef SSL_READ_EARLY_DATA_SUCCESS -ssize_t +static ssize_t ngx_ssl_write_early(ngx_connection_t *c, u_char *data, size_t size) { int n, sslerr; diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c +++ b/src/stream/ngx_stream_ssl_module.c @@ -23,7 +23,8 @@ static ngx_int_t ngx_stream_ssl_init_con ngx_connection_t *c); static void ngx_stream_ssl_handshake_handler(ngx_connection_t *c); #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME -int ngx_stream_ssl_servername(ngx_ssl_conn_t *ssl_conn, int *ad, void *arg); +static int ngx_stream_ssl_servername(ngx_ssl_conn_t *ssl_conn, int *ad, + void *arg); #endif #ifdef SSL_R_CERT_CB_ERROR static int ngx_stream_ssl_certificate(ngx_ssl_conn_t *ssl_conn, void *arg); @@ -434,7 +435,7 @@ ngx_stream_ssl_handshake_handler(ngx_con #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME -int +static int ngx_stream_ssl_servername(ngx_ssl_conn_t *ssl_conn, int *ad, void *arg) { return SSL_TLSEXT_ERR_OK; @@ -445,7 +446,7 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t #ifdef SSL_R_CERT_CB_ERROR -int +static int ngx_stream_ssl_certificate(ngx_ssl_conn_t *ssl_conn, void *arg) { ngx_str_t cert, key; From mdounin at mdounin.ru Mon Sep 27 13:18:08 2021 From: mdounin at mdounin.ru (=?utf-8?q?Maxim_Dounin?=) Date: Mon, 27 Sep 2021 16:18:08 +0300 Subject: [PATCH 2 of 2] SSL: SSL_sendfile() support with kernel TLS In-Reply-To: References: Message-ID: # HG changeset patch # User Maxim Dounin # Date 1632717779 -10800 # Mon Sep 27 07:42:59 2021 +0300 # Node ID ff514bf17f7f2257dcf036c5c973b74672cefa9a # Parent 8f0fd60c33c106fba5f1ce3cafe990f15fcccc0c SSL: SSL_sendfile() support with kernel TLS. Requires OpenSSL 3.0 compiled with "enable-ktls" option. Further, KTLS needs to be enabled in kernel, and in OpenSSL, either via OpenSSL configuration file or with "ssl_conf_command Options KTLS;" in nginx configuration. 
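For context, the OpenSSL 3.0 calls this series builds on look roughly as follows outside of nginx; this is a minimal sketch assuming an already handshaked connection "ssl" and a plain file descriptor "fd" (error handling trimmed, names illustrative):

    #include <sys/types.h>
    #include <openssl/ssl.h>

    /*
     * KTLS is requested at context setup, e.g. with
     * SSL_CTX_set_options(ctx, SSL_OP_ENABLE_KTLS), which is roughly what
     * "ssl_conf_command Options KTLS;" ends up doing.
     */

    static ssize_t
    send_file_over_tls(SSL *ssl, int fd, off_t offset, size_t len)
    {
        ossl_ssize_t  n;

        /* nonzero only if the kernel TLS send path was actually set up */
        if (BIO_get_ktls_send(SSL_get_wbio(ssl)) != 1) {
            return -1;   /* caller falls back to read() + SSL_write() */
        }

        /* the kernel encrypts and transmits the file, no user-space copy */
        n = SSL_sendfile(ssl, fd, offset, len, 0);

        if (n < 0 && SSL_get_error(ssl, (int) n) == SSL_ERROR_WANT_WRITE) {
            return 0;    /* retry when the socket becomes writable again */
        }

        return (ssize_t) n;
    }

In the patch below the same checks appear as the BIO_get_ktls_send() test after the handshake (which sets c->ssl->sendfile) and the new ngx_ssl_sendfile() wrapper around SSL_sendfile().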
On FreeBSD, kernel TLS is available starting with FreeBSD 13.0, and can be enabled with "sysctl kern.ipc.tls.enable=1" and "kldload ktls_ocf". On Linux, kernel TLS is available starting with kernel 4.13 (at least 5.2 is recommended), and needs kernel compiled with CONFIG_TLS=y (with CONFIG_TLS=m, which is used at least on Ubuntu 21.04 by default, the tls module needs to be loaded with "modprobe tls"). diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -47,6 +47,8 @@ static void ngx_ssl_write_handler(ngx_ev static ssize_t ngx_ssl_write_early(ngx_connection_t *c, u_char *data, size_t size); #endif +static ssize_t ngx_ssl_sendfile(ngx_connection_t *c, ngx_buf_t *file, + size_t size); static void ngx_ssl_read_handler(ngx_event_t *rev); static void ngx_ssl_shutdown_handler(ngx_event_t *ev); static void ngx_ssl_connection_error(ngx_connection_t *c, int sslerr, @@ -1764,6 +1766,16 @@ ngx_ssl_handshake(ngx_connection_t *c) #endif #endif +#ifdef BIO_get_ktls_send + + if (BIO_get_ktls_send(SSL_get_wbio(c->ssl->connection)) == 1) { + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "BIO_get_ktls_send(): 1"); + c->ssl->sendfile = 1; + } + +#endif + rc = ngx_ssl_ocsp_validate(c); if (rc == NGX_ERROR) { @@ -1899,6 +1911,16 @@ ngx_ssl_try_early_data(ngx_connection_t c->read->ready = 1; c->write->ready = 1; +#ifdef BIO_get_ktls_send + + if (BIO_get_ktls_send(SSL_get_wbio(c->ssl->connection)) == 1) { + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "BIO_get_ktls_send(): 1"); + c->ssl->sendfile = 1; + } + +#endif + rc = ngx_ssl_ocsp_validate(c); if (rc == NGX_ERROR) { @@ -2502,10 +2524,11 @@ ngx_ssl_write_handler(ngx_event_t *wev) ngx_chain_t * ngx_ssl_send_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit) { - int n; - ngx_uint_t flush; - ssize_t send, size; - ngx_buf_t *buf; + int n; + ngx_uint_t flush; + ssize_t send, size, file_size; + ngx_buf_t *buf; + ngx_chain_t *cl; if (!c->ssl->buffer) { @@ -2579,6 +2602,11 @@ ngx_ssl_send_chain(ngx_connection_t *c, continue; } + if (in->buf->in_file && c->ssl->sendfile) { + flush = 1; + break; + } + size = in->buf->last - in->buf->pos; if (size > buf->end - buf->last) { @@ -2610,8 +2638,35 @@ ngx_ssl_send_chain(ngx_connection_t *c, size = buf->last - buf->pos; if (size == 0) { + + if (in && in->buf->in_file && send < limit) { + + /* coalesce the neighbouring file bufs */ + + cl = in; + file_size = (size_t) ngx_chain_coalesce_file(&cl, limit - send); + + n = ngx_ssl_sendfile(c, in->buf, file_size); + + if (n == NGX_ERROR) { + return NGX_CHAIN_ERROR; + } + + if (n == NGX_AGAIN) { + break; + } + + in = ngx_chain_update_sent(in, n); + + send += n; + flush = 0; + + continue; + } + buf->flush = 0; c->buffered &= ~NGX_SSL_BUFFERED; + return in; } @@ -2636,7 +2691,7 @@ ngx_ssl_send_chain(ngx_connection_t *c, buf->pos = buf->start; buf->last = buf->start; - if (in == NULL || send == limit) { + if (in == NULL || send >= limit) { break; } } @@ -2882,6 +2937,150 @@ ngx_ssl_write_early(ngx_connection_t *c, #endif +static ssize_t +ngx_ssl_sendfile(ngx_connection_t *c, ngx_buf_t *file, size_t size) +{ +#ifdef BIO_get_ktls_send + + int sslerr; + ssize_t n; + ngx_err_t err; + + ngx_ssl_clear_error(c->log); + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL to sendfile: @%O %uz", + file->file_pos, size); + + ngx_set_errno(0); + + n = SSL_sendfile(c->ssl->connection, file->file->fd, file->file_pos, + size, 0); + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, 
"SSL_sendfile: %d", n); + + if (n > 0) { + + if (c->ssl->saved_read_handler) { + + c->read->handler = c->ssl->saved_read_handler; + c->ssl->saved_read_handler = NULL; + c->read->ready = 1; + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + return NGX_ERROR; + } + + ngx_post_event(c->read, &ngx_posted_events); + } + + c->sent += n; + + return n; + } + + if (n == 0) { + + /* + * if sendfile returns zero, then someone has truncated the file, + * so the offset became beyond the end of the file + */ + + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "SSL_sendfile() reported that \"%s\" was truncated at %O", + file->file->name.data, file->file_pos); + + return NGX_ERROR; + } + + sslerr = SSL_get_error(c->ssl->connection, n); + + if (sslerr == SSL_ERROR_ZERO_RETURN) { + + /* + * OpenSSL fails to return SSL_ERROR_SYSCALL if an error + * happens during writing after close_notify alert from the + * peer, and returns SSL_ERROR_ZERO_RETURN instead + */ + + sslerr = SSL_ERROR_SYSCALL; + } + + if (sslerr == SSL_ERROR_SSL + && ERR_GET_REASON(ERR_peek_error()) == SSL_R_UNINITIALIZED + && ngx_errno != 0) + { + /* + * OpenSSL fails to return SSL_ERROR_SYSCALL if an error + * happens in sendfile(), and returns SSL_ERROR_SSL with + * SSL_R_UNINITIALIZED reason instead + */ + + sslerr = SSL_ERROR_SYSCALL; + } + + err = (sslerr == SSL_ERROR_SYSCALL) ? ngx_errno : 0; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0, "SSL_get_error: %d", sslerr); + + if (sslerr == SSL_ERROR_WANT_WRITE) { + + if (c->ssl->saved_read_handler) { + + c->read->handler = c->ssl->saved_read_handler; + c->ssl->saved_read_handler = NULL; + c->read->ready = 1; + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + return NGX_ERROR; + } + + ngx_post_event(c->read, &ngx_posted_events); + } + + c->write->ready = 0; + return NGX_AGAIN; + } + + if (sslerr == SSL_ERROR_WANT_READ) { + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL_sendfile: want read"); + + c->read->ready = 0; + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + return NGX_ERROR; + } + + /* + * we do not set the timer because there is already + * the write event timer + */ + + if (c->ssl->saved_read_handler == NULL) { + c->ssl->saved_read_handler = c->read->handler; + c->read->handler = ngx_ssl_read_handler; + } + + return NGX_AGAIN; + } + + c->ssl->no_wait_shutdown = 1; + c->ssl->no_send_shutdown = 1; + c->write->error = 1; + + ngx_ssl_connection_error(c, sslerr, err, "SSL_sendfile() failed"); + +#else + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "SSL_sendfile() not available"); +#endif + + return NGX_ERROR; +} + + static void ngx_ssl_read_handler(ngx_event_t *rev) { diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -109,6 +109,7 @@ struct ngx_ssl_connection_s { unsigned handshake_rejected:1; unsigned renegotiation:1; unsigned buffer:1; + unsigned sendfile:1; unsigned no_wait_shutdown:1; unsigned no_send_shutdown:1; unsigned shutdown_without_free:1; diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c +++ b/src/http/ngx_http_request.c @@ -607,7 +607,7 @@ ngx_http_alloc_request(ngx_connection_t } #if (NGX_HTTP_SSL) - if (c->ssl) { + if (c->ssl && !c->ssl->sendfile) { r->main_filter_need_in_memory = 1; } #endif diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -1683,9 +1683,6 @@ ngx_http_upstream_ssl_init_connection(ng 
return; } - c->sendfile = 0; - u->output.sendfile = 0; - if (u->conf->ssl_server_name || u->conf->ssl_verify) { if (ngx_http_upstream_ssl_name(r, u, c) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, @@ -1791,6 +1788,11 @@ ngx_http_upstream_ssl_handshake(ngx_http } } + if (!c->ssl->sendfile) { + c->sendfile = 0; + u->output.sendfile = 0; + } + c->write->handler = ngx_http_upstream_handler; c->read->handler = ngx_http_upstream_handler; From spacewanderlzx at gmail.com Tue Sep 28 02:27:55 2021 From: spacewanderlzx at gmail.com (Zexuan Luo) Date: Tue, 28 Sep 2021 10:27:55 +0800 Subject: Inconsistent time measurement in Nginx Message-ID: Hi, Nginx developers: Currently, the request_time uses ngx_timeofday to get the time, which finally will call gettimeofday. Meanwhile, the upstream_x_time series uses ngx_current_msec to calculate the time, which finally will call ngx_monotonic_time. On Linux, the gettimeofday will call clock_gettime(CLOCK_REALTIME, &ts) while the ngx_monotonic_time will call clock_gettime(CLOCK_MONOTONIC_COARSE, &ts). So the request_time uses CLOCK_REALTIME and the upstream_x_time series uses CLOCK_MONOTONIC_COARSE. As they are different sources, sometimes we observe that the upstream_response_time is larger than request_time. This behavior is unexpected and has caused bug in our software. A similar report can be also found in https://stackoverflow.com/questions/53978695/how-can-request-time-be-less-than-upstream-response-time-in-nginx. Is this behavior intended? Why not use the same time measurement for all metrics? Thanks for your reply. From mdounin at mdounin.ru Tue Sep 28 13:44:16 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 28 Sep 2021 16:44:16 +0300 Subject: Inconsistent time measurement in Nginx In-Reply-To: References: Message-ID: Hello! On Tue, Sep 28, 2021 at 10:27:55AM +0800, Zexuan Luo wrote: > Hi, Nginx developers: > > Currently, the request_time uses ngx_timeofday to get the time, which > finally will call gettimeofday. Meanwhile, the upstream_x_time series > uses ngx_current_msec to calculate the time, which finally will call > ngx_monotonic_time. > > On Linux, the gettimeofday will call clock_gettime(CLOCK_REALTIME, > &ts) while the ngx_monotonic_time will call > clock_gettime(CLOCK_MONOTONIC_COARSE, &ts). > > So the request_time uses CLOCK_REALTIME and the upstream_x_time series > uses CLOCK_MONOTONIC_COARSE. As they are different sources, sometimes > we observe that the upstream_response_time is larger than > request_time. This behavior is unexpected and has caused bug in our > software. > > A similar report can be also found in > https://stackoverflow.com/questions/53978695/how-can-request-time-be-less-than-upstream-response-time-in-nginx. > > Is this behavior intended? Why not use the same time measurement for > all metrics? Thanks for your reply. The $upstream_response_time variable was changed to use ngx_current_msec in 59fc60585f1e (http://hg.nginx.org/nginx/rev/59fc60585f1e) as part of $upstream_connect_time introduction, because it is easier to code and more appropriate for time interval measurements, especially following introduction of CLOCK_MONOTONIC usage. The $response_time still uses ngx_timeofday() for mostly historic reasons. Likely it will be changed to use ngx_current_msec, though this needs to be done with care, notably because ngx_current_msec can overflow on 32-bit platforms. 
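To see the mismatch in isolation, here is a small standalone demo (not nginx code; Linux-specific because of CLOCK_MONOTONIC_COARSE) that measures one and the same interval with the two clock sources mentioned above; the results can differ by a few milliseconds simply because of the coarse clock's granularity and NTP adjustments to the realtime clock:

    #define _GNU_SOURCE        /* for CLOCK_MONOTONIC_COARSE on older glibc */
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static long
    interval_ms(struct timespec a, struct timespec b)
    {
        return (b.tv_sec - a.tv_sec) * 1000
               + (b.tv_nsec - a.tv_nsec) / 1000000;
    }

    int
    main(void)
    {
        struct timespec  r0, r1, m0, m1;

        clock_gettime(CLOCK_REALTIME, &r0);
        clock_gettime(CLOCK_MONOTONIC_COARSE, &m0);

        usleep(50 * 1000);             /* stand-in for proxying a request */

        clock_gettime(CLOCK_REALTIME, &r1);
        clock_gettime(CLOCK_MONOTONIC_COARSE, &m1);

        printf("realtime: %ld ms, monotonic-coarse: %ld ms\n",
               interval_ms(r0, r1), interval_ms(m0, m1));

        return 0;
    }

With intervals taken from different sources like this, an "upstream" time can indeed come out larger than the enclosing "request" time.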
At some point there was a patch by Sergey Kandaurov to do the change, though it was postponed due to questions on what happens on overflows in limit_rate handling. -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Wed Sep 29 14:05:22 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 29 Sep 2021 14:05:22 +0000 Subject: [njs] Fixed function constructor for cloned VMs. Message-ID: details: https://hg.nginx.org/njs/rev/06d4768b37d8 branches: changeset: 1708:06d4768b37d8 user: Dmitry Volyntsev date: Wed Sep 29 13:45:26 2021 +0000 description: Fixed function constructor for cloned VMs. Previously a shared "keywords_hash" and "values_hash" were used while compiling functions in runtime. This led to populating a shared hash with elements allocated in a cloned VM. Which resulted in heap-use-after-free when next cloned VM accesses the shared hashes. diffstat: src/njs_function.c | 4 +++- src/njs_generator.c | 24 ++++++++++++++---------- src/njs_lexer.c | 5 +++-- src/njs_lexer.h | 2 +- src/njs_module.c | 2 +- src/njs_vm.c | 6 ++++-- src/njs_vm.h | 1 + src/test/njs_unit_test.c | 11 +++++++++++ 8 files changed, 38 insertions(+), 17 deletions(-) diffs (200 lines): diff -r 6feba0e602ee -r 06d4768b37d8 src/njs_function.c --- a/src/njs_function.c Fri Sep 17 18:29:40 2021 +0000 +++ b/src/njs_function.c Wed Sep 29 13:45:26 2021 +0000 @@ -1156,7 +1156,8 @@ njs_function_constructor(njs_vm_t *vm, n file = njs_str_value("runtime"); - ret = njs_lexer_init(vm, &lexer, &file, str.start, str.start + str.length); + ret = njs_lexer_init(vm, &lexer, &file, str.start, str.start + str.length, + 1); if (njs_slow_path(ret != NJS_OK)) { return ret; } @@ -1206,6 +1207,7 @@ njs_function_constructor(njs_vm_t *vm, n } njs_memzero(&generator, sizeof(njs_generator_t)); + generator.runtime = 1; code = njs_generate_scope(vm, &generator, scope, &njs_entry_anonymous); if (njs_slow_path(code == NULL)) { diff -r 6feba0e602ee -r 06d4768b37d8 src/njs_generator.c --- a/src/njs_generator.c Fri Sep 17 18:29:40 2021 +0000 +++ b/src/njs_generator.c Wed Sep 29 13:45:26 2021 +0000 @@ -277,8 +277,8 @@ static njs_int_t njs_generate_inc_dec_op static njs_int_t njs_generate_function_declaration(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node); static njs_int_t njs_generate_function_scope(njs_vm_t *vm, - njs_function_lambda_t *lambda, njs_parser_node_t *node, - const njs_str_t *name, njs_uint_t depth); + njs_generator_t *generator, njs_function_lambda_t *lambda, + njs_parser_node_t *node, const njs_str_t *name); static njs_int_t njs_generate_scope_end(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node); static int64_t njs_generate_lambda_variables(njs_vm_t *vm, @@ -3056,8 +3056,8 @@ njs_generate_function_expression(njs_vm_ return NJS_ERROR; } - ret = njs_generate_function_scope(vm, lambda, node, &lex_entry->name, - generator->depth); + ret = njs_generate_function_scope(vm, generator, lambda, node, + &lex_entry->name); if (njs_slow_path(ret != NJS_OK)) { return ret; } @@ -3093,7 +3093,7 @@ njs_generate_function(njs_vm_t *vm, njs_ name = module ? 
&njs_entry_module : &njs_entry_anonymous; - ret = njs_generate_function_scope(vm, lambda, node, name, generator->depth); + ret = njs_generate_function_scope(vm, generator, lambda, node, name); if (njs_slow_path(ret != NJS_OK)) { return ret; } @@ -3594,8 +3594,8 @@ njs_generate_function_declaration(njs_vm return NJS_ERROR; } - ret = njs_generate_function_scope(vm, lambda, node, &lex_entry->name, - generator->depth); + ret = njs_generate_function_scope(vm, generator, lambda, node, + &lex_entry->name); if (njs_slow_path(ret != NJS_OK)) { return ret; } @@ -3617,23 +3617,27 @@ njs_generate_function_declaration(njs_vm static njs_int_t -njs_generate_function_scope(njs_vm_t *vm, njs_function_lambda_t *lambda, - njs_parser_node_t *node, const njs_str_t *name, njs_uint_t depth) +njs_generate_function_scope(njs_vm_t *vm, njs_generator_t *prev, + njs_function_lambda_t *lambda, njs_parser_node_t *node, + const njs_str_t *name) { njs_arr_t *arr; njs_bool_t module; + njs_uint_t depth; njs_vm_code_t *code; njs_generator_t generator; njs_parser_node_t *file_node; - njs_memzero(&generator, sizeof(njs_generator_t)); + depth = prev->depth; if (++depth >= NJS_FUNCTION_MAX_DEPTH) { njs_range_error(vm, "Maximum function nesting depth exceeded"); return NJS_ERROR; } + njs_memzero(&generator, sizeof(njs_generator_t)); generator.depth = depth; + generator.runtime = prev->runtime; node = node->right; diff -r 6feba0e602ee -r 06d4768b37d8 src/njs_lexer.c --- a/src/njs_lexer.c Fri Sep 17 18:29:40 2021 +0000 +++ b/src/njs_lexer.c Wed Sep 29 13:45:26 2021 +0000 @@ -290,7 +290,7 @@ static const njs_lexer_multi_t njs_assi njs_int_t njs_lexer_init(njs_vm_t *vm, njs_lexer_t *lexer, njs_str_t *file, - u_char *start, u_char *end) + u_char *start, u_char *end, njs_uint_t runtime) { njs_memzero(lexer, sizeof(njs_lexer_t)); @@ -298,7 +298,8 @@ njs_lexer_init(njs_vm_t *vm, njs_lexer_t lexer->start = start; lexer->end = end; lexer->line = 1; - lexer->keywords_hash = &vm->shared->keywords_hash; + lexer->keywords_hash = (runtime) ? 
&vm->keywords_hash + : &vm->shared->keywords_hash; lexer->mem_pool = vm->mem_pool; njs_queue_init(&lexer->preread); diff -r 6feba0e602ee -r 06d4768b37d8 src/njs_lexer.h --- a/src/njs_lexer.h Fri Sep 17 18:29:40 2021 +0000 +++ b/src/njs_lexer.h Wed Sep 29 13:45:26 2021 +0000 @@ -271,7 +271,7 @@ typedef struct { njs_int_t njs_lexer_init(njs_vm_t *vm, njs_lexer_t *lexer, njs_str_t *file, - u_char *start, u_char *end); + u_char *start, u_char *end, njs_uint_t runtime); njs_lexer_token_t *njs_lexer_token(njs_lexer_t *lexer, njs_bool_t with_end_line); diff -r 6feba0e602ee -r 06d4768b37d8 src/njs_module.c --- a/src/njs_module.c Fri Sep 17 18:29:40 2021 +0000 +++ b/src/njs_module.c Wed Sep 29 13:45:26 2021 +0000 @@ -185,7 +185,7 @@ njs_parser_module(njs_parser_t *parser, } ret = njs_lexer_init(parser->vm, &temp->lexer, &info.file, text.start, - text.start + text.length); + text.start + text.length, 0); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } diff -r 6feba0e602ee -r 06d4768b37d8 src/njs_vm.c --- a/src/njs_vm.c Fri Sep 17 18:29:40 2021 +0000 +++ b/src/njs_vm.c Wed Sep 29 13:45:26 2021 +0000 @@ -138,7 +138,7 @@ njs_vm_compile(njs_vm_t *vm, u_char **st njs_module_reset(vm); } - ret = njs_lexer_init(vm, &lexer, &vm->options.file, *start, end); + ret = njs_lexer_init(vm, &lexer, &vm->options.file, *start, end, 0); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } @@ -317,9 +317,11 @@ njs_vm_init(njs_vm_t *vm) return NJS_ERROR; } + njs_lvlhsh_init(&vm->values_hash); + njs_lvlhsh_init(&vm->keywords_hash); njs_lvlhsh_init(&vm->modules_hash); + njs_lvlhsh_init(&vm->events_hash); - njs_lvlhsh_init(&vm->events_hash); njs_queue_init(&vm->posted_events); njs_queue_init(&vm->promise_events); diff -r 6feba0e602ee -r 06d4768b37d8 src/njs_vm.h --- a/src/njs_vm.h Fri Sep 17 18:29:40 2021 +0000 +++ b/src/njs_vm.h Wed Sep 29 13:45:26 2021 +0000 @@ -147,6 +147,7 @@ struct njs_vm_s { njs_frame_t *active_frame; njs_rbtree_t *variables_hash; + njs_lvlhsh_t keywords_hash; njs_lvlhsh_t values_hash; njs_arr_t *modules; diff -r 6feba0e602ee -r 06d4768b37d8 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Fri Sep 17 18:29:40 2021 +0000 +++ b/src/test/njs_unit_test.c Wed Sep 29 13:45:26 2021 +0000 @@ -21028,6 +21028,17 @@ static njs_unit_test_t njs_shared_test[ njs_str("TypeError: \"path\" must be a string or Buffer\n" " at fs.readFileSync (native)\n" " at main (:1)\n") }, + + { njs_str("var f = new Function('return 1;'); f();"), + njs_str("1") }, + + { njs_str("var sum = new Function('a', 'b', 'return a + b');" + "sum(2, 4);"), + njs_str("6") }, + + { njs_str("var sum = new Function('a, b', 'return a + b');" + "sum(2, 4);"), + njs_str("6") }, }; From xeioex at nginx.com Wed Sep 29 16:14:39 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 29 Sep 2021 16:14:39 +0000 Subject: [njs] Tests: introducing repeat argument for unit tests. Message-ID: details: https://hg.nginx.org/njs/rev/56e3f06da4f0 branches: changeset: 1709:56e3f06da4f0 user: Dmitry Volyntsev date: Wed Sep 29 16:13:36 2021 +0000 description: Tests: introducing repeat argument for unit tests. 
diffstat: src/test/njs_unit_test.c | 13 ++++++++++++- 1 files changed, 12 insertions(+), 1 deletions(-) diffs (38 lines): diff -r 06d4768b37d8 -r 56e3f06da4f0 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Wed Sep 29 13:45:26 2021 +0000 +++ b/src/test/njs_unit_test.c Wed Sep 29 16:13:36 2021 +0000 @@ -22797,6 +22797,7 @@ njs_get_options(njs_opts_t *opts, int ar "Options:\n" " -d print disassembled code.\n" " -f PATTERN1[|PATTERN2..] filter test suites to run.\n" + " -r count overrides repeat count for tests.\n" " -v verbose mode.\n"; for (i = 1; i < argc; i++) { @@ -22829,6 +22830,15 @@ njs_get_options(njs_opts_t *opts, int ar njs_stderror("option \"-f\" requires argument\n"); return NJS_ERROR; + case 'r': + if (++i < argc) { + opts->repeat = atoi(argv[i]); + break; + } + + njs_stderror("option \"-r\" requires argument\n"); + return NJS_ERROR; + case 'v': opts->verbose = 1; break; @@ -23033,8 +23043,9 @@ main(int argc, char **argv) op = suite->opts; + op.disassemble = opts.disassemble; + op.repeat = opts.repeat ? opts.repeat : op.repeat; op.verbose = opts.verbose; - op.disassemble = opts.disassemble; ret = suite->run(suite->tests, suite->n, &suite->name, &op, &stat); if (ret != NJS_OK) { From bes.internal at gmail.com Wed Sep 29 22:24:48 2021 From: bes.internal at gmail.com (bes) Date: Thu, 30 Sep 2021 01:24:48 +0300 Subject: [PATCH] http_image_filter_module: Add HEIC and AVIF support / Add output format option Message-ID: Hi, Alex. With your patch and this configuration: image_filter resize 10000000 -; (something wider than input image) image_filter convert webp; no conversion occurs but return headers contain content-type: image/webp I expect that when specifying the conversion filter it will apply anyway. Most likely there is a shortcut somewhere in the algorithm, where after the condition code goes directly to the output, but I could not find it. -------------- next part -------------- An HTML attachment was scrubbed... URL: From arut at nginx.com Thu Sep 30 13:48:11 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 30 Sep 2021 16:48:11 +0300 Subject: [PATCH] Add optional "mp4_exact_start" nginx config off/on to show video between keyframes In-Reply-To: <5F32216C-A041-454C-A73C-0E1C259E434C@archive.org> References: <20210628095320.px3ggmmoyjalyv5m@Romans-MacBook-Pro.local> <5F32216C-A041-454C-A73C-0E1C259E434C@archive.org> Message-ID: <20210930134811.epttik4joflf2qj6@Romans-MacBook-Pro.local> Hi Tracey, On Mon, Sep 20, 2021 at 12:39:15PM -0700, Tracey Jaquith wrote: > Hi Roman, > > I had an idea for considering for the feature name / config flag / function name, in case of interest. > > What about ?startfast?? (or even ?faststart?, I suppose) > > That parallels nicely with the `qt-faststart` utility and the `ffmpeg -movflags faststart` > where the moov atom is moved to the front of the mp4. > > Since the `mp4` module is already rewriting a smaller moov atom for the desired clip, > *and* the mp4 module will move the moov atom to the front > (in case the source mp4 file has moov atom at the back), > It seems like ?startfast? might convey the moov atom approach *and* the concept > that we?re going to send early visually undesired frames out at ~30,000 fps :) > > For your consideration, thanks, > -Tracey Thanks for your suggestion. Currently we're considering the name "mp4_start_key_frame" which has the word "start" in it, which is the argument that enables the feature. But there's something more important I want to talk about. 
While doing internal review of the patch, we were conserned with some potential problems the patch could introduce. Specifically, when the video track has B-frames, PTS - DTS delay is stored in the "ctts" atom, which was not changed by the patch. This means that some frames from the hidden part of the video could show up in the visible part of it. I believe this could be handled properly, but the solution would be much more sophisticated than just zeroing out the initial part of ctts. Another problem is track delay, which was obvious from the start. The hidden part of the video still takes some time to play, which ruins synchronization between tracks. This may or may not be noticable in particular cases, but anyway the problem is still there. I've reimplemented the feature by using mp4 edit lists. In a nutshell, all frames up to the latest key frame are included in the video. Then, the initial part of the video is hidden from presentation by cutting it with an edit list. Looks like this solution does not have the problems I mentioned above. Can you try the new patch in your environment? We would really appreciate your feedback. > > On Jun 28, 2021, at 2:53 AM, Roman Arutyunyan wrote: > > > > Hi Tracey, > > > > On Tue, Jun 15, 2021 at 03:49:48PM -0700, Tracey Jaquith wrote: > >> # HG changeset patch > >> # User Tracey Jaquith > > >> # Date 1623797180 0 > >> # Tue Jun 15 22:46:20 2021 +0000 > >> # Node ID 1879d49fe0cf739f48287b5a38a83d3a1adab939 > >> # Parent 5f765427c17ac8cf753967387562201cf4f78dc4 > >> Add optional "mp4_exact_start" nginx config off/on to show video between keyframes. > > > > I've been thinking about a better name for this, but came up with nothing so > > far. I feel like this name does not give the right clue to the user. > > Moreover, when this feature is on, the start is not quite "exact", but shifted > > a few milliseconds into the past. > > > >> archive.org has been using mod_h264_streaming with a similar "exact start" patch from me since 2013. > >> We just moved to nginx mp4 module and are using this patch. > >> The technique is to find the video keyframe just before the desired "start" time, and send > >> that down the wire so video playback can start immediately. > >> Next calculate how many video samples are between the keyframe and desired "start" time > >> and update the STTS atom where those samples move the duration from (typically) 1001 to 1. > >> This way, initial unwanted video frames play at ~1/30,000s -- so visually the > >> video & audio start playing immediately. > >> > >> You can see an example before/after here (nginx binary built with mp4 module + patch): > >> > >> https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Correspondents_Dinner.mp4?start=12&end=30 > >> https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Correspondents_Dinner.mp4?start=12&end=30&exact=1 > >> > >> Tested on linux and macosx. 
> >> > >> (this is me: https://github.com/traceypooh ) > > > > We have a few rules about patches and commit messages like 67-character limit > > for the first line etc: > > > > http://nginx.org/en/docs/contributing_changes.html > > > >> diff -r 5f765427c17a -r 1879d49fe0cf src/http/modules/ngx_http_mp4_module.c > >> --- a/src/http/modules/ngx_http_mp4_module.c Tue Jun 01 17:37:51 2021 +0300 > >> +++ b/src/http/modules/ngx_http_mp4_module.c Tue Jun 15 22:46:20 2021 +0000 > >> @@ -43,6 +43,7 @@ > >> typedef struct { > >> size_t buffer_size; > >> size_t max_buffer_size; > >> + ngx_flag_t exact_start; > >> } ngx_http_mp4_conf_t; > >> > >> > >> @@ -340,6 +341,13 @@ > >> offsetof(ngx_http_mp4_conf_t, max_buffer_size), > >> NULL }, > >> > >> + { ngx_string("mp4_exact_start"), > >> + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > > > NGX_CONF_TAKE1 -> NGX_CONF_FLAG > > > >> + ngx_conf_set_flag_slot, > >> + NGX_HTTP_LOC_CONF_OFFSET, > >> + offsetof(ngx_http_mp4_conf_t, exact_start), > >> + NULL }, > >> + > >> ngx_null_command > >> }; > >> > >> @@ -2156,6 +2164,83 @@ > >> > >> > >> static ngx_int_t > >> +ngx_http_mp4_exact_start_video(ngx_http_mp4_file_t *mp4, ngx_http_mp4_trak_t *trak) > >> +{ > >> + uint32_t n, speedup_samples, current_count; > >> + ngx_uint_t sample_keyframe, start_sample_exact; > >> + ngx_mp4_stts_entry_t *entry, *entries_array; > >> + ngx_buf_t *data; > >> + > >> + data = trak->out[NGX_HTTP_MP4_STTS_DATA].buf; > >> + > >> + // Find the keyframe just before the desired start time - so that we can emit an mp4 > >> + // where the first frame is a keyframe. We'll "speed up" the first frames to 1000x > >> + // normal speed (typically), so they won't be noticed. But this way, perceptively, > >> + // playback of the _video_ track can start immediately > >> + // (and not have to wait until the keyframe _after_ the desired starting time frame). > >> + start_sample_exact = trak->start_sample; > >> + for (n = 0; n < trak->sync_samples_entries; n++) { > >> + // each element of array is the sample number of a keyframe > >> + // sync samples starts from 1 -- so subtract 1 > >> + sample_keyframe = ngx_mp4_get_32value(trak->stss_data_buf.pos + (n * 4)) - 1; > > > > This can be simplified by introducing entry/end variables like we usually do. > > > > Also, we don't access trak->stss_data_buf directly, but prefer > > trak->out[NGX_HTTP_MP4_STSS_ATOM].buf. > > > > ngx_http_mp4_crop_stss_data() provides an example of iterating over stss atom. > > > >> + if (sample_keyframe <= trak->start_sample) { > >> + start_sample_exact = sample_keyframe; > >> + } > >> + if (sample_keyframe >= trak->start_sample) { > >> + break; > >> + } > >> + } > >> + > >> + if (start_sample_exact < trak->start_sample) { > >> + // We're going to prepend an entry with duration=1 for the frames we want to "not see". > >> + // MOST of the time (eg: constant video framerate), > >> + // we're taking a single element entry array and making it two. 
> >> + speedup_samples = trak->start_sample - start_sample_exact; > >> + > >> + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, > >> + "exact trak start_sample move %l to %l (speed up %d samples)\n", > >> + trak->start_sample, start_sample_exact, speedup_samples); > >> + > >> + entries_array = ngx_palloc(mp4->request->pool, > >> + (1 + trak->time_to_sample_entries) * sizeof(ngx_mp4_stts_entry_t)); > >> + if (entries_array == NULL) { > >> + return NGX_ERROR; > >> + } > >> + entry = &(entries_array[1]); > >> + ngx_memcpy(entry, (ngx_mp4_stts_entry_t *)data->pos, > >> + trak->time_to_sample_entries * sizeof(ngx_mp4_stts_entry_t)); > > > > This reallocation can be avoided. Look at NGX_HTTP_MP4_STSC_START buffer > > as an example of that. A new 1-element optional buffer NGX_HTTP_MP4_STTS_START > > can be introduced right before the stts atom data. > > > >> + current_count = ngx_mp4_get_32value(entry->count); > >> + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, > >> + "exact split in 2 video STTS entry from count:%d", current_count); > >> + > >> + if (current_count <= speedup_samples) { > >> + return NGX_ERROR; > >> + } > >> + > >> + ngx_mp4_set_32value(entry->count, current_count - speedup_samples); > >> + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, > >> + "exact split new[1]: count:%d duration:%d", > >> + ngx_mp4_get_32value(entry->count), > >> + ngx_mp4_get_32value(entry->duration)); > >> + entry--; > >> + ngx_mp4_set_32value(entry->count, speedup_samples); > >> + ngx_mp4_set_32value(entry->duration, 1); > >> + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, > >> + "exact split new[0]: count:%d duration:1", > >> + ngx_mp4_get_32value(entry->count)); > >> + > >> + data->pos = (u_char *) entry; > >> + trak->time_to_sample_entries++; > >> + trak->start_sample = start_sample_exact; > >> + data->last = (u_char *) (entry + trak->time_to_sample_entries); > >> + } > >> + > >> + return NGX_OK; > >> +} > >> + > >> + > >> +static ngx_int_t > >> ngx_http_mp4_crop_stts_data(ngx_http_mp4_file_t *mp4, > >> ngx_http_mp4_trak_t *trak, ngx_uint_t start) > >> { > >> @@ -2164,6 +2249,8 @@ > >> ngx_buf_t *data; > >> ngx_uint_t start_sample, entries, start_sec; > >> ngx_mp4_stts_entry_t *entry, *end; > >> + ngx_http_mp4_conf_t *conf; > >> + > > > > No need for a new empty line here. > > > >> if (start) { > >> start_sec = mp4->start; > >> @@ -2238,6 +2325,10 @@ > >> "start_sample:%ui, new count:%uD", > >> trak->start_sample, count - rest); > >> > >> + conf = ngx_http_get_module_loc_conf(mp4->request, ngx_http_mp4_module); > >> + if (conf->exact_start) { > >> + ngx_http_mp4_exact_start_video(mp4, trak); > >> + } > >> } else { > >> ngx_mp4_set_32value(entry->count, rest); > >> data->last = (u_char *) (entry + 1); > >> @@ -3590,6 +3681,7 @@ > >> > >> conf->buffer_size = NGX_CONF_UNSET_SIZE; > >> conf->max_buffer_size = NGX_CONF_UNSET_SIZE; > >> + conf->exact_start = NGX_CONF_UNSET; > > > > This is not enough, a merge is needed too. > > > >> > >> return conf; > >> } > >> _______________________________________________ > >> nginx-devel mailing list > >> nginx-devel at nginx.org > >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > I've made a POC patch which incorporates the issues I've mentioned. > > I didn't test is properly and the directive name is still not perfect. 
> > > > -- > > Roman Arutyunyan > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -Tracey > @tracey_pooh > TV Architect https://archive.org/tv > > > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1633003526 -10800 # Thu Sep 30 15:05:26 2021 +0300 # Node ID 7527700ec8b19e3dd59561ee9b5a2cd87b07d5cd # Parent 97cf8284fd19b30169231e43831e7787baba72f2 Mp4: mp4_start_key_frame directive. The directive enables including all frames from start time to the most recent key frame in the result. Those frames are removed from presentation timeline using mp4 edit lists. Based on a patch by Tracey Jaquith, Internet Archive. diff --git a/src/http/modules/ngx_http_mp4_module.c b/src/http/modules/ngx_http_mp4_module.c --- a/src/http/modules/ngx_http_mp4_module.c +++ b/src/http/modules/ngx_http_mp4_module.c @@ -11,31 +11,33 @@ #define NGX_HTTP_MP4_TRAK_ATOM 0 #define NGX_HTTP_MP4_TKHD_ATOM 1 -#define NGX_HTTP_MP4_MDIA_ATOM 2 -#define NGX_HTTP_MP4_MDHD_ATOM 3 -#define NGX_HTTP_MP4_HDLR_ATOM 4 -#define NGX_HTTP_MP4_MINF_ATOM 5 -#define NGX_HTTP_MP4_VMHD_ATOM 6 -#define NGX_HTTP_MP4_SMHD_ATOM 7 -#define NGX_HTTP_MP4_DINF_ATOM 8 -#define NGX_HTTP_MP4_STBL_ATOM 9 -#define NGX_HTTP_MP4_STSD_ATOM 10 -#define NGX_HTTP_MP4_STTS_ATOM 11 -#define NGX_HTTP_MP4_STTS_DATA 12 -#define NGX_HTTP_MP4_STSS_ATOM 13 -#define NGX_HTTP_MP4_STSS_DATA 14 -#define NGX_HTTP_MP4_CTTS_ATOM 15 -#define NGX_HTTP_MP4_CTTS_DATA 16 -#define NGX_HTTP_MP4_STSC_ATOM 17 -#define NGX_HTTP_MP4_STSC_START 18 -#define NGX_HTTP_MP4_STSC_DATA 19 -#define NGX_HTTP_MP4_STSC_END 20 -#define NGX_HTTP_MP4_STSZ_ATOM 21 -#define NGX_HTTP_MP4_STSZ_DATA 22 -#define NGX_HTTP_MP4_STCO_ATOM 23 -#define NGX_HTTP_MP4_STCO_DATA 24 -#define NGX_HTTP_MP4_CO64_ATOM 25 -#define NGX_HTTP_MP4_CO64_DATA 26 +#define NGX_HTTP_MP4_EDTS_ATOM 2 +#define NGX_HTTP_MP4_ELST_ATOM 3 +#define NGX_HTTP_MP4_MDIA_ATOM 4 +#define NGX_HTTP_MP4_MDHD_ATOM 5 +#define NGX_HTTP_MP4_HDLR_ATOM 6 +#define NGX_HTTP_MP4_MINF_ATOM 7 +#define NGX_HTTP_MP4_VMHD_ATOM 8 +#define NGX_HTTP_MP4_SMHD_ATOM 9 +#define NGX_HTTP_MP4_DINF_ATOM 10 +#define NGX_HTTP_MP4_STBL_ATOM 11 +#define NGX_HTTP_MP4_STSD_ATOM 12 +#define NGX_HTTP_MP4_STTS_ATOM 13 +#define NGX_HTTP_MP4_STTS_DATA 14 +#define NGX_HTTP_MP4_STSS_ATOM 15 +#define NGX_HTTP_MP4_STSS_DATA 16 +#define NGX_HTTP_MP4_CTTS_ATOM 17 +#define NGX_HTTP_MP4_CTTS_DATA 18 +#define NGX_HTTP_MP4_STSC_ATOM 19 +#define NGX_HTTP_MP4_STSC_START 20 +#define NGX_HTTP_MP4_STSC_DATA 21 +#define NGX_HTTP_MP4_STSC_END 22 +#define NGX_HTTP_MP4_STSZ_ATOM 23 +#define NGX_HTTP_MP4_STSZ_DATA 24 +#define NGX_HTTP_MP4_STCO_ATOM 25 +#define NGX_HTTP_MP4_STCO_DATA 26 +#define NGX_HTTP_MP4_CO64_ATOM 27 +#define NGX_HTTP_MP4_CO64_DATA 28 #define NGX_HTTP_MP4_LAST_ATOM NGX_HTTP_MP4_CO64_DATA @@ -43,6 +45,7 @@ typedef struct { size_t buffer_size; size_t max_buffer_size; + ngx_flag_t start_key_frame; } ngx_http_mp4_conf_t; @@ -54,6 +57,25 @@ typedef struct { typedef struct { + u_char size[4]; + u_char name[4]; +} ngx_mp4_edts_atom_t; + + +typedef struct { + u_char size[4]; + u_char name[4]; + u_char version[1]; + u_char flags[3]; + u_char count[4]; + u_char duration[8]; + u_char time[8]; + u_char rate[2]; + u_char reserved[2]; +} 
ngx_mp4_elst_atom_t; + + +typedef struct { uint32_t timescale; uint32_t time_to_sample_entries; uint32_t sample_to_chunk_entries; @@ -70,6 +92,9 @@ typedef struct { ngx_uint_t end_chunk_samples; uint64_t start_chunk_samples_size; uint64_t end_chunk_samples_size; + uint64_t movie_duration; + uint64_t media_duration; + uint64_t media_prefix; off_t start_offset; off_t end_offset; @@ -85,6 +110,8 @@ typedef struct { ngx_buf_t trak_atom_buf; ngx_buf_t tkhd_atom_buf; + ngx_buf_t edts_atom_buf; + ngx_buf_t elst_atom_buf; ngx_buf_t mdia_atom_buf; ngx_buf_t mdhd_atom_buf; ngx_buf_t hdlr_atom_buf; @@ -111,6 +138,8 @@ typedef struct { ngx_buf_t co64_atom_buf; ngx_buf_t co64_data_buf; + ngx_mp4_edts_atom_t edts_atom; + ngx_mp4_elst_atom_t elst_atom; ngx_mp4_stsc_entry_t stsc_start_chunk_entry; ngx_mp4_stsc_entry_t stsc_end_chunk_entry; } ngx_http_mp4_trak_t; @@ -267,6 +296,8 @@ static ngx_int_t ngx_http_mp4_read_smhd_ uint64_t atom_data_size); static ngx_int_t ngx_http_mp4_read_stbl_atom(ngx_http_mp4_file_t *mp4, uint64_t atom_data_size); +static void ngx_http_mp4_update_edts_atom(ngx_http_mp4_file_t *mp4, + ngx_http_mp4_trak_t *trak); static void ngx_http_mp4_update_stbl_atom(ngx_http_mp4_file_t *mp4, ngx_http_mp4_trak_t *trak); static ngx_int_t ngx_http_mp4_read_stsd_atom(ngx_http_mp4_file_t *mp4, @@ -277,6 +308,8 @@ static ngx_int_t ngx_http_mp4_update_stt ngx_http_mp4_trak_t *trak); static ngx_int_t ngx_http_mp4_crop_stts_data(ngx_http_mp4_file_t *mp4, ngx_http_mp4_trak_t *trak, ngx_uint_t start); +static uint32_t ngx_http_mp4_seek_key_frame(ngx_http_mp4_file_t *mp4, + ngx_http_mp4_trak_t *trak, uint32_t start_sample); static ngx_int_t ngx_http_mp4_read_stss_atom(ngx_http_mp4_file_t *mp4, uint64_t atom_data_size); static ngx_int_t ngx_http_mp4_update_stss_atom(ngx_http_mp4_file_t *mp4, @@ -340,6 +373,13 @@ static ngx_command_t ngx_http_mp4_comma offsetof(ngx_http_mp4_conf_t, max_buffer_size), NULL }, + { ngx_string("mp4_start_key_frame"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_mp4_conf_t, start_key_frame), + NULL }, + ngx_null_command }; @@ -826,6 +866,7 @@ ngx_http_mp4_process(ngx_http_mp4_file_t trak[i].size += trak[i].hdlr_size; ngx_http_mp4_update_mdia_atom(mp4, &trak[i]); trak[i].size += trak[i].tkhd_size; + ngx_http_mp4_update_edts_atom(mp4, &trak[i]); ngx_http_mp4_update_trak_atom(mp4, &trak[i]); mp4->moov_size += trak[i].size; @@ -1587,6 +1628,7 @@ ngx_http_mp4_read_tkhd_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); trak->tkhd_size = atom_size; + trak->movie_duration = duration; ngx_mp4_set_32value(tkhd_atom->size, atom_size); @@ -1749,6 +1791,7 @@ ngx_http_mp4_read_mdhd_atom(ngx_http_mp4 trak = ngx_mp4_last_trak(mp4); trak->mdhd_size = atom_size; trak->timescale = timescale; + trak->media_duration = duration; ngx_mp4_set_32value(mdhd_atom->size, atom_size); @@ -1962,6 +2005,77 @@ ngx_http_mp4_read_stbl_atom(ngx_http_mp4 static void +ngx_http_mp4_update_edts_atom(ngx_http_mp4_file_t *mp4, + ngx_http_mp4_trak_t *trak) +{ + ngx_buf_t *atom; + ngx_mp4_elst_atom_t *elst_atom; + ngx_mp4_edts_atom_t *edts_atom; + ngx_mp4_mdhd_atom_t *mdhd_atom; + ngx_mp4_mdhd64_atom_t *mdhd64_atom; + + if (trak->media_prefix == 0) { + return; + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, + "mp4 edts atom update cut:%uL", trak->media_prefix); + + atom = &trak->mdhd_atom_buf; + mdhd_atom = (ngx_mp4_mdhd_atom_t *) atom->pos; + mdhd64_atom = (ngx_mp4_mdhd64_atom_t *) atom->pos; + + if 
(mdhd_atom->version[0] == 0) { + ngx_mp4_set_32value(mdhd_atom->duration, + trak->media_duration + trak->media_prefix); + + } else { + ngx_mp4_set_64value(mdhd64_atom->duration, + trak->media_duration + trak->media_prefix); + } + + edts_atom = &trak->edts_atom; + ngx_mp4_set_32value(edts_atom->size, sizeof(ngx_mp4_edts_atom_t) + + sizeof(ngx_mp4_elst_atom_t)); + ngx_mp4_set_atom_name(edts_atom, 'e', 'd', 't', 's'); + + atom = &trak->edts_atom_buf; + atom->temporary = 1; + atom->pos = (u_char *) edts_atom; + atom->last = (u_char *) edts_atom + sizeof(ngx_mp4_edts_atom_t); + + trak->out[NGX_HTTP_MP4_EDTS_ATOM].buf = atom; + + elst_atom = &trak->elst_atom; + ngx_mp4_set_32value(elst_atom->size, sizeof(ngx_mp4_elst_atom_t)); + ngx_mp4_set_atom_name(elst_atom, 'e', 'l', 's', 't'); + + elst_atom->version[0] = 1; + elst_atom->flags[0] = 0; + elst_atom->flags[1] = 0; + elst_atom->flags[2] = 0; + + ngx_mp4_set_32value(elst_atom->count, 1); + ngx_mp4_set_64value(elst_atom->duration, trak->movie_duration); + ngx_mp4_set_64value(elst_atom->time, trak->media_prefix); + + elst_atom->rate[0] = 0; + elst_atom->rate[1] = 1; + elst_atom->reserved[0] = 0; + elst_atom->reserved[1] = 0; + + atom = &trak->elst_atom_buf; + atom->temporary = 1; + atom->pos = (u_char *) elst_atom; + atom->last = (u_char *) elst_atom + sizeof(ngx_mp4_elst_atom_t); + + trak->out[NGX_HTTP_MP4_ELST_ATOM].buf = atom; + + trak->size += sizeof(ngx_mp4_edts_atom_t) + sizeof(ngx_mp4_elst_atom_t); +} + + +static void ngx_http_mp4_update_stbl_atom(ngx_http_mp4_file_t *mp4, ngx_http_mp4_trak_t *trak) { @@ -2159,7 +2273,7 @@ static ngx_int_t ngx_http_mp4_crop_stts_data(ngx_http_mp4_file_t *mp4, ngx_http_mp4_trak_t *trak, ngx_uint_t start) { - uint32_t count, duration, rest; + uint32_t count, duration, rest, key_prefix; uint64_t start_time; ngx_buf_t *data; ngx_uint_t start_sample, entries, start_sec; @@ -2229,6 +2343,25 @@ ngx_http_mp4_crop_stts_data(ngx_http_mp4 found: if (start) { + key_prefix = ngx_http_mp4_seek_key_frame(mp4, trak, start_sample); + + start_sample -= key_prefix; + + while (rest < key_prefix) { + trak->media_prefix += rest * duration; + key_prefix -= rest; + + entry--; + entries++; + + count = ngx_mp4_get_32value(entry->count); + duration = ngx_mp4_get_32value(entry->duration); + rest = count; + } + + trak->media_prefix += key_prefix * duration; + rest -= key_prefix; + ngx_mp4_set_32value(entry->count, count - rest); data->pos = (u_char *) entry; trak->time_to_sample_entries = entries; @@ -2253,6 +2386,49 @@ found: } +static uint32_t +ngx_http_mp4_seek_key_frame(ngx_http_mp4_file_t *mp4, ngx_http_mp4_trak_t *trak, + uint32_t start_sample) +{ + uint32_t key_prefix, sample, *entry, *end; + ngx_buf_t *data; + ngx_http_mp4_conf_t *conf; + + conf = ngx_http_get_module_loc_conf(mp4->request, ngx_http_mp4_module); + if (!conf->start_key_frame) { + return 0; + } + + data = trak->out[NGX_HTTP_MP4_STSS_DATA].buf; + if (data == NULL) { + return 0; + } + + entry = (uint32_t *) data->pos; + end = (uint32_t *) data->last; + + /* sync samples starts from 1 */ + start_sample++; + + key_prefix = 0; + + while (entry < end) { + sample = ngx_mp4_get_32value(entry); + if (sample > start_sample) { + break; + } + + key_prefix = start_sample - sample; + entry++; + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0, + "mp4 key frame prefix:%uD", key_prefix); + + return key_prefix; +} + + typedef struct { u_char size[4]; u_char name[4]; @@ -3590,6 +3766,7 @@ ngx_http_mp4_create_conf(ngx_conf_t *cf) conf->buffer_size = NGX_CONF_UNSET_SIZE; 
conf->max_buffer_size = NGX_CONF_UNSET_SIZE; + conf->start_key_frame = NGX_CONF_UNSET; return conf; } @@ -3604,6 +3781,7 @@ ngx_http_mp4_merge_conf(ngx_conf_t *cf, ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, 512 * 1024); ngx_conf_merge_size_value(conf->max_buffer_size, prev->max_buffer_size, 10 * 1024 * 1024); + ngx_conf_merge_value(conf->start_key_frame, prev->start_key_frame, 0); return NGX_CONF_OK; }
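
The review quoted earlier in the thread discusses a different, earlier approach ("exact start"): the stts (time-to-sample) entry that contains the seek point is split in two, so the samples before the requested start are still sent but play with a duration of 1 timescale unit and flash by almost instantly. A minimal host-order sketch of that split, with hypothetical names and without the surrounding nginx buffer handling (the module works on big-endian entries via ngx_mp4_get_32value()/ngx_mp4_set_32value() and needs a spare slot in front of the entry being split):

    #include <stdint.h>

    typedef struct {
        uint32_t  count;
        uint32_t  duration;
    } stts_entry;

    /* entries[1] is the entry containing the exact start, entries[0] is the
     * spare slot in front of it; assumes speedup_samples < entries[1].count,
     * which the quoted code checks before splitting. */
    static void
    split_stts_entry(stts_entry *entries, uint32_t speedup_samples)
    {
        entries[0].count    = speedup_samples;   /* samples before the exact start */
        entries[0].duration = 1;                 /* near-zero presentation time */

        entries[1].count -= speedup_samples;     /* the rest keeps its real duration */
    }

The patch attached above takes a different route: the extra samples keep their real durations and an edit list removes them from the presentation timeline instead.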
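
In that patch, ngx_http_mp4_seek_key_frame() answers one question: how many samples does the requested start lie past the most recent sync (key) frame? The stss atom stores 1-based sample numbers of key frames in ascending order, so a single forward scan is enough. A standalone sketch of the same scan, assuming the sync sample numbers have already been converted to host byte order (the module reads them big-endian with ngx_mp4_get_32value()):

    #include <stddef.h>
    #include <stdint.h>

    /* Find how many samples the requested (0-based) start sample lies past
     * the most recent key frame; sync_samples[] holds 1-based sample numbers
     * in ascending order. */
    static uint32_t
    key_frame_prefix(const uint32_t *sync_samples, size_t n,
        uint32_t start_sample)
    {
        size_t    i;
        uint32_t  prefix;

        start_sample++;                 /* stss numbers samples from 1 */
        prefix = 0;

        for (i = 0; i < n; i++) {
            if (sync_samples[i] > start_sample) {
                break;                  /* first key frame after the start */
            }

            prefix = start_sample - sync_samples[i];
        }

        return prefix;                  /* samples to pull back into the result */
    }

ngx_http_mp4_crop_stts_data() then subtracts this prefix from start_sample and walks back over stts entries, accumulating the duration of the prefixed samples in trak->media_prefix, which later becomes the elst media_time.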
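
The prefix samples are shipped to the client but hidden from playback by a one-entry version 1 elst box inside an 8-byte edts container: segment_duration is the track duration taken from tkhd (movie timescale), media_time is trak->media_prefix (media timescale), and the rate is fixed-point 1.0, so presentation starts media_prefix units into the media. The mdhd duration is also grown by media_prefix to account for the extra samples. With a hypothetical media timescale of 90000 and the preceding key frame 30 samples of duration 3000 before the requested start, media_prefix is 30 * 3000 = 90000, i.e. exactly one extra second is sent and decoded but never presented. A standalone sketch of the resulting byte layout, using hypothetical helpers (the module fills ngx_mp4_elst_atom_t with ngx_mp4_set_32value()/ngx_mp4_set_64value() instead):

    #include <stdint.h>
    #include <string.h>

    static void
    put32(uint8_t *p, uint32_t v)
    {
        p[0] = (uint8_t) (v >> 24);
        p[1] = (uint8_t) (v >> 16);
        p[2] = (uint8_t) (v >> 8);
        p[3] = (uint8_t) v;
    }

    static void
    put64(uint8_t *p, uint64_t v)
    {
        put32(p, (uint32_t) (v >> 32));
        put32(p + 4, (uint32_t) v);
    }

    /* Write the 36-byte one-entry version 1 elst box. */
    static size_t
    write_elst_v1(uint8_t out[36], uint64_t segment_duration, uint64_t media_time)
    {
        memset(out, 0, 36);

        put32(out, 36);                     /* box size */
        memcpy(out + 4, "elst", 4);         /* box type */
        out[8] = 1;                         /* version 1: 64-bit fields */
                                            /* flags (bytes 9..11) stay 0 */
        put32(out + 12, 1);                 /* entry count */
        put64(out + 16, segment_duration);  /* tkhd duration, movie timescale */
        put64(out + 24, media_time);        /* key frame prefix, media timescale */
        out[33] = 1;                        /* media_rate_integer = 1 */
                                            /* media_rate_fraction (34..35) = 0 */

        return 36;
    }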
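
Finally, the new flag is a plain per-location boolean: ngx_http_mp4_create_conf() leaves it NGX_CONF_UNSET and ngx_http_mp4_merge_conf() defaults it to off, so existing configurations keep the current cut-at-key-frame behaviour unless the directive is enabled explicitly, for example (illustrative configuration, location name hypothetical):

    location /video/ {
        mp4;
        mp4_start_key_frame on;
    }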