From noreply at nginx.com Tue Jul 2 02:32:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 2 Jul 2024 02:32:02 +0000 (UTC) Subject: [njs] Fixed attribute initialization for external properties. Message-ID: <20240702023202.637CE4870F@pubserv1.nginx> details: https://github.com/nginx/njs/commit/286d00b93ffa54958aab9a930a844ba4ba83f095 branches: master commit: 286d00b93ffa54958aab9a930a844ba4ba83f095 user: Dmitry Volyntsev date: Fri, 21 Jun 2024 19:14:35 -0700 description: Fixed attribute initialization for external properties. --- src/njs_extern.c | 8 +++----- src/test/njs_unit_test.c | 5 +++++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/njs_extern.c b/src/njs_extern.c index df51f9b7..9ec1c1c9 100644 --- a/src/njs_extern.c +++ b/src/njs_extern.c @@ -236,11 +236,9 @@ njs_external_prop_handler(njs_vm_t *vm, njs_object_prop_t *self, return NJS_ERROR; } - if (slots != NULL) { - prop->writable = slots->writable; - prop->configurable = slots->configurable; - prop->enumerable = slots->enumerable; - } + prop->writable = self->writable; + prop->configurable = self->configurable; + prop->enumerable = self->enumerable; lhq.value = prop; njs_string_get(&self->name, &lhq.key); diff --git a/src/test/njs_unit_test.c b/src/test/njs_unit_test.c index 88f8083e..d78c5b71 100644 --- a/src/test/njs_unit_test.c +++ b/src/test/njs_unit_test.c @@ -21819,6 +21819,11 @@ static njs_unit_test_t njs_webcrypto_test[] = { njs_str("let buf = new Uint32Array(4);" "buf === crypto.getRandomValues(buf)"), njs_str("true") }, + + { njs_str("crypto.subtle;" + "var d = Object.getOwnPropertyDescriptor(crypto, 'subtle');" + "d.enumerable && !d.configurable && d.writable"), + njs_str("true") }, }; From noreply at nginx.com Tue Jul 2 02:32:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 2 Jul 2024 02:32:02 +0000 (UTC) Subject: [njs] Fixed Object.values() and Object.entries() with shared properties. 
Message-ID: <20240702023202.6F28E48712@pubserv1.nginx> details: https://github.com/nginx/njs/commit/69072164673d6dbe069ef05a4b38ef0a7a9d0908 branches: master commit: 69072164673d6dbe069ef05a4b38ef0a7a9d0908 user: Dmitry Volyntsev date: Wed, 26 Jun 2024 16:39:59 -0700 description: Fixed Object.values() and Object.entries() with shared properties. Previously, the functions directly copied a shared object from a shared_hash. The copying is nessessary for lazy instantiation of shared properties. This fixes #743 issue on Github. --- src/njs_object.c | 111 ++++++++++------------------------------------- src/test/njs_unit_test.c | 6 +++ 2 files changed, 30 insertions(+), 87 deletions(-) diff --git a/src/njs_object.c b/src/njs_object.c index 1b92437c..5a5970c9 100644 --- a/src/njs_object.c +++ b/src/njs_object.c @@ -1076,82 +1076,14 @@ njs_get_own_ordered_keys(njs_vm_t *vm, const njs_object_t *object, } -static njs_int_t -njs_add_obj_prop_kind(njs_vm_t *vm, const njs_object_t *object, - const njs_lvlhsh_t *hash, njs_lvlhsh_query_t *lhq, - uint32_t flags, njs_array_t *items) -{ - njs_int_t ret; - njs_value_t value, *v, value1; - njs_array_t *entry; - njs_object_prop_t *prop; - - ret = njs_lvlhsh_find(hash, lhq); - if (ret != NJS_OK) { - return NJS_DECLINED; - } - - prop = (njs_object_prop_t *) (lhq->value); - - if (prop->type != NJS_ACCESSOR) { - v = njs_prop_value(prop); - - } else { - if (njs_is_data_descriptor(prop)) { - v = njs_prop_value(prop); - goto add; - } - - if (njs_prop_getter(prop) == NULL) { - v = njs_value_arg(&njs_value_undefined); - goto add; - } - - v = &value1; - - njs_set_object(&value, (njs_object_t *) object); - ret = njs_function_apply(vm, njs_prop_getter(prop), &value, 1, v); - if (ret != NJS_OK) { - return NJS_ERROR; - } - } - -add: - if (njs_object_enum_kind(flags) != NJS_ENUM_VALUES) { - entry = njs_array_alloc(vm, 0, 2, 0); - if (njs_slow_path(entry == NULL)) { - return NJS_ERROR; - } - - njs_string_copy(&entry->start[0], &prop->name); - 
njs_value_assign(&entry->start[1], v); - - njs_set_array(&value, entry); - v = &value; - } - - ret = njs_array_add(vm, items, v); - if (njs_slow_path(ret != NJS_OK)) { - return NJS_ERROR; - } - - return NJS_OK; -} - - static njs_int_t njs_object_own_enumerate_object(njs_vm_t *vm, const njs_object_t *object, const njs_object_t *parent, njs_array_t *items, uint32_t flags) { - njs_int_t ret; - uint32_t i; - njs_array_t *items_sorted; - njs_lvlhsh_each_t lhe; - njs_lvlhsh_query_t lhq; - - lhq.proto = &njs_object_hash_proto; - - njs_lvlhsh_each_init(&lhe, &njs_object_hash_proto); + uint32_t i; + njs_int_t ret; + njs_array_t *items_sorted, *entry; + njs_value_t value, retval; switch (njs_object_enum_kind(flags)) { case NJS_ENUM_KEYS: @@ -1174,26 +1106,31 @@ njs_object_own_enumerate_object(njs_vm_t *vm, const njs_object_t *object, return NJS_ERROR; } - for (i = 0; i< items_sorted->length; i++) { + njs_set_object(&value, (njs_object_t *) object); - lhe.key_hash = 0; - njs_object_property_key_set(&lhq, &items_sorted->start[i], - lhe.key_hash); + for (i = 0; i< items_sorted->length; i++) { + ret = njs_value_property(vm, &value, &items_sorted->start[i], + &retval); + if (njs_slow_path(ret != NJS_OK)) { + njs_array_destroy(vm, items_sorted); + return NJS_ERROR; + } - ret = njs_add_obj_prop_kind(vm, object, &object->hash, &lhq, flags, - items); - if (ret != NJS_DECLINED) { - if (ret != NJS_OK) { + if (njs_object_enum_kind(flags) != NJS_ENUM_VALUES) { + entry = njs_array_alloc(vm, 0, 2, 0); + if (njs_slow_path(entry == NULL)) { return NJS_ERROR; } - } else { - ret = njs_add_obj_prop_kind(vm, object, &object->shared_hash, - &lhq, flags, items); - njs_assert(ret != NJS_DECLINED); - if (ret != NJS_OK) { - return NJS_ERROR; - } + njs_string_copy(&entry->start[0], &items_sorted->start[i]); + njs_value_assign(&entry->start[1], &retval); + + njs_set_array(&retval, entry); + } + + ret = njs_array_add(vm, items, &retval); + if (njs_slow_path(ret != NJS_OK)) { + return NJS_ERROR; } } diff 
--git a/src/test/njs_unit_test.c b/src/test/njs_unit_test.c index d78c5b71..e63b0336 100644 --- a/src/test/njs_unit_test.c +++ b/src/test/njs_unit_test.c @@ -22693,6 +22693,12 @@ static njs_unit_test_t njs_shared_test[] = { njs_str("var v = Math.round(Math.random() * 1000); ExternalNull.set(v);" "ExternalNull.get() == v"), njs_str("true") }, + +#if (NJS_HAVE_OPENSSL) + { njs_str("var cr = Object.entries(global).filter((v) => v[0] == 'crypto')[0][1];" + "cr.abc = 1; cr.abc"), + njs_str("1") }, +#endif }; From noreply at nginx.com Tue Jul 2 02:33:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 2 Jul 2024 02:33:02 +0000 (UTC) Subject: [njs] Fixed "global" property handler when deleting. Message-ID: <20240702023302.6A20548712@pubserv1.nginx> details: https://github.com/nginx/njs/commit/89aca305c471a17fa5bd83a94f091a9bdde7c6a3 branches: master commit: 89aca305c471a17fa5bd83a94f091a9bdde7c6a3 user: Dmitry Volyntsev date: Wed, 26 Jun 2024 19:12:38 -0700 description: Fixed "global" property handler when deleting. This fixes #734 issue. 
--- src/njs_builtin.c | 7 ++++--- src/test/njs_unit_test.c | 3 +++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/njs_builtin.c b/src/njs_builtin.c index 6c38cbee..3ff6f547 100644 --- a/src/njs_builtin.c +++ b/src/njs_builtin.c @@ -783,13 +783,14 @@ njs_global_this_object(njs_vm_t *vm, njs_object_prop_t *self, njs_object_prop_t *prop; njs_lvlhsh_query_t lhq; + if (retval == NULL) { + return NJS_DECLINED; + } + njs_value_assign(retval, global); if (njs_slow_path(setval != NULL)) { njs_value_assign(retval, setval); - - } else if (njs_slow_path(retval == NULL)) { - return NJS_DECLINED; } prop = njs_object_prop_alloc(vm, &self->name, retval, 1); diff --git a/src/test/njs_unit_test.c b/src/test/njs_unit_test.c index e63b0336..96c922bc 100644 --- a/src/test/njs_unit_test.c +++ b/src/test/njs_unit_test.c @@ -12885,6 +12885,9 @@ static njs_unit_test_t njs_test[] = { njs_str("var ex; try {({}) instanceof this} catch (e) {ex = e}; ex"), njs_str("TypeError: right argument is not callable") }, + { njs_str("delete global.global; global"), + njs_str("ReferenceError: \"global\" is not defined") }, + { njs_str("njs"), njs_str("[object njs]") }, From noreply at nginx.com Tue Jul 2 02:34:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 2 Jul 2024 02:34:02 +0000 (UTC) Subject: [njs] Fixed constructor property of an iterator object. Message-ID: <20240702023402.DFCA848716@pubserv1.nginx> details: https://github.com/nginx/njs/commit/7f55a51f23113e3fc8a57d2598f3b87101098bb2 branches: master commit: 7f55a51f23113e3fc8a57d2598f3b87101098bb2 user: Dmitry Volyntsev date: Wed, 26 Jun 2024 18:50:43 -0700 description: Fixed constructor property of an iterator object. This fixes #737 issue on Github. 
--- src/njs_object.c | 4 ++++ src/test/njs_unit_test.c | 3 +++ 2 files changed, 7 insertions(+) diff --git a/src/njs_object.c b/src/njs_object.c index 5a5970c9..f2aa46a3 100644 --- a/src/njs_object.c +++ b/src/njs_object.c @@ -2168,6 +2168,10 @@ njs_object_prototype_create_constructor(njs_vm_t *vm, njs_object_prop_t *prop, found: + if (njs_flathsh_is_empty(&vm->constructors[index].object.shared_hash)) { + index = NJS_OBJ_TYPE_OBJECT; + } + njs_set_function(&constructor, &njs_vm_ctor(vm, index)); setval = &constructor; diff --git a/src/test/njs_unit_test.c b/src/test/njs_unit_test.c index 96c922bc..530cf6ff 100644 --- a/src/test/njs_unit_test.c +++ b/src/test/njs_unit_test.c @@ -7450,6 +7450,9 @@ static njs_unit_test_t njs_test[] = "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), njs_str("1,2,3,") }, + { njs_str("[].values().constructor()"), + njs_str("[object Object]") }, + { njs_str("var a = [], i = a.values();" "a.push(1); a.push(2); a.push(3);" "[i.next(), i.next(), i.next(), i.next()].map((x) => x.value)"), From noreply at nginx.com Tue Jul 2 02:35:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 2 Jul 2024 02:35:02 +0000 (UTC) Subject: [njs] Avoiding explicit length calculation for strings. Message-ID: <20240702023502.CE92348718@pubserv1.nginx> details: https://github.com/nginx/njs/commit/911cacd055e0f6e469edee086802f9be2cb4710c branches: master commit: 911cacd055e0f6e469edee086802f9be2cb4710c user: Dmitry Volyntsev date: Wed, 26 Jun 2024 22:33:57 -0700 description: Avoiding explicit length calculation for strings. 
--- src/njs_error.c | 8 +----- src/njs_extern.c | 7 +---- src/njs_generator.c | 51 +++++++-------------------------- src/njs_json.c | 2 +- src/njs_regexp.c | 24 ++-------------- src/njs_string.c | 78 --------------------------------------------------- src/njs_string.h | 4 +-- src/njs_typed_array.c | 41 ++++++++------------------- src/njs_typed_array.h | 2 +- src/njs_vm.c | 10 ++----- 10 files changed, 31 insertions(+), 196 deletions(-) diff --git a/src/njs_error.c b/src/njs_error.c index 5c2c6c3c..376205d7 100644 --- a/src/njs_error.c +++ b/src/njs_error.c @@ -31,17 +31,11 @@ void njs_error_new(njs_vm_t *vm, njs_value_t *dst, njs_object_t *proto, u_char *start, size_t size) { - ssize_t length; njs_int_t ret; njs_value_t string; njs_object_t *error; - length = njs_utf8_length(start, size); - if (njs_slow_path(length < 0)) { - length = 0; - } - - ret = njs_string_new(vm, &string, start, size, length); + ret = njs_string_create(vm, &string, start, size); if (njs_slow_path(ret != NJS_OK)) { return; } diff --git a/src/njs_extern.c b/src/njs_extern.c index 9ec1c1c9..621cb76d 100644 --- a/src/njs_extern.c +++ b/src/njs_extern.c @@ -18,7 +18,6 @@ njs_external_add(njs_vm_t *vm, njs_arr_t *protos, const njs_external_t *external, njs_uint_t n) { size_t size; - ssize_t length; njs_int_t ret; njs_lvlhsh_t *hash; const u_char *start; @@ -119,12 +118,8 @@ njs_external_add(njs_vm_t *vm, njs_arr_t *protos, } else { start = (u_char *) external->u.property.value; size = njs_strlen(start); - length = njs_utf8_length(start, size); - if (njs_slow_path(length < 0)) { - length = 0; - } - ret = njs_string_new(vm, &prop->u.value, start, size, length); + ret = njs_string_create(vm, &prop->u.value, start, size); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } diff --git a/src/njs_generator.c b/src/njs_generator.c index 6846ec6b..2e81a407 100644 --- a/src/njs_generator.c +++ b/src/njs_generator.c @@ -1062,7 +1062,6 @@ static njs_int_t njs_generate_var_statement_after(njs_vm_t *vm, 
njs_generator_t *generator, njs_parser_node_t *node) { - ssize_t length; njs_int_t ret; njs_variable_t *var; njs_parser_node_t *lvalue, *expr; @@ -1104,14 +1103,8 @@ njs_generate_var_statement_after(njs_vm_t *vm, njs_generator_t *generator, return NJS_ERROR; } - length = njs_utf8_length(lex_entry->name.start, lex_entry->name.length); - if (njs_slow_path(length < 0)) { - return NJS_ERROR; - } - - ret = njs_string_new(vm, &expr->u.value.data.u.lambda->name, - lex_entry->name.start, lex_entry->name.length, - length); + ret = njs_string_create(vm, &expr->u.value.data.u.lambda->name, + lex_entry->name.start, lex_entry->name.length); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } @@ -3030,7 +3023,6 @@ static njs_int_t njs_generate_global_property_set(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node_dst, njs_parser_node_t *node_src) { - ssize_t length; njs_int_t ret; njs_value_t property; njs_variable_t *var; @@ -3050,13 +3042,8 @@ njs_generate_global_property_set(njs_vm_t *vm, njs_generator_t *generator, return NJS_ERROR; } - length = njs_utf8_length(lex_entry->name.start, lex_entry->name.length); - if (njs_slow_path(length < 0)) { - return NJS_ERROR; - } - - ret = njs_string_new(vm, &property, lex_entry->name.start, - lex_entry->name.length, length); + ret = njs_string_create(vm, &property, lex_entry->name.start, + lex_entry->name.length); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } @@ -3641,7 +3628,6 @@ static njs_int_t njs_generate_function_expression(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node) { - ssize_t length; njs_int_t ret; njs_variable_t *var; njs_function_lambda_t *lambda; @@ -3671,13 +3657,8 @@ njs_generate_function_expression(njs_vm_t *vm, njs_generator_t *generator, return ret; } - length = njs_utf8_length(lex_entry->name.start, lex_entry->name.length); - if (njs_slow_path(length < 0)) { - return NJS_ERROR; - } - - ret = njs_string_new(vm, &lambda->name, lex_entry->name.start, - 
lex_entry->name.length, length); + ret = njs_string_create(vm, &lambda->name, lex_entry->name.start, + lex_entry->name.length); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } @@ -4207,7 +4188,6 @@ static njs_int_t njs_generate_function_declaration(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node) { - ssize_t length; njs_int_t ret; njs_bool_t async; njs_variable_t *var; @@ -4232,13 +4212,8 @@ njs_generate_function_declaration(njs_vm_t *vm, njs_generator_t *generator, return NJS_ERROR; } - length = njs_utf8_length(lex_entry->name.start, lex_entry->name.length); - if (njs_slow_path(length < 0)) { - return NJS_ERROR; - } - - ret = njs_string_new(vm, &lambda->name, lex_entry->name.start, - lex_entry->name.length, length); + ret = njs_string_create(vm, &lambda->name, lex_entry->name.start, + lex_entry->name.length); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } @@ -5413,7 +5388,6 @@ static njs_int_t njs_generate_global_reference(njs_vm_t *vm, njs_generator_t *generator, njs_parser_node_t *node, njs_bool_t exception) { - ssize_t length; njs_int_t ret; njs_index_t index; njs_value_t property; @@ -5441,13 +5415,8 @@ njs_generate_global_reference(njs_vm_t *vm, njs_generator_t *generator, return NJS_ERROR; } - length = njs_utf8_length(lex_entry->name.start, lex_entry->name.length); - if (njs_slow_path(length < 0)) { - return NJS_ERROR; - } - - ret = njs_string_new(vm, &property, lex_entry->name.start, - lex_entry->name.length, length); + ret = njs_string_create(vm, &property, lex_entry->name.start, + lex_entry->name.length); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } diff --git a/src/njs_json.c b/src/njs_json.c index 2b73cd85..85c5d0e8 100644 --- a/src/njs_json.c +++ b/src/njs_json.c @@ -1798,7 +1798,7 @@ njs_dump_terminal(njs_json_stringify_t *stringify, njs_chb_t *chain, njs_chb_append_literal(chain, "["); - (void) njs_typed_array_to_chain(stringify->vm, chain, array, NULL); + njs_typed_array_to_chain(stringify->vm, chain, 
array, NULL); njs_chb_append_literal(chain, "]"); diff --git a/src/njs_regexp.c b/src/njs_regexp.c index 74a4c23b..1c7cfe87 100644 --- a/src/njs_regexp.c +++ b/src/njs_regexp.c @@ -30,8 +30,6 @@ static njs_int_t njs_regexp_exec(njs_vm_t *vm, njs_value_t *r, njs_value_t *s, unsigned flags, njs_value_t *retval); static njs_array_t *njs_regexp_exec_result(njs_vm_t *vm, njs_value_t *r, njs_utf8_t utf8, njs_string_prop_t *string, njs_regex_match_data_t *data); -static njs_int_t njs_regexp_string_create(njs_vm_t *vm, njs_value_t *value, - u_char *start, uint32_t size, int32_t length); const njs_value_t njs_string_lindex = njs_string("lastIndex"); @@ -1016,7 +1014,7 @@ njs_regexp_exec_result(njs_vm_t *vm, njs_value_t *r, njs_utf8_t utf8, { u_char *start; size_t c; - int32_t size, length; + int32_t size; uint32_t index; njs_int_t ret; njs_uint_t i, n; @@ -1050,15 +1048,7 @@ njs_regexp_exec_result(njs_vm_t *vm, njs_value_t *r, njs_utf8_t utf8, start = &string->start[c]; size = njs_regex_capture(match_data, n + 1) - c; - if (utf8 == NJS_STRING_UTF8) { - length = njs_max(njs_utf8_length(start, size), 0); - - } else { - length = size; - } - - ret = njs_regexp_string_create(vm, &array->start[i], start, size, - length); + ret = njs_string_create(vm, &array->start[i], start, size); if (njs_slow_path(ret != NJS_OK)) { goto fail; } @@ -1316,16 +1306,6 @@ njs_regexp_exec(njs_vm_t *vm, njs_value_t *r, njs_value_t *s, unsigned flags, } -static njs_int_t -njs_regexp_string_create(njs_vm_t *vm, njs_value_t *value, u_char *start, - uint32_t size, int32_t length) -{ - length = (length >= 0) ? 
length : 0; - - return njs_string_new(vm, value, start, size, length); -} - - njs_int_t njs_regexp_prototype_symbol_replace(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) diff --git a/src/njs_string.c b/src/njs_string.c index c07016b5..ea22755b 100644 --- a/src/njs_string.c +++ b/src/njs_string.c @@ -535,84 +535,6 @@ njs_string_copy(njs_value_t *dst, njs_value_t *src) } -/* - * njs_string_validate() validates an UTF-8 string, evaluates its length, - * sets njs_string_prop_t struct. - */ - -njs_int_t -njs_string_validate(njs_vm_t *vm, njs_string_prop_t *string, njs_value_t *value) -{ - u_char *start; - size_t new_size, map_offset; - ssize_t size, length; - uint32_t *map; - - size = value->short_string.size; - - if (size != NJS_STRING_LONG) { - string->start = value->short_string.start; - length = value->short_string.length; - - if (length == 0 && length != size) { - length = njs_utf8_length(value->short_string.start, size); - - if (njs_slow_path(length < 0)) { - /* Invalid UTF-8 string. */ - return length; - } - - value->short_string.length = length; - } - - } else { - string->start = value->long_string.data->start; - size = value->long_string.size; - length = value->long_string.data->length; - - if (length == 0 && length != size) { - length = njs_utf8_length(string->start, size); - - if (length != size) { - if (njs_slow_path(length < 0)) { - /* Invalid UTF-8 string. */ - return length; - } - - if (length > NJS_STRING_MAP_STRIDE) { - /* - * Reallocate the long string with offset map - * after the string. 
- */ - map_offset = njs_string_map_offset(size); - new_size = map_offset + njs_string_map_size(length); - - start = njs_mp_alloc(vm->mem_pool, new_size); - if (njs_slow_path(start == NULL)) { - njs_memory_error(vm); - return NJS_ERROR; - } - - memcpy(start, string->start, size); - string->start = start; - value->long_string.data->start = start; - - map = (uint32_t *) (start + map_offset); - map[0] = 0; - } - } - - value->long_string.data->length = length; - } - } - - string->size = size; - string->length = length; - - return length; -} - - static njs_int_t njs_string_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) diff --git a/src/njs_string.h b/src/njs_string.h index a83c406a..9b478875 100644 --- a/src/njs_string.h +++ b/src/njs_string.h @@ -149,8 +149,6 @@ void njs_string_truncate(njs_value_t *value, uint32_t size, uint32_t length); uint32_t njs_string_trim(const njs_value_t *value, njs_string_prop_t *string, unsigned mode); void njs_string_copy(njs_value_t *dst, njs_value_t *src); -njs_int_t njs_string_validate(njs_vm_t *vm, njs_string_prop_t *string, - njs_value_t *value); njs_int_t njs_string_cmp(const njs_value_t *val1, const njs_value_t *val2); void njs_string_slice_string_prop(njs_string_prop_t *dst, const njs_string_prop_t *string, const njs_slice_prop_t *slice); @@ -197,7 +195,7 @@ njs_string_calc_length(njs_utf8_t utf8, const u_char *start, size_t size) default: length = njs_utf8_length(start, size); - return (length >= 0) ? 
length : 0; + return length; } } diff --git a/src/njs_typed_array.c b/src/njs_typed_array.c index 42426c06..2a485a19 100644 --- a/src/njs_typed_array.c +++ b/src/njs_typed_array.c @@ -2054,11 +2054,11 @@ njs_typed_array_prototype_sort(njs_vm_t *vm, njs_value_t *args, } -njs_int_t +void njs_typed_array_to_chain(njs_vm_t *vm, njs_chb_t *chain, njs_typed_array_t *array, njs_value_t *sep) { - size_t size, length, arr_length; + size_t length; uint32_t i; njs_string_prop_t separator; @@ -2068,29 +2068,18 @@ njs_typed_array_to_chain(njs_vm_t *vm, njs_chb_t *chain, (void) njs_string_prop(&separator, sep); - arr_length = njs_typed_array_length(array); + length = njs_typed_array_length(array); - if (arr_length == 0) { - return 0; + if (length == 0) { + return; } - for (i = 0; i < arr_length; i++) { + for (i = 0; i < length; i++) { njs_number_to_chain(vm, chain, njs_typed_array_prop(array, i)); njs_chb_append(chain, separator.start, separator.size); } njs_chb_drop(chain, separator.size); - - size = njs_chb_size(chain); - - if (njs_utf8_length(separator.start, separator.size) >= 0) { - length = size - (separator.size - separator.length) * (arr_length - 1); - - } else { - length = 0; - } - - return length; } @@ -2098,8 +2087,7 @@ static njs_int_t njs_typed_array_prototype_join(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) { - u_char *p; - size_t size, length, arr_length; + size_t length; njs_int_t ret; njs_chb_t chain; njs_value_t *this, *separator; @@ -2117,7 +2105,7 @@ njs_typed_array_prototype_join(njs_vm_t *vm, njs_value_t *args, return NJS_ERROR; } - arr_length = njs_typed_array_length(array); + length = njs_typed_array_length(array); separator = njs_arg(args, nargs, 1); @@ -2133,7 +2121,7 @@ njs_typed_array_prototype_join(njs_vm_t *vm, njs_value_t *args, } } - if (arr_length == 0) { + if (length == 0) { njs_value_assign(retval, &njs_string_empty); return NJS_OK; } @@ -2145,18 +2133,13 @@ njs_typed_array_prototype_join(njs_vm_t 
*vm, njs_value_t *args, NJS_CHB_MP_INIT(&chain, vm); - length = njs_typed_array_to_chain(vm, &chain, array, separator); - size = njs_chb_size(&chain); + njs_typed_array_to_chain(vm, &chain, array, separator); - p = njs_string_alloc(vm, retval, size, length); - if (njs_slow_path(p == NULL)) { - return NJS_ERROR; - } + ret = njs_string_create_chb(vm, retval, &chain); - njs_chb_join_to(&chain, p); njs_chb_destroy(&chain); - return NJS_OK; + return ret; } diff --git a/src/njs_typed_array.h b/src/njs_typed_array.h index 47177fad..0e3b2f4e 100644 --- a/src/njs_typed_array.h +++ b/src/njs_typed_array.h @@ -14,7 +14,7 @@ njs_array_buffer_t *njs_typed_array_writable(njs_vm_t *vm, njs_typed_array_t *array); njs_int_t njs_typed_array_set_value(njs_vm_t *vm, njs_typed_array_t *array, uint32_t index, njs_value_t *setval); -njs_int_t njs_typed_array_to_chain(njs_vm_t *vm, njs_chb_t *chain, +void njs_typed_array_to_chain(njs_vm_t *vm, njs_chb_t *chain, njs_typed_array_t *array, njs_value_t *sep); njs_int_t njs_typed_array_prototype_slice(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t copy, njs_value_t *retval); diff --git a/src/njs_vm.c b/src/njs_vm.c index b0fe353b..908c40c8 100644 --- a/src/njs_vm.c +++ b/src/njs_vm.c @@ -1103,7 +1103,6 @@ njs_value_t * njs_vm_value_enumerate(njs_vm_t *vm, njs_value_t *value, uint32_t flags, njs_value_t *retval) { - ssize_t length; njs_int_t ret; njs_value_t *val; njs_array_t *keys; @@ -1143,18 +1142,13 @@ njs_vm_value_enumerate(njs_vm_t *vm, njs_value_t *value, uint32_t flags, continue; } - length = njs_utf8_length(lex_entry->name.start, lex_entry->name.length); - if (njs_slow_path(length < 0)) { - return NULL; - } - val = njs_array_push(vm, keys); if (njs_slow_path(value == NULL)) { return NULL; } - ret = njs_string_new(vm, val, lex_entry->name.start, - lex_entry->name.length, length); + ret = njs_string_create(vm, val, lex_entry->name.start, + lex_entry->name.length); if (njs_slow_path(ret != NJS_OK)) { return NULL; } From 
noreply at nginx.com Tue Jul 2 02:36:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 2 Jul 2024 02:36:02 +0000 (UTC) Subject: [njs] QuickJS: disabling eval() and string normalize. Message-ID: <20240702023602.BD8C24871A@pubserv1.nginx> details: https://github.com/nginx/njs/commit/c773ebcaad703e704220d7f8f9fc40c78f50d779 branches: master commit: c773ebcaad703e704220d7f8f9fc40c78f50d779 user: Dmitry Volyntsev date: Fri, 21 Jun 2024 00:06:46 -0700 description: QuickJS: disabling eval() and string normalize. --- external/njs_shell.c | 2 +- src/qjs.c | 18 ++++++++++++++++-- src/qjs.h | 2 +- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/external/njs_shell.c b/external/njs_shell.c index e00f2529..672215c1 100644 --- a/external/njs_shell.c +++ b/external/njs_shell.c @@ -2811,7 +2811,7 @@ njs_engine_qjs_init(njs_engine_t *engine, njs_opts_t *opts) return NJS_ERROR; } - engine->u.qjs.ctx = qjs_new_context(engine->u.qjs.rt); + engine->u.qjs.ctx = qjs_new_context(engine->u.qjs.rt, 1); if (engine->u.qjs.ctx == NULL) { njs_stderror("JS_NewContext() failed\n"); return NJS_ERROR; diff --git a/src/qjs.c b/src/qjs.c index 83b43ad7..0a31b748 100644 --- a/src/qjs.c +++ b/src/qjs.c @@ -8,16 +8,30 @@ JSContext * -qjs_new_context(JSRuntime *rt) +qjs_new_context(JSRuntime *rt, _Bool eval) { JSContext *ctx; qjs_module_t **module; - ctx = JS_NewContext(rt); + ctx = JS_NewContextRaw(rt); if (ctx == NULL) { return NULL; } + JS_AddIntrinsicBaseObjects(ctx); + JS_AddIntrinsicDate(ctx); + JS_AddIntrinsicRegExp(ctx); + JS_AddIntrinsicJSON(ctx); + JS_AddIntrinsicProxy(ctx); + JS_AddIntrinsicMapSet(ctx); + JS_AddIntrinsicTypedArrays(ctx); + JS_AddIntrinsicPromise(ctx); + JS_AddIntrinsicBigInt(ctx); + + if (eval) { + JS_AddIntrinsicEval(ctx); + } + for (module = qjs_modules; *module != NULL; module++) { if ((*module)->init(ctx, (*module)->name) == NULL) { return NULL; diff --git a/src/qjs.h b/src/qjs.h index 2307d4d9..71e23d78 100644 --- a/src/qjs.h +++ 
b/src/qjs.h @@ -39,7 +39,7 @@ typedef struct { } qjs_module_t; -JSContext *qjs_new_context(JSRuntime *rt); +JSContext *qjs_new_context(JSRuntime *rt, _Bool eval); JSValue qjs_buffer_alloc(JSContext *ctx, size_t size); From noreply at nginx.com Tue Jul 2 02:36:03 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Tue, 2 Jul 2024 02:36:03 +0000 (UTC) Subject: [njs] HTTP: simplifed r.subrequest() code. Message-ID: <20240702023603.7E3D24871B@pubserv1.nginx> details: https://github.com/nginx/njs/commit/7dda4b2ff1fe605e8d7b2723a0b855d43fe101a8 branches: master commit: 7dda4b2ff1fe605e8d7b2723a0b855d43fe101a8 user: Dmitry Volyntsev date: Thu, 27 Jun 2024 23:47:11 -0700 description: HTTP: simplifed r.subrequest() code. Moving promise callbacks from ctx->promise_callbacks to the arguments of ngx_js_event_t. --- nginx/ngx_http_js_module.c | 246 +++++++++++++-------------------------------- 1 file changed, 71 insertions(+), 175 deletions(-) diff --git a/nginx/ngx_http_js_module.c b/nginx/ngx_http_js_module.c index 260cd497..0c8a3b11 100644 --- a/nginx/ngx_http_js_module.c +++ b/nginx/ngx_http_js_module.c @@ -59,7 +59,6 @@ typedef struct { njs_opaque_value_t request_body; njs_opaque_value_t response_body; ngx_str_t redirect_uri; - ngx_array_t promise_callbacks; ngx_int_t filter; ngx_buf_t *buf; @@ -216,9 +215,6 @@ static njs_int_t ngx_http_js_periodic_session_variables(njs_vm_t *vm, njs_value_t *retval); static njs_int_t ngx_http_js_ext_subrequest(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval); -static ngx_int_t ngx_http_js_subrequest(ngx_http_request_t *r, - njs_str_t *uri_arg, njs_str_t *args_arg, njs_function_t *callback, - ngx_http_request_t **sr); static ngx_int_t ngx_http_js_subrequest_done(ngx_http_request_t *r, void *data, ngx_int_t rc); static njs_int_t ngx_http_js_ext_get_parent(njs_vm_t *vm, @@ -3049,74 +3045,23 @@ ngx_http_js_periodic_session_variables(njs_vm_t *vm, njs_object_prop_t *prop, } -static njs_int_t 
-ngx_http_js_promise_trampoline(njs_vm_t *vm, njs_value_t *args, - njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) -{ - ngx_uint_t i; - njs_function_t *callback; - ngx_http_js_cb_t *cb, *cbs; - ngx_http_js_ctx_t *ctx; - ngx_http_request_t *r; - - r = njs_vm_external(vm, ngx_http_js_request_proto_id, - njs_arg(args, nargs, 1)); - ctx = ngx_http_get_module_ctx(r->parent, ngx_http_js_module); - - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "js subrequest promise trampoline parent ctx: %p", ctx); - - if (ctx == NULL) { - njs_vm_error(vm, "js subrequest: failed to get the parent context"); - return NJS_ERROR; - } - - cbs = ctx->promise_callbacks.elts; - - if (cbs == NULL) { - goto fail; - } - - cb = NULL; - - for (i = 0; i < ctx->promise_callbacks.nelts; i++) { - if (cbs[i].request == r) { - cb = &cbs[i]; - cb->request = NULL; - break; - } - } - - if (cb == NULL) { - goto fail; - } - - callback = njs_value_function(njs_value_arg(&cb->callbacks[0])); - - return njs_vm_call(vm, callback, njs_argument(args, 1), 1); - -fail: - - njs_vm_error(vm, "js subrequest: promise callback not found"); - - return NJS_ERROR; -} - - static njs_int_t ngx_http_js_ext_subrequest(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused, njs_value_t *retval) { - ngx_int_t rc, promise; - njs_str_t uri_arg, args_arg, method_name, body_arg; - ngx_uint_t i, method, methods_max, has_body, detached; - njs_value_t *value, *arg, *options; - njs_function_t *callback; - ngx_http_js_cb_t *cb, *cbs; - ngx_http_js_ctx_t *ctx; - njs_opaque_value_t lvalue; - ngx_http_request_t *r, *sr; - ngx_http_request_body_t *rb; + ngx_int_t rc, flags; + njs_str_t uri_arg, args_arg, method_name, body_arg; + ngx_str_t uri, rargs; + ngx_uint_t method, methods_max, has_body, detached, + promise; + njs_value_t *value, *arg, *options; + ngx_js_event_t *event; + njs_function_t *callback; + ngx_http_js_ctx_t *ctx; + njs_opaque_value_t lvalue; + ngx_http_request_t *r, *sr; + 
ngx_http_request_body_t *rb; + ngx_http_post_subrequest_t *ps; static const struct { ngx_str_t name; @@ -3178,7 +3123,6 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, args_arg.length = 0; args_arg.start = NULL; has_body = 0; - promise = 0; detached = 0; arg = njs_arg(args, nargs, 2); @@ -3267,21 +3211,71 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, return NJS_ERROR; } - if (!detached && callback == NULL) { - callback = njs_vm_function_alloc(vm, ngx_http_js_promise_trampoline, 0, - 0); - if (callback == NULL) { - goto memory_error; + promise = 0; + flags = NGX_HTTP_SUBREQUEST_BACKGROUND; + + njs_value_undefined_set(retval); + + if (!detached) { + ps = ngx_palloc(r->pool, sizeof(ngx_http_post_subrequest_t)); + if (ps == NULL) { + njs_vm_memory_error(ctx->vm); + return NJS_ERROR; + } + + promise = !!(callback == NULL); + + event = njs_mp_zalloc(njs_vm_memory_pool(ctx->vm), + sizeof(ngx_js_event_t) + + promise * (sizeof(njs_opaque_value_t) * 2)); + if (njs_slow_path(event == NULL)) { + njs_vm_memory_error(ctx->vm); + return NJS_ERROR; } - promise = 1; + event->vm = ctx->vm; + event->fd = ctx->event_id++; + + if (promise) { + event->args = (njs_value_t *) &event[1]; + rc = njs_vm_promise_create(ctx->vm, retval, + njs_value_arg(event->args)); + if (rc != NJS_OK) { + return NJS_ERROR; + } + + callback = njs_value_function(njs_value_arg(event->args)); + } + + event->function = callback; + + ps->handler = ngx_http_js_subrequest_done; + ps->data = event; + + flags |= NGX_HTTP_SUBREQUEST_IN_MEMORY; + + } else { + ps = NULL; + event = NULL; } - rc = ngx_http_js_subrequest(r, &uri_arg, &args_arg, callback, &sr); - if (rc != NGX_OK) { + uri.len = uri_arg.length; + uri.data = uri_arg.start; + + rargs.len = args_arg.length; + rargs.data = args_arg.start; + + if (ngx_http_subrequest(r, &uri, rargs.len ? 
&rargs : NULL, &sr, ps, flags) + != NGX_OK) + { + njs_vm_error(ctx->vm, "subrequest creation failed"); return NJS_ERROR; } + if (event != NULL) { + ngx_js_add_event(ctx, event); + } + if (method != methods_max) { sr->method = methods[method].value; sr->method_name = methods[method].name; @@ -3325,41 +3319,6 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, sr->headers_in.chunked = 0; } - if (promise) { - cbs = ctx->promise_callbacks.elts; - - if (cbs == NULL) { - if (ngx_array_init(&ctx->promise_callbacks, r->pool, 4, - sizeof(ngx_http_js_cb_t)) != NGX_OK) - { - goto memory_error; - } - } - - cb = NULL; - - for (i = 0; i < ctx->promise_callbacks.nelts; i++) { - if (cbs[i].request == NULL) { - cb = &cbs[i]; - break; - } - } - - if (i == ctx->promise_callbacks.nelts) { - cb = ngx_array_push(&ctx->promise_callbacks); - if (cb == NULL) { - goto memory_error; - } - } - - cb->request = sr; - - return njs_vm_promise_create(vm, retval, - njs_value_arg(&cb->callbacks)); - } - - njs_value_undefined_set(retval); - return NJS_OK; memory_error: @@ -3370,69 +3329,6 @@ memory_error: } -static ngx_int_t -ngx_http_js_subrequest(ngx_http_request_t *r, njs_str_t *uri_arg, - njs_str_t *args_arg, njs_function_t *callback, ngx_http_request_t **sr) -{ - ngx_int_t flags; - ngx_str_t uri, args; - ngx_js_event_t *event; - ngx_http_js_ctx_t *ctx; - ngx_http_post_subrequest_t *ps; - - ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); - - flags = NGX_HTTP_SUBREQUEST_BACKGROUND; - - if (callback != NULL) { - ps = ngx_palloc(r->pool, sizeof(ngx_http_post_subrequest_t)); - if (ps == NULL) { - njs_vm_error(ctx->vm, "internal error"); - return NJS_ERROR; - } - - event = njs_mp_zalloc(njs_vm_memory_pool(ctx->vm), - sizeof(ngx_js_event_t)); - if (njs_slow_path(event == NULL)) { - njs_vm_memory_error(ctx->vm); - return NJS_ERROR; - } - - event->vm = ctx->vm; - event->function = callback; - event->fd = ctx->event_id++; - - ps->handler = ngx_http_js_subrequest_done; - 
ps->data = event; - - flags |= NGX_HTTP_SUBREQUEST_IN_MEMORY; - - } else { - ps = NULL; - event = NULL; - } - - uri.len = uri_arg->length; - uri.data = uri_arg->start; - - args.len = args_arg->length; - args.data = args_arg->start; - - if (ngx_http_subrequest(r, &uri, args.len ? &args : NULL, sr, ps, flags) - != NGX_OK) - { - njs_vm_error(ctx->vm, "subrequest creation failed"); - return NJS_ERROR; - } - - if (event != NULL) { - ngx_js_add_event(ctx, event); - } - - return NJS_OK; -} - - static ngx_int_t ngx_http_js_subrequest_done(ngx_http_request_t *r, void *data, ngx_int_t rc) { From an0291170 at gmail.com Tue Jul 2 14:31:02 2024 From: an0291170 at gmail.com (anlex N) Date: Tue, 2 Jul 2024 22:31:02 +0800 Subject: The Apache Http Server architecture Message-ID: Hello Teacher! I value your feedback very much Nice to meet you. My name is anlex N . I am a verified google maintainer of popular open source projects. I am reviewing system-design-primer . I can not find "Apache Http Server architecture" docs in the Apache Http Server website. Where can I find these docs? By the way, system-design-primer just demonstrates NGINX architecture and HAProxy architecture, Is Apache Http Server not better than NGINX and HAProxy? The Apache Http Server website says "event" (not "worker") mode supports big scale. Is it OK? -------------- next part -------------- An HTML attachment was scrubbed... URL: From noreply at nginx.com Sat Jul 6 01:28:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Sat, 6 Jul 2024 01:28:02 +0000 (UTC) Subject: [njs] Fixed Function constructor handling when called without arguments. Message-ID: <20240706012802.35B8947F59@pubserv1.nginx> details: https://github.com/nginx/njs/commit/b593dd4aba0f5c730c1d90072cdee7dd9a93beed branches: master commit: b593dd4aba0f5c730c1d90072cdee7dd9a93beed user: Vadim Zhestikov date: Tue, 2 Jul 2024 14:55:03 -0700 description: Fixed Function constructor handling when called without arguments. 
Corrected the behavior of Function.constructor() when invoked without arguments relative to an object. --- src/njs_function.c | 8 +++++--- src/test/njs_unit_test.c | 3 +++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/njs_function.c b/src/njs_function.c index bfdf3f35..c677be57 100644 --- a/src/njs_function.c +++ b/src/njs_function.c @@ -1054,9 +1054,11 @@ njs_function_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_chb_append_literal(&chain, "){"); - ret = njs_value_to_chain(vm, &chain, njs_argument(args, nargs - 1)); - if (njs_slow_path(ret < NJS_OK)) { - return ret; + if (nargs > 1) { + ret = njs_value_to_chain(vm, &chain, njs_argument(args, nargs - 1)); + if (njs_slow_path(ret < NJS_OK)) { + return ret; + } } njs_chb_append_literal(&chain, "})"); diff --git a/src/test/njs_unit_test.c b/src/test/njs_unit_test.c index 530cf6ff..2f0e318c 100644 --- a/src/test/njs_unit_test.c +++ b/src/test/njs_unit_test.c @@ -14147,6 +14147,9 @@ static njs_unit_test_t njs_test[] = { njs_str("Function.constructor === Function"), njs_str("true") }, + { njs_str("Function.constructor()"), + njs_str("[object Function]") }, + { njs_str("function f() {} f.__proto__ === Function.prototype"), njs_str("true") }, From arut at nginx.com Mon Jul 8 14:20:58 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 8 Jul 2024 18:20:58 +0400 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> Message-ID: <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> Hi, On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: > # HG changeset patch > # User Ruslan Ermilov > # Date 1392462754 -14400 > # Sat Feb 15 15:12:34 2014 +0400 > # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 > # Parent 02e9411009b987f408214ab4a8b6b6093f843bcd > Upstream: re-resolvable servers. 
> > Specifying the upstream server by a hostname together with the > "resolve" parameter will make the hostname to be periodically > resolved, and upstream servers added/removed as necessary. > > This requires a "resolver" at the "http" configuration block. > > The "resolver_timeout" parameter also affects when the failed > DNS requests will be attempted again. Responses with NXDOMAIN > will be attempted again in 10 seconds. > > Upstream has a configuration generation number that is incremented each > time servers are added/removed to the primary/backup list. This number > is remembered by the peer.init method, and if peer.get detects a change > in configuration, it returns NGX_BUSY. > > Each server has a reference counter. It is incremented by peer.get and > decremented by peer.free. When a server is removed, it is removed from > the list of servers and is marked as "zombie". The memory allocated by > a zombie peer is freed only when its reference count becomes zero. > > Re-resolvable servers utilize timers that also hold a reference. A > reference is also held while upstream keepalive caches an idle > connection. > > Co-authored-by: Roman Arutyunyan > Co-authored-by: Sergey Kandaurov > Co-authored-by: Vladimir Homutov I feel like it would be easier to merge this patch, SRV resolve and preresolve in a single change. 
> diff --git a/src/http/modules/ngx_http_upstream_hash_module.c b/src/http/modules/ngx_http_upstream_hash_module.c > --- a/src/http/modules/ngx_http_upstream_hash_module.c > +++ b/src/http/modules/ngx_http_upstream_hash_module.c > @@ -24,6 +24,9 @@ typedef struct { > > typedef struct { > ngx_http_complex_value_t key; > +#if (NGX_HTTP_UPSTREAM_ZONE) > + ngx_uint_t config; > +#endif > ngx_http_upstream_chash_points_t *points; > } ngx_http_upstream_hash_srv_conf_t; > > @@ -49,6 +52,8 @@ static ngx_int_t ngx_http_upstream_get_h > > static ngx_int_t ngx_http_upstream_init_chash(ngx_conf_t *cf, > ngx_http_upstream_srv_conf_t *us); > +static ngx_int_t ngx_http_upstream_update_chash(ngx_pool_t *pool, > + ngx_http_upstream_srv_conf_t *us); > static int ngx_libc_cdecl > ngx_http_upstream_chash_cmp_points(const void *one, const void *two); > static ngx_uint_t ngx_http_upstream_find_chash_point( > @@ -178,11 +183,18 @@ ngx_http_upstream_get_hash_peer(ngx_peer > > ngx_http_upstream_rr_peers_rlock(hp->rrp.peers); > > - if (hp->tries > 20 || hp->rrp.peers->single || hp->key.len == 0) { > + if (hp->tries > 20 || hp->rrp.peers->number < 2 || hp->key.len == 0) { > ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > return hp->get_rr_peer(pc, &hp->rrp); > } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { > + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > + return hp->get_rr_peer(pc, &hp->rrp); > + } > +#endif > + > now = ngx_time(); > > pc->cached = 0; > @@ -262,6 +274,7 @@ ngx_http_upstream_get_hash_peer(ngx_peer > } > > hp->rrp.current = peer; > + ngx_http_upstream_rr_peer_ref(hp->rrp.peers, peer); > > pc->sockaddr = peer->sockaddr; > pc->socklen = peer->socklen; > @@ -285,6 +298,26 @@ ngx_http_upstream_get_hash_peer(ngx_peer > static ngx_int_t > ngx_http_upstream_init_chash(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us) > { > + if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { > + return NGX_ERROR; > 
+ } > + > + us->peer.init = ngx_http_upstream_init_chash_peer; > + > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (us->shm_zone) { > + return NGX_OK; > + } > +#endif > + > + return ngx_http_upstream_update_chash(cf->pool, us); > +} > + > + > +static ngx_int_t > +ngx_http_upstream_update_chash(ngx_pool_t *pool, > + ngx_http_upstream_srv_conf_t *us) > +{ > u_char *host, *port, c; > size_t host_len, port_len, size; > uint32_t hash, base_hash; > @@ -299,25 +332,32 @@ ngx_http_upstream_init_chash(ngx_conf_t > u_char byte[4]; > } prev_hash; > > - if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { > - return NGX_ERROR; > + hcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_hash_module); > + > + if (hcf->points) { > + ngx_free(hcf->points); > + hcf->points = NULL; > } > > - us->peer.init = ngx_http_upstream_init_chash_peer; > - > peers = us->peer.data; > npoints = peers->total_weight * 160; > > size = sizeof(ngx_http_upstream_chash_points_t) > - + sizeof(ngx_http_upstream_chash_point_t) * (npoints - 1); > + - sizeof(ngx_http_upstream_chash_point_t) > + + sizeof(ngx_http_upstream_chash_point_t) * npoints; > > - points = ngx_palloc(cf->pool, size); > + points = pool ? 
ngx_palloc(pool, size) : ngx_alloc(size, ngx_cycle->log); > if (points == NULL) { > return NGX_ERROR; > } > > points->number = 0; > > + if (npoints == 0) { > + hcf->points = points; > + return NGX_OK; > + } > + > for (peer = peers->peer; peer; peer = peer->next) { > server = &peer->server; > > @@ -401,7 +441,6 @@ ngx_http_upstream_init_chash(ngx_conf_t > > points->number = i + 1; > > - hcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_hash_module); > hcf->points = points; > > return NGX_OK; > @@ -481,7 +520,22 @@ ngx_http_upstream_init_chash_peer(ngx_ht > > ngx_http_upstream_rr_peers_rlock(hp->rrp.peers); > > - hp->hash = ngx_http_upstream_find_chash_point(hcf->points, hash); > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (hp->rrp.peers->config > + && (hcf->points == NULL || hcf->config != *hp->rrp.peers->config)) > + { > + if (ngx_http_upstream_update_chash(NULL, us) != NGX_OK) { > + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > + return NGX_ERROR; > + } > + > + hcf->config = *hp->rrp.peers->config; > + } > +#endif > + > + if (hcf->points->number) { > + hp->hash = ngx_http_upstream_find_chash_point(hcf->points, hash); > + } > > ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > > @@ -517,6 +571,20 @@ ngx_http_upstream_get_chash_peer(ngx_pee > pc->cached = 0; > pc->connection = NULL; > > + if (hp->rrp.peers->number == 0) { > + pc->name = hp->rrp.peers->name; > + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > + return NGX_BUSY; > + } > + > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { > + pc->name = hp->rrp.peers->name; > + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > + return NGX_BUSY; > + } > +#endif > + > now = ngx_time(); > hcf = hp->conf; > > @@ -597,6 +665,7 @@ ngx_http_upstream_get_chash_peer(ngx_pee > found: > > hp->rrp.current = best; > + ngx_http_upstream_rr_peer_ref(hp->rrp.peers, best); > > pc->sockaddr = best->sockaddr; > pc->socklen = best->socklen; > @@ -664,6 +733,7 @@ 
ngx_http_upstream_hash(ngx_conf_t *cf, n > } > > uscf->flags = NGX_HTTP_UPSTREAM_CREATE > + |NGX_HTTP_UPSTREAM_MODIFY > |NGX_HTTP_UPSTREAM_WEIGHT > |NGX_HTTP_UPSTREAM_MAX_CONNS > |NGX_HTTP_UPSTREAM_MAX_FAILS > diff --git a/src/http/modules/ngx_http_upstream_ip_hash_module.c b/src/http/modules/ngx_http_upstream_ip_hash_module.c > --- a/src/http/modules/ngx_http_upstream_ip_hash_module.c > +++ b/src/http/modules/ngx_http_upstream_ip_hash_module.c > @@ -163,11 +163,19 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p > > ngx_http_upstream_rr_peers_rlock(iphp->rrp.peers); > > - if (iphp->tries > 20 || iphp->rrp.peers->single) { > + if (iphp->tries > 20 || iphp->rrp.peers->number < 2) { > ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); > return iphp->get_rr_peer(pc, &iphp->rrp); > } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (iphp->rrp.peers->config && iphp->rrp.config != *iphp->rrp.peers->config) > + { > + ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); > + return iphp->get_rr_peer(pc, &iphp->rrp); > + } > +#endif > + > now = ngx_time(); > > pc->cached = 0; > @@ -232,6 +240,7 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p > } > > iphp->rrp.current = peer; > + ngx_http_upstream_rr_peer_ref(iphp->rrp.peers, peer); > > pc->sockaddr = peer->sockaddr; > pc->socklen = peer->socklen; > @@ -268,6 +277,7 @@ ngx_http_upstream_ip_hash(ngx_conf_t *cf > uscf->peer.init_upstream = ngx_http_upstream_init_ip_hash; > > uscf->flags = NGX_HTTP_UPSTREAM_CREATE > + |NGX_HTTP_UPSTREAM_MODIFY > |NGX_HTTP_UPSTREAM_WEIGHT > |NGX_HTTP_UPSTREAM_MAX_CONNS > |NGX_HTTP_UPSTREAM_MAX_FAILS > diff --git a/src/http/modules/ngx_http_upstream_least_conn_module.c b/src/http/modules/ngx_http_upstream_least_conn_module.c > --- a/src/http/modules/ngx_http_upstream_least_conn_module.c > +++ b/src/http/modules/ngx_http_upstream_least_conn_module.c > @@ -124,6 +124,12 @@ ngx_http_upstream_get_least_conn_peer(ng > > ngx_http_upstream_rr_peers_wlock(peers); > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (peers->config && 
rrp->config != *peers->config) { > + goto busy; > + } > +#endif > + > best = NULL; > total = 0; > > @@ -244,6 +250,7 @@ ngx_http_upstream_get_least_conn_peer(ng > best->conns++; > > rrp->current = best; > + ngx_http_upstream_rr_peer_ref(peers, best); > > n = p / (8 * sizeof(uintptr_t)); > m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); > @@ -278,8 +285,18 @@ failed: > } > > ngx_http_upstream_rr_peers_wlock(peers); > + > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (peers->config && rrp->config != *peers->config) { > + goto busy; > + } > +#endif This block is useless. > } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > +busy: > +#endif > + > ngx_http_upstream_rr_peers_unlock(peers); > > pc->name = peers->name; > @@ -303,6 +320,7 @@ ngx_http_upstream_least_conn(ngx_conf_t > uscf->peer.init_upstream = ngx_http_upstream_init_least_conn; > > uscf->flags = NGX_HTTP_UPSTREAM_CREATE > + |NGX_HTTP_UPSTREAM_MODIFY > |NGX_HTTP_UPSTREAM_WEIGHT > |NGX_HTTP_UPSTREAM_MAX_CONNS > |NGX_HTTP_UPSTREAM_MAX_FAILS > diff --git a/src/http/modules/ngx_http_upstream_random_module.c b/src/http/modules/ngx_http_upstream_random_module.c > --- a/src/http/modules/ngx_http_upstream_random_module.c > +++ b/src/http/modules/ngx_http_upstream_random_module.c > @@ -17,6 +17,9 @@ typedef struct { > > typedef struct { > ngx_uint_t two; > +#if (NGX_HTTP_UPSTREAM_ZONE) > + ngx_uint_t config; > +#endif > ngx_http_upstream_random_range_t *ranges; > } ngx_http_upstream_random_srv_conf_t; > > @@ -127,6 +130,11 @@ ngx_http_upstream_update_random(ngx_pool > > rcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_random_module); > > + if (rcf->ranges) { > + ngx_free(rcf->ranges); > + rcf->ranges = NULL; > + } > + > peers = us->peer.data; > > size = peers->number * sizeof(ngx_http_upstream_random_range_t); > @@ -186,11 +194,15 @@ ngx_http_upstream_init_random_peer(ngx_h > ngx_http_upstream_rr_peers_rlock(rp->rrp.peers); > > #if (NGX_HTTP_UPSTREAM_ZONE) > - if (rp->rrp.peers->shpool && rcf->ranges == NULL) { > + if 
(rp->rrp.peers->config > + && (rcf->ranges == NULL || rcf->config != *rp->rrp.peers->config)) > + { > if (ngx_http_upstream_update_random(NULL, us) != NGX_OK) { > ngx_http_upstream_rr_peers_unlock(rp->rrp.peers); > return NGX_ERROR; > } > + > + rcf->config = *rp->rrp.peers->config; > } > #endif > > @@ -220,11 +232,18 @@ ngx_http_upstream_get_random_peer(ngx_pe > > ngx_http_upstream_rr_peers_rlock(peers); > > - if (rp->tries > 20 || peers->single) { > + if (rp->tries > 20 || peers->number < 2) { > ngx_http_upstream_rr_peers_unlock(peers); > return ngx_http_upstream_get_round_robin_peer(pc, rrp); > } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (peers->config && rrp->config != *peers->config) { > + ngx_http_upstream_rr_peers_unlock(peers); > + return ngx_http_upstream_get_round_robin_peer(pc, rrp); > + } > +#endif > + > pc->cached = 0; > pc->connection = NULL; > > @@ -274,6 +293,7 @@ ngx_http_upstream_get_random_peer(ngx_pe > } > > rrp->current = peer; > + ngx_http_upstream_rr_peer_ref(peers, peer); > > if (now - peer->checked > peer->fail_timeout) { > peer->checked = now; > @@ -314,11 +334,18 @@ ngx_http_upstream_get_random2_peer(ngx_p > > ngx_http_upstream_rr_peers_wlock(peers); > > - if (rp->tries > 20 || peers->single) { > + if (rp->tries > 20 || peers->number < 2) { > ngx_http_upstream_rr_peers_unlock(peers); > return ngx_http_upstream_get_round_robin_peer(pc, rrp); > } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (peers->config && rrp->config != *peers->config) { > + ngx_http_upstream_rr_peers_unlock(peers); > + return ngx_http_upstream_get_round_robin_peer(pc, rrp); > + } > +#endif > + > pc->cached = 0; > pc->connection = NULL; > > @@ -384,6 +411,7 @@ ngx_http_upstream_get_random2_peer(ngx_p > } > > rrp->current = peer; > + ngx_http_upstream_rr_peer_ref(peers, peer); > > if (now - peer->checked > peer->fail_timeout) { > peer->checked = now; > @@ -467,6 +495,7 @@ ngx_http_upstream_random(ngx_conf_t *cf, > uscf->peer.init_upstream = ngx_http_upstream_init_random; > > 
uscf->flags = NGX_HTTP_UPSTREAM_CREATE > + |NGX_HTTP_UPSTREAM_MODIFY > |NGX_HTTP_UPSTREAM_WEIGHT > |NGX_HTTP_UPSTREAM_MAX_CONNS > |NGX_HTTP_UPSTREAM_MAX_FAILS > diff --git a/src/http/modules/ngx_http_upstream_zone_module.c b/src/http/modules/ngx_http_upstream_zone_module.c > --- a/src/http/modules/ngx_http_upstream_zone_module.c > +++ b/src/http/modules/ngx_http_upstream_zone_module.c > @@ -18,6 +18,10 @@ static ngx_http_upstream_rr_peers_t *ngx > ngx_slab_pool_t *shpool, ngx_http_upstream_srv_conf_t *uscf); > static ngx_http_upstream_rr_peer_t *ngx_http_upstream_zone_copy_peer( > ngx_http_upstream_rr_peers_t *peers, ngx_http_upstream_rr_peer_t *src); > +static void ngx_http_upstream_zone_set_single( > + ngx_http_upstream_srv_conf_t *uscf); > +static void ngx_http_upstream_zone_remove_peer_locked( > + ngx_http_upstream_rr_peers_t *peers, ngx_http_upstream_rr_peer_t *peer); > > > static ngx_command_t ngx_http_upstream_zone_commands[] = { > @@ -33,6 +37,11 @@ static ngx_command_t ngx_http_upstream_ > }; > > > +static ngx_int_t ngx_http_upstream_zone_init_worker(ngx_cycle_t *cycle); > +static void ngx_http_upstream_zone_resolve_timer(ngx_event_t *event); > +static void ngx_http_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx); These declarations should be moved up to the other function declarations. 
> static ngx_http_module_t ngx_http_upstream_zone_module_ctx = { > NULL, /* preconfiguration */ > NULL, /* postconfiguration */ > @@ -55,7 +64,7 @@ ngx_module_t ngx_http_upstream_zone_mod > NGX_HTTP_MODULE, /* module type */ > NULL, /* init master */ > NULL, /* init module */ > - NULL, /* init process */ > + ngx_http_upstream_zone_init_worker, /* init process */ > NULL, /* init thread */ > NULL, /* exit thread */ > NULL, /* exit process */ > @@ -188,9 +197,15 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > ngx_http_upstream_srv_conf_t *uscf) > { > ngx_str_t *name; > + ngx_uint_t *config; > ngx_http_upstream_rr_peer_t *peer, **peerp; > ngx_http_upstream_rr_peers_t *peers, *backup; > > + config = ngx_slab_calloc(shpool, sizeof(ngx_uint_t)); > + if (config == NULL) { > + return NULL; > + } > + > peers = ngx_slab_alloc(shpool, sizeof(ngx_http_upstream_rr_peers_t)); > if (peers == NULL) { > return NULL; > @@ -214,6 +229,7 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > peers->name = name; > > peers->shpool = shpool; > + peers->config = config; > > for (peerp = &peers->peer; *peerp; peerp = &peer->next) { > /* pool is unlocked */ > @@ -223,6 +239,17 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > } > > *peerp = peer; > + peer->id = (*peers->config)++; > + } > + > + for (peerp = &peers->resolve; *peerp; peerp = &peer->next) { > + peer = ngx_http_upstream_zone_copy_peer(peers, *peerp); > + if (peer == NULL) { > + return NULL; > + } > + > + *peerp = peer; > + peer->id = (*peers->config)++; > } > > if (peers->next == NULL) { > @@ -239,6 +266,7 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > backup->name = name; > > backup->shpool = shpool; > + backup->config = config; > > for (peerp = &backup->peer; *peerp; peerp = &peer->next) { > /* pool is unlocked */ > @@ -248,6 +276,17 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > } > > *peerp = peer; > + peer->id = (*backup->config)++; > + } > + > + for (peerp = &backup->resolve; *peerp; peerp = &peer->next) { > + peer = 
ngx_http_upstream_zone_copy_peer(backup, *peerp); > + if (peer == NULL) { > + return NULL; > + } > + > + *peerp = peer; > + peer->id = (*backup->config)++; > } > > peers->next = backup; > @@ -279,6 +318,7 @@ ngx_http_upstream_zone_copy_peer(ngx_htt > dst->sockaddr = NULL; > dst->name.data = NULL; > dst->server.data = NULL; > + dst->host = NULL; > } > > dst->sockaddr = ngx_slab_calloc_locked(pool, sizeof(ngx_sockaddr_t)); > @@ -301,12 +341,37 @@ ngx_http_upstream_zone_copy_peer(ngx_htt > } > > ngx_memcpy(dst->server.data, src->server.data, src->server.len); > + > + if (src->host) { > + dst->host = ngx_slab_calloc_locked(pool, > + sizeof(ngx_http_upstream_host_t)); > + if (dst->host == NULL) { > + goto failed; > + } > + > + dst->host->name.data = ngx_slab_alloc_locked(pool, > + src->host->name.len); > + if (dst->host->name.data == NULL) { > + goto failed; > + } > + > + dst->host->peers = peers; > + dst->host->peer = dst; > + > + dst->host->name.len = src->host->name.len; > + ngx_memcpy(dst->host->name.data, src->host->name.data, > + src->host->name.len); > + } > } > > return dst; > > failed: > > + if (dst->host) { > + ngx_slab_free_locked(pool, dst->host); > + } > + > if (dst->server.data) { > ngx_slab_free_locked(pool, dst->server.data); > } > @@ -323,3 +388,337 @@ failed: > > return NULL; > } > + > + > +static void > +ngx_http_upstream_zone_set_single(ngx_http_upstream_srv_conf_t *uscf) > +{ > + ngx_http_upstream_rr_peers_t *peers; > + > + peers = uscf->peer.data; > + > + if (peers->number == 1 > + && (peers->next == NULL || peers->next->number == 0)) > + { > + peers->single = 1; > + > + } else { > + peers->single = 0; > + } > +} > + > + > +static void > +ngx_http_upstream_zone_remove_peer_locked(ngx_http_upstream_rr_peers_t *peers, > + ngx_http_upstream_rr_peer_t *peer) > +{ > + peers->total_weight -= peer->weight; > + peers->number--; > + peers->tries -= (peer->down == 0); > + (*peers->config)++; > + peers->weighted = (peers->total_weight != peers->number); > + > 
+ ngx_http_upstream_rr_peer_free(peers, peer); > +} > + > + > +static ngx_int_t > +ngx_http_upstream_zone_init_worker(ngx_cycle_t *cycle) > +{ > + ngx_uint_t i; > + ngx_event_t *event; > + ngx_http_upstream_rr_peer_t *peer; > + ngx_http_upstream_rr_peers_t *peers; > + ngx_http_upstream_srv_conf_t *uscf, **uscfp; > + ngx_http_upstream_main_conf_t *umcf; > + > + if (ngx_process != NGX_PROCESS_WORKER > + && ngx_process != NGX_PROCESS_SINGLE) > + { > + return NGX_OK; > + } > + > + umcf = ngx_http_cycle_get_module_main_conf(cycle, ngx_http_upstream_module); > + > + if (umcf == NULL) { > + return NGX_OK; > + } > + > + uscfp = umcf->upstreams.elts; > + > + for (i = 0; i < umcf->upstreams.nelts; i++) { > + > + uscf = uscfp[i]; > + > + if (uscf->shm_zone == NULL) { > + continue; > + } > + > + peers = uscf->peer.data; > + > + do { > + ngx_http_upstream_rr_peers_wlock(peers); > + > + for (peer = peers->resolve; peer; peer = peer->next) { > + > + if (peer->host->worker != ngx_worker) { > + continue; > + } > + > + event = &peer->host->event; > + ngx_memzero(event, sizeof(ngx_event_t)); > + > + event->data = uscf; > + event->handler = ngx_http_upstream_zone_resolve_timer; > + event->log = cycle->log; > + event->cancelable = 1; > + > + ngx_http_upstream_rr_peer_ref(peers, peer); In open source nginx a template cannot be deleted since there's no API. As a result, there's no reason to increase the reference counter here. 
> + ngx_add_timer(event, 1); > + } > + > + ngx_http_upstream_rr_peers_unlock(peers); > + > + peers = peers->next; > + > + } while (peers); > + } > + > + return NGX_OK; > +} > + > + > +static void > +ngx_http_upstream_zone_resolve_timer(ngx_event_t *event) > +{ > + ngx_resolver_ctx_t *ctx; > + ngx_http_upstream_host_t *host; > + ngx_http_upstream_rr_peer_t *template; > + ngx_http_upstream_rr_peers_t *peers; > + ngx_http_upstream_srv_conf_t *uscf; > + > + host = (ngx_http_upstream_host_t *) event; > + uscf = event->data; > + peers = host->peers; > + template = host->peer; > + > + if (template->zombie) { > + (void) ngx_http_upstream_rr_peer_unref(peers, template); > + > + ngx_shmtx_lock(&peers->shpool->mutex); > + > + if (host->service.len) { > + ngx_slab_free_locked(peers->shpool, host->service.data); > + } > + > + ngx_slab_free_locked(peers->shpool, host->name.data); > + ngx_slab_free_locked(peers->shpool, host); > + ngx_shmtx_unlock(&peers->shpool->mutex); > + > + return; > + } Since a template cannot be deleted, it cannot become a zombie as well. This block is useless. 
> + ctx = ngx_resolve_start(uscf->resolver, NULL); > + if (ctx == NULL) { > + goto retry; > + } > + > + if (ctx == NGX_NO_RESOLVER) { > + ngx_log_error(NGX_LOG_ERR, event->log, 0, > + "no resolver defined to resolve %V", &host->name); > + return; > + } > + > + ctx->name = host->name; > + ctx->handler = ngx_http_upstream_zone_resolve_handler; > + ctx->data = host; > + ctx->timeout = uscf->resolver_timeout; > + ctx->cancelable = 1; > + > + if (ngx_resolve_name(ctx) == NGX_OK) { > + return; > + } > + > +retry: > + > + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); > +} > + > + > +static void > +ngx_http_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx) > +{ > + time_t now; > + in_port_t port; > + ngx_msec_t timer; > + ngx_uint_t i, j; > + ngx_event_t *event; > + ngx_resolver_addr_t *addr; > + ngx_http_upstream_host_t *host; > + ngx_http_upstream_rr_peer_t *peer, *template, **peerp; > + ngx_http_upstream_rr_peers_t *peers; > + ngx_http_upstream_srv_conf_t *uscf; > + > + host = ctx->data; > + event = &host->event; > + uscf = event->data; > + peers = host->peers; > + template = host->peer; > + > + ngx_http_upstream_rr_peers_wlock(peers); > + > + if (template->zombie) { > + (void) ngx_http_upstream_rr_peer_unref(peers, template); > + > + ngx_http_upstream_rr_peers_unlock(peers); > + > + ngx_shmtx_lock(&peers->shpool->mutex); > + ngx_slab_free_locked(peers->shpool, host->name.data); > + ngx_slab_free_locked(peers->shpool, host); > + ngx_shmtx_unlock(&peers->shpool->mutex); > + > + ngx_resolve_name_done(ctx); > + > + return; > + } Again, this block is useless. 
> + now = ngx_time(); > + > + if (ctx->state) { > + ngx_log_error(NGX_LOG_ERR, event->log, 0, > + "%V could not be resolved (%i: %s)", > + &ctx->name, ctx->state, > + ngx_resolver_strerror(ctx->state)); > + > + if (ctx->state != NGX_RESOLVE_NXDOMAIN) { > + ngx_http_upstream_rr_peers_unlock(peers); > + > + ngx_resolve_name_done(ctx); > + > + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); > + return; > + } > + > + /* NGX_RESOLVE_NXDOMAIN */ > + > + ctx->naddrs = 0; > + } > + > +#if (NGX_DEBUG) > + { > + u_char text[NGX_SOCKADDR_STRLEN]; > + size_t len; > + > + for (i = 0; i < ctx->naddrs; i++) { > + len = ngx_sock_ntop(ctx->addrs[i].sockaddr, ctx->addrs[i].socklen, > + text, NGX_SOCKADDR_STRLEN, 0); > + > + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, event->log, 0, > + "name %V was resolved to %*s", &host->name, len, text); > + } > + } > +#endif > + > + for (peerp = &peers->peer; *peerp; /* void */ ) { > + peer = *peerp; > + > + if (peer->host != host) { > + goto next; > + } > + > + for (j = 0; j < ctx->naddrs; j++) { > + > + addr = &ctx->addrs[j]; > + > + if (addr->name.len == 0 > + && ngx_cmp_sockaddr(peer->sockaddr, peer->socklen, > + addr->sockaddr, addr->socklen, 0) > + == NGX_OK) > + { > + addr->name.len = 1; > + goto next; > + } > + } > + > + *peerp = peer->next; > + ngx_http_upstream_zone_remove_peer_locked(peers, peer); > + > + ngx_http_upstream_zone_set_single(uscf); > + > + continue; > + > + next: > + > + peerp = &peer->next; > + } > + > + for (i = 0; i < ctx->naddrs; i++) { > + > + addr = &ctx->addrs[i]; > + > + if (addr->name.len == 1) { > + addr->name.len = 0; > + continue; > + } > + > + ngx_shmtx_lock(&peers->shpool->mutex); > + peer = ngx_http_upstream_zone_copy_peer(peers, NULL); > + ngx_shmtx_unlock(&peers->shpool->mutex); > + > + if (peer == NULL) { > + ngx_log_error(NGX_LOG_ERR, event->log, 0, > + "cannot add new server to upstream \"%V\", " > + "memory exhausted", peers->name); > + break; > + } > + > + ngx_memcpy(peer->sockaddr, 
addr->sockaddr, addr->socklen); > + > + port = ((struct sockaddr_in *) template->sockaddr)->sin_port; > + > + switch (peer->sockaddr->sa_family) { > +#if (NGX_HAVE_INET6) > + case AF_INET6: > + ((struct sockaddr_in6 *) peer->sockaddr)->sin6_port = port; > + break; > +#endif > + default: /* AF_INET */ > + ((struct sockaddr_in *) peer->sockaddr)->sin_port = port; > + } > + > + peer->socklen = addr->socklen; > + > + peer->name.len = ngx_sock_ntop(peer->sockaddr, peer->socklen, > + peer->name.data, NGX_SOCKADDR_STRLEN, 1); > + > + peer->host = template->host; > + peer->server = template->server; > + > + peer->weight = template->weight; > + peer->effective_weight = peer->weight; > + peer->max_conns = template->max_conns; > + peer->max_fails = template->max_fails; > + peer->fail_timeout = template->fail_timeout; > + peer->down = template->down; > + > + *peerp = peer; > + peerp = &peer->next; > + > + peers->number++; > + peers->tries += (peer->down == 0); > + peers->total_weight += peer->weight; > + peers->weighted = (peers->total_weight != peers->number); > + peer->id = (*peers->config)++; > + > + ngx_http_upstream_zone_set_single(uscf); > + } > + > + ngx_http_upstream_rr_peers_unlock(peers); > + > + timer = (ngx_msec_t) 1000 * (ctx->valid > now ? ctx->valid - now + 1 : 1); > + timer = ngx_min(timer, uscf->resolver_timeout); The last line was added to facilitate faster recycling of zombie templates. Since there are no zombie templates here, the line can be removed. > + ngx_resolve_name_done(ctx); > + > + ngx_add_timer(event, timer); > +} > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -1565,6 +1565,26 @@ ngx_http_upstream_connect(ngx_http_reque > > u->state->peer = u->peer.name; > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (u->upstream && u->upstream->shm_zone > + && (u->upstream->flags & NGX_HTTP_UPSTREAM_MODIFY) > + ) { Style: ')' should be moved to the line above. 
> + u->state->peer = ngx_palloc(r->pool, > + sizeof(ngx_str_t) + u->peer.name->len); > + if (u->state->peer == NULL) { > + ngx_http_upstream_finalize_request(r, u, > + NGX_HTTP_INTERNAL_SERVER_ERROR); > + return; > + } > + > + u->state->peer->len = u->peer.name->len; > + u->state->peer->data = (u_char *) (u->state->peer + 1); > + ngx_memcpy(u->state->peer->data, u->peer.name->data, u->peer.name->len); > + > + u->peer.name = u->state->peer; > + } > +#endif > + > if (rc == NGX_BUSY) { > ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "no live upstreams"); > ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_NOLIVE); > @@ -6066,6 +6086,7 @@ ngx_http_upstream(ngx_conf_t *cf, ngx_co > u.no_port = 1; > > uscf = ngx_http_upstream_add(cf, &u, NGX_HTTP_UPSTREAM_CREATE > + |NGX_HTTP_UPSTREAM_MODIFY > |NGX_HTTP_UPSTREAM_WEIGHT > |NGX_HTTP_UPSTREAM_MAX_CONNS > |NGX_HTTP_UPSTREAM_MAX_FAILS > @@ -6151,7 +6172,11 @@ ngx_http_upstream(ngx_conf_t *cf, ngx_co > return rv; > } > > - if (uscf->servers->nelts == 0) { > + if (uscf->servers->nelts == 0 > +#if (NGX_HTTP_UPSTREAM_ZONE) > + && uscf->shm_zone == NULL > +#endif In open source nginx empty upstreams are not allowed, irrespective of the zone. No new servers can appear in the upstream during runtime since there's no API. 
> + ) { > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > "no servers are inside upstream"); > return NGX_CONF_ERROR; > @@ -6171,6 +6196,9 @@ ngx_http_upstream_server(ngx_conf_t *cf, > ngx_url_t u; > ngx_int_t weight, max_conns, max_fails; > ngx_uint_t i; > +#if (NGX_HTTP_UPSTREAM_ZONE) > + ngx_uint_t resolve; > +#endif > ngx_http_upstream_server_t *us; > > us = ngx_array_push(uscf->servers); > @@ -6186,6 +6214,9 @@ ngx_http_upstream_server(ngx_conf_t *cf, > max_conns = 0; > max_fails = 1; > fail_timeout = 10; > +#if (NGX_HTTP_UPSTREAM_ZONE) > + resolve = 0; > +#endif > > for (i = 2; i < cf->args->nelts; i++) { > > @@ -6274,6 +6305,13 @@ ngx_http_upstream_server(ngx_conf_t *cf, > continue; > } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (ngx_strcmp(value[i].data, "resolve") == 0) { > + resolve = 1; > + continue; > + } > +#endif > + > goto invalid; > } > > @@ -6282,6 +6320,13 @@ ngx_http_upstream_server(ngx_conf_t *cf, > u.url = value[1]; > u.default_port = 80; > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (resolve) { > + /* resolve at run time */ > + u.no_resolve = 1; > + } > +#endif > + > if (ngx_parse_url(cf->pool, &u) != NGX_OK) { > if (u.err) { > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > @@ -6292,8 +6337,45 @@ ngx_http_upstream_server(ngx_conf_t *cf, > } > > us->name = u.url; > + > +#if (NGX_HTTP_UPSTREAM_ZONE) > + > + if (resolve && u.naddrs == 0) { > + ngx_addr_t *addr; > + > + /* save port */ > + > + addr = ngx_pcalloc(cf->pool, sizeof(ngx_addr_t)); > + if (addr == NULL) { > + return NGX_CONF_ERROR; > + } > + > + addr->sockaddr = ngx_palloc(cf->pool, u.socklen); > + if (addr->sockaddr == NULL) { > + return NGX_CONF_ERROR; > + } > + > + ngx_memcpy(addr->sockaddr, &u.sockaddr, u.socklen); > + > + addr->socklen = u.socklen; > + > + us->addrs = addr; > + us->naddrs = 1; > + > + us->host = u.host; > + > + } else { > + us->addrs = u.addrs; > + us->naddrs = u.naddrs; > + } > + > +#else > + > us->addrs = u.addrs; > us->naddrs = u.naddrs; > + > +#endif > + > us->weight = weight; 
> us->max_conns = max_conns; > us->max_fails = max_fails; > diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h > --- a/src/http/ngx_http_upstream.h > +++ b/src/http/ngx_http_upstream.h > @@ -104,7 +104,11 @@ typedef struct { > > unsigned backup:1; > > - NGX_COMPAT_BEGIN(6) > +#if (NGX_HTTP_UPSTREAM_ZONE) > + ngx_str_t host; > +#endif > + > + NGX_COMPAT_BEGIN(4) > NGX_COMPAT_END > } ngx_http_upstream_server_t; > > @@ -115,6 +119,7 @@ typedef struct { > #define NGX_HTTP_UPSTREAM_FAIL_TIMEOUT 0x0008 > #define NGX_HTTP_UPSTREAM_DOWN 0x0010 > #define NGX_HTTP_UPSTREAM_BACKUP 0x0020 > +#define NGX_HTTP_UPSTREAM_MODIFY 0x0040 > #define NGX_HTTP_UPSTREAM_MAX_CONNS 0x0100 > > > @@ -133,6 +138,8 @@ struct ngx_http_upstream_srv_conf_s { > > #if (NGX_HTTP_UPSTREAM_ZONE) > ngx_shm_zone_t *shm_zone; > + ngx_resolver_t *resolver; > + ngx_msec_t resolver_timeout; > #endif > }; > > diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c > --- a/src/http/ngx_http_upstream_round_robin.c > +++ b/src/http/ngx_http_upstream_round_robin.c > @@ -32,10 +32,15 @@ ngx_http_upstream_init_round_robin(ngx_c > ngx_http_upstream_srv_conf_t *us) > { > ngx_url_t u; > - ngx_uint_t i, j, n, w, t; > + ngx_uint_t i, j, n, r, w, t; > ngx_http_upstream_server_t *server; > ngx_http_upstream_rr_peer_t *peer, **peerp; > ngx_http_upstream_rr_peers_t *peers, *backup; > +#if (NGX_HTTP_UPSTREAM_ZONE) > + ngx_uint_t resolve; > + ngx_http_core_loc_conf_t *clcf; > + ngx_http_upstream_rr_peer_t **rpeerp; > +#endif > > us->peer.init = ngx_http_upstream_init_round_robin_peer; > > @@ -43,23 +48,99 @@ ngx_http_upstream_init_round_robin(ngx_c > server = us->servers->elts; > > n = 0; > + r = 0; > w = 0; > t = 0; > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + resolve = 0; > +#endif > + > for (i = 0; i < us->servers->nelts; i++) { > + > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (server[i].host.len) { > + resolve = 1; > + } > +#endif > + > if (server[i].backup) { > continue; 
> } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (server[i].host.len) { > + r++; > + > + } else { > + n += server[i].naddrs; > + w += server[i].naddrs * server[i].weight; > + > + if (!server[i].down) { > + t += server[i].naddrs; > + } > + } > +#else The code above and below is the same code. The reason behind duplication was to simplify the diff. Now duplication makes no sense. Instead, the following can be done: #if (NGX_HTTP_UPSTREAM_ZONE) if (server[i].host.len) { r++; continue; } #endif > n += server[i].naddrs; > w += server[i].naddrs * server[i].weight; > > if (!server[i].down) { > t += server[i].naddrs; > } > +#endif > } > > - if (n == 0) { > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (us->shm_zone) { > + > + if (resolve && !(us->flags & NGX_HTTP_UPSTREAM_MODIFY)) { > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > + "load balancing method does not support" > + " resolving names at run time in" > + " upstream \"%V\" in %s:%ui", > + &us->host, us->file_name, us->line); > + return NGX_ERROR; > + } > + > + clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module); > + > + us->resolver = clcf->resolver; > + us->resolver_timeout = clcf->resolver_timeout; > + > + /* > + * Without "resolver_timeout" in http{}, the value is unset. > + * Even if we set it in ngx_http_core_merge_loc_conf(), it's > + * still dependent on the module order and unreliable. 
> + */ > + ngx_conf_init_msec_value(us->resolver_timeout, 30000); > + > + if (resolve > + && (us->resolver == NULL > + || us->resolver->connections.nelts == 0)) > + { > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > + "no resolver defined to resolve names" > + " at run time in upstream \"%V\" in %s:%ui", > + &us->host, us->file_name, us->line); > + return NGX_ERROR; > + } > + > + } else if (resolve) { > + > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > + "resolving names at run time requires" > + " upstream \"%V\" in %s:%ui" > + " to be in shared memory", > + &us->host, us->file_name, us->line); > + return NGX_ERROR; > + } > +#endif > + > + if (n == 0 > +#if (NGX_HTTP_UPSTREAM_ZONE) > + && us->shm_zone == NULL > +#endif An empty zone will always be empty in open source nginx. This should be checked instead: if (n + r == 0) { ... } > + ) { > ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > "no servers in upstream \"%V\" in %s:%ui", > &us->host, us->file_name, us->line); > @@ -71,7 +152,8 @@ ngx_http_upstream_init_round_robin(ngx_c > return NGX_ERROR; > } > > - peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) * n); > + peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) > + * (n + r)); > if (peer == NULL) { > return NGX_ERROR; > } > @@ -86,11 +168,46 @@ ngx_http_upstream_init_round_robin(ngx_c > n = 0; > peerp = &peers->peer; > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + rpeerp = &peers->resolve; > +#endif > + > for (i = 0; i < us->servers->nelts; i++) { > if (server[i].backup) { > continue; > } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (server[i].host.len) { > + > + peer[n].host = ngx_pcalloc(cf->pool, > + sizeof(ngx_http_upstream_host_t)); > + if (peer[n].host == NULL) { > + return NGX_ERROR; > + } > + > + peer[n].host->name = server[i].host; > + > + peer[n].sockaddr = server[i].addrs[0].sockaddr; > + peer[n].socklen = server[i].addrs[0].socklen; > + peer[n].name = server[i].addrs[0].name; > + peer[n].weight = server[i].weight; > + 
peer[n].effective_weight = server[i].weight; > + peer[n].current_weight = 0; > + peer[n].max_conns = server[i].max_conns; > + peer[n].max_fails = server[i].max_fails; > + peer[n].fail_timeout = server[i].fail_timeout; > + peer[n].down = server[i].down; > + peer[n].server = server[i].name; > + > + *rpeerp = &peer[n]; > + rpeerp = &peer[n].next; > + n++; > + > + continue; > + } > +#endif > + > for (j = 0; j < server[i].naddrs; j++) { > peer[n].sockaddr = server[i].addrs[j].sockaddr; > peer[n].socklen = server[i].addrs[j].socklen; > @@ -115,6 +232,7 @@ ngx_http_upstream_init_round_robin(ngx_c > /* backup servers */ > > n = 0; > + r = 0; > w = 0; > t = 0; > > @@ -123,15 +241,37 @@ ngx_http_upstream_init_round_robin(ngx_c > continue; > } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (server[i].host.len) { > + r++; > + > + } else { > + n += server[i].naddrs; > + w += server[i].naddrs * server[i].weight; > + > + if (!server[i].down) { > + t += server[i].naddrs; > + } > + } > +#else See above. > n += server[i].naddrs; > w += server[i].naddrs * server[i].weight; > > if (!server[i].down) { > t += server[i].naddrs; > } > +#endif > } > > - if (n == 0) { > + if (n == 0 > +#if (NGX_HTTP_UPSTREAM_ZONE) > + && us->shm_zone == NULL > +#endif > + ) { > + return NGX_OK; > + } See above. if (n + r == 0) { .. } > + > + if (n + r == 0 && !(us->flags & NGX_HTTP_UPSTREAM_BACKUP)) { > return NGX_OK; > } After the change above this block will be useless. 
> @@ -140,12 +280,16 @@ ngx_http_upstream_init_round_robin(ngx_c > return NGX_ERROR; > } > > - peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) * n); > + peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) > + * (n + r)); > if (peer == NULL) { > return NGX_ERROR; > } > > - peers->single = 0; > + if (n > 0) { > + peers->single = 0; > + } > + > backup->single = 0; > backup->number = n; > backup->weighted = (w != n); > @@ -156,11 +300,46 @@ ngx_http_upstream_init_round_robin(ngx_c > n = 0; > peerp = &backup->peer; > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + rpeerp = &backup->resolve; > +#endif > + > for (i = 0; i < us->servers->nelts; i++) { > if (!server[i].backup) { > continue; > } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (server[i].host.len) { > + > + peer[n].host = ngx_pcalloc(cf->pool, > + sizeof(ngx_http_upstream_host_t)); > + if (peer[n].host == NULL) { > + return NGX_ERROR; > + } > + > + peer[n].host->name = server[i].host; > + > + peer[n].sockaddr = server[i].addrs[0].sockaddr; > + peer[n].socklen = server[i].addrs[0].socklen; > + peer[n].name = server[i].addrs[0].name; > + peer[n].weight = server[i].weight; > + peer[n].effective_weight = server[i].weight; > + peer[n].current_weight = 0; > + peer[n].max_conns = server[i].max_conns; > + peer[n].max_fails = server[i].max_fails; > + peer[n].fail_timeout = server[i].fail_timeout; > + peer[n].down = server[i].down; > + peer[n].server = server[i].name; > + > + *rpeerp = &peer[n]; > + rpeerp = &peer[n].next; > + n++; > + > + continue; > + } > +#endif > + > for (j = 0; j < server[i].naddrs; j++) { > peer[n].sockaddr = server[i].addrs[j].sockaddr; > peer[n].socklen = server[i].addrs[j].socklen; > @@ -273,7 +452,12 @@ ngx_http_upstream_init_round_robin_peer( > > rrp->peers = us->peer.data; > rrp->current = NULL; > - rrp->config = 0; > + > + ngx_http_upstream_rr_peers_rlock(rrp->peers); > + > +#if (NGX_HTTP_UPSTREAM_ZONE) > + rrp->config = rrp->peers->config ? 
*rrp->peers->config : 0; > +#endif > > n = rrp->peers->number; > > @@ -281,6 +465,10 @@ ngx_http_upstream_init_round_robin_peer( > n = rrp->peers->next->number; > } > > + r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers); > + > + ngx_http_upstream_rr_peers_unlock(rrp->peers); > + > if (n <= 8 * sizeof(uintptr_t)) { > rrp->tried = &rrp->data; > rrp->data = 0; > @@ -296,7 +484,6 @@ ngx_http_upstream_init_round_robin_peer( > > r->upstream->peer.get = ngx_http_upstream_get_round_robin_peer; > r->upstream->peer.free = ngx_http_upstream_free_round_robin_peer; > - r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers); > #if (NGX_HTTP_SSL) > r->upstream->peer.set_session = > ngx_http_upstream_set_round_robin_peer_session; > @@ -446,6 +633,12 @@ ngx_http_upstream_get_round_robin_peer(n > peers = rrp->peers; > ngx_http_upstream_rr_peers_wlock(peers); > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (peers->config && rrp->config != *peers->config) { > + goto busy; > + } > +#endif > + > if (peers->single) { > peer = peers->peer; > > @@ -458,6 +651,7 @@ ngx_http_upstream_get_round_robin_peer(n > } > > rrp->current = peer; > + ngx_http_upstream_rr_peer_ref(peers, peer); > > } else { > > @@ -508,8 +702,18 @@ failed: > } > > ngx_http_upstream_rr_peers_wlock(peers); > + > +#if (NGX_HTTP_UPSTREAM_ZONE) > + if (peers->config && rrp->config != *peers->config) { > + goto busy; > + } > +#endif This block is useless. 
> } > > +#if (NGX_HTTP_UPSTREAM_ZONE) > +busy: > +#endif > + > ngx_http_upstream_rr_peers_unlock(peers); > > pc->name = peers->name; > @@ -580,6 +784,7 @@ ngx_http_upstream_get_peer(ngx_http_upst > } > > rrp->current = best; > + ngx_http_upstream_rr_peer_ref(rrp->peers, best); > > n = p / (8 * sizeof(uintptr_t)); > m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); > @@ -617,9 +822,16 @@ ngx_http_upstream_free_round_robin_peer( > > if (rrp->peers->single) { > > + if (peer->fails) { > + peer->fails = 0; > + } > + > peer->conns--; > > - ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); > + if (ngx_http_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { > + ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); > + } > + > ngx_http_upstream_rr_peers_unlock(rrp->peers); > > pc->tries = 0; > @@ -661,7 +873,10 @@ ngx_http_upstream_free_round_robin_peer( > > peer->conns--; > > - ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); > + if (ngx_http_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { > + ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); > + } > + > ngx_http_upstream_rr_peers_unlock(rrp->peers); > > if (pc->tries) { > diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h > --- a/src/http/ngx_http_upstream_round_robin.h > +++ b/src/http/ngx_http_upstream_round_robin.h > @@ -14,8 +14,23 @@ > #include > > > +typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; > typedef struct ngx_http_upstream_rr_peer_s ngx_http_upstream_rr_peer_t; > > + > +#if (NGX_HTTP_UPSTREAM_ZONE) > + > +typedef struct { > + ngx_event_t event; /* must be first */ > + ngx_uint_t worker; > + ngx_str_t name; > + ngx_http_upstream_rr_peers_t *peers; > + ngx_http_upstream_rr_peer_t *peer; > +} ngx_http_upstream_host_t; > + > +#endif > + > + > struct ngx_http_upstream_rr_peer_s { > struct sockaddr *sockaddr; > socklen_t socklen; > @@ -46,7 +61,12 @@ struct ngx_http_upstream_rr_peer_s { > #endif > > #if (NGX_HTTP_UPSTREAM_ZONE) 
> + unsigned zombie:1; I suggest declaring this as in other similar places: ngx_uint_t zombie; /* unsigned zombie:1; */ > + > ngx_atomic_t lock; > + ngx_uint_t id; This field is not used in open source nginx and should not be added or assigned. > + ngx_uint_t refs; > + ngx_http_upstream_host_t *host; > #endif > > ngx_http_upstream_rr_peer_t *next; > @@ -56,8 +76,6 @@ struct ngx_http_upstream_rr_peer_s { > }; > > > -typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; > - > struct ngx_http_upstream_rr_peers_s { > ngx_uint_t number; > > @@ -78,6 +96,12 @@ struct ngx_http_upstream_rr_peers_s { > ngx_http_upstream_rr_peers_t *next; > > ngx_http_upstream_rr_peer_t *peer; > + > +#if (NGX_HTTP_UPSTREAM_ZONE) > + ngx_uint_t *config; > + ngx_http_upstream_rr_peer_t *resolve; > + ngx_uint_t zombies; This field is unused in open source nginx and should not be added or assigned. > +#endif > }; > > > @@ -114,6 +138,67 @@ struct ngx_http_upstream_rr_peers_s { > ngx_rwlock_unlock(&peer->lock); \ > } > > + > +#define ngx_http_upstream_rr_peer_ref(peers, peer) \ > + (peer)->refs++; > + > + > +static ngx_inline void > +ngx_http_upstream_rr_peer_free_locked(ngx_http_upstream_rr_peers_t *peers, > + ngx_http_upstream_rr_peer_t *peer) > +{ > + if (peer->refs) { > + peer->zombie = 1; > + peers->zombies++; > + return; > + } > + > + ngx_slab_free_locked(peers->shpool, peer->sockaddr); > + ngx_slab_free_locked(peers->shpool, peer->name.data); > + > + if (peer->server.data && (peer->host == NULL || peer->host->peer == peer)) { > + ngx_slab_free_locked(peers->shpool, peer->server.data); > + } > + > +#if (NGX_HTTP_SSL) > + if (peer->ssl_session) { > + ngx_slab_free_locked(peers->shpool, peer->ssl_session); > + } > +#endif > + > + ngx_slab_free_locked(peers->shpool, peer); > +} > + > + > +static ngx_inline void > +ngx_http_upstream_rr_peer_free(ngx_http_upstream_rr_peers_t *peers, > + ngx_http_upstream_rr_peer_t *peer) > +{ > + ngx_shmtx_lock(&peers->shpool->mutex); > + 
ngx_http_upstream_rr_peer_free_locked(peers, peer); > + ngx_shmtx_unlock(&peers->shpool->mutex); > +} > + > + > +static ngx_inline ngx_int_t > +ngx_http_upstream_rr_peer_unref(ngx_http_upstream_rr_peers_t *peers, > + ngx_http_upstream_rr_peer_t *peer) > +{ > + peer->refs--; > + > + if (peers->shpool == NULL) { > + return NGX_OK; > + } > + > + if (peer->refs == 0 && peer->zombie) { > + ngx_http_upstream_rr_peer_free(peers, peer); > + peers->zombies--; > + return NGX_DONE; > + } > + > + return NGX_OK; > +} > + > #else > > #define ngx_http_upstream_rr_peers_rlock(peers) > @@ -121,6 +206,8 @@ struct ngx_http_upstream_rr_peers_s { > #define ngx_http_upstream_rr_peers_unlock(peers) > #define ngx_http_upstream_rr_peer_lock(peers, peer) > #define ngx_http_upstream_rr_peer_unlock(peers, peer) > +#define ngx_http_upstream_rr_peer_ref(peers, peer) > +#define ngx_http_upstream_rr_peer_unref(peers, peer) NGX_OK > > #endif > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From arut at nginx.com Mon Jul 8 14:22:47 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 8 Jul 2024 18:22:47 +0400 Subject: [PATCH 4 of 9] Core: inheritance of non-reusable shared memory zones In-Reply-To: <90ef6f74980d5e46c95a.1718317739@fedora-wsl.local> References: <90ef6f74980d5e46c95a.1718317739@fedora-wsl.local> Message-ID: <20240708142247.nkgxkc24wnxghcia@N00W24XTQX> On Thu, Jun 13, 2024 at 03:28:59PM -0700, Aleksei Bavshin wrote: > # HG changeset patch > # User Ruslan Ermilov > # Date 1509736941 -10800 > # Fri Nov 03 22:22:21 2017 +0300 > # Node ID 90ef6f74980d5e46c95aa32375a58bb8eb56122e > # Parent 8b7fcded3983023229de1a6df5e2e0b857ee1bc9 > Core: inheritance of non-reusable shared memory zones. > > When re-creating a non-reusable zone, make the pointer to the old zone > available during the new zone initialization. 
> > diff --git a/src/core/ngx_cycle.c b/src/core/ngx_cycle.c > --- a/src/core/ngx_cycle.c > +++ b/src/core/ngx_cycle.c > @@ -38,7 +38,7 @@ static ngx_connection_t dumb; > ngx_cycle_t * > ngx_init_cycle(ngx_cycle_t *old_cycle) > { > - void *rv; > + void *rv, *data; > char **senv; > ngx_uint_t i, n; > ngx_log_t *log; > @@ -438,6 +438,8 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) > opart = &old_cycle->shared_memory.part; > oshm_zone = opart->elts; > > + data = NULL; > + > for (n = 0; /* void */ ; n++) { > > if (n >= opart->nelts) { > @@ -461,9 +463,13 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) > continue; > } > > + if (shm_zone[i].tag == oshm_zone[n].tag && shm_zone[i].noreuse) { > + data = oshm_zone[n].data; > + break; > + } > + > if (shm_zone[i].tag == oshm_zone[n].tag > - && shm_zone[i].shm.size == oshm_zone[n].shm.size > - && !shm_zone[i].noreuse) > + && shm_zone[i].shm.size == oshm_zone[n].shm.size) > { > shm_zone[i].shm.addr = oshm_zone[n].shm.addr; > #if (NGX_WIN32) > @@ -490,7 +496,7 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) > goto failed; > } > > - if (shm_zone[i].init(&shm_zone[i], NULL) != NGX_OK) { > + if (shm_zone[i].init(&shm_zone[i], data) != NGX_OK) { > goto failed; > } > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel Looks fine From arut at nginx.com Mon Jul 8 15:06:29 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 8 Jul 2024 19:06:29 +0400 Subject: [PATCH 7 of 9] Upstream: copy upstream zone DNS valid time during config reload In-Reply-To: <8c8d8118c7ac0a0426f4.1718317742@fedora-wsl.local> References: <8c8d8118c7ac0a0426f4.1718317742@fedora-wsl.local> Message-ID: <20240708150629.w3h3gauafljd36vn@N00W24XTQX> Hi, On Thu, Jun 13, 2024 at 03:29:02PM -0700, Aleksei Bavshin wrote: > # HG changeset patch > # User Mini Hawthorne > # Date 1689189645 25200 > # Wed Jul 12 12:20:45 2023 -0700 > # Node ID 
8c8d8118c7ac0a0426f48dbfed94e279dddff992 > # Parent 621ba257aeac3017ea83b24fafa201e07c1c7756 > Upstream: copy upstream zone DNS valid time during config reload. > > Previously, all upstream DNS entries would be immediately re-resolved > on config reload. With a large number of upstreams, this creates > a spike of DNS resolution requests. These spikes can overwhelm the > DNS server or cause drops on the network. > > This patch retains the TTL of previous resolutions across reloads > by copying each upstream's name's expiry time across configuration > cycles. As a result, no additional resolutions are needed. > > diff --git a/src/http/modules/ngx_http_upstream_zone_module.c b/src/http/modules/ngx_http_upstream_zone_module.c > --- a/src/http/modules/ngx_http_upstream_zone_module.c > +++ b/src/http/modules/ngx_http_upstream_zone_module.c > @@ -443,6 +443,8 @@ ngx_http_upstream_zone_copy_peer(ngx_htt > ngx_memcpy(dst->host->service.data, src->host->service.data, > src->host->service.len); > } > + > + dst->host->valid = src->host->valid; > } > } > > @@ -547,6 +549,8 @@ ngx_http_upstream_zone_preresolve(ngx_ht > > peer->host = template->host; > > + template->host->valid = host->valid; > + > server = template->host->service.len ? 
&opeer->server > : &template->server; > > @@ -694,6 +698,8 @@ ngx_http_upstream_zone_init_worker(ngx_c > static void > ngx_http_upstream_zone_resolve_timer(ngx_event_t *event) > { > + time_t now, valid; > + ngx_msec_t timer; > ngx_resolver_ctx_t *ctx; > ngx_http_upstream_host_t *host; > ngx_http_upstream_rr_peer_t *template; > @@ -705,6 +711,9 @@ ngx_http_upstream_zone_resolve_timer(ngx > peers = host->peers; > template = host->peer; > > + now = ngx_time(); > + valid = host->valid; > + > if (template->zombie) { > (void) ngx_http_upstream_rr_peer_unref(peers, template); > > @@ -721,6 +730,10 @@ ngx_http_upstream_zone_resolve_timer(ngx > return; > } > > + if (valid > now) { > + goto retry; > + } > + > ctx = ngx_resolve_start(uscf->resolver, NULL); > if (ctx == NULL) { > goto retry; > @@ -745,7 +758,11 @@ ngx_http_upstream_zone_resolve_timer(ngx > > retry: > > - ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); > + /* don't delay zombie cleanup longer than resolver_timeout */ > + timer = (ngx_msec_t) 1000 * (valid > now ? valid - now + 1 : 1); > + timer = ngx_min(timer, uscf->resolver_timeout); In open source nginx we have no zombies to clean up, which means 1. the comment above should be removed 2. when valid > now, we should wait exactly (valid - now) Probably it's easier to leave this code as is and add a separate ngx_add_timer() to the block above. An even better solution is to schedule the right timeout in ngx_http_upstream_zone_init_worker() for the case when valid > now instead of 1. 
> + > + ngx_add_timer(event, ngx_max(timer, 1000)); > } > > > @@ -1026,6 +1043,8 @@ again: > > done: > > + host->valid = ctx->valid; > + > ngx_http_upstream_rr_peers_unlock(peers); > > while (++i < ctx->naddrs) { > diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h > --- a/src/http/ngx_http_upstream_round_robin.h > +++ b/src/http/ngx_http_upstream_round_robin.h > @@ -25,6 +25,7 @@ typedef struct { > ngx_uint_t worker; > ngx_str_t name; > ngx_str_t service; > + time_t valid; > ngx_http_upstream_rr_peers_t *peers; > ngx_http_upstream_rr_peer_t *peer; > } ngx_http_upstream_host_t; > diff --git a/src/stream/ngx_stream_upstream_round_robin.h b/src/stream/ngx_stream_upstream_round_robin.h > --- a/src/stream/ngx_stream_upstream_round_robin.h > +++ b/src/stream/ngx_stream_upstream_round_robin.h > @@ -25,6 +25,7 @@ typedef struct { > ngx_uint_t worker; > ngx_str_t name; > ngx_str_t service; > + time_t valid; > ngx_stream_upstream_rr_peers_t *peers; > ngx_stream_upstream_rr_peer_t *peer; > } ngx_stream_upstream_host_t; > diff --git a/src/stream/ngx_stream_upstream_zone_module.c b/src/stream/ngx_stream_upstream_zone_module.c > --- a/src/stream/ngx_stream_upstream_zone_module.c > +++ b/src/stream/ngx_stream_upstream_zone_module.c > @@ -544,6 +544,8 @@ ngx_stream_upstream_zone_preresolve(ngx_ > > peer->host = template->host; > > + template->host->valid = host->valid; > + > server = template->host->service.len ? 
&opeer->server > : &template->server; > > @@ -692,6 +694,8 @@ ngx_stream_upstream_zone_init_worker(ngx > static void > ngx_stream_upstream_zone_resolve_timer(ngx_event_t *event) > { > + time_t now, valid; > + ngx_msec_t timer; > ngx_resolver_ctx_t *ctx; > ngx_stream_upstream_host_t *host; > ngx_stream_upstream_rr_peer_t *template; > @@ -703,6 +707,9 @@ ngx_stream_upstream_zone_resolve_timer(n > peers = host->peers; > template = host->peer; > > + now = ngx_time(); > + valid = host->valid; > + > if (template->zombie) { > (void) ngx_stream_upstream_rr_peer_unref(peers, template); > > @@ -719,6 +726,10 @@ ngx_stream_upstream_zone_resolve_timer(n > return; > } > > + if (valid > now) { > + goto retry; > + } > + > ctx = ngx_resolve_start(uscf->resolver, NULL); > if (ctx == NULL) { > goto retry; > @@ -743,7 +754,11 @@ ngx_stream_upstream_zone_resolve_timer(n > > retry: > > - ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); > + /* don't delay zombie cleanup longer than resolver_timeout */ > + timer = (ngx_msec_t) 1000 * (valid > now ? valid - now + 1 : 1); > + timer = ngx_min(timer, uscf->resolver_timeout); > + >Here + ngx_add_timer(event, ngx_max(timer, 1000)); Same here. 
> } > > > @@ -1024,6 +1039,8 @@ again: > > done: > > + host->valid = ctx->valid; > + > ngx_stream_upstream_rr_peers_unlock(peers); > > while (++i < ctx->naddrs) { > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From arut at nginx.com Mon Jul 8 15:16:27 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 8 Jul 2024 19:16:27 +0400 Subject: [PATCH 6 of 9] Upstream: per-upstream resolver In-Reply-To: <621ba257aeac3017ea83.1718317741@fedora-wsl.local> References: <621ba257aeac3017ea83.1718317741@fedora-wsl.local> Message-ID: <20240708151627.tuemqnwnvedyujuk@N00W24XTQX> Hi, On Thu, Jun 13, 2024 at 03:29:01PM -0700, Aleksei Bavshin wrote: > # HG changeset patch > # User Vladimir Homutov > # Date 1571405595 -10800 > # Fri Oct 18 16:33:15 2019 +0300 > # Node ID 621ba257aeac3017ea83b24fafa201e07c1c7756 > # Parent 238c1695d3b7450159ba0c03509584683bf25f9b > Upstream: per-upstream resolver. > > The "resolver" and "resolver_timeout" directives can now be specified > directly in the "upstream" block. 
> > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -169,6 +169,10 @@ static ngx_int_t ngx_http_upstream_cooki > static char *ngx_http_upstream(ngx_conf_t *cf, ngx_command_t *cmd, void *dummy); > static char *ngx_http_upstream_server(ngx_conf_t *cf, ngx_command_t *cmd, > void *conf); > +#if (NGX_HTTP_UPSTREAM_ZONE) > +static char *ngx_http_upstream_resolver(ngx_conf_t *cf, ngx_command_t *cmd, > + void *conf); > +#endif > > static ngx_int_t ngx_http_upstream_set_local(ngx_http_request_t *r, > ngx_http_upstream_t *u, ngx_http_upstream_local_t *local); > @@ -339,6 +343,24 @@ static ngx_command_t ngx_http_upstream_ > 0, > NULL }, > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + > + { ngx_string("resolver"), > + NGX_HTTP_UPS_CONF|NGX_CONF_1MORE, > + ngx_http_upstream_resolver, > + NGX_HTTP_SRV_CONF_OFFSET, > + 0, > + NULL }, > + > + { ngx_string("resolver_timeout"), > + NGX_HTTP_UPS_CONF|NGX_CONF_TAKE1, > + ngx_conf_set_msec_slot, > + NGX_HTTP_SRV_CONF_OFFSET, > + offsetof(ngx_http_upstream_srv_conf_t, resolver_timeout), > + NULL }, > + > +#endif > + > ngx_null_command > }; > > @@ -6438,6 +6460,32 @@ not_supported: > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > + > +static char * > +ngx_http_upstream_resolver(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > +{ > + ngx_http_upstream_srv_conf_t *uscf = conf; > + > + ngx_str_t *value; > + > + if (uscf->resolver) { > + return "is duplicate"; > + } > + > + value = cf->args->elts; > + > + uscf->resolver = ngx_resolver_create(cf, &value[1], cf->args->nelts - 1); > + if (uscf->resolver == NULL) { > + return NGX_CONF_ERROR; > + } > + > + return NGX_CONF_OK; > +} > + > +#endif > + > + > ngx_http_upstream_srv_conf_t * > ngx_http_upstream_add(ngx_conf_t *cf, ngx_url_t *u, ngx_uint_t flags) > { > @@ -6519,6 +6567,9 @@ ngx_http_upstream_add(ngx_conf_t *cf, ng > uscf->line = cf->conf_file->line; > uscf->port = u->port; > uscf->no_port = 
u->no_port; > +#if (NGX_HTTP_UPSTREAM_ZONE) > + uscf->resolver_timeout = NGX_CONF_UNSET_MSEC; > +#endif > > if (u->naddrs == 1 && (u->port || u->family == AF_UNIX)) { > uscf->servers = ngx_array_create(cf->pool, 1, > diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c > --- a/src/http/ngx_http_upstream_round_robin.c > +++ b/src/http/ngx_http_upstream_round_robin.c > @@ -104,15 +104,15 @@ ngx_http_upstream_init_round_robin(ngx_c > > clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module); > > - us->resolver = clcf->resolver; > - us->resolver_timeout = clcf->resolver_timeout; > + if (us->resolver == NULL) { > + us->resolver = clcf->resolver; > + } > > /* > - * Without "resolver_timeout" in http{}, the value is unset. > - * Even if we set it in ngx_http_core_merge_loc_conf(), it's > - * still dependent on the module order and unreliable. > + * Without "resolver_timeout" in http{} the merged value is unset. > */ > - ngx_conf_init_msec_value(us->resolver_timeout, 30000); > + ngx_conf_merge_msec_value(us->resolver_timeout, > + clcf->resolver_timeout, 30000); > > if (resolve > && (us->resolver == NULL > diff --git a/src/stream/ngx_stream_upstream.c b/src/stream/ngx_stream_upstream.c > --- a/src/stream/ngx_stream_upstream.c > +++ b/src/stream/ngx_stream_upstream.c > @@ -22,6 +22,11 @@ static char *ngx_stream_upstream(ngx_con > void *dummy); > static char *ngx_stream_upstream_server(ngx_conf_t *cf, ngx_command_t *cmd, > void *conf); > +#if (NGX_STREAM_UPSTREAM_ZONE) > +static char *ngx_stream_upstream_resolver(ngx_conf_t *cf, ngx_command_t *cmd, > + void *conf); > +#endif > + > static void *ngx_stream_upstream_create_main_conf(ngx_conf_t *cf); > static char *ngx_stream_upstream_init_main_conf(ngx_conf_t *cf, void *conf); > > @@ -42,6 +47,24 @@ static ngx_command_t ngx_stream_upstrea > 0, > NULL }, > > +#if (NGX_STREAM_UPSTREAM_ZONE) > + > + { ngx_string("resolver"), > + NGX_STREAM_UPS_CONF|NGX_CONF_1MORE, > + 
ngx_stream_upstream_resolver, > + NGX_STREAM_SRV_CONF_OFFSET, > + 0, > + NULL }, > + > + { ngx_string("resolver_timeout"), > + NGX_STREAM_UPS_CONF|NGX_CONF_TAKE1, > + ngx_conf_set_msec_slot, > + NGX_STREAM_SRV_CONF_OFFSET, > + offsetof(ngx_stream_upstream_srv_conf_t, resolver_timeout), > + NULL }, > + > +#endif > + > ngx_null_command > }; > > @@ -665,6 +688,32 @@ not_supported: > } > > > +#if (NGX_STREAM_UPSTREAM_ZONE) > + > +static char * > +ngx_stream_upstream_resolver(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) > +{ > + ngx_stream_upstream_srv_conf_t *uscf = conf; > + > + ngx_str_t *value; > + > + if (uscf->resolver) { > + return "is duplicate"; > + } > + > + value = cf->args->elts; > + > + uscf->resolver = ngx_resolver_create(cf, &value[1], cf->args->nelts - 1); > + if (uscf->resolver == NULL) { > + return NGX_CONF_ERROR; > + } > + > + return NGX_CONF_OK; > +} > + > +#endif > + > + > ngx_stream_upstream_srv_conf_t * > ngx_stream_upstream_add(ngx_conf_t *cf, ngx_url_t *u, ngx_uint_t flags) > { > @@ -743,6 +792,9 @@ ngx_stream_upstream_add(ngx_conf_t *cf, > uscf->line = cf->conf_file->line; > uscf->port = u->port; > uscf->no_port = u->no_port; > +#if (NGX_STREAM_UPSTREAM_ZONE) > + uscf->resolver_timeout = NGX_CONF_UNSET_MSEC; > +#endif > > if (u->naddrs == 1 && (u->port || u->family == AF_UNIX)) { > uscf->servers = ngx_array_create(cf->pool, 1, > diff --git a/src/stream/ngx_stream_upstream_round_robin.c b/src/stream/ngx_stream_upstream_round_robin.c > --- a/src/stream/ngx_stream_upstream_round_robin.c > +++ b/src/stream/ngx_stream_upstream_round_robin.c > @@ -111,15 +111,15 @@ ngx_stream_upstream_init_round_robin(ngx > cscf = ngx_stream_conf_get_module_srv_conf(cf, > ngx_stream_core_module); > > - us->resolver = cscf->resolver; > - us->resolver_timeout = cscf->resolver_timeout; > + if (us->resolver == NULL) { > + us->resolver = cscf->resolver; > + } > > /* > - * Without "resolver_timeout" in stream{}, the value is unset. 
> - * Even if we set it in ngx_stream_core_merge_srv_conf(), it's > - * still dependent on the module order and unreliable. > + * Without "resolver_timeout" in stream{} the merged value is unset. > */ > - ngx_conf_init_msec_value(us->resolver_timeout, 30000); > + ngx_conf_merge_msec_value(us->resolver_timeout, > + cscf->resolver_timeout, 30000); > > if (resolve > && (us->resolver == NULL > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel Looks fine From arut at nginx.com Tue Jul 9 16:22:09 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 9 Jul 2024 20:22:09 +0400 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> Message-ID: <20240709162209.jxzzleb6hnjlx3r5@N00W24XTQX> Hi, On Mon, Jul 08, 2024 at 06:20:58PM +0400, Roman Arutyunyan wrote: > Hi, > > On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: > > # HG changeset patch > > # User Ruslan Ermilov > > # Date 1392462754 -14400 > > # Sat Feb 15 15:12:34 2014 +0400 > > # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 > > # Parent 02e9411009b987f408214ab4a8b6b6093f843bcd > > Upstream: re-resolvable servers. > > > > Specifying the upstream server by a hostname together with the > > "resolve" parameter will make the hostname to be periodically > > resolved, and upstream servers added/removed as necessary. > > > > This requires a "resolver" at the "http" configuration block. > > > > The "resolver_timeout" parameter also affects when the failed > > DNS requests will be attempted again. Responses with NXDOMAIN > > will be attempted again in 10 seconds. > > > > Upstream has a configuration generation number that is incremented each > > time servers are added/removed to the primary/backup list. 
This number > > is remembered by the peer.init method, and if peer.get detects a change > > in configuration, it returns NGX_BUSY. > > > > Each server has a reference counter. It is incremented by peer.get and > > decremented by peer.free. When a server is removed, it is removed from > > the list of servers and is marked as "zombie". The memory allocated by > > a zombie peer is freed only when its reference count becomes zero. > > > > Re-resolvable servers utilize timers that also hold a reference. A > > reference is also held while upstream keepalive caches an idle > > connection. > > > > Co-authored-by: Roman Arutyunyan > > Co-authored-by: Sergey Kandaurov > > Co-authored-by: Vladimir Homutov [..] > > diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h > > --- a/src/http/ngx_http_upstream_round_robin.h > > +++ b/src/http/ngx_http_upstream_round_robin.h > > @@ -14,8 +14,23 @@ > > #include > > > > > > +typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; > > typedef struct ngx_http_upstream_rr_peer_s ngx_http_upstream_rr_peer_t; > > > > + > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > + > > +typedef struct { > > + ngx_event_t event; /* must be first */ > > + ngx_uint_t worker; Missed this last time. This field should be removed since all resolving is in worker #0. > > + ngx_str_t name; > > + ngx_http_upstream_rr_peers_t *peers; > > + ngx_http_upstream_rr_peer_t *peer; > > +} ngx_http_upstream_host_t; > > + > > +#endif > > + > > + > > struct ngx_http_upstream_rr_peer_s { > > struct sockaddr *sockaddr; > > socklen_t socklen; > > @@ -46,7 +61,12 @@ struct ngx_http_upstream_rr_peer_s { > > #endif > > > > #if (NGX_HTTP_UPSTREAM_ZONE) > > + unsigned zombie:1; > > I suggest declaring this as in other similar places: > > ngx_uint_t zombie; /* unsigned zombie:1; */ > > > + > > ngx_atomic_t lock; > > + ngx_uint_t id; > > This field is not used in open source nginx and should not be added or assigned. 
> > > + ngx_uint_t refs; > > + ngx_http_upstream_host_t *host; > > #endif > > > > ngx_http_upstream_rr_peer_t *next; [..] -- Roman Arutyunyan From a.bavshin at nginx.com Tue Jul 9 20:21:18 2024 From: a.bavshin at nginx.com (Aleksei Bavshin) Date: Tue, 9 Jul 2024 13:21:18 -0700 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> Message-ID: <47b74e2e-d081-49af-bae2-82f6352cf24a@nginx.com> On 7/8/2024 7:20 AM, Roman Arutyunyan wrote: > Hi, > > On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: >> # HG changeset patch >> # User Ruslan Ermilov >> # Date 1392462754 -14400 >> # Sat Feb 15 15:12:34 2014 +0400 >> # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 >> # Parent 02e9411009b987f408214ab4a8b6b6093f843bcd >> Upstream: re-resolvable servers. >> >> Specifying the upstream server by a hostname together with the >> "resolve" parameter will make the hostname to be periodically >> resolved, and upstream servers added/removed as necessary. >> >> This requires a "resolver" at the "http" configuration block. >> >> The "resolver_timeout" parameter also affects when the failed >> DNS requests will be attempted again. Responses with NXDOMAIN >> will be attempted again in 10 seconds. >> >> Upstream has a configuration generation number that is incremented each >> time servers are added/removed to the primary/backup list. This number >> is remembered by the peer.init method, and if peer.get detects a change >> in configuration, it returns NGX_BUSY. >> >> Each server has a reference counter. It is incremented by peer.get and >> decremented by peer.free. When a server is removed, it is removed from >> the list of servers and is marked as "zombie". The memory allocated by >> a zombie peer is freed only when its reference count becomes zero. 
>> >> Re-resolvable servers utilize timers that also hold a reference. A >> reference is also held while upstream keepalive caches an idle >> connection. >> >> Co-authored-by: Roman Arutyunyan >> Co-authored-by: Sergey Kandaurov >> Co-authored-by: Vladimir Homutov > > I feel like it would be easier to merge this patch, SRV resolve and preresolve > in a single change. > I disagree here, because * the patches represent large, significant and logically independent pieces of work. I squashed a lot of changes into the initial patch, but all of those were bugfixes or compatibility fixes for newer oss/plus code. * the commit messages and the diffs give more context for developers who don't have access to the original history but want to understand why certain changes were made * it's easier to track attribution I'll merge patches 1 and 2 though, as that makes sense. And I'll make sure to individually test unmerged patches again in the next revision. >> diff --git a/src/http/modules/ngx_http_upstream_hash_module.c b/src/http/modules/ngx_http_upstream_hash_module.c >> --- a/src/http/modules/ngx_http_upstream_hash_module.c >> +++ b/src/http/modules/ngx_http_upstream_hash_module.c >> @@ -24,6 +24,9 @@ typedef struct { >> >> typedef struct { >> ngx_http_complex_value_t key; >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + ngx_uint_t config; >> +#endif >> ngx_http_upstream_chash_points_t *points; >> } ngx_http_upstream_hash_srv_conf_t; >> >> @@ -49,6 +52,8 @@ static ngx_int_t ngx_http_upstream_get_h >> >> static ngx_int_t ngx_http_upstream_init_chash(ngx_conf_t *cf, >> ngx_http_upstream_srv_conf_t *us); >> +static ngx_int_t ngx_http_upstream_update_chash(ngx_pool_t *pool, >> + ngx_http_upstream_srv_conf_t *us); >> static int ngx_libc_cdecl >> ngx_http_upstream_chash_cmp_points(const void *one, const void *two); >> static ngx_uint_t ngx_http_upstream_find_chash_point( >> @@ -178,11 +183,18 @@ ngx_http_upstream_get_hash_peer(ngx_peer >> >> ngx_http_upstream_rr_peers_rlock(hp->rrp.peers); >> 
>> - if (hp->tries > 20 || hp->rrp.peers->single || hp->key.len == 0) { >> + if (hp->tries > 20 || hp->rrp.peers->number < 2 || hp->key.len == 0) { >> ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); >> return hp->get_rr_peer(pc, &hp->rrp); >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { >> + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); >> + return hp->get_rr_peer(pc, &hp->rrp); >> + } >> +#endif >> + >> now = ngx_time(); >> >> pc->cached = 0; >> @@ -262,6 +274,7 @@ ngx_http_upstream_get_hash_peer(ngx_peer >> } >> >> hp->rrp.current = peer; >> + ngx_http_upstream_rr_peer_ref(hp->rrp.peers, peer); >> >> pc->sockaddr = peer->sockaddr; >> pc->socklen = peer->socklen; >> @@ -285,6 +298,26 @@ ngx_http_upstream_get_hash_peer(ngx_peer >> static ngx_int_t >> ngx_http_upstream_init_chash(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us) >> { >> + if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { >> + return NGX_ERROR; >> + } >> + >> + us->peer.init = ngx_http_upstream_init_chash_peer; >> + >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (us->shm_zone) { >> + return NGX_OK; >> + } >> +#endif >> + >> + return ngx_http_upstream_update_chash(cf->pool, us); >> +} >> + >> + >> +static ngx_int_t >> +ngx_http_upstream_update_chash(ngx_pool_t *pool, >> + ngx_http_upstream_srv_conf_t *us) >> +{ >> u_char *host, *port, c; >> size_t host_len, port_len, size; >> uint32_t hash, base_hash; >> @@ -299,25 +332,32 @@ ngx_http_upstream_init_chash(ngx_conf_t >> u_char byte[4]; >> } prev_hash; >> >> - if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { >> - return NGX_ERROR; >> + hcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_hash_module); >> + >> + if (hcf->points) { >> + ngx_free(hcf->points); >> + hcf->points = NULL; >> } >> >> - us->peer.init = ngx_http_upstream_init_chash_peer; >> - >> peers = us->peer.data; >> npoints = peers->total_weight * 160; >> >> size = 
sizeof(ngx_http_upstream_chash_points_t) >> - + sizeof(ngx_http_upstream_chash_point_t) * (npoints - 1); >> + - sizeof(ngx_http_upstream_chash_point_t) >> + + sizeof(ngx_http_upstream_chash_point_t) * npoints; >> >> - points = ngx_palloc(cf->pool, size); >> + points = pool ? ngx_palloc(pool, size) : ngx_alloc(size, ngx_cycle->log); >> if (points == NULL) { >> return NGX_ERROR; >> } >> >> points->number = 0; >> >> + if (npoints == 0) { >> + hcf->points = points; >> + return NGX_OK; >> + } >> + >> for (peer = peers->peer; peer; peer = peer->next) { >> server = &peer->server; >> >> @@ -401,7 +441,6 @@ ngx_http_upstream_init_chash(ngx_conf_t >> >> points->number = i + 1; >> >> - hcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_hash_module); >> hcf->points = points; >> >> return NGX_OK; >> @@ -481,7 +520,22 @@ ngx_http_upstream_init_chash_peer(ngx_ht >> >> ngx_http_upstream_rr_peers_rlock(hp->rrp.peers); >> >> - hp->hash = ngx_http_upstream_find_chash_point(hcf->points, hash); >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (hp->rrp.peers->config >> + && (hcf->points == NULL || hcf->config != *hp->rrp.peers->config)) >> + { >> + if (ngx_http_upstream_update_chash(NULL, us) != NGX_OK) { >> + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); >> + return NGX_ERROR; >> + } >> + >> + hcf->config = *hp->rrp.peers->config; >> + } >> +#endif >> + >> + if (hcf->points->number) { >> + hp->hash = ngx_http_upstream_find_chash_point(hcf->points, hash); >> + } >> >> ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); >> >> @@ -517,6 +571,20 @@ ngx_http_upstream_get_chash_peer(ngx_pee >> pc->cached = 0; >> pc->connection = NULL; >> >> + if (hp->rrp.peers->number == 0) { >> + pc->name = hp->rrp.peers->name; >> + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); >> + return NGX_BUSY; >> + } >> + >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { >> + pc->name = hp->rrp.peers->name; >> + 
ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); >> + return NGX_BUSY; >> + } >> +#endif >> + >> now = ngx_time(); >> hcf = hp->conf; >> >> @@ -597,6 +665,7 @@ ngx_http_upstream_get_chash_peer(ngx_pee >> found: >> >> hp->rrp.current = best; >> + ngx_http_upstream_rr_peer_ref(hp->rrp.peers, best); >> >> pc->sockaddr = best->sockaddr; >> pc->socklen = best->socklen; >> @@ -664,6 +733,7 @@ ngx_http_upstream_hash(ngx_conf_t *cf, n >> } >> >> uscf->flags = NGX_HTTP_UPSTREAM_CREATE >> + |NGX_HTTP_UPSTREAM_MODIFY >> |NGX_HTTP_UPSTREAM_WEIGHT >> |NGX_HTTP_UPSTREAM_MAX_CONNS >> |NGX_HTTP_UPSTREAM_MAX_FAILS >> diff --git a/src/http/modules/ngx_http_upstream_ip_hash_module.c b/src/http/modules/ngx_http_upstream_ip_hash_module.c >> --- a/src/http/modules/ngx_http_upstream_ip_hash_module.c >> +++ b/src/http/modules/ngx_http_upstream_ip_hash_module.c >> @@ -163,11 +163,19 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p >> >> ngx_http_upstream_rr_peers_rlock(iphp->rrp.peers); >> >> - if (iphp->tries > 20 || iphp->rrp.peers->single) { >> + if (iphp->tries > 20 || iphp->rrp.peers->number < 2) { >> ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); >> return iphp->get_rr_peer(pc, &iphp->rrp); >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (iphp->rrp.peers->config && iphp->rrp.config != *iphp->rrp.peers->config) >> + { >> + ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); >> + return iphp->get_rr_peer(pc, &iphp->rrp); >> + } >> +#endif >> + >> now = ngx_time(); >> >> pc->cached = 0; >> @@ -232,6 +240,7 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p >> } >> >> iphp->rrp.current = peer; >> + ngx_http_upstream_rr_peer_ref(iphp->rrp.peers, peer); >> >> pc->sockaddr = peer->sockaddr; >> pc->socklen = peer->socklen; >> @@ -268,6 +277,7 @@ ngx_http_upstream_ip_hash(ngx_conf_t *cf >> uscf->peer.init_upstream = ngx_http_upstream_init_ip_hash; >> >> uscf->flags = NGX_HTTP_UPSTREAM_CREATE >> + |NGX_HTTP_UPSTREAM_MODIFY >> |NGX_HTTP_UPSTREAM_WEIGHT >> |NGX_HTTP_UPSTREAM_MAX_CONNS >> 
|NGX_HTTP_UPSTREAM_MAX_FAILS >> diff --git a/src/http/modules/ngx_http_upstream_least_conn_module.c b/src/http/modules/ngx_http_upstream_least_conn_module.c >> --- a/src/http/modules/ngx_http_upstream_least_conn_module.c >> +++ b/src/http/modules/ngx_http_upstream_least_conn_module.c >> @@ -124,6 +124,12 @@ ngx_http_upstream_get_least_conn_peer(ng >> >> ngx_http_upstream_rr_peers_wlock(peers); >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (peers->config && rrp->config != *peers->config) { >> + goto busy; >> + } >> +#endif >> + >> best = NULL; >> total = 0; >> >> @@ -244,6 +250,7 @@ ngx_http_upstream_get_least_conn_peer(ng >> best->conns++; >> >> rrp->current = best; >> + ngx_http_upstream_rr_peer_ref(peers, best); >> >> n = p / (8 * sizeof(uintptr_t)); >> m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); >> @@ -278,8 +285,18 @@ failed: >> } >> >> ngx_http_upstream_rr_peers_wlock(peers); >> + >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (peers->config && rrp->config != *peers->config) { >> + goto busy; >> + } >> +#endif > > This block is useless. 
> >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> +busy: >> +#endif >> + >> ngx_http_upstream_rr_peers_unlock(peers); >> >> pc->name = peers->name; >> @@ -303,6 +320,7 @@ ngx_http_upstream_least_conn(ngx_conf_t >> uscf->peer.init_upstream = ngx_http_upstream_init_least_conn; >> >> uscf->flags = NGX_HTTP_UPSTREAM_CREATE >> + |NGX_HTTP_UPSTREAM_MODIFY >> |NGX_HTTP_UPSTREAM_WEIGHT >> |NGX_HTTP_UPSTREAM_MAX_CONNS >> |NGX_HTTP_UPSTREAM_MAX_FAILS >> diff --git a/src/http/modules/ngx_http_upstream_random_module.c b/src/http/modules/ngx_http_upstream_random_module.c >> --- a/src/http/modules/ngx_http_upstream_random_module.c >> +++ b/src/http/modules/ngx_http_upstream_random_module.c >> @@ -17,6 +17,9 @@ typedef struct { >> >> typedef struct { >> ngx_uint_t two; >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + ngx_uint_t config; >> +#endif >> ngx_http_upstream_random_range_t *ranges; >> } ngx_http_upstream_random_srv_conf_t; >> >> @@ -127,6 +130,11 @@ ngx_http_upstream_update_random(ngx_pool >> >> rcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_random_module); >> >> + if (rcf->ranges) { >> + ngx_free(rcf->ranges); >> + rcf->ranges = NULL; >> + } >> + >> peers = us->peer.data; >> >> size = peers->number * sizeof(ngx_http_upstream_random_range_t); >> @@ -186,11 +194,15 @@ ngx_http_upstream_init_random_peer(ngx_h >> ngx_http_upstream_rr_peers_rlock(rp->rrp.peers); >> >> #if (NGX_HTTP_UPSTREAM_ZONE) >> - if (rp->rrp.peers->shpool && rcf->ranges == NULL) { >> + if (rp->rrp.peers->config >> + && (rcf->ranges == NULL || rcf->config != *rp->rrp.peers->config)) >> + { >> if (ngx_http_upstream_update_random(NULL, us) != NGX_OK) { >> ngx_http_upstream_rr_peers_unlock(rp->rrp.peers); >> return NGX_ERROR; >> } >> + >> + rcf->config = *rp->rrp.peers->config; >> } >> #endif >> >> @@ -220,11 +232,18 @@ ngx_http_upstream_get_random_peer(ngx_pe >> >> ngx_http_upstream_rr_peers_rlock(peers); >> >> - if (rp->tries > 20 || peers->single) { >> + if (rp->tries > 20 || peers->number < 2) { >> 
ngx_http_upstream_rr_peers_unlock(peers); >> return ngx_http_upstream_get_round_robin_peer(pc, rrp); >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (peers->config && rrp->config != *peers->config) { >> + ngx_http_upstream_rr_peers_unlock(peers); >> + return ngx_http_upstream_get_round_robin_peer(pc, rrp); >> + } >> +#endif >> + >> pc->cached = 0; >> pc->connection = NULL; >> >> @@ -274,6 +293,7 @@ ngx_http_upstream_get_random_peer(ngx_pe >> } >> >> rrp->current = peer; >> + ngx_http_upstream_rr_peer_ref(peers, peer); >> >> if (now - peer->checked > peer->fail_timeout) { >> peer->checked = now; >> @@ -314,11 +334,18 @@ ngx_http_upstream_get_random2_peer(ngx_p >> >> ngx_http_upstream_rr_peers_wlock(peers); >> >> - if (rp->tries > 20 || peers->single) { >> + if (rp->tries > 20 || peers->number < 2) { >> ngx_http_upstream_rr_peers_unlock(peers); >> return ngx_http_upstream_get_round_robin_peer(pc, rrp); >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (peers->config && rrp->config != *peers->config) { >> + ngx_http_upstream_rr_peers_unlock(peers); >> + return ngx_http_upstream_get_round_robin_peer(pc, rrp); >> + } >> +#endif >> + >> pc->cached = 0; >> pc->connection = NULL; >> >> @@ -384,6 +411,7 @@ ngx_http_upstream_get_random2_peer(ngx_p >> } >> >> rrp->current = peer; >> + ngx_http_upstream_rr_peer_ref(peers, peer); >> >> if (now - peer->checked > peer->fail_timeout) { >> peer->checked = now; >> @@ -467,6 +495,7 @@ ngx_http_upstream_random(ngx_conf_t *cf, >> uscf->peer.init_upstream = ngx_http_upstream_init_random; >> >> uscf->flags = NGX_HTTP_UPSTREAM_CREATE >> + |NGX_HTTP_UPSTREAM_MODIFY >> |NGX_HTTP_UPSTREAM_WEIGHT >> |NGX_HTTP_UPSTREAM_MAX_CONNS >> |NGX_HTTP_UPSTREAM_MAX_FAILS >> diff --git a/src/http/modules/ngx_http_upstream_zone_module.c b/src/http/modules/ngx_http_upstream_zone_module.c >> --- a/src/http/modules/ngx_http_upstream_zone_module.c >> +++ b/src/http/modules/ngx_http_upstream_zone_module.c >> @@ -18,6 +18,10 @@ static 
ngx_http_upstream_rr_peers_t *ngx >> ngx_slab_pool_t *shpool, ngx_http_upstream_srv_conf_t *uscf); >> static ngx_http_upstream_rr_peer_t *ngx_http_upstream_zone_copy_peer( >> ngx_http_upstream_rr_peers_t *peers, ngx_http_upstream_rr_peer_t *src); >> +static void ngx_http_upstream_zone_set_single( >> + ngx_http_upstream_srv_conf_t *uscf); >> +static void ngx_http_upstream_zone_remove_peer_locked( >> + ngx_http_upstream_rr_peers_t *peers, ngx_http_upstream_rr_peer_t *peer); >> >> >> static ngx_command_t ngx_http_upstream_zone_commands[] = { >> @@ -33,6 +37,11 @@ static ngx_command_t ngx_http_upstream_ >> }; >> >> >> +static ngx_int_t ngx_http_upstream_zone_init_worker(ngx_cycle_t *cycle); >> +static void ngx_http_upstream_zone_resolve_timer(ngx_event_t *event); >> +static void ngx_http_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx); > > These declarations should be moved up to other functions declarations. > >> static ngx_http_module_t ngx_http_upstream_zone_module_ctx = { >> NULL, /* preconfiguration */ >> NULL, /* postconfiguration */ >> @@ -55,7 +64,7 @@ ngx_module_t ngx_http_upstream_zone_mod >> NGX_HTTP_MODULE, /* module type */ >> NULL, /* init master */ >> NULL, /* init module */ >> - NULL, /* init process */ >> + ngx_http_upstream_zone_init_worker, /* init process */ >> NULL, /* init thread */ >> NULL, /* exit thread */ >> NULL, /* exit process */ >> @@ -188,9 +197,15 @@ ngx_http_upstream_zone_copy_peers(ngx_sl >> ngx_http_upstream_srv_conf_t *uscf) >> { >> ngx_str_t *name; >> + ngx_uint_t *config; >> ngx_http_upstream_rr_peer_t *peer, **peerp; >> ngx_http_upstream_rr_peers_t *peers, *backup; >> >> + config = ngx_slab_calloc(shpool, sizeof(ngx_uint_t)); >> + if (config == NULL) { >> + return NULL; >> + } >> + >> peers = ngx_slab_alloc(shpool, sizeof(ngx_http_upstream_rr_peers_t)); >> if (peers == NULL) { >> return NULL; >> @@ -214,6 +229,7 @@ ngx_http_upstream_zone_copy_peers(ngx_sl >> peers->name = name; >> >> peers->shpool = shpool; >> + 
peers->config = config; >> >> for (peerp = &peers->peer; *peerp; peerp = &peer->next) { >> /* pool is unlocked */ >> @@ -223,6 +239,17 @@ ngx_http_upstream_zone_copy_peers(ngx_sl >> } >> >> *peerp = peer; >> + peer->id = (*peers->config)++; >> + } >> + >> + for (peerp = &peers->resolve; *peerp; peerp = &peer->next) { >> + peer = ngx_http_upstream_zone_copy_peer(peers, *peerp); >> + if (peer == NULL) { >> + return NULL; >> + } >> + >> + *peerp = peer; >> + peer->id = (*peers->config)++; >> } >> >> if (peers->next == NULL) { >> @@ -239,6 +266,7 @@ ngx_http_upstream_zone_copy_peers(ngx_sl >> backup->name = name; >> >> backup->shpool = shpool; >> + backup->config = config; >> >> for (peerp = &backup->peer; *peerp; peerp = &peer->next) { >> /* pool is unlocked */ >> @@ -248,6 +276,17 @@ ngx_http_upstream_zone_copy_peers(ngx_sl >> } >> >> *peerp = peer; >> + peer->id = (*backup->config)++; >> + } >> + >> + for (peerp = &backup->resolve; *peerp; peerp = &peer->next) { >> + peer = ngx_http_upstream_zone_copy_peer(backup, *peerp); >> + if (peer == NULL) { >> + return NULL; >> + } >> + >> + *peerp = peer; >> + peer->id = (*backup->config)++; >> } >> >> peers->next = backup; >> @@ -279,6 +318,7 @@ ngx_http_upstream_zone_copy_peer(ngx_htt >> dst->sockaddr = NULL; >> dst->name.data = NULL; >> dst->server.data = NULL; >> + dst->host = NULL; >> } >> >> dst->sockaddr = ngx_slab_calloc_locked(pool, sizeof(ngx_sockaddr_t)); >> @@ -301,12 +341,37 @@ ngx_http_upstream_zone_copy_peer(ngx_htt >> } >> >> ngx_memcpy(dst->server.data, src->server.data, src->server.len); >> + >> + if (src->host) { >> + dst->host = ngx_slab_calloc_locked(pool, >> + sizeof(ngx_http_upstream_host_t)); >> + if (dst->host == NULL) { >> + goto failed; >> + } >> + >> + dst->host->name.data = ngx_slab_alloc_locked(pool, >> + src->host->name.len); >> + if (dst->host->name.data == NULL) { >> + goto failed; >> + } >> + >> + dst->host->peers = peers; >> + dst->host->peer = dst; >> + >> + dst->host->name.len = 
src->host->name.len; >> + ngx_memcpy(dst->host->name.data, src->host->name.data, >> + src->host->name.len); >> + } >> } >> >> return dst; >> >> failed: >> >> + if (dst->host) { >> + ngx_slab_free_locked(pool, dst->host); >> + } >> + >> if (dst->server.data) { >> ngx_slab_free_locked(pool, dst->server.data); >> } >> @@ -323,3 +388,337 @@ failed: >> >> return NULL; >> } >> + >> + >> +static void >> +ngx_http_upstream_zone_set_single(ngx_http_upstream_srv_conf_t *uscf) >> +{ >> + ngx_http_upstream_rr_peers_t *peers; >> + >> + peers = uscf->peer.data; >> + >> + if (peers->number == 1 >> + && (peers->next == NULL || peers->next->number == 0)) >> + { >> + peers->single = 1; >> + >> + } else { >> + peers->single = 0; >> + } >> +} >> + >> + >> +static void >> +ngx_http_upstream_zone_remove_peer_locked(ngx_http_upstream_rr_peers_t *peers, >> + ngx_http_upstream_rr_peer_t *peer) >> +{ >> + peers->total_weight -= peer->weight; >> + peers->number--; >> + peers->tries -= (peer->down == 0); >> + (*peers->config)++; >> + peers->weighted = (peers->total_weight != peers->number); >> + >> + ngx_http_upstream_rr_peer_free(peers, peer); >> +} >> + >> + >> +static ngx_int_t >> +ngx_http_upstream_zone_init_worker(ngx_cycle_t *cycle) >> +{ >> + ngx_uint_t i; >> + ngx_event_t *event; >> + ngx_http_upstream_rr_peer_t *peer; >> + ngx_http_upstream_rr_peers_t *peers; >> + ngx_http_upstream_srv_conf_t *uscf, **uscfp; >> + ngx_http_upstream_main_conf_t *umcf; >> + >> + if (ngx_process != NGX_PROCESS_WORKER >> + && ngx_process != NGX_PROCESS_SINGLE) >> + { >> + return NGX_OK; >> + } >> + >> + umcf = ngx_http_cycle_get_module_main_conf(cycle, ngx_http_upstream_module); >> + >> + if (umcf == NULL) { >> + return NGX_OK; >> + } >> + >> + uscfp = umcf->upstreams.elts; >> + >> + for (i = 0; i < umcf->upstreams.nelts; i++) { >> + >> + uscf = uscfp[i]; >> + >> + if (uscf->shm_zone == NULL) { >> + continue; >> + } >> + >> + peers = uscf->peer.data; >> + >> + do { >> + 
ngx_http_upstream_rr_peers_wlock(peers); >> + >> + for (peer = peers->resolve; peer; peer = peer->next) { >> + >> + if (peer->host->worker != ngx_worker) { >> + continue; >> + } >> + >> + event = &peer->host->event; >> + ngx_memzero(event, sizeof(ngx_event_t)); >> + >> + event->data = uscf; >> + event->handler = ngx_http_upstream_zone_resolve_timer; >> + event->log = cycle->log; >> + event->cancelable = 1; >> + >> + ngx_http_upstream_rr_peer_ref(peers, peer); > > In open source nginx a template cannot be deleted since there's no API. > As a result, there's no reason in increase the reference counter here. > >> + ngx_add_timer(event, 1); >> + } >> + >> + ngx_http_upstream_rr_peers_unlock(peers); >> + >> + peers = peers->next; >> + >> + } while (peers); >> + } >> + >> + return NGX_OK; >> +} >> + >> + >> +static void >> +ngx_http_upstream_zone_resolve_timer(ngx_event_t *event) >> +{ >> + ngx_resolver_ctx_t *ctx; >> + ngx_http_upstream_host_t *host; >> + ngx_http_upstream_rr_peer_t *template; >> + ngx_http_upstream_rr_peers_t *peers; >> + ngx_http_upstream_srv_conf_t *uscf; >> + >> + host = (ngx_http_upstream_host_t *) event; >> + uscf = event->data; >> + peers = host->peers; >> + template = host->peer; >> + >> + if (template->zombie) { >> + (void) ngx_http_upstream_rr_peer_unref(peers, template); >> + >> + ngx_shmtx_lock(&peers->shpool->mutex); >> + >> + if (host->service.len) { >> + ngx_slab_free_locked(peers->shpool, host->service.data); >> + } >> + >> + ngx_slab_free_locked(peers->shpool, host->name.data); >> + ngx_slab_free_locked(peers->shpool, host); >> + ngx_shmtx_unlock(&peers->shpool->mutex); >> + >> + return; >> + } > > Since a template cannot be deleted, it cannot become a zombie as well. > This block is useless. 
> >> + ctx = ngx_resolve_start(uscf->resolver, NULL); >> + if (ctx == NULL) { >> + goto retry; >> + } >> + >> + if (ctx == NGX_NO_RESOLVER) { >> + ngx_log_error(NGX_LOG_ERR, event->log, 0, >> + "no resolver defined to resolve %V", &host->name); >> + return; >> + } >> + >> + ctx->name = host->name; >> + ctx->handler = ngx_http_upstream_zone_resolve_handler; >> + ctx->data = host; >> + ctx->timeout = uscf->resolver_timeout; >> + ctx->cancelable = 1; >> + >> + if (ngx_resolve_name(ctx) == NGX_OK) { >> + return; >> + } >> + >> +retry: >> + >> + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); >> +} >> + >> + >> +static void >> +ngx_http_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx) >> +{ >> + time_t now; >> + in_port_t port; >> + ngx_msec_t timer; >> + ngx_uint_t i, j; >> + ngx_event_t *event; >> + ngx_resolver_addr_t *addr; >> + ngx_http_upstream_host_t *host; >> + ngx_http_upstream_rr_peer_t *peer, *template, **peerp; >> + ngx_http_upstream_rr_peers_t *peers; >> + ngx_http_upstream_srv_conf_t *uscf; >> + >> + host = ctx->data; >> + event = &host->event; >> + uscf = event->data; >> + peers = host->peers; >> + template = host->peer; >> + >> + ngx_http_upstream_rr_peers_wlock(peers); >> + >> + if (template->zombie) { >> + (void) ngx_http_upstream_rr_peer_unref(peers, template); >> + >> + ngx_http_upstream_rr_peers_unlock(peers); >> + >> + ngx_shmtx_lock(&peers->shpool->mutex); >> + ngx_slab_free_locked(peers->shpool, host->name.data); >> + ngx_slab_free_locked(peers->shpool, host); >> + ngx_shmtx_unlock(&peers->shpool->mutex); >> + >> + ngx_resolve_name_done(ctx); >> + >> + return; >> + } > > Again, this block is useless. 
> >> + now = ngx_time(); >> + >> + if (ctx->state) { >> + ngx_log_error(NGX_LOG_ERR, event->log, 0, >> + "%V could not be resolved (%i: %s)", >> + &ctx->name, ctx->state, >> + ngx_resolver_strerror(ctx->state)); >> + >> + if (ctx->state != NGX_RESOLVE_NXDOMAIN) { >> + ngx_http_upstream_rr_peers_unlock(peers); >> + >> + ngx_resolve_name_done(ctx); >> + >> + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); >> + return; >> + } >> + >> + /* NGX_RESOLVE_NXDOMAIN */ >> + >> + ctx->naddrs = 0; >> + } >> + >> +#if (NGX_DEBUG) >> + { >> + u_char text[NGX_SOCKADDR_STRLEN]; >> + size_t len; >> + >> + for (i = 0; i < ctx->naddrs; i++) { >> + len = ngx_sock_ntop(ctx->addrs[i].sockaddr, ctx->addrs[i].socklen, >> + text, NGX_SOCKADDR_STRLEN, 0); >> + >> + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, event->log, 0, >> + "name %V was resolved to %*s", &host->name, len, text); >> + } >> + } >> +#endif >> + >> + for (peerp = &peers->peer; *peerp; /* void */ ) { >> + peer = *peerp; >> + >> + if (peer->host != host) { >> + goto next; >> + } >> + >> + for (j = 0; j < ctx->naddrs; j++) { >> + >> + addr = &ctx->addrs[j]; >> + >> + if (addr->name.len == 0 >> + && ngx_cmp_sockaddr(peer->sockaddr, peer->socklen, >> + addr->sockaddr, addr->socklen, 0) >> + == NGX_OK) >> + { >> + addr->name.len = 1; >> + goto next; >> + } >> + } >> + >> + *peerp = peer->next; >> + ngx_http_upstream_zone_remove_peer_locked(peers, peer); >> + >> + ngx_http_upstream_zone_set_single(uscf); >> + >> + continue; >> + >> + next: >> + >> + peerp = &peer->next; >> + } >> + >> + for (i = 0; i < ctx->naddrs; i++) { >> + >> + addr = &ctx->addrs[i]; >> + >> + if (addr->name.len == 1) { >> + addr->name.len = 0; >> + continue; >> + } >> + >> + ngx_shmtx_lock(&peers->shpool->mutex); >> + peer = ngx_http_upstream_zone_copy_peer(peers, NULL); >> + ngx_shmtx_unlock(&peers->shpool->mutex); >> + >> + if (peer == NULL) { >> + ngx_log_error(NGX_LOG_ERR, event->log, 0, >> + "cannot add new server to upstream \"%V\", " >> + "memory 
exhausted", peers->name); >> + break; >> + } >> + >> + ngx_memcpy(peer->sockaddr, addr->sockaddr, addr->socklen); >> + >> + port = ((struct sockaddr_in *) template->sockaddr)->sin_port; >> + >> + switch (peer->sockaddr->sa_family) { >> +#if (NGX_HAVE_INET6) >> + case AF_INET6: >> + ((struct sockaddr_in6 *) peer->sockaddr)->sin6_port = port; >> + break; >> +#endif >> + default: /* AF_INET */ >> + ((struct sockaddr_in *) peer->sockaddr)->sin_port = port; >> + } >> + >> + peer->socklen = addr->socklen; >> + >> + peer->name.len = ngx_sock_ntop(peer->sockaddr, peer->socklen, >> + peer->name.data, NGX_SOCKADDR_STRLEN, 1); >> + >> + peer->host = template->host; >> + peer->server = template->server; >> + >> + peer->weight = template->weight; >> + peer->effective_weight = peer->weight; >> + peer->max_conns = template->max_conns; >> + peer->max_fails = template->max_fails; >> + peer->fail_timeout = template->fail_timeout; >> + peer->down = template->down; >> + >> + *peerp = peer; >> + peerp = &peer->next; >> + >> + peers->number++; >> + peers->tries += (peer->down == 0); >> + peers->total_weight += peer->weight; >> + peers->weighted = (peers->total_weight != peers->number); >> + peer->id = (*peers->config)++; >> + >> + ngx_http_upstream_zone_set_single(uscf); >> + } >> + >> + ngx_http_upstream_rr_peers_unlock(peers); >> + >> + timer = (ngx_msec_t) 1000 * (ctx->valid > now ? ctx->valid - now + 1 : 1); >> + timer = ngx_min(timer, uscf->resolver_timeout); > > The last line was added to facilitate faster recycle of zombie templates. > Since there are no zombie templates here, the line can be removed. 
> >> + ngx_resolve_name_done(ctx); >> + >> + ngx_add_timer(event, timer); >> +} >> diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c >> --- a/src/http/ngx_http_upstream.c >> +++ b/src/http/ngx_http_upstream.c >> @@ -1565,6 +1565,26 @@ ngx_http_upstream_connect(ngx_http_reque >> >> u->state->peer = u->peer.name; >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (u->upstream && u->upstream->shm_zone >> + && (u->upstream->flags & NGX_HTTP_UPSTREAM_MODIFY) >> + ) { > > Style: ')' should be move to the line above. > >> + u->state->peer = ngx_palloc(r->pool, >> + sizeof(ngx_str_t) + u->peer.name->len); >> + if (u->state->peer == NULL) { >> + ngx_http_upstream_finalize_request(r, u, >> + NGX_HTTP_INTERNAL_SERVER_ERROR); >> + return; >> + } >> + >> + u->state->peer->len = u->peer.name->len; >> + u->state->peer->data = (u_char *) (u->state->peer + 1); >> + ngx_memcpy(u->state->peer->data, u->peer.name->data, u->peer.name->len); >> + >> + u->peer.name = u->state->peer; >> + } >> +#endif >> + >> if (rc == NGX_BUSY) { >> ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "no live upstreams"); >> ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_NOLIVE); >> @@ -6066,6 +6086,7 @@ ngx_http_upstream(ngx_conf_t *cf, ngx_co >> u.no_port = 1; >> >> uscf = ngx_http_upstream_add(cf, &u, NGX_HTTP_UPSTREAM_CREATE >> + |NGX_HTTP_UPSTREAM_MODIFY >> |NGX_HTTP_UPSTREAM_WEIGHT >> |NGX_HTTP_UPSTREAM_MAX_CONNS >> |NGX_HTTP_UPSTREAM_MAX_FAILS >> @@ -6151,7 +6172,11 @@ ngx_http_upstream(ngx_conf_t *cf, ngx_co >> return rv; >> } >> >> - if (uscf->servers->nelts == 0) { >> + if (uscf->servers->nelts == 0 >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + && uscf->shm_zone == NULL >> +#endif > > In open source nginx empty upstreams are not allowed, irrespective of the zone. > No new servers can appear in the upstream during runtime since there's no API. 
> >> + ) { >> ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, >> "no servers are inside upstream"); >> return NGX_CONF_ERROR; >> @@ -6171,6 +6196,9 @@ ngx_http_upstream_server(ngx_conf_t *cf, >> ngx_url_t u; >> ngx_int_t weight, max_conns, max_fails; >> ngx_uint_t i; >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + ngx_uint_t resolve; >> +#endif >> ngx_http_upstream_server_t *us; >> >> us = ngx_array_push(uscf->servers); >> @@ -6186,6 +6214,9 @@ ngx_http_upstream_server(ngx_conf_t *cf, >> max_conns = 0; >> max_fails = 1; >> fail_timeout = 10; >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + resolve = 0; >> +#endif >> >> for (i = 2; i < cf->args->nelts; i++) { >> >> @@ -6274,6 +6305,13 @@ ngx_http_upstream_server(ngx_conf_t *cf, >> continue; >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (ngx_strcmp(value[i].data, "resolve") == 0) { >> + resolve = 1; >> + continue; >> + } >> +#endif >> + >> goto invalid; >> } >> >> @@ -6282,6 +6320,13 @@ ngx_http_upstream_server(ngx_conf_t *cf, >> u.url = value[1]; >> u.default_port = 80; >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (resolve) { >> + /* resolve at run time */ >> + u.no_resolve = 1; >> + } >> +#endif >> + >> if (ngx_parse_url(cf->pool, &u) != NGX_OK) { >> if (u.err) { >> ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, >> @@ -6292,8 +6337,45 @@ ngx_http_upstream_server(ngx_conf_t *cf, >> } >> >> us->name = u.url; >> + >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + >> + if (resolve && u.naddrs == 0) { >> + ngx_addr_t *addr; >> + >> + /* save port */ >> + >> + addr = ngx_pcalloc(cf->pool, sizeof(ngx_addr_t)); >> + if (addr == NULL) { >> + return NGX_CONF_ERROR; >> + } >> + >> + addr->sockaddr = ngx_palloc(cf->pool, u.socklen); >> + if (addr->sockaddr == NULL) { >> + return NGX_CONF_ERROR; >> + } >> + >> + ngx_memcpy(addr->sockaddr, &u.sockaddr, u.socklen); >> + >> + addr->socklen = u.socklen; >> + >> + us->addrs = addr; >> + us->naddrs = 1; >> + >> + us->host = u.host; >> + >> + } else { >> + us->addrs = u.addrs; >> + us->naddrs = u.naddrs; >> + } >> + >> +#else 
>> + >> us->addrs = u.addrs; >> us->naddrs = u.naddrs; >> + >> +#endif >> + >> us->weight = weight; >> us->max_conns = max_conns; >> us->max_fails = max_fails; >> diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h >> --- a/src/http/ngx_http_upstream.h >> +++ b/src/http/ngx_http_upstream.h >> @@ -104,7 +104,11 @@ typedef struct { >> >> unsigned backup:1; >> >> - NGX_COMPAT_BEGIN(6) >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + ngx_str_t host; >> +#endif >> + >> + NGX_COMPAT_BEGIN(4) >> NGX_COMPAT_END >> } ngx_http_upstream_server_t; >> >> @@ -115,6 +119,7 @@ typedef struct { >> #define NGX_HTTP_UPSTREAM_FAIL_TIMEOUT 0x0008 >> #define NGX_HTTP_UPSTREAM_DOWN 0x0010 >> #define NGX_HTTP_UPSTREAM_BACKUP 0x0020 >> +#define NGX_HTTP_UPSTREAM_MODIFY 0x0040 >> #define NGX_HTTP_UPSTREAM_MAX_CONNS 0x0100 >> >> >> @@ -133,6 +138,8 @@ struct ngx_http_upstream_srv_conf_s { >> >> #if (NGX_HTTP_UPSTREAM_ZONE) >> ngx_shm_zone_t *shm_zone; >> + ngx_resolver_t *resolver; >> + ngx_msec_t resolver_timeout; >> #endif >> }; >> >> diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c >> --- a/src/http/ngx_http_upstream_round_robin.c >> +++ b/src/http/ngx_http_upstream_round_robin.c >> @@ -32,10 +32,15 @@ ngx_http_upstream_init_round_robin(ngx_c >> ngx_http_upstream_srv_conf_t *us) >> { >> ngx_url_t u; >> - ngx_uint_t i, j, n, w, t; >> + ngx_uint_t i, j, n, r, w, t; >> ngx_http_upstream_server_t *server; >> ngx_http_upstream_rr_peer_t *peer, **peerp; >> ngx_http_upstream_rr_peers_t *peers, *backup; >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + ngx_uint_t resolve; >> + ngx_http_core_loc_conf_t *clcf; >> + ngx_http_upstream_rr_peer_t **rpeerp; >> +#endif >> >> us->peer.init = ngx_http_upstream_init_round_robin_peer; >> >> @@ -43,23 +48,99 @@ ngx_http_upstream_init_round_robin(ngx_c >> server = us->servers->elts; >> >> n = 0; >> + r = 0; >> w = 0; >> t = 0; >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + resolve = 0; >> +#endif >> + >> for (i = 0; i < 
us->servers->nelts; i++) { >> + >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (server[i].host.len) { >> + resolve = 1; >> + } >> +#endif >> + >> if (server[i].backup) { >> continue; >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (server[i].host.len) { >> + r++; >> + >> + } else { >> + n += server[i].naddrs; >> + w += server[i].naddrs * server[i].weight; >> + >> + if (!server[i].down) { >> + t += server[i].naddrs; >> + } >> + } >> +#else > > The code above and below is the same code. The reason behind duplication was > to simplify the diff. Now duplication makes no sense. Instead, the following > can be done: > > #if (NGX_HTTP_UPSTREAM_ZONE) > if (server[i].host.len) { > r++; > continue; > } > #endif > >> n += server[i].naddrs; >> w += server[i].naddrs * server[i].weight; >> >> if (!server[i].down) { >> t += server[i].naddrs; >> } >> +#endif >> } >> >> - if (n == 0) { >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (us->shm_zone) { >> + >> + if (resolve && !(us->flags & NGX_HTTP_UPSTREAM_MODIFY)) { >> + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, >> + "load balancing method does not support" >> + " resolving names at run time in" >> + " upstream \"%V\" in %s:%ui", >> + &us->host, us->file_name, us->line); >> + return NGX_ERROR; >> + } >> + >> + clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module); >> + >> + us->resolver = clcf->resolver; >> + us->resolver_timeout = clcf->resolver_timeout; >> + >> + /* >> + * Without "resolver_timeout" in http{}, the value is unset. >> + * Even if we set it in ngx_http_core_merge_loc_conf(), it's >> + * still dependent on the module order and unreliable. 
>> + */ >> + ngx_conf_init_msec_value(us->resolver_timeout, 30000); >> + >> + if (resolve >> + && (us->resolver == NULL >> + || us->resolver->connections.nelts == 0)) >> + { >> + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, >> + "no resolver defined to resolve names" >> + " at run time in upstream \"%V\" in %s:%ui", >> + &us->host, us->file_name, us->line); >> + return NGX_ERROR; >> + } >> + >> + } else if (resolve) { >> + >> + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, >> + "resolving names at run time requires" >> + " upstream \"%V\" in %s:%ui" >> + " to be in shared memory", >> + &us->host, us->file_name, us->line); >> + return NGX_ERROR; >> + } >> +#endif >> + >> + if (n == 0 >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + && us->shm_zone == NULL >> +#endif > > An empty zone will always be empty in open source nginx. This should be checked > instead: > > if (n + r == 0) { ... } > >> + ) { >> ngx_log_error(NGX_LOG_EMERG, cf->log, 0, >> "no servers in upstream \"%V\" in %s:%ui", >> &us->host, us->file_name, us->line); >> @@ -71,7 +152,8 @@ ngx_http_upstream_init_round_robin(ngx_c >> return NGX_ERROR; >> } >> >> - peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) * n); >> + peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) >> + * (n + r)); >> if (peer == NULL) { >> return NGX_ERROR; >> } >> @@ -86,11 +168,46 @@ ngx_http_upstream_init_round_robin(ngx_c >> n = 0; >> peerp = &peers->peer; >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + rpeerp = &peers->resolve; >> +#endif >> + >> for (i = 0; i < us->servers->nelts; i++) { >> if (server[i].backup) { >> continue; >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (server[i].host.len) { >> + >> + peer[n].host = ngx_pcalloc(cf->pool, >> + sizeof(ngx_http_upstream_host_t)); >> + if (peer[n].host == NULL) { >> + return NGX_ERROR; >> + } >> + >> + peer[n].host->name = server[i].host; >> + >> + peer[n].sockaddr = server[i].addrs[0].sockaddr; >> + peer[n].socklen = server[i].addrs[0].socklen; >> + peer[n].name = 
server[i].addrs[0].name; >> + peer[n].weight = server[i].weight; >> + peer[n].effective_weight = server[i].weight; >> + peer[n].current_weight = 0; >> + peer[n].max_conns = server[i].max_conns; >> + peer[n].max_fails = server[i].max_fails; >> + peer[n].fail_timeout = server[i].fail_timeout; >> + peer[n].down = server[i].down; >> + peer[n].server = server[i].name; >> + >> + *rpeerp = &peer[n]; >> + rpeerp = &peer[n].next; >> + n++; >> + >> + continue; >> + } >> +#endif >> + >> for (j = 0; j < server[i].naddrs; j++) { >> peer[n].sockaddr = server[i].addrs[j].sockaddr; >> peer[n].socklen = server[i].addrs[j].socklen; >> @@ -115,6 +232,7 @@ ngx_http_upstream_init_round_robin(ngx_c >> /* backup servers */ >> >> n = 0; >> + r = 0; >> w = 0; >> t = 0; >> >> @@ -123,15 +241,37 @@ ngx_http_upstream_init_round_robin(ngx_c >> continue; >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (server[i].host.len) { >> + r++; >> + >> + } else { >> + n += server[i].naddrs; >> + w += server[i].naddrs * server[i].weight; >> + >> + if (!server[i].down) { >> + t += server[i].naddrs; >> + } >> + } >> +#else > > See above. > >> n += server[i].naddrs; >> w += server[i].naddrs * server[i].weight; >> >> if (!server[i].down) { >> t += server[i].naddrs; >> } >> +#endif >> } >> >> - if (n == 0) { >> + if (n == 0 >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + && us->shm_zone == NULL >> +#endif >> + ) { >> + return NGX_OK; >> + } > > See above. > > if (n + r == 0) { .. } > >> + >> + if (n + r == 0 && !(us->flags & NGX_HTTP_UPSTREAM_BACKUP)) { >> return NGX_OK; >> } > > After the change above this block will be useless. > Actually, this code should be preserved as is (or moved to the subsequent patches). The way we handle SRV peer weights, we may place some of the peers to the backup list. That requires always initializing an empty backup list if the upstream has resolvable servers and the lb method supports backup servers. 
The condition could be optimized for the opensource code though: if (n + r == 0 #if (NGX_HTTP_UPSTREAM_ZONE) && (!resolve || !(us->flags & NGX_HTTP_UPSTREAM_BACKUP)) #endif ) { return NGX_OK; } WDYT? >> @@ -140,12 +280,16 @@ ngx_http_upstream_init_round_robin(ngx_c >> return NGX_ERROR; >> } >> >> - peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) * n); >> + peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) >> + * (n + r)); >> if (peer == NULL) { >> return NGX_ERROR; >> } >> >> - peers->single = 0; >> + if (n > 0) { >> + peers->single = 0; >> + } >> + >> backup->single = 0; >> backup->number = n; >> backup->weighted = (w != n); >> @@ -156,11 +300,46 @@ ngx_http_upstream_init_round_robin(ngx_c >> n = 0; >> peerp = &backup->peer; >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + rpeerp = &backup->resolve; >> +#endif >> + >> for (i = 0; i < us->servers->nelts; i++) { >> if (!server[i].backup) { >> continue; >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (server[i].host.len) { >> + >> + peer[n].host = ngx_pcalloc(cf->pool, >> + sizeof(ngx_http_upstream_host_t)); >> + if (peer[n].host == NULL) { >> + return NGX_ERROR; >> + } >> + >> + peer[n].host->name = server[i].host; >> + >> + peer[n].sockaddr = server[i].addrs[0].sockaddr; >> + peer[n].socklen = server[i].addrs[0].socklen; >> + peer[n].name = server[i].addrs[0].name; >> + peer[n].weight = server[i].weight; >> + peer[n].effective_weight = server[i].weight; >> + peer[n].current_weight = 0; >> + peer[n].max_conns = server[i].max_conns; >> + peer[n].max_fails = server[i].max_fails; >> + peer[n].fail_timeout = server[i].fail_timeout; >> + peer[n].down = server[i].down; >> + peer[n].server = server[i].name; >> + >> + *rpeerp = &peer[n]; >> + rpeerp = &peer[n].next; >> + n++; >> + >> + continue; >> + } >> +#endif >> + >> for (j = 0; j < server[i].naddrs; j++) { >> peer[n].sockaddr = server[i].addrs[j].sockaddr; >> peer[n].socklen = server[i].addrs[j].socklen; >> @@ -273,7 +452,12 @@ 
ngx_http_upstream_init_round_robin_peer( >> >> rrp->peers = us->peer.data; >> rrp->current = NULL; >> - rrp->config = 0; >> + >> + ngx_http_upstream_rr_peers_rlock(rrp->peers); >> + >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + rrp->config = rrp->peers->config ? *rrp->peers->config : 0; >> +#endif >> >> n = rrp->peers->number; >> >> @@ -281,6 +465,10 @@ ngx_http_upstream_init_round_robin_peer( >> n = rrp->peers->next->number; >> } >> >> + r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers); >> + >> + ngx_http_upstream_rr_peers_unlock(rrp->peers); >> + >> if (n <= 8 * sizeof(uintptr_t)) { >> rrp->tried = &rrp->data; >> rrp->data = 0; >> @@ -296,7 +484,6 @@ ngx_http_upstream_init_round_robin_peer( >> >> r->upstream->peer.get = ngx_http_upstream_get_round_robin_peer; >> r->upstream->peer.free = ngx_http_upstream_free_round_robin_peer; >> - r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers); >> #if (NGX_HTTP_SSL) >> r->upstream->peer.set_session = >> ngx_http_upstream_set_round_robin_peer_session; >> @@ -446,6 +633,12 @@ ngx_http_upstream_get_round_robin_peer(n >> peers = rrp->peers; >> ngx_http_upstream_rr_peers_wlock(peers); >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (peers->config && rrp->config != *peers->config) { >> + goto busy; >> + } >> +#endif >> + >> if (peers->single) { >> peer = peers->peer; >> >> @@ -458,6 +651,7 @@ ngx_http_upstream_get_round_robin_peer(n >> } >> >> rrp->current = peer; >> + ngx_http_upstream_rr_peer_ref(peers, peer); >> >> } else { >> >> @@ -508,8 +702,18 @@ failed: >> } >> >> ngx_http_upstream_rr_peers_wlock(peers); >> + >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + if (peers->config && rrp->config != *peers->config) { >> + goto busy; >> + } >> +#endif > > This block is useless. 
> >> } >> >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> +busy: >> +#endif >> + >> ngx_http_upstream_rr_peers_unlock(peers); >> >> pc->name = peers->name; >> @@ -580,6 +784,7 @@ ngx_http_upstream_get_peer(ngx_http_upst >> } >> >> rrp->current = best; >> + ngx_http_upstream_rr_peer_ref(rrp->peers, best); >> >> n = p / (8 * sizeof(uintptr_t)); >> m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); >> @@ -617,9 +822,16 @@ ngx_http_upstream_free_round_robin_peer( >> >> if (rrp->peers->single) { >> >> + if (peer->fails) { >> + peer->fails = 0; >> + } >> + >> peer->conns--; >> >> - ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); >> + if (ngx_http_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { >> + ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); >> + } >> + >> ngx_http_upstream_rr_peers_unlock(rrp->peers); >> >> pc->tries = 0; >> @@ -661,7 +873,10 @@ ngx_http_upstream_free_round_robin_peer( >> >> peer->conns--; >> >> - ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); >> + if (ngx_http_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { >> + ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); >> + } >> + >> ngx_http_upstream_rr_peers_unlock(rrp->peers); >> >> if (pc->tries) { >> diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h >> --- a/src/http/ngx_http_upstream_round_robin.h >> +++ b/src/http/ngx_http_upstream_round_robin.h >> @@ -14,8 +14,23 @@ >> #include >> >> >> +typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; >> typedef struct ngx_http_upstream_rr_peer_s ngx_http_upstream_rr_peer_t; >> >> + >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + >> +typedef struct { >> + ngx_event_t event; /* must be first */ >> + ngx_uint_t worker; >> + ngx_str_t name; >> + ngx_http_upstream_rr_peers_t *peers; >> + ngx_http_upstream_rr_peer_t *peer; >> +} ngx_http_upstream_host_t; >> + >> +#endif >> + >> + >> struct ngx_http_upstream_rr_peer_s { >> struct sockaddr *sockaddr; >> socklen_t socklen; >> @@ -46,7 +61,12 @@ 
struct ngx_http_upstream_rr_peer_s { >> #endif >> >> #if (NGX_HTTP_UPSTREAM_ZONE) >> + unsigned zombie:1; > > I suggest declaring this as in other similar places: > > ngx_uint_t zombie; /* unsigned zombie:1; */ > We already use bitfields in the same file though. Also, in the most popular configuration (64-bit Unix-like platform, GCC or Clang), the suggested change would increase the struct size by 8 bytes. Currently the flag is located in a 4 byte hole left between int and ngx_atomic_t, so we get it for free. >> + >> ngx_atomic_t lock; >> + ngx_uint_t id; > > This field is not used in open source nginx and should not be added or assigned. > >> + ngx_uint_t refs; >> + ngx_http_upstream_host_t *host; >> #endif >> >> ngx_http_upstream_rr_peer_t *next; >> @@ -56,8 +76,6 @@ struct ngx_http_upstream_rr_peer_s { >> }; >> >> >> -typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; >> - >> struct ngx_http_upstream_rr_peers_s { >> ngx_uint_t number; >> >> @@ -78,6 +96,12 @@ struct ngx_http_upstream_rr_peers_s { >> ngx_http_upstream_rr_peers_t *next; >> >> ngx_http_upstream_rr_peer_t *peer; >> + >> +#if (NGX_HTTP_UPSTREAM_ZONE) >> + ngx_uint_t *config; >> + ngx_http_upstream_rr_peer_t *resolve; >> + ngx_uint_t zombies; > > This field is unused in open source nginx and should not be added or assigned. 
> >> +#endif >> }; >> >> >> @@ -114,6 +138,67 @@ struct ngx_http_upstream_rr_peers_s { >> ngx_rwlock_unlock(&peer->lock); \ >> } >> >> + >> +#define ngx_http_upstream_rr_peer_ref(peers, peer) \ >> + (peer)->refs++; >> + >> + >> +static ngx_inline void >> +ngx_http_upstream_rr_peer_free_locked(ngx_http_upstream_rr_peers_t *peers, >> + ngx_http_upstream_rr_peer_t *peer) >> +{ >> + if (peer->refs) { >> + peer->zombie = 1; >> + peers->zombies++; >> + return; >> + } >> + >> + ngx_slab_free_locked(peers->shpool, peer->sockaddr); >> + ngx_slab_free_locked(peers->shpool, peer->name.data); >> + >> + if (peer->server.data && (peer->host == NULL || peer->host->peer == peer)) { >> + ngx_slab_free_locked(peers->shpool, peer->server.data); >> + } >> + >> +#if (NGX_HTTP_SSL) >> + if (peer->ssl_session) { >> + ngx_slab_free_locked(peers->shpool, peer->ssl_session); >> + } >> +#endif >> + >> + ngx_slab_free_locked(peers->shpool, peer); >> +} >> + >> + >> +static ngx_inline void >> +ngx_http_upstream_rr_peer_free(ngx_http_upstream_rr_peers_t *peers, >> + ngx_http_upstream_rr_peer_t *peer) >> +{ >> + ngx_shmtx_lock(&peers->shpool->mutex); >> + ngx_http_upstream_rr_peer_free_locked(peers, peer); >> + ngx_shmtx_unlock(&peers->shpool->mutex); >> +} >> + >> + >> +static ngx_inline ngx_int_t >> +ngx_http_upstream_rr_peer_unref(ngx_http_upstream_rr_peers_t *peers, >> + ngx_http_upstream_rr_peer_t *peer) >> +{ >> + peer->refs--; >> + >> + if (peers->shpool == NULL) { >> + return NGX_OK; >> + } >> + >> + if (peer->refs == 0 && peer->zombie) { >> + ngx_http_upstream_rr_peer_free(peers, peer); >> + peers->zombies--; >> + return NGX_DONE; >> + } >> + >> + return NGX_OK; >> +} >> + >> #else >> >> #define ngx_http_upstream_rr_peers_rlock(peers) >> @@ -121,6 +206,8 @@ struct ngx_http_upstream_rr_peers_s { >> #define ngx_http_upstream_rr_peers_unlock(peers) >> #define ngx_http_upstream_rr_peer_lock(peers, peer) >> #define ngx_http_upstream_rr_peer_unlock(peers, peer) >> +#define 
ngx_http_upstream_rr_peer_ref(peers, peer) >> +#define ngx_http_upstream_rr_peer_unref(peers, peer) NGX_OK >> >> #endif >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> https://mailman.nginx.org/mailman/listinfo/nginx-devel > > -- > Roman Arutyunyan > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel The rest of the comments make sense and will be addressed in the next revision. Thanks for the review! From arut at nginx.com Wed Jul 10 13:16:07 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 10 Jul 2024 17:16:07 +0400 Subject: [PATCH 8 of 9] Upstream: disable re-resolve functionality on Windows In-Reply-To: <375fa42f1a6010692a87.1718317743@fedora-wsl.local> References: <375fa42f1a6010692a87.1718317743@fedora-wsl.local> Message-ID: <20240710131607.u3ox3x6ofnswbi6y@N00W24XTQX> Hi, On Thu, Jun 13, 2024 at 03:29:03PM -0700, Aleksei Bavshin wrote: > # HG changeset patch > # User Aleksei Bavshin > # Date 1712181327 25200 > # Wed Apr 03 14:55:27 2024 -0700 > # Node ID 375fa42f1a6010692a8782c4f03c6ad465d3f7f7 > # Parent 8c8d8118c7ac0a0426f48dbfed94e279dddff992 > Upstream: disable re-resolve functionality on Windows. > > Following features are currently not implemented on Windows, making re-resolve > functionality unsafe to use: > > * 'noreuse' shared zones that are re-created on each configuration reload. > The work scheduling logic is not prepared to handle simultaneous access to > the shared zone from multiple generations of the worker processes. I don't see a problem here. Could you please elaborate. > * 'ngx_worker' identification. > It is possible to configure multiple worker processes on Windows, even if > only one would actually handle the traffic. All of the worker processes are > currently identified as process 0, breaking scheduling and locking of the > resolver tasks. 
This can be fixed. Patch attached. > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -6327,7 +6327,7 @@ ngx_http_upstream_server(ngx_conf_t *cf, > continue; > } > > -#if (NGX_HTTP_UPSTREAM_ZONE) > +#if (NGX_HTTP_UPSTREAM_ZONE && !(NGX_WIN32)) > if (ngx_strcmp(value[i].data, "resolve") == 0) { > resolve = 1; > continue; > diff --git a/src/stream/ngx_stream_upstream.c b/src/stream/ngx_stream_upstream.c > --- a/src/stream/ngx_stream_upstream.c > +++ b/src/stream/ngx_stream_upstream.c > @@ -545,7 +545,7 @@ ngx_stream_upstream_server(ngx_conf_t *c > continue; > } > > -#if (NGX_STREAM_UPSTREAM_ZONE) > +#if (NGX_STREAM_UPSTREAM_ZONE && !(NGX_WIN32)) > if (ngx_strcmp(value[i].data, "resolve") == 0) { > resolve = 1; > continue; > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1720529662 -14400 # Tue Jul 09 16:54:22 2024 +0400 # Node ID 09dfd8c3da0e44dcf75052929e34c76dab3d7781 # Parent 8c8d8118c7ac0a0426f48dbfed94e279dddff992 Win32: support for ngx_worker variable. In unix the variable holds nginx worker number. For win32 it was always zero. 
diff --git a/src/os/win32/ngx_os.h b/src/os/win32/ngx_os.h --- a/src/os/win32/ngx_os.h +++ b/src/os/win32/ngx_os.h @@ -63,6 +63,7 @@ extern ngx_uint_t ngx_inherited_nonblo extern ngx_uint_t ngx_tcp_nodelay_and_tcp_nopush; extern ngx_uint_t ngx_win32_version; extern char ngx_unique[]; +extern char ngx_worker_str[]; #endif /* _NGX_OS_H_INCLUDED_ */ diff --git a/src/os/win32/ngx_process_cycle.c b/src/os/win32/ngx_process_cycle.c --- a/src/os/win32/ngx_process_cycle.c +++ b/src/os/win32/ngx_process_cycle.c @@ -375,6 +375,10 @@ ngx_start_worker_processes(ngx_cycle_t * ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module); for (n = 0; n < ccf->worker_processes; n++) { + ngx_sprintf((u_char *) ngx_worker_str, "%i%Z", n); + + SetEnvironmentVariable("ngx_worker", ngx_worker_str); + if (ngx_spawn_process(cycle, "worker", type) == NGX_INVALID_PID) { break; } diff --git a/src/os/win32/ngx_win32_init.c b/src/os/win32/ngx_win32_init.c --- a/src/os/win32/ngx_win32_init.c +++ b/src/os/win32/ngx_win32_init.c @@ -18,6 +18,7 @@ ngx_uint_t ngx_inherited_nonblocking = ngx_uint_t ngx_tcp_nodelay_and_tcp_nopush; char ngx_unique[NGX_INT32_LEN + 1]; +char ngx_worker_str[NGX_INT32_LEN + 1]; ngx_os_io_t ngx_os_io = { @@ -71,6 +72,7 @@ ngx_os_init(ngx_log_t *log) SOCKET s; WSADATA wsd; ngx_err_t err; + ngx_int_t w; ngx_time_t *tp; ngx_uint_t n; SYSTEM_INFO si; @@ -260,6 +262,30 @@ nopoll: { ngx_process = NGX_PROCESS_WORKER; + bytes = GetEnvironmentVariable("ngx_worker", ngx_worker_str, + NGX_INT32_LEN + 1); + + if (bytes == 0) { + ngx_log_error(NGX_LOG_EMERG, log, ngx_errno, + "GetEnvironmentVariable(\"ngx_worker\") failed"); + return NGX_ERROR; + } + + if (bytes <= NGX_INT32_LEN) { + w = ngx_atoi(ngx_worker_str, bytes); + + } else { + w = NGX_ERROR; + } + + if (w == NGX_ERROR) { + ngx_log_error(NGX_LOG_EMERG, log, 0, + "broken environment variable \"ngx_worker\""); + return NGX_ERROR; + } + + ngx_worker = w; + } else { err = ngx_errno; From arut at nginx.com Wed Jul 10 
15:43:39 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 10 Jul 2024 19:43:39 +0400 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: <47b74e2e-d081-49af-bae2-82f6352cf24a@nginx.com> References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> <47b74e2e-d081-49af-bae2-82f6352cf24a@nginx.com> Message-ID: <20240710154339.gmeq37c35wx563lo@N00W24XTQX> Hi, On Tue, Jul 09, 2024 at 01:21:18PM -0700, Aleksei Bavshin wrote: > On 7/8/2024 7:20 AM, Roman Arutyunyan wrote: > > Hi, > > > > On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: > > > # HG changeset patch > > > # User Ruslan Ermilov > > > # Date 1392462754 -14400 > > > # Sat Feb 15 15:12:34 2014 +0400 > > > # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 > > > # Parent 02e9411009b987f408214ab4a8b6b6093f843bcd > > > Upstream: re-resolvable servers. > > > > > > Specifying the upstream server by a hostname together with the > > > "resolve" parameter will make the hostname to be periodically > > > resolved, and upstream servers added/removed as necessary. > > > > > > This requires a "resolver" at the "http" configuration block. > > > > > > The "resolver_timeout" parameter also affects when the failed > > > DNS requests will be attempted again. Responses with NXDOMAIN > > > will be attempted again in 10 seconds. > > > > > > Upstream has a configuration generation number that is incremented each > > > time servers are added/removed to the primary/backup list. This number > > > is remembered by the peer.init method, and if peer.get detects a change > > > in configuration, it returns NGX_BUSY. > > > > > > Each server has a reference counter. It is incremented by peer.get and > > > decremented by peer.free. When a server is removed, it is removed from > > > the list of servers and is marked as "zombie". The memory allocated by > > > a zombie peer is freed only when its reference count becomes zero. 
> > > > > > Re-resolvable servers utilize timers that also hold a reference. A > > > reference is also held while upstream keepalive caches an idle > > > connection. > > > > > > Co-authored-by: Roman Arutyunyan > > > Co-authored-by: Sergey Kandaurov > > > Co-authored-by: Vladimir Homutov > > > > I feel like it would be easier to merge this patch, SRV resolve and preresolve > > in a single change. > > > > I disagree here, because > * the patches represent large, significant and logically independent pieces > of work. I squashed a lot of changes into the initial patch, but all of > those were bugfixes or compatibility fixes for newer oss/plus code. > * the commit messages and the diffs give more context for developers who > don't have access to the original history but want to understand why certain > changes were made > * it's easier to track attribution > > I'll merge patches 1 and 2 though, as that makes sense. And I'll make sure > to individually test unmerged patches again in the next revision. 
> > > > diff --git a/src/http/modules/ngx_http_upstream_hash_module.c b/src/http/modules/ngx_http_upstream_hash_module.c > > > --- a/src/http/modules/ngx_http_upstream_hash_module.c > > > +++ b/src/http/modules/ngx_http_upstream_hash_module.c > > > @@ -24,6 +24,9 @@ typedef struct { > > > typedef struct { > > > ngx_http_complex_value_t key; > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + ngx_uint_t config; > > > +#endif > > > ngx_http_upstream_chash_points_t *points; > > > } ngx_http_upstream_hash_srv_conf_t; > > > @@ -49,6 +52,8 @@ static ngx_int_t ngx_http_upstream_get_h > > > static ngx_int_t ngx_http_upstream_init_chash(ngx_conf_t *cf, > > > ngx_http_upstream_srv_conf_t *us); > > > +static ngx_int_t ngx_http_upstream_update_chash(ngx_pool_t *pool, > > > + ngx_http_upstream_srv_conf_t *us); > > > static int ngx_libc_cdecl > > > ngx_http_upstream_chash_cmp_points(const void *one, const void *two); > > > static ngx_uint_t ngx_http_upstream_find_chash_point( > > > @@ -178,11 +183,18 @@ ngx_http_upstream_get_hash_peer(ngx_peer > > > ngx_http_upstream_rr_peers_rlock(hp->rrp.peers); > > > - if (hp->tries > 20 || hp->rrp.peers->single || hp->key.len == 0) { > > > + if (hp->tries > 20 || hp->rrp.peers->number < 2 || hp->key.len == 0) { > > > ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > > > return hp->get_rr_peer(pc, &hp->rrp); > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { > > > + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > > > + return hp->get_rr_peer(pc, &hp->rrp); > > > + } > > > +#endif > > > + > > > now = ngx_time(); > > > pc->cached = 0; > > > @@ -262,6 +274,7 @@ ngx_http_upstream_get_hash_peer(ngx_peer > > > } > > > hp->rrp.current = peer; > > > + ngx_http_upstream_rr_peer_ref(hp->rrp.peers, peer); > > > pc->sockaddr = peer->sockaddr; > > > pc->socklen = peer->socklen; > > > @@ -285,6 +298,26 @@ ngx_http_upstream_get_hash_peer(ngx_peer > > > static ngx_int_t > > > 
ngx_http_upstream_init_chash(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us) > > > { > > > + if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { > > > + return NGX_ERROR; > > > + } > > > + > > > + us->peer.init = ngx_http_upstream_init_chash_peer; > > > + > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (us->shm_zone) { > > > + return NGX_OK; > > > + } > > > +#endif > > > + > > > + return ngx_http_upstream_update_chash(cf->pool, us); > > > +} > > > + > > > + > > > +static ngx_int_t > > > +ngx_http_upstream_update_chash(ngx_pool_t *pool, > > > + ngx_http_upstream_srv_conf_t *us) > > > +{ > > > u_char *host, *port, c; > > > size_t host_len, port_len, size; > > > uint32_t hash, base_hash; > > > @@ -299,25 +332,32 @@ ngx_http_upstream_init_chash(ngx_conf_t > > > u_char byte[4]; > > > } prev_hash; > > > - if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { > > > - return NGX_ERROR; > > > + hcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_hash_module); > > > + > > > + if (hcf->points) { > > > + ngx_free(hcf->points); > > > + hcf->points = NULL; > > > } > > > - us->peer.init = ngx_http_upstream_init_chash_peer; > > > - > > > peers = us->peer.data; > > > npoints = peers->total_weight * 160; > > > size = sizeof(ngx_http_upstream_chash_points_t) > > > - + sizeof(ngx_http_upstream_chash_point_t) * (npoints - 1); > > > + - sizeof(ngx_http_upstream_chash_point_t) > > > + + sizeof(ngx_http_upstream_chash_point_t) * npoints; > > > - points = ngx_palloc(cf->pool, size); > > > + points = pool ? 
ngx_palloc(pool, size) : ngx_alloc(size, ngx_cycle->log); > > > if (points == NULL) { > > > return NGX_ERROR; > > > } > > > points->number = 0; > > > + if (npoints == 0) { > > > + hcf->points = points; > > > + return NGX_OK; > > > + } > > > + > > > for (peer = peers->peer; peer; peer = peer->next) { > > > server = &peer->server; > > > @@ -401,7 +441,6 @@ ngx_http_upstream_init_chash(ngx_conf_t > > > points->number = i + 1; > > > - hcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_hash_module); > > > hcf->points = points; > > > return NGX_OK; > > > @@ -481,7 +520,22 @@ ngx_http_upstream_init_chash_peer(ngx_ht > > > ngx_http_upstream_rr_peers_rlock(hp->rrp.peers); > > > - hp->hash = ngx_http_upstream_find_chash_point(hcf->points, hash); > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (hp->rrp.peers->config > > > + && (hcf->points == NULL || hcf->config != *hp->rrp.peers->config)) > > > + { > > > + if (ngx_http_upstream_update_chash(NULL, us) != NGX_OK) { > > > + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > > > + return NGX_ERROR; > > > + } > > > + > > > + hcf->config = *hp->rrp.peers->config; > > > + } > > > +#endif > > > + > > > + if (hcf->points->number) { > > > + hp->hash = ngx_http_upstream_find_chash_point(hcf->points, hash); > > > + } > > > ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > > > @@ -517,6 +571,20 @@ ngx_http_upstream_get_chash_peer(ngx_pee > > > pc->cached = 0; > > > pc->connection = NULL; > > > + if (hp->rrp.peers->number == 0) { > > > + pc->name = hp->rrp.peers->name; > > > + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > > > + return NGX_BUSY; > > > + } > > > + > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { > > > + pc->name = hp->rrp.peers->name; > > > + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); > > > + return NGX_BUSY; > > > + } > > > +#endif > > > + > > > now = ngx_time(); > > > hcf = hp->conf; > > > @@ -597,6 +665,7 @@ 
ngx_http_upstream_get_chash_peer(ngx_pee > > > found: > > > hp->rrp.current = best; > > > + ngx_http_upstream_rr_peer_ref(hp->rrp.peers, best); > > > pc->sockaddr = best->sockaddr; > > > pc->socklen = best->socklen; > > > @@ -664,6 +733,7 @@ ngx_http_upstream_hash(ngx_conf_t *cf, n > > > } > > > uscf->flags = NGX_HTTP_UPSTREAM_CREATE > > > + |NGX_HTTP_UPSTREAM_MODIFY > > > |NGX_HTTP_UPSTREAM_WEIGHT > > > |NGX_HTTP_UPSTREAM_MAX_CONNS > > > |NGX_HTTP_UPSTREAM_MAX_FAILS > > > diff --git a/src/http/modules/ngx_http_upstream_ip_hash_module.c b/src/http/modules/ngx_http_upstream_ip_hash_module.c > > > --- a/src/http/modules/ngx_http_upstream_ip_hash_module.c > > > +++ b/src/http/modules/ngx_http_upstream_ip_hash_module.c > > > @@ -163,11 +163,19 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p > > > ngx_http_upstream_rr_peers_rlock(iphp->rrp.peers); > > > - if (iphp->tries > 20 || iphp->rrp.peers->single) { > > > + if (iphp->tries > 20 || iphp->rrp.peers->number < 2) { > > > ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); > > > return iphp->get_rr_peer(pc, &iphp->rrp); > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (iphp->rrp.peers->config && iphp->rrp.config != *iphp->rrp.peers->config) > > > + { > > > + ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); > > > + return iphp->get_rr_peer(pc, &iphp->rrp); > > > + } > > > +#endif > > > + > > > now = ngx_time(); > > > pc->cached = 0; > > > @@ -232,6 +240,7 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p > > > } > > > iphp->rrp.current = peer; > > > + ngx_http_upstream_rr_peer_ref(iphp->rrp.peers, peer); > > > pc->sockaddr = peer->sockaddr; > > > pc->socklen = peer->socklen; > > > @@ -268,6 +277,7 @@ ngx_http_upstream_ip_hash(ngx_conf_t *cf > > > uscf->peer.init_upstream = ngx_http_upstream_init_ip_hash; > > > uscf->flags = NGX_HTTP_UPSTREAM_CREATE > > > + |NGX_HTTP_UPSTREAM_MODIFY > > > |NGX_HTTP_UPSTREAM_WEIGHT > > > |NGX_HTTP_UPSTREAM_MAX_CONNS > > > |NGX_HTTP_UPSTREAM_MAX_FAILS > > > diff --git 
a/src/http/modules/ngx_http_upstream_least_conn_module.c b/src/http/modules/ngx_http_upstream_least_conn_module.c > > > --- a/src/http/modules/ngx_http_upstream_least_conn_module.c > > > +++ b/src/http/modules/ngx_http_upstream_least_conn_module.c > > > @@ -124,6 +124,12 @@ ngx_http_upstream_get_least_conn_peer(ng > > > ngx_http_upstream_rr_peers_wlock(peers); > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (peers->config && rrp->config != *peers->config) { > > > + goto busy; > > > + } > > > +#endif > > > + > > > best = NULL; > > > total = 0; > > > @@ -244,6 +250,7 @@ ngx_http_upstream_get_least_conn_peer(ng > > > best->conns++; > > > rrp->current = best; > > > + ngx_http_upstream_rr_peer_ref(peers, best); > > > n = p / (8 * sizeof(uintptr_t)); > > > m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); > > > @@ -278,8 +285,18 @@ failed: > > > } > > > ngx_http_upstream_rr_peers_wlock(peers); > > > + > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (peers->config && rrp->config != *peers->config) { > > > + goto busy; > > > + } > > > +#endif > > > > This block is useless. 
> > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > +busy: > > > +#endif > > > + > > > ngx_http_upstream_rr_peers_unlock(peers); > > > pc->name = peers->name; > > > @@ -303,6 +320,7 @@ ngx_http_upstream_least_conn(ngx_conf_t > > > uscf->peer.init_upstream = ngx_http_upstream_init_least_conn; > > > uscf->flags = NGX_HTTP_UPSTREAM_CREATE > > > + |NGX_HTTP_UPSTREAM_MODIFY > > > |NGX_HTTP_UPSTREAM_WEIGHT > > > |NGX_HTTP_UPSTREAM_MAX_CONNS > > > |NGX_HTTP_UPSTREAM_MAX_FAILS > > > diff --git a/src/http/modules/ngx_http_upstream_random_module.c b/src/http/modules/ngx_http_upstream_random_module.c > > > --- a/src/http/modules/ngx_http_upstream_random_module.c > > > +++ b/src/http/modules/ngx_http_upstream_random_module.c > > > @@ -17,6 +17,9 @@ typedef struct { > > > typedef struct { > > > ngx_uint_t two; > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + ngx_uint_t config; > > > +#endif > > > ngx_http_upstream_random_range_t *ranges; > > > } ngx_http_upstream_random_srv_conf_t; > > > @@ -127,6 +130,11 @@ ngx_http_upstream_update_random(ngx_pool > > > rcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_random_module); > > > + if (rcf->ranges) { > > > + ngx_free(rcf->ranges); > > > + rcf->ranges = NULL; > > > + } > > > + > > > peers = us->peer.data; > > > size = peers->number * sizeof(ngx_http_upstream_random_range_t); > > > @@ -186,11 +194,15 @@ ngx_http_upstream_init_random_peer(ngx_h > > > ngx_http_upstream_rr_peers_rlock(rp->rrp.peers); > > > #if (NGX_HTTP_UPSTREAM_ZONE) > > > - if (rp->rrp.peers->shpool && rcf->ranges == NULL) { > > > + if (rp->rrp.peers->config > > > + && (rcf->ranges == NULL || rcf->config != *rp->rrp.peers->config)) > > > + { > > > if (ngx_http_upstream_update_random(NULL, us) != NGX_OK) { > > > ngx_http_upstream_rr_peers_unlock(rp->rrp.peers); > > > return NGX_ERROR; > > > } > > > + > > > + rcf->config = *rp->rrp.peers->config; > > > } > > > #endif > > > @@ -220,11 +232,18 @@ ngx_http_upstream_get_random_peer(ngx_pe > > > 
ngx_http_upstream_rr_peers_rlock(peers); > > > - if (rp->tries > 20 || peers->single) { > > > + if (rp->tries > 20 || peers->number < 2) { > > > ngx_http_upstream_rr_peers_unlock(peers); > > > return ngx_http_upstream_get_round_robin_peer(pc, rrp); > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (peers->config && rrp->config != *peers->config) { > > > + ngx_http_upstream_rr_peers_unlock(peers); > > > + return ngx_http_upstream_get_round_robin_peer(pc, rrp); > > > + } > > > +#endif > > > + > > > pc->cached = 0; > > > pc->connection = NULL; > > > @@ -274,6 +293,7 @@ ngx_http_upstream_get_random_peer(ngx_pe > > > } > > > rrp->current = peer; > > > + ngx_http_upstream_rr_peer_ref(peers, peer); > > > if (now - peer->checked > peer->fail_timeout) { > > > peer->checked = now; > > > @@ -314,11 +334,18 @@ ngx_http_upstream_get_random2_peer(ngx_p > > > ngx_http_upstream_rr_peers_wlock(peers); > > > - if (rp->tries > 20 || peers->single) { > > > + if (rp->tries > 20 || peers->number < 2) { > > > ngx_http_upstream_rr_peers_unlock(peers); > > > return ngx_http_upstream_get_round_robin_peer(pc, rrp); > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (peers->config && rrp->config != *peers->config) { > > > + ngx_http_upstream_rr_peers_unlock(peers); > > > + return ngx_http_upstream_get_round_robin_peer(pc, rrp); > > > + } > > > +#endif > > > + > > > pc->cached = 0; > > > pc->connection = NULL; > > > @@ -384,6 +411,7 @@ ngx_http_upstream_get_random2_peer(ngx_p > > > } > > > rrp->current = peer; > > > + ngx_http_upstream_rr_peer_ref(peers, peer); > > > if (now - peer->checked > peer->fail_timeout) { > > > peer->checked = now; > > > @@ -467,6 +495,7 @@ ngx_http_upstream_random(ngx_conf_t *cf, > > > uscf->peer.init_upstream = ngx_http_upstream_init_random; > > > uscf->flags = NGX_HTTP_UPSTREAM_CREATE > > > + |NGX_HTTP_UPSTREAM_MODIFY > > > |NGX_HTTP_UPSTREAM_WEIGHT > > > |NGX_HTTP_UPSTREAM_MAX_CONNS > > > |NGX_HTTP_UPSTREAM_MAX_FAILS > > > diff --git 
a/src/http/modules/ngx_http_upstream_zone_module.c b/src/http/modules/ngx_http_upstream_zone_module.c > > > --- a/src/http/modules/ngx_http_upstream_zone_module.c > > > +++ b/src/http/modules/ngx_http_upstream_zone_module.c > > > @@ -18,6 +18,10 @@ static ngx_http_upstream_rr_peers_t *ngx > > > ngx_slab_pool_t *shpool, ngx_http_upstream_srv_conf_t *uscf); > > > static ngx_http_upstream_rr_peer_t *ngx_http_upstream_zone_copy_peer( > > > ngx_http_upstream_rr_peers_t *peers, ngx_http_upstream_rr_peer_t *src); > > > +static void ngx_http_upstream_zone_set_single( > > > + ngx_http_upstream_srv_conf_t *uscf); > > > +static void ngx_http_upstream_zone_remove_peer_locked( > > > + ngx_http_upstream_rr_peers_t *peers, ngx_http_upstream_rr_peer_t *peer); > > > static ngx_command_t ngx_http_upstream_zone_commands[] = { > > > @@ -33,6 +37,11 @@ static ngx_command_t ngx_http_upstream_ > > > }; > > > +static ngx_int_t ngx_http_upstream_zone_init_worker(ngx_cycle_t *cycle); > > > +static void ngx_http_upstream_zone_resolve_timer(ngx_event_t *event); > > > +static void ngx_http_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx); > > > > These declarations should be moved up to other functions declarations. 
> > > > > static ngx_http_module_t ngx_http_upstream_zone_module_ctx = { > > > NULL, /* preconfiguration */ > > > NULL, /* postconfiguration */ > > > @@ -55,7 +64,7 @@ ngx_module_t ngx_http_upstream_zone_mod > > > NGX_HTTP_MODULE, /* module type */ > > > NULL, /* init master */ > > > NULL, /* init module */ > > > - NULL, /* init process */ > > > + ngx_http_upstream_zone_init_worker, /* init process */ > > > NULL, /* init thread */ > > > NULL, /* exit thread */ > > > NULL, /* exit process */ > > > @@ -188,9 +197,15 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > > > ngx_http_upstream_srv_conf_t *uscf) > > > { > > > ngx_str_t *name; > > > + ngx_uint_t *config; > > > ngx_http_upstream_rr_peer_t *peer, **peerp; > > > ngx_http_upstream_rr_peers_t *peers, *backup; > > > + config = ngx_slab_calloc(shpool, sizeof(ngx_uint_t)); > > > + if (config == NULL) { > > > + return NULL; > > > + } > > > + > > > peers = ngx_slab_alloc(shpool, sizeof(ngx_http_upstream_rr_peers_t)); > > > if (peers == NULL) { > > > return NULL; > > > @@ -214,6 +229,7 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > > > peers->name = name; > > > peers->shpool = shpool; > > > + peers->config = config; > > > for (peerp = &peers->peer; *peerp; peerp = &peer->next) { > > > /* pool is unlocked */ > > > @@ -223,6 +239,17 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > > > } > > > *peerp = peer; > > > + peer->id = (*peers->config)++; > > > + } > > > + > > > + for (peerp = &peers->resolve; *peerp; peerp = &peer->next) { > > > + peer = ngx_http_upstream_zone_copy_peer(peers, *peerp); > > > + if (peer == NULL) { > > > + return NULL; > > > + } > > > + > > > + *peerp = peer; > > > + peer->id = (*peers->config)++; > > > } > > > if (peers->next == NULL) { > > > @@ -239,6 +266,7 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > > > backup->name = name; > > > backup->shpool = shpool; > > > + backup->config = config; > > > for (peerp = &backup->peer; *peerp; peerp = &peer->next) { > > > /* pool is unlocked */ > > > @@ -248,6 
+276,17 @@ ngx_http_upstream_zone_copy_peers(ngx_sl > > > } > > > *peerp = peer; > > > + peer->id = (*backup->config)++; > > > + } > > > + > > > + for (peerp = &backup->resolve; *peerp; peerp = &peer->next) { > > > + peer = ngx_http_upstream_zone_copy_peer(backup, *peerp); > > > + if (peer == NULL) { > > > + return NULL; > > > + } > > > + > > > + *peerp = peer; > > > + peer->id = (*backup->config)++; > > > } > > > peers->next = backup; > > > @@ -279,6 +318,7 @@ ngx_http_upstream_zone_copy_peer(ngx_htt > > > dst->sockaddr = NULL; > > > dst->name.data = NULL; > > > dst->server.data = NULL; > > > + dst->host = NULL; > > > } > > > dst->sockaddr = ngx_slab_calloc_locked(pool, sizeof(ngx_sockaddr_t)); > > > @@ -301,12 +341,37 @@ ngx_http_upstream_zone_copy_peer(ngx_htt > > > } > > > ngx_memcpy(dst->server.data, src->server.data, src->server.len); > > > + > > > + if (src->host) { > > > + dst->host = ngx_slab_calloc_locked(pool, > > > + sizeof(ngx_http_upstream_host_t)); > > > + if (dst->host == NULL) { > > > + goto failed; > > > + } > > > + > > > + dst->host->name.data = ngx_slab_alloc_locked(pool, > > > + src->host->name.len); > > > + if (dst->host->name.data == NULL) { > > > + goto failed; > > > + } > > > + > > > + dst->host->peers = peers; > > > + dst->host->peer = dst; > > > + > > > + dst->host->name.len = src->host->name.len; > > > + ngx_memcpy(dst->host->name.data, src->host->name.data, > > > + src->host->name.len); > > > + } > > > } > > > return dst; > > > failed: > > > + if (dst->host) { > > > + ngx_slab_free_locked(pool, dst->host); > > > + } > > > + > > > if (dst->server.data) { > > > ngx_slab_free_locked(pool, dst->server.data); > > > } > > > @@ -323,3 +388,337 @@ failed: > > > return NULL; > > > } > > > + > > > + > > > +static void > > > +ngx_http_upstream_zone_set_single(ngx_http_upstream_srv_conf_t *uscf) > > > +{ > > > + ngx_http_upstream_rr_peers_t *peers; > > > + > > > + peers = uscf->peer.data; > > > + > > > + if (peers->number == 1 > > > + && 
(peers->next == NULL || peers->next->number == 0)) > > > + { > > > + peers->single = 1; > > > + > > > + } else { > > > + peers->single = 0; > > > + } > > > +} > > > + > > > + > > > +static void > > > +ngx_http_upstream_zone_remove_peer_locked(ngx_http_upstream_rr_peers_t *peers, > > > + ngx_http_upstream_rr_peer_t *peer) > > > +{ > > > + peers->total_weight -= peer->weight; > > > + peers->number--; > > > + peers->tries -= (peer->down == 0); > > > + (*peers->config)++; > > > + peers->weighted = (peers->total_weight != peers->number); > > > + > > > + ngx_http_upstream_rr_peer_free(peers, peer); > > > +} > > > + > > > + > > > +static ngx_int_t > > > +ngx_http_upstream_zone_init_worker(ngx_cycle_t *cycle) > > > +{ > > > + ngx_uint_t i; > > > + ngx_event_t *event; > > > + ngx_http_upstream_rr_peer_t *peer; > > > + ngx_http_upstream_rr_peers_t *peers; > > > + ngx_http_upstream_srv_conf_t *uscf, **uscfp; > > > + ngx_http_upstream_main_conf_t *umcf; > > > + > > > + if (ngx_process != NGX_PROCESS_WORKER > > > + && ngx_process != NGX_PROCESS_SINGLE) > > > + { > > > + return NGX_OK; > > > + } > > > + > > > + umcf = ngx_http_cycle_get_module_main_conf(cycle, ngx_http_upstream_module); > > > + > > > + if (umcf == NULL) { > > > + return NGX_OK; > > > + } > > > + > > > + uscfp = umcf->upstreams.elts; > > > + > > > + for (i = 0; i < umcf->upstreams.nelts; i++) { > > > + > > > + uscf = uscfp[i]; > > > + > > > + if (uscf->shm_zone == NULL) { > > > + continue; > > > + } > > > + > > > + peers = uscf->peer.data; > > > + > > > + do { > > > + ngx_http_upstream_rr_peers_wlock(peers); > > > + > > > + for (peer = peers->resolve; peer; peer = peer->next) { > > > + > > > + if (peer->host->worker != ngx_worker) { > > > + continue; > > > + } > > > + > > > + event = &peer->host->event; > > > + ngx_memzero(event, sizeof(ngx_event_t)); > > > + > > > + event->data = uscf; > > > + event->handler = ngx_http_upstream_zone_resolve_timer; > > > + event->log = cycle->log; > > > + event->cancelable = 1; > 
> > + > > > + ngx_http_upstream_rr_peer_ref(peers, peer); > > > > In open source nginx a template cannot be deleted since there's no API. > > As a result, there's no reason in increase the reference counter here. > > > > > + ngx_add_timer(event, 1); > > > + } > > > + > > > + ngx_http_upstream_rr_peers_unlock(peers); > > > + > > > + peers = peers->next; > > > + > > > + } while (peers); > > > + } > > > + > > > + return NGX_OK; > > > +} > > > + > > > + > > > +static void > > > +ngx_http_upstream_zone_resolve_timer(ngx_event_t *event) > > > +{ > > > + ngx_resolver_ctx_t *ctx; > > > + ngx_http_upstream_host_t *host; > > > + ngx_http_upstream_rr_peer_t *template; > > > + ngx_http_upstream_rr_peers_t *peers; > > > + ngx_http_upstream_srv_conf_t *uscf; > > > + > > > + host = (ngx_http_upstream_host_t *) event; > > > + uscf = event->data; > > > + peers = host->peers; > > > + template = host->peer; > > > + > > > + if (template->zombie) { > > > + (void) ngx_http_upstream_rr_peer_unref(peers, template); > > > + > > > + ngx_shmtx_lock(&peers->shpool->mutex); > > > + > > > + if (host->service.len) { > > > + ngx_slab_free_locked(peers->shpool, host->service.data); > > > + } > > > + > > > + ngx_slab_free_locked(peers->shpool, host->name.data); > > > + ngx_slab_free_locked(peers->shpool, host); > > > + ngx_shmtx_unlock(&peers->shpool->mutex); > > > + > > > + return; > > > + } > > > > Since a template cannot be deleted, it cannot become a zombie as well. > > This block is useless. 
> > > > > + ctx = ngx_resolve_start(uscf->resolver, NULL); > > > + if (ctx == NULL) { > > > + goto retry; > > > + } > > > + > > > + if (ctx == NGX_NO_RESOLVER) { > > > + ngx_log_error(NGX_LOG_ERR, event->log, 0, > > > + "no resolver defined to resolve %V", &host->name); > > > + return; > > > + } > > > + > > > + ctx->name = host->name; > > > + ctx->handler = ngx_http_upstream_zone_resolve_handler; > > > + ctx->data = host; > > > + ctx->timeout = uscf->resolver_timeout; > > > + ctx->cancelable = 1; > > > + > > > + if (ngx_resolve_name(ctx) == NGX_OK) { > > > + return; > > > + } > > > + > > > +retry: > > > + > > > + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); > > > +} > > > + > > > + > > > +static void > > > +ngx_http_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx) > > > +{ > > > + time_t now; > > > + in_port_t port; > > > + ngx_msec_t timer; > > > + ngx_uint_t i, j; > > > + ngx_event_t *event; > > > + ngx_resolver_addr_t *addr; > > > + ngx_http_upstream_host_t *host; > > > + ngx_http_upstream_rr_peer_t *peer, *template, **peerp; > > > + ngx_http_upstream_rr_peers_t *peers; > > > + ngx_http_upstream_srv_conf_t *uscf; > > > + > > > + host = ctx->data; > > > + event = &host->event; > > > + uscf = event->data; > > > + peers = host->peers; > > > + template = host->peer; > > > + > > > + ngx_http_upstream_rr_peers_wlock(peers); > > > + > > > + if (template->zombie) { > > > + (void) ngx_http_upstream_rr_peer_unref(peers, template); > > > + > > > + ngx_http_upstream_rr_peers_unlock(peers); > > > + > > > + ngx_shmtx_lock(&peers->shpool->mutex); > > > + ngx_slab_free_locked(peers->shpool, host->name.data); > > > + ngx_slab_free_locked(peers->shpool, host); > > > + ngx_shmtx_unlock(&peers->shpool->mutex); > > > + > > > + ngx_resolve_name_done(ctx); > > > + > > > + return; > > > + } > > > > Again, this block is useless. 
> > > > > + now = ngx_time(); > > > + > > > + if (ctx->state) { > > > + ngx_log_error(NGX_LOG_ERR, event->log, 0, > > > + "%V could not be resolved (%i: %s)", > > > + &ctx->name, ctx->state, > > > + ngx_resolver_strerror(ctx->state)); > > > + > > > + if (ctx->state != NGX_RESOLVE_NXDOMAIN) { > > > + ngx_http_upstream_rr_peers_unlock(peers); > > > + > > > + ngx_resolve_name_done(ctx); > > > + > > > + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); > > > + return; > > > + } > > > + > > > + /* NGX_RESOLVE_NXDOMAIN */ > > > + > > > + ctx->naddrs = 0; > > > + } > > > + > > > +#if (NGX_DEBUG) > > > + { > > > + u_char text[NGX_SOCKADDR_STRLEN]; > > > + size_t len; > > > + > > > + for (i = 0; i < ctx->naddrs; i++) { > > > + len = ngx_sock_ntop(ctx->addrs[i].sockaddr, ctx->addrs[i].socklen, > > > + text, NGX_SOCKADDR_STRLEN, 0); > > > + > > > + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, event->log, 0, > > > + "name %V was resolved to %*s", &host->name, len, text); > > > + } > > > + } > > > +#endif > > > + > > > + for (peerp = &peers->peer; *peerp; /* void */ ) { > > > + peer = *peerp; > > > + > > > + if (peer->host != host) { > > > + goto next; > > > + } > > > + > > > + for (j = 0; j < ctx->naddrs; j++) { > > > + > > > + addr = &ctx->addrs[j]; > > > + > > > + if (addr->name.len == 0 > > > + && ngx_cmp_sockaddr(peer->sockaddr, peer->socklen, > > > + addr->sockaddr, addr->socklen, 0) > > > + == NGX_OK) > > > + { > > > + addr->name.len = 1; > > > + goto next; > > > + } > > > + } > > > + > > > + *peerp = peer->next; > > > + ngx_http_upstream_zone_remove_peer_locked(peers, peer); > > > + > > > + ngx_http_upstream_zone_set_single(uscf); > > > + > > > + continue; > > > + > > > + next: > > > + > > > + peerp = &peer->next; > > > + } > > > + > > > + for (i = 0; i < ctx->naddrs; i++) { > > > + > > > + addr = &ctx->addrs[i]; > > > + > > > + if (addr->name.len == 1) { > > > + addr->name.len = 0; > > > + continue; > > > + } > > > + > > > + ngx_shmtx_lock(&peers->shpool->mutex); > > 
> + peer = ngx_http_upstream_zone_copy_peer(peers, NULL); > > > + ngx_shmtx_unlock(&peers->shpool->mutex); > > > + > > > + if (peer == NULL) { > > > + ngx_log_error(NGX_LOG_ERR, event->log, 0, > > > + "cannot add new server to upstream \"%V\", " > > > + "memory exhausted", peers->name); > > > + break; > > > + } > > > + > > > + ngx_memcpy(peer->sockaddr, addr->sockaddr, addr->socklen); > > > + > > > + port = ((struct sockaddr_in *) template->sockaddr)->sin_port; > > > + > > > + switch (peer->sockaddr->sa_family) { > > > +#if (NGX_HAVE_INET6) > > > + case AF_INET6: > > > + ((struct sockaddr_in6 *) peer->sockaddr)->sin6_port = port; > > > + break; > > > +#endif > > > + default: /* AF_INET */ > > > + ((struct sockaddr_in *) peer->sockaddr)->sin_port = port; > > > + } > > > + > > > + peer->socklen = addr->socklen; > > > + > > > + peer->name.len = ngx_sock_ntop(peer->sockaddr, peer->socklen, > > > + peer->name.data, NGX_SOCKADDR_STRLEN, 1); > > > + > > > + peer->host = template->host; > > > + peer->server = template->server; > > > + > > > + peer->weight = template->weight; > > > + peer->effective_weight = peer->weight; > > > + peer->max_conns = template->max_conns; > > > + peer->max_fails = template->max_fails; > > > + peer->fail_timeout = template->fail_timeout; > > > + peer->down = template->down; > > > + > > > + *peerp = peer; > > > + peerp = &peer->next; > > > + > > > + peers->number++; > > > + peers->tries += (peer->down == 0); > > > + peers->total_weight += peer->weight; > > > + peers->weighted = (peers->total_weight != peers->number); > > > + peer->id = (*peers->config)++; > > > + > > > + ngx_http_upstream_zone_set_single(uscf); > > > + } > > > + > > > + ngx_http_upstream_rr_peers_unlock(peers); > > > + > > > + timer = (ngx_msec_t) 1000 * (ctx->valid > now ? ctx->valid - now + 1 : 1); > > > + timer = ngx_min(timer, uscf->resolver_timeout); > > > > The last line was added to facilitate faster recycle of zombie templates. 
> > Since there are no zombie templates here, the line can be removed. > > > > > + ngx_resolve_name_done(ctx); > > > + > > > + ngx_add_timer(event, timer); > > > +} > > > diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c > > > --- a/src/http/ngx_http_upstream.c > > > +++ b/src/http/ngx_http_upstream.c > > > @@ -1565,6 +1565,26 @@ ngx_http_upstream_connect(ngx_http_reque > > > u->state->peer = u->peer.name; > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (u->upstream && u->upstream->shm_zone > > > + && (u->upstream->flags & NGX_HTTP_UPSTREAM_MODIFY) > > > + ) { > > > > Style: ')' should be move to the line above. > > > > > + u->state->peer = ngx_palloc(r->pool, > > > + sizeof(ngx_str_t) + u->peer.name->len); > > > + if (u->state->peer == NULL) { > > > + ngx_http_upstream_finalize_request(r, u, > > > + NGX_HTTP_INTERNAL_SERVER_ERROR); > > > + return; > > > + } > > > + > > > + u->state->peer->len = u->peer.name->len; > > > + u->state->peer->data = (u_char *) (u->state->peer + 1); > > > + ngx_memcpy(u->state->peer->data, u->peer.name->data, u->peer.name->len); > > > + > > > + u->peer.name = u->state->peer; > > > + } > > > +#endif > > > + > > > if (rc == NGX_BUSY) { > > > ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "no live upstreams"); > > > ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_NOLIVE); > > > @@ -6066,6 +6086,7 @@ ngx_http_upstream(ngx_conf_t *cf, ngx_co > > > u.no_port = 1; > > > uscf = ngx_http_upstream_add(cf, &u, NGX_HTTP_UPSTREAM_CREATE > > > + |NGX_HTTP_UPSTREAM_MODIFY > > > |NGX_HTTP_UPSTREAM_WEIGHT > > > |NGX_HTTP_UPSTREAM_MAX_CONNS > > > |NGX_HTTP_UPSTREAM_MAX_FAILS > > > @@ -6151,7 +6172,11 @@ ngx_http_upstream(ngx_conf_t *cf, ngx_co > > > return rv; > > > } > > > - if (uscf->servers->nelts == 0) { > > > + if (uscf->servers->nelts == 0 > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + && uscf->shm_zone == NULL > > > +#endif > > > > In open source nginx empty upstreams are not allowed, irrespective of the zone. 
> > No new servers can appear in the upstream during runtime since there's no API. > > > > > + ) { > > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > > "no servers are inside upstream"); > > > return NGX_CONF_ERROR; > > > @@ -6171,6 +6196,9 @@ ngx_http_upstream_server(ngx_conf_t *cf, > > > ngx_url_t u; > > > ngx_int_t weight, max_conns, max_fails; > > > ngx_uint_t i; > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + ngx_uint_t resolve; > > > +#endif > > > ngx_http_upstream_server_t *us; > > > us = ngx_array_push(uscf->servers); > > > @@ -6186,6 +6214,9 @@ ngx_http_upstream_server(ngx_conf_t *cf, > > > max_conns = 0; > > > max_fails = 1; > > > fail_timeout = 10; > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + resolve = 0; > > > +#endif > > > for (i = 2; i < cf->args->nelts; i++) { > > > @@ -6274,6 +6305,13 @@ ngx_http_upstream_server(ngx_conf_t *cf, > > > continue; > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (ngx_strcmp(value[i].data, "resolve") == 0) { > > > + resolve = 1; > > > + continue; > > > + } > > > +#endif > > > + > > > goto invalid; > > > } > > > @@ -6282,6 +6320,13 @@ ngx_http_upstream_server(ngx_conf_t *cf, > > > u.url = value[1]; > > > u.default_port = 80; > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (resolve) { > > > + /* resolve at run time */ > > > + u.no_resolve = 1; > > > + } > > > +#endif > > > + > > > if (ngx_parse_url(cf->pool, &u) != NGX_OK) { > > > if (u.err) { > > > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > > > @@ -6292,8 +6337,45 @@ ngx_http_upstream_server(ngx_conf_t *cf, > > > } > > > us->name = u.url; > > > + > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + > > > + if (resolve && u.naddrs == 0) { > > > + ngx_addr_t *addr; > > > + > > > + /* save port */ > > > + > > > + addr = ngx_pcalloc(cf->pool, sizeof(ngx_addr_t)); > > > + if (addr == NULL) { > > > + return NGX_CONF_ERROR; > > > + } > > > + > > > + addr->sockaddr = ngx_palloc(cf->pool, u.socklen); > > > + if (addr->sockaddr == NULL) { > > > + return NGX_CONF_ERROR; > > > + } > > > + > > 
> + ngx_memcpy(addr->sockaddr, &u.sockaddr, u.socklen); > > > + > > > + addr->socklen = u.socklen; > > > + > > > + us->addrs = addr; > > > + us->naddrs = 1; > > > + > > > + us->host = u.host; > > > + > > > + } else { > > > + us->addrs = u.addrs; > > > + us->naddrs = u.naddrs; > > > + } > > > + > > > +#else > > > + > > > us->addrs = u.addrs; > > > us->naddrs = u.naddrs; > > > + > > > +#endif > > > + > > > us->weight = weight; > > > us->max_conns = max_conns; > > > us->max_fails = max_fails; > > > diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h > > > --- a/src/http/ngx_http_upstream.h > > > +++ b/src/http/ngx_http_upstream.h > > > @@ -104,7 +104,11 @@ typedef struct { > > > unsigned backup:1; > > > - NGX_COMPAT_BEGIN(6) > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + ngx_str_t host; > > > +#endif > > > + > > > + NGX_COMPAT_BEGIN(4) > > > NGX_COMPAT_END > > > } ngx_http_upstream_server_t; > > > @@ -115,6 +119,7 @@ typedef struct { > > > #define NGX_HTTP_UPSTREAM_FAIL_TIMEOUT 0x0008 > > > #define NGX_HTTP_UPSTREAM_DOWN 0x0010 > > > #define NGX_HTTP_UPSTREAM_BACKUP 0x0020 > > > +#define NGX_HTTP_UPSTREAM_MODIFY 0x0040 > > > #define NGX_HTTP_UPSTREAM_MAX_CONNS 0x0100 > > > @@ -133,6 +138,8 @@ struct ngx_http_upstream_srv_conf_s { > > > #if (NGX_HTTP_UPSTREAM_ZONE) > > > ngx_shm_zone_t *shm_zone; > > > + ngx_resolver_t *resolver; > > > + ngx_msec_t resolver_timeout; > > > #endif > > > }; > > > diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c > > > --- a/src/http/ngx_http_upstream_round_robin.c > > > +++ b/src/http/ngx_http_upstream_round_robin.c > > > @@ -32,10 +32,15 @@ ngx_http_upstream_init_round_robin(ngx_c > > > ngx_http_upstream_srv_conf_t *us) > > > { > > > ngx_url_t u; > > > - ngx_uint_t i, j, n, w, t; > > > + ngx_uint_t i, j, n, r, w, t; > > > ngx_http_upstream_server_t *server; > > > ngx_http_upstream_rr_peer_t *peer, **peerp; > > > ngx_http_upstream_rr_peers_t *peers, *backup; > > > +#if 
(NGX_HTTP_UPSTREAM_ZONE) > > > + ngx_uint_t resolve; > > > + ngx_http_core_loc_conf_t *clcf; > > > + ngx_http_upstream_rr_peer_t **rpeerp; > > > +#endif > > > us->peer.init = ngx_http_upstream_init_round_robin_peer; > > > @@ -43,23 +48,99 @@ ngx_http_upstream_init_round_robin(ngx_c > > > server = us->servers->elts; > > > n = 0; > > > + r = 0; > > > w = 0; > > > t = 0; > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + resolve = 0; > > > +#endif > > > + > > > for (i = 0; i < us->servers->nelts; i++) { > > > + > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (server[i].host.len) { > > > + resolve = 1; > > > + } > > > +#endif > > > + > > > if (server[i].backup) { > > > continue; > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (server[i].host.len) { > > > + r++; > > > + > > > + } else { > > > + n += server[i].naddrs; > > > + w += server[i].naddrs * server[i].weight; > > > + > > > + if (!server[i].down) { > > > + t += server[i].naddrs; > > > + } > > > + } > > > +#else > > > > The code above and below is the same code. The reason behind duplication was > > to simplify the diff. Now duplication makes no sense. 
Instead, the following > > can be done: > > > > #if (NGX_HTTP_UPSTREAM_ZONE) > > if (server[i].host.len) { > > r++; > > continue; > > } > > #endif > > > > > n += server[i].naddrs; > > > w += server[i].naddrs * server[i].weight; > > > if (!server[i].down) { > > > t += server[i].naddrs; > > > } > > > +#endif > > > } > > > - if (n == 0) { > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (us->shm_zone) { > > > + > > > + if (resolve && !(us->flags & NGX_HTTP_UPSTREAM_MODIFY)) { > > > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > > > + "load balancing method does not support" > > > + " resolving names at run time in" > > > + " upstream \"%V\" in %s:%ui", > > > + &us->host, us->file_name, us->line); > > > + return NGX_ERROR; > > > + } > > > + > > > + clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module); > > > + > > > + us->resolver = clcf->resolver; > > > + us->resolver_timeout = clcf->resolver_timeout; > > > + > > > + /* > > > + * Without "resolver_timeout" in http{}, the value is unset. > > > + * Even if we set it in ngx_http_core_merge_loc_conf(), it's > > > + * still dependent on the module order and unreliable. 
> > > + */ > > > + ngx_conf_init_msec_value(us->resolver_timeout, 30000); > > > + > > > + if (resolve > > > + && (us->resolver == NULL > > > + || us->resolver->connections.nelts == 0)) > > > + { > > > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > > > + "no resolver defined to resolve names" > > > + " at run time in upstream \"%V\" in %s:%ui", > > > + &us->host, us->file_name, us->line); > > > + return NGX_ERROR; > > > + } > > > + > > > + } else if (resolve) { > > > + > > > + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > > > + "resolving names at run time requires" > > > + " upstream \"%V\" in %s:%ui" > > > + " to be in shared memory", > > > + &us->host, us->file_name, us->line); > > > + return NGX_ERROR; > > > + } > > > +#endif > > > + > > > + if (n == 0 > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + && us->shm_zone == NULL > > > +#endif > > > > An empty zone will always be empty in open source nginx. This should be checked > > instead: > > > > if (n + r == 0) { ... } > > > > > + ) { > > > ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > > > "no servers in upstream \"%V\" in %s:%ui", > > > &us->host, us->file_name, us->line); > > > @@ -71,7 +152,8 @@ ngx_http_upstream_init_round_robin(ngx_c > > > return NGX_ERROR; > > > } > > > - peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) * n); > > > + peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) > > > + * (n + r)); > > > if (peer == NULL) { > > > return NGX_ERROR; > > > } > > > @@ -86,11 +168,46 @@ ngx_http_upstream_init_round_robin(ngx_c > > > n = 0; > > > peerp = &peers->peer; > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + rpeerp = &peers->resolve; > > > +#endif > > > + > > > for (i = 0; i < us->servers->nelts; i++) { > > > if (server[i].backup) { > > > continue; > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (server[i].host.len) { > > > + > > > + peer[n].host = ngx_pcalloc(cf->pool, > > > + sizeof(ngx_http_upstream_host_t)); > > > + if (peer[n].host == NULL) { > > > + return NGX_ERROR; > > > + 
} > > > + > > > + peer[n].host->name = server[i].host; > > > + > > > + peer[n].sockaddr = server[i].addrs[0].sockaddr; > > > + peer[n].socklen = server[i].addrs[0].socklen; > > > + peer[n].name = server[i].addrs[0].name; > > > + peer[n].weight = server[i].weight; > > > + peer[n].effective_weight = server[i].weight; > > > + peer[n].current_weight = 0; > > > + peer[n].max_conns = server[i].max_conns; > > > + peer[n].max_fails = server[i].max_fails; > > > + peer[n].fail_timeout = server[i].fail_timeout; > > > + peer[n].down = server[i].down; > > > + peer[n].server = server[i].name; > > > + > > > + *rpeerp = &peer[n]; > > > + rpeerp = &peer[n].next; > > > + n++; > > > + > > > + continue; > > > + } > > > +#endif > > > + > > > for (j = 0; j < server[i].naddrs; j++) { > > > peer[n].sockaddr = server[i].addrs[j].sockaddr; > > > peer[n].socklen = server[i].addrs[j].socklen; > > > @@ -115,6 +232,7 @@ ngx_http_upstream_init_round_robin(ngx_c > > > /* backup servers */ > > > n = 0; > > > + r = 0; > > > w = 0; > > > t = 0; > > > @@ -123,15 +241,37 @@ ngx_http_upstream_init_round_robin(ngx_c > > > continue; > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (server[i].host.len) { > > > + r++; > > > + > > > + } else { > > > + n += server[i].naddrs; > > > + w += server[i].naddrs * server[i].weight; > > > + > > > + if (!server[i].down) { > > > + t += server[i].naddrs; > > > + } > > > + } > > > +#else > > > > See above. > > > > > n += server[i].naddrs; > > > w += server[i].naddrs * server[i].weight; > > > if (!server[i].down) { > > > t += server[i].naddrs; > > > } > > > +#endif > > > } > > > - if (n == 0) { > > > + if (n == 0 > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + && us->shm_zone == NULL > > > +#endif > > > + ) { > > > + return NGX_OK; > > > + } > > > > See above. > > > > if (n + r == 0) { .. } > > > > > + > > > + if (n + r == 0 && !(us->flags & NGX_HTTP_UPSTREAM_BACKUP)) { > > > return NGX_OK; > > > } > > > > After the change above this block will be useless. 
> > > > Actually, this code should be preserved as is (or moved to the subsequent > patches). > The way we handle SRV peer weights, we may place some of the peers to the > backup list. That requires always initializing an empty backup list if the > upstream has resolvable servers and the lb method supports backup servers. > > The condition could be optimized for the opensource code though: > > if (n + r == 0 > #if (NGX_HTTP_UPSTREAM_ZONE) > && (!resolve || !(us->flags & NGX_HTTP_UPSTREAM_BACKUP)) > #endif > ) { > return NGX_OK; > } > > WDYT? Agreed. But I suggest splitting these in two both for simplicity and smaller diff. The second condition will remain the same. But the first one can be simplified since !resolve also means r == 0: if (n == 0 #if (NGX_HTTP_UPSTREAM_ZONE) && !resolve #endif ) { return NGX_OK; } if (n + r == 0 && !(us->flags & NGX_HTTP_UPSTREAM_BACKUP)) { return NGX_OK; } > > > @@ -140,12 +280,16 @@ ngx_http_upstream_init_round_robin(ngx_c > > > return NGX_ERROR; > > > } > > > - peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) * n); > > > + peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) > > > + * (n + r)); > > > if (peer == NULL) { > > > return NGX_ERROR; > > > } > > > - peers->single = 0; > > > + if (n > 0) { > > > + peers->single = 0; > > > + } > > > + > > > backup->single = 0; > > > backup->number = n; > > > backup->weighted = (w != n); > > > @@ -156,11 +300,46 @@ ngx_http_upstream_init_round_robin(ngx_c > > > n = 0; > > > peerp = &backup->peer; > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + rpeerp = &backup->resolve; > > > +#endif > > > + > > > for (i = 0; i < us->servers->nelts; i++) { > > > if (!server[i].backup) { > > > continue; > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (server[i].host.len) { > > > + > > > + peer[n].host = ngx_pcalloc(cf->pool, > > > + sizeof(ngx_http_upstream_host_t)); > > > + if (peer[n].host == NULL) { > > > + return NGX_ERROR; > > > + } > > > + > > > + peer[n].host->name = 
server[i].host; > > > + > > > + peer[n].sockaddr = server[i].addrs[0].sockaddr; > > > + peer[n].socklen = server[i].addrs[0].socklen; > > > + peer[n].name = server[i].addrs[0].name; > > > + peer[n].weight = server[i].weight; > > > + peer[n].effective_weight = server[i].weight; > > > + peer[n].current_weight = 0; > > > + peer[n].max_conns = server[i].max_conns; > > > + peer[n].max_fails = server[i].max_fails; > > > + peer[n].fail_timeout = server[i].fail_timeout; > > > + peer[n].down = server[i].down; > > > + peer[n].server = server[i].name; > > > + > > > + *rpeerp = &peer[n]; > > > + rpeerp = &peer[n].next; > > > + n++; > > > + > > > + continue; > > > + } > > > +#endif > > > + > > > for (j = 0; j < server[i].naddrs; j++) { > > > peer[n].sockaddr = server[i].addrs[j].sockaddr; > > > peer[n].socklen = server[i].addrs[j].socklen; > > > @@ -273,7 +452,12 @@ ngx_http_upstream_init_round_robin_peer( > > > rrp->peers = us->peer.data; > > > rrp->current = NULL; > > > - rrp->config = 0; > > > + > > > + ngx_http_upstream_rr_peers_rlock(rrp->peers); > > > + > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + rrp->config = rrp->peers->config ? 
*rrp->peers->config : 0; > > > +#endif > > > n = rrp->peers->number; > > > @@ -281,6 +465,10 @@ ngx_http_upstream_init_round_robin_peer( > > > n = rrp->peers->next->number; > > > } > > > + r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers); > > > + > > > + ngx_http_upstream_rr_peers_unlock(rrp->peers); > > > + > > > if (n <= 8 * sizeof(uintptr_t)) { > > > rrp->tried = &rrp->data; > > > rrp->data = 0; > > > @@ -296,7 +484,6 @@ ngx_http_upstream_init_round_robin_peer( > > > r->upstream->peer.get = ngx_http_upstream_get_round_robin_peer; > > > r->upstream->peer.free = ngx_http_upstream_free_round_robin_peer; > > > - r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers); > > > #if (NGX_HTTP_SSL) > > > r->upstream->peer.set_session = > > > ngx_http_upstream_set_round_robin_peer_session; > > > @@ -446,6 +633,12 @@ ngx_http_upstream_get_round_robin_peer(n > > > peers = rrp->peers; > > > ngx_http_upstream_rr_peers_wlock(peers); > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (peers->config && rrp->config != *peers->config) { > > > + goto busy; > > > + } > > > +#endif > > > + > > > if (peers->single) { > > > peer = peers->peer; > > > @@ -458,6 +651,7 @@ ngx_http_upstream_get_round_robin_peer(n > > > } > > > rrp->current = peer; > > > + ngx_http_upstream_rr_peer_ref(peers, peer); > > > } else { > > > @@ -508,8 +702,18 @@ failed: > > > } > > > ngx_http_upstream_rr_peers_wlock(peers); > > > + > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + if (peers->config && rrp->config != *peers->config) { > > > + goto busy; > > > + } > > > +#endif > > > > This block is useless. 
> > > > > } > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > +busy: > > > +#endif > > > + > > > ngx_http_upstream_rr_peers_unlock(peers); > > > pc->name = peers->name; > > > @@ -580,6 +784,7 @@ ngx_http_upstream_get_peer(ngx_http_upst > > > } > > > rrp->current = best; > > > + ngx_http_upstream_rr_peer_ref(rrp->peers, best); > > > n = p / (8 * sizeof(uintptr_t)); > > > m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); > > > @@ -617,9 +822,16 @@ ngx_http_upstream_free_round_robin_peer( > > > if (rrp->peers->single) { > > > + if (peer->fails) { > > > + peer->fails = 0; > > > + } > > > + > > > peer->conns--; > > > - ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); > > > + if (ngx_http_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { > > > + ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); > > > + } > > > + > > > ngx_http_upstream_rr_peers_unlock(rrp->peers); > > > pc->tries = 0; > > > @@ -661,7 +873,10 @@ ngx_http_upstream_free_round_robin_peer( > > > peer->conns--; > > > - ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); > > > + if (ngx_http_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { > > > + ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); > > > + } > > > + > > > ngx_http_upstream_rr_peers_unlock(rrp->peers); > > > if (pc->tries) { > > > diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h > > > --- a/src/http/ngx_http_upstream_round_robin.h > > > +++ b/src/http/ngx_http_upstream_round_robin.h > > > @@ -14,8 +14,23 @@ > > > #include > > > +typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; > > > typedef struct ngx_http_upstream_rr_peer_s ngx_http_upstream_rr_peer_t; > > > + > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + > > > +typedef struct { > > > + ngx_event_t event; /* must be first */ > > > + ngx_uint_t worker; > > > + ngx_str_t name; > > > + ngx_http_upstream_rr_peers_t *peers; > > > + ngx_http_upstream_rr_peer_t *peer; > > > +} ngx_http_upstream_host_t; > > > + > > > +#endif > 
> > + > > > + > > > struct ngx_http_upstream_rr_peer_s { > > > struct sockaddr *sockaddr; > > > socklen_t socklen; > > > @@ -46,7 +61,12 @@ struct ngx_http_upstream_rr_peer_s { > > > #endif > > > #if (NGX_HTTP_UPSTREAM_ZONE) > > > + unsigned zombie:1; > > > > I suggest declaring this as in other similar places: > > > > ngx_uint_t zombie; /* unsigned zombie:1; */ > > > > We already use bitfields in the same file though. Of course we do. > Also, in the most popular configuration (64-bit Unix-like platform, GCC or > Clang), the suggested change would increase the struct size by 8 bytes. > Currently the flag is located in a 4 byte hole left between int and > ngx_atomic_t, so we get it for free. The purpose of this is to optimize access to the bitfield. The access is indeed more efficient in this case, which was tested multiple times. However considering the int above, we can indeed leave the bitfield as is to save some memory instead. > > > + > > > ngx_atomic_t lock; > > > + ngx_uint_t id; > > > > This field is not used in open source nginx and should not be added or assigned. > > > > > + ngx_uint_t refs; > > > + ngx_http_upstream_host_t *host; > > > #endif > > > ngx_http_upstream_rr_peer_t *next; > > > @@ -56,8 +76,6 @@ struct ngx_http_upstream_rr_peer_s { > > > }; > > > -typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; > > > - > > > struct ngx_http_upstream_rr_peers_s { > > > ngx_uint_t number; > > > @@ -78,6 +96,12 @@ struct ngx_http_upstream_rr_peers_s { > > > ngx_http_upstream_rr_peers_t *next; > > > ngx_http_upstream_rr_peer_t *peer; > > > + > > > +#if (NGX_HTTP_UPSTREAM_ZONE) > > > + ngx_uint_t *config; > > > + ngx_http_upstream_rr_peer_t *resolve; > > > + ngx_uint_t zombies; > > > > This field is unused in open source nginx and should not be added or assigned. 
> > > > > +#endif > > > }; > > > @@ -114,6 +138,67 @@ struct ngx_http_upstream_rr_peers_s { > > > ngx_rwlock_unlock(&peer->lock); \ > > > } > > > + > > > +#define ngx_http_upstream_rr_peer_ref(peers, peer) \ > > > + (peer)->refs++; > > > + > > > + > > > +static ngx_inline void > > > +ngx_http_upstream_rr_peer_free_locked(ngx_http_upstream_rr_peers_t *peers, > > > + ngx_http_upstream_rr_peer_t *peer) > > > +{ > > > + if (peer->refs) { > > > + peer->zombie = 1; > > > + peers->zombies++; > > > + return; > > > + } > > > + > > > + ngx_slab_free_locked(peers->shpool, peer->sockaddr); > > > + ngx_slab_free_locked(peers->shpool, peer->name.data); > > > + > > > + if (peer->server.data && (peer->host == NULL || peer->host->peer == peer)) { > > > + ngx_slab_free_locked(peers->shpool, peer->server.data); > > > + } > > > + > > > +#if (NGX_HTTP_SSL) > > > + if (peer->ssl_session) { > > > + ngx_slab_free_locked(peers->shpool, peer->ssl_session); > > > + } > > > +#endif > > > + > > > + ngx_slab_free_locked(peers->shpool, peer); > > > +} > > > + > > > + > > > +static ngx_inline void > > > +ngx_http_upstream_rr_peer_free(ngx_http_upstream_rr_peers_t *peers, > > > + ngx_http_upstream_rr_peer_t *peer) > > > +{ > > > + ngx_shmtx_lock(&peers->shpool->mutex); > > > + ngx_http_upstream_rr_peer_free_locked(peers, peer); > > > + ngx_shmtx_unlock(&peers->shpool->mutex); > > > +} > > > + > > > + > > > +static ngx_inline ngx_int_t > > > +ngx_http_upstream_rr_peer_unref(ngx_http_upstream_rr_peers_t *peers, > > > + ngx_http_upstream_rr_peer_t *peer) > > > +{ > > > + peer->refs--; > > > + > > > + if (peers->shpool == NULL) { > > > + return NGX_OK; > > > + } > > > + > > > + if (peer->refs == 0 && peer->zombie) { > > > + ngx_http_upstream_rr_peer_free(peers, peer); > > > + peers->zombies--; > > > + return NGX_DONE; > > > + } > > > + > > > + return NGX_OK; > > > +} > > > + > > > #else > > > #define ngx_http_upstream_rr_peers_rlock(peers) > > > @@ -121,6 +206,8 @@ struct ngx_http_upstream_rr_peers_s 
{ > > > #define ngx_http_upstream_rr_peers_unlock(peers) > > > #define ngx_http_upstream_rr_peer_lock(peers, peer) > > > #define ngx_http_upstream_rr_peer_unlock(peers, peer) > > > +#define ngx_http_upstream_rr_peer_ref(peers, peer) > > > +#define ngx_http_upstream_rr_peer_unref(peers, peer) NGX_OK > > > #endif > > > _______________________________________________ > > > nginx-devel mailing list > > > nginx-devel at nginx.org > > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > -- > > Roman Arutyunyan > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > The rest of the comments make sense and will be addressed in the next > revision. > Thanks for the review! > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From noreply at nginx.com Wed Jul 10 22:59:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Wed, 10 Jul 2024 22:59:02 +0000 (UTC) Subject: [njs] Fixed empty labelled statement in a function. Message-ID: <20240710225902.A8EF147594@pubserv1.nginx> details: https://github.com/nginx/njs/commit/7f10ae251661656dfd4f40ac3347fba802c14399 branches: master commit: 7f10ae251661656dfd4f40ac3347fba802c14399 user: Vadim Zhestikov date: Fri, 5 Jul 2024 11:23:33 -0700 description: Fixed empty labelled statement in a function. 
--- src/njs_parser.c | 33 +++++++++++++++++++++------------ src/test/njs_unit_test.c | 16 ++++++++++++++++ 2 files changed, 37 insertions(+), 12 deletions(-) diff --git a/src/njs_parser.c b/src/njs_parser.c index dbd9169f..7eb6292e 100644 --- a/src/njs_parser.c +++ b/src/njs_parser.c @@ -6699,23 +6699,32 @@ njs_parser_labelled_statement_after(njs_parser_t *parser, { njs_int_t ret; uintptr_t unique_id; + njs_parser_node_t *node; const njs_lexer_entry_t *entry; - if (parser->node != NULL) { - /* The statement is not empty block or just semicolon. */ - - unique_id = (uintptr_t) parser->target; - entry = (const njs_lexer_entry_t *) unique_id; - - ret = njs_name_copy(parser->vm, &parser->node->name, &entry->name); - if (ret != NJS_OK) { + node = parser->node; + if (node == NULL) { + node = njs_parser_node_new(parser, NJS_TOKEN_BLOCK); + if (node == NULL) { return NJS_ERROR; } - ret = njs_label_remove(parser->vm, parser->scope, unique_id); - if (ret != NJS_OK) { - return NJS_ERROR; - } + node->token_line = token->line; + + parser->node = node; + } + + unique_id = (uintptr_t) parser->target; + entry = (const njs_lexer_entry_t *) unique_id; + + ret = njs_name_copy(parser->vm, &parser->node->name, &entry->name); + if (ret != NJS_OK) { + return NJS_ERROR; + } + + ret = njs_label_remove(parser->vm, parser->scope, unique_id); + if (ret != NJS_OK) { + return NJS_ERROR; } return njs_parser_stack_pop(parser); diff --git a/src/test/njs_unit_test.c b/src/test/njs_unit_test.c index 2f0e318c..75933665 100644 --- a/src/test/njs_unit_test.c +++ b/src/test/njs_unit_test.c @@ -3494,6 +3494,22 @@ static njs_unit_test_t njs_test[] = "} catch(e) {c = 10;}; [c, fin]"), njs_str("1,1") }, + { njs_str("function v1() {" + "function v2 () {}" + "v3:;" + "1;" + "} v1();"), + njs_str("undefined") }, + + { njs_str("function v1() {" + "function v2 () {}" + "v3:;" + "} v1();"), + njs_str("undefined") }, + + { njs_str("{v1:;}"), + njs_str("undefined") }, + /* jumping out of a nested try-catch block. 
*/ { njs_str("var r = 0; " From arut at nginx.com Thu Jul 11 06:51:16 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 11 Jul 2024 10:51:16 +0400 Subject: [PATCH 8 of 9] Upstream: disable re-resolve functionality on Windows In-Reply-To: <20240710131607.u3ox3x6ofnswbi6y@N00W24XTQX> References: <375fa42f1a6010692a87.1718317743@fedora-wsl.local> <20240710131607.u3ox3x6ofnswbi6y@N00W24XTQX> Message-ID: <20240711065116.4646u5hogvsjcmoo@N00W24XTQX> Hello, On Wed, Jul 10, 2024 at 05:16:07PM +0400, Roman Arutyunyan wrote: > Hi, > > On Thu, Jun 13, 2024 at 03:29:03PM -0700, Aleksei Bavshin wrote: > > # HG changeset patch > > # User Aleksei Bavshin > > # Date 1712181327 25200 > > # Wed Apr 03 14:55:27 2024 -0700 > > # Node ID 375fa42f1a6010692a8782c4f03c6ad465d3f7f7 > > # Parent 8c8d8118c7ac0a0426f48dbfed94e279dddff992 > > Upstream: disable re-resolve functionality on Windows. > > > > Following features are currently not implemented on Windows, making re-resolve > > functionality unsafe to use: > > > > * 'noreuse' shared zones that are re-created on each configuration reload. > > The work scheduling logic is not prepared to handle simultaneous access to > > the shared zone from multiple generations of the worker processes. > > I don't see a problem here. Could you please elaborate. > > > * 'ngx_worker' identification. > > It is possible to configure multiple worker processes on Windows, even if > > only one would actually handle the traffic. All of the worker processes are > > currently identified as process 0, breaking scheduling and locking of the > > resolver tasks. > > This can be fixed. Patch attached. [..] More thoughts on this. It's known that even though nginx for Windows may be configured to have multiple workers, only one of them actually accepts connections: https://nginx.org/en/docs/windows.html This makes multi-worker Windows configuration useless. However since they are allowed, these workers at least should not crash. 
And this is where the problems with upstream resolve arise. Since ngx_worker is always zero, all workers will identify as #0 and will start upstream serevr name resolutions. With current patches this will cause worker crashes since the resolve event object is allocated in shared zone. If we remove it from the zone and allocate it locally with ngx_alloc() instead, the problems with Windows will go away. All workers will safely resolve the same names, which makes zero sense, so as having multiple workers in the first place. And I generally think it's better to allocate process-local data locally to avoid memory overuse and potential confusions. In this case my last ngx_worker patch is not needed. -- Roman Arutyunyan From arut at nginx.com Thu Jul 11 09:33:43 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 11 Jul 2024 13:33:43 +0400 Subject: [PATCH]HTTP/2 connection not properly closing during graceful shutdown In-Reply-To: <5fe2f2ca476eab484ed54c00e02f321a2b84b1bc.camel@kasei.im> References: <20240430152746.bksaeztpdhlo22uv@N00W24XTQX> <5fe2f2ca476eab484ed54c00e02f321a2b84b1bc.camel@kasei.im> Message-ID: <20240711093343.vkxjj377f4th5y2c@N00W24XTQX> Hi, On Mon, May 06, 2024 at 11:14:24AM +0800, kasei at kasei.im wrote: > Hello, > > Thanks for your confirmation and explanations. > > The following is a modified patch. In my test it would send GOAWAY(on > stream id 0), same as the previous one, but call > ngx_http_v2_finalize_connection instead of set c->close. > > # HG changeset patch > # User Kasei Wang > # Date 1714965008 -28800 > # Mon May 06 11:10:08 2024 +0800 > # Branch help > # Node ID 70b6b6b69e6fd3a0d03de004acf45bad16b03a9c > # Parent 8618e4d900cc71082fbe7dc72af087937d64faf5 > HTTP/2: close http2 connections initialized during graceful shutdown. I'd remove 'http2' to fulfill the 67 character restriction. > In some rare cases, a HTTP/2 connections can be initialized during a > graceful shutdown. 
Now close such an connection to avoid unexcepted > delays in the graceful shutdown. > > diff -r 8618e4d900cc -r 70b6b6b69e6f src/http/v2/ngx_http_v2.c > --- a/src/http/v2/ngx_http_v2.c Tue Apr 16 18:27:50 2024 +0400 > +++ b/src/http/v2/ngx_http_v2.c Mon May 06 11:10:08 2024 +0800 > @@ -304,6 +304,11 @@ > c->idle = 1; > ngx_reusable_connection(c, 0); > > + if (ngx_exiting) { > + ngx_http_v2_finalize_connection(h2c, NGX_HTTP_V2_NO_ERROR); > + return; > + } I'd move this up a few lines. > + > if (c->buffer) { > p = c->buffer->pos; > end = c->buffer->last; Otherwise looks fine, thanks. And sorry for the delay. -- Roman Arutyunyan From pluknet at nginx.com Thu Jul 11 12:12:53 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 11 Jul 2024 16:12:53 +0400 Subject: [PATCH] Stream: allow servers with no handler In-Reply-To: <252582428b60f3422aa3.1719495238@arut-laptop> References: <252582428b60f3422aa3.1719495238@arut-laptop> Message-ID: <7D3234B6-B6D2-4DD7-AE08-23BA471819BD@nginx.com> > On 27 Jun 2024, at 17:33, Roman Arutyunyan wrote: > > # HG changeset patch > # User Roman Arutyunyan > # Date 1719494996 -14400 > # Thu Jun 27 17:29:56 2024 +0400 > # Node ID 252582428b60f3422aa3b25dac8cca94edd43c34 > # Parent 4fbc38ad3c8a8ed7986aaaa76b8aceda5ed70ef3 > Stream: allow servers with no handler. > > Previously handlers were mandatory. However they are not always needed. > For example, a server configured with ssl_reject_handshake does not need a > handler. Such servers required a fake handler to pass the check. Now handler > absence check is moved to runtime. If handler is missing, the connection is > closed with 500 code. 
> > diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c > --- a/src/stream/ngx_stream_core_module.c > +++ b/src/stream/ngx_stream_core_module.c > @@ -458,6 +458,13 @@ ngx_stream_core_content_phase(ngx_stream > return NGX_OK; > } > > + if (cscf->handler == NULL) { > + ngx_log_debug0(NGX_LOG_DEBUG_STREAM, c->log, 0, > + "no handler for server"); > + ngx_stream_finalize_session(s, NGX_STREAM_INTERNAL_SERVER_ERROR); > + return NGX_OK; > + } > + > cscf->handler(s); > > return NGX_OK; > @@ -734,13 +741,6 @@ ngx_stream_core_merge_srv_conf(ngx_conf_ > conf->resolver = prev->resolver; > } > > - if (conf->handler == NULL) { > - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, > - "no handler for server in %s:%ui", > - conf->file_name, conf->line); > - return NGX_CONF_ERROR; > - } > - > if (conf->error_log == NULL) { > if (prev->error_log) { > conf->error_log = prev->error_log; Looks good. -- Sergey Kandaurov From arut at nginx.com Thu Jul 11 14:55:08 2024 From: arut at nginx.com (=?utf-8?q?Roman_Arutyunyan?=) Date: Thu, 11 Jul 2024 14:55:08 +0000 Subject: [nginx] Stream: allow servers with no handler. Message-ID: details: https://hg.nginx.org/nginx/rev/072ca4906154 branches: changeset: 9269:072ca4906154 user: Roman Arutyunyan date: Thu Jun 27 17:29:56 2024 +0400 description: Stream: allow servers with no handler. Previously handlers were mandatory. However they are not always needed. For example, a server configured with ssl_reject_handshake does not need a handler. Such servers required a fake handler to pass the check. Now handler absence check is moved to runtime. If handler is missing, the connection is closed with 500 code. 
diffstat: src/stream/ngx_stream_core_module.c | 14 +++++++------- 1 files changed, 7 insertions(+), 7 deletions(-) diffs (31 lines): diff -r 02e9411009b9 -r 072ca4906154 src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c Tue May 28 17:22:30 2024 +0400 +++ b/src/stream/ngx_stream_core_module.c Thu Jun 27 17:29:56 2024 +0400 @@ -458,6 +458,13 @@ ngx_stream_core_content_phase(ngx_stream return NGX_OK; } + if (cscf->handler == NULL) { + ngx_log_debug0(NGX_LOG_DEBUG_STREAM, c->log, 0, + "no handler for server"); + ngx_stream_finalize_session(s, NGX_STREAM_INTERNAL_SERVER_ERROR); + return NGX_OK; + } + cscf->handler(s); return NGX_OK; @@ -734,13 +741,6 @@ ngx_stream_core_merge_srv_conf(ngx_conf_ conf->resolver = prev->resolver; } - if (conf->handler == NULL) { - ngx_log_error(NGX_LOG_EMERG, cf->log, 0, - "no handler for server in %s:%ui", - conf->file_name, conf->line); - return NGX_CONF_ERROR; - } - if (conf->error_log == NULL) { if (prev->error_log) { conf->error_log = prev->error_log; From a.bavshin at nginx.com Thu Jul 11 16:25:43 2024 From: a.bavshin at nginx.com (Aleksei Bavshin) Date: Thu, 11 Jul 2024 09:25:43 -0700 Subject: [PATCH 8 of 9] Upstream: disable re-resolve functionality on Windows In-Reply-To: <20240710131607.u3ox3x6ofnswbi6y@N00W24XTQX> References: <375fa42f1a6010692a87.1718317743@fedora-wsl.local> <20240710131607.u3ox3x6ofnswbi6y@N00W24XTQX> Message-ID: <9265e7d8-d7e9-43e4-a936-2aea3acb8f3a@nginx.com> On 7/10/2024 6:16 AM, Roman Arutyunyan wrote: > Hi, > > On Thu, Jun 13, 2024 at 03:29:03PM -0700, Aleksei Bavshin wrote: >> # HG changeset patch >> # User Aleksei Bavshin >> # Date 1712181327 25200 >> # Wed Apr 03 14:55:27 2024 -0700 >> # Node ID 375fa42f1a6010692a8782c4f03c6ad465d3f7f7 >> # Parent 8c8d8118c7ac0a0426f48dbfed94e279dddff992 >> Upstream: disable re-resolve functionality on Windows. 
>> >> Following features are currently not implemented on Windows, making re-resolve >> functionality unsafe to use: >> >> * 'noreuse' shared zones that are re-created on each configuration reload. >> The work scheduling logic is not prepared to handle simultaneous access to >> the shared zone from multiple generations of the worker processes. > > I don't see a problem here. Could you please elaborate. Now I recall that I could've been running a modified version with different handling of `noreuse` when I wrote that. The problem is still valid; assuming that `noreuse` is simply ignored on Windows, we could've get the following behavior: A new cycle is created on configuration reload, and an existing shared zone is referenced by the new cycle. Depending on how we handle that, `ngx_http_upstream_init_zone` in the new cycle either will find an existing set of peers (still actively used by a previous cycle) and overwrite host->event fields with worker-local pointers, or will allocate a new `ngx_http_upstream_rr_peers_t` and eventually exhausts the zone memory. Thankfully, neither option is possible and `noreuse` does something on Windows platform: an upstream zone in the configuration will completely break configuration reload with 2024/07/10 13:13:12 [alert] 41332#45116: MapViewOfFileEx(65536, 2EFE0000) of file mapping "upstream_zone" failed (487: Attempt to access invalid address) What's happening there is that we see the 'noreuse' flag and attempt to allocate a new shared zone. Due to a platform-specific implementation detail, we get `ERROR_ALREADY_EXISTS` and proceed with the existing file mapping object mapped at a different address. Next, the code added in af7eba90645d will notice that the newly mapped address does not match the expected address of an existing zone and will attempt to remap it at the original address via ngx_shm_remap. 
As we're in the master process and the original mapping still exists at that address, `MapViewOfFileEx` would fail and abort the configuration reload. Note that you don't even need the patches in this series to reproduce that. *** Overall, I'm convinced that the problem is not in the resolver but in the `noreuse` zone type we use in upstreams. And we already allow these on Windows, so the ship has sailed. I saw a previous idea to resolve that by allocating `noreuse` zones with a name unique to the config generation and tried implementing that, but it wasn't beautiful. Thus, I'm retracting this patch. Let's just update the Windows platform limitations in the documentation instead. > >> * 'ngx_worker' identification. >> It is possible to configure multiple worker processes on Windows, even if >> only one would actually handle the traffic. All of the worker processes are >> currently identified as process 0, breaking scheduling and locking of the >> resolver tasks. > > This can be fixed. Patch attached. Note that the patch is insufficient, as it does not handle respawning. We should store the ngx_worker value in the ngx_processes and restore the variable when necessary, or find an alternative way of passing it to the worker process. My WIP patch with `cycle->generation` support had an attempt to solve that with making environment snapshots (GetEnvironmentStrings) and passing these to the CreateProcess, but that quickly escalated into a ton of unrelated fixes and I decided to stop. 
> >> diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c >> --- a/src/http/ngx_http_upstream.c >> +++ b/src/http/ngx_http_upstream.c >> @@ -6327,7 +6327,7 @@ ngx_http_upstream_server(ngx_conf_t *cf, >> continue; >> } >> >> -#if (NGX_HTTP_UPSTREAM_ZONE) >> +#if (NGX_HTTP_UPSTREAM_ZONE && !(NGX_WIN32)) >> if (ngx_strcmp(value[i].data, "resolve") == 0) { >> resolve = 1; >> continue; >> diff --git a/src/stream/ngx_stream_upstream.c b/src/stream/ngx_stream_upstream.c >> --- a/src/stream/ngx_stream_upstream.c >> +++ b/src/stream/ngx_stream_upstream.c >> @@ -545,7 +545,7 @@ ngx_stream_upstream_server(ngx_conf_t *c >> continue; >> } >> >> -#if (NGX_STREAM_UPSTREAM_ZONE) >> +#if (NGX_STREAM_UPSTREAM_ZONE && !(NGX_WIN32)) >> if (ngx_strcmp(value[i].data, "resolve") == 0) { >> resolve = 1; >> continue; >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> https://mailman.nginx.org/mailman/listinfo/nginx-devel > > -- > Roman Arutyunyan > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From a.bavshin at nginx.com Thu Jul 11 16:40:57 2024 From: a.bavshin at nginx.com (Aleksei Bavshin) Date: Thu, 11 Jul 2024 09:40:57 -0700 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: <20240709162209.jxzzleb6hnjlx3r5@N00W24XTQX> References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> <20240709162209.jxzzleb6hnjlx3r5@N00W24XTQX> Message-ID: On 7/9/2024 9:22 AM, Roman Arutyunyan wrote: > Hi, > > On Mon, Jul 08, 2024 at 06:20:58PM +0400, Roman Arutyunyan wrote: >> Hi, >> >> On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: >>> # HG changeset patch >>> # User Ruslan Ermilov >>> # Date 1392462754 -14400 >>> # Sat Feb 15 15:12:34 2014 +0400 >>> # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 >>> # Parent 
02e9411009b987f408214ab4a8b6b6093f843bcd >>> Upstream: re-resolvable servers. >>> >>> Specifying the upstream server by a hostname together with the >>> "resolve" parameter will make the hostname to be periodically >>> resolved, and upstream servers added/removed as necessary. >>> >>> This requires a "resolver" at the "http" configuration block. >>> >>> The "resolver_timeout" parameter also affects when the failed >>> DNS requests will be attempted again. Responses with NXDOMAIN >>> will be attempted again in 10 seconds. >>> >>> Upstream has a configuration generation number that is incremented each >>> time servers are added/removed to the primary/backup list. This number >>> is remembered by the peer.init method, and if peer.get detects a change >>> in configuration, it returns NGX_BUSY. >>> >>> Each server has a reference counter. It is incremented by peer.get and >>> decremented by peer.free. When a server is removed, it is removed from >>> the list of servers and is marked as "zombie". The memory allocated by >>> a zombie peer is freed only when its reference count becomes zero. >>> >>> Re-resolvable servers utilize timers that also hold a reference. A >>> reference is also held while upstream keepalive caches an idle >>> connection. >>> >>> Co-authored-by: Roman Arutyunyan >>> Co-authored-by: Sergey Kandaurov >>> Co-authored-by: Vladimir Homutov > > [..] > >>> diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h >>> --- a/src/http/ngx_http_upstream_round_robin.h >>> +++ b/src/http/ngx_http_upstream_round_robin.h >>> @@ -14,8 +14,23 @@ >>> #include >>> >>> >>> +typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; >>> typedef struct ngx_http_upstream_rr_peer_s ngx_http_upstream_rr_peer_t; >>> >>> + >>> +#if (NGX_HTTP_UPSTREAM_ZONE) >>> + >>> +typedef struct { >>> + ngx_event_t event; /* must be first */ >>> + ngx_uint_t worker; > > Missed this last time. 
This field should be removed since all resolving is in > worker #0. Unfortunately, that would break the ABI compatibility between OSS and Plus. Replacing the field with yet another NGX_COMPAT_BEGIN isn't any better than leaving it in the opensource code. > >>> + ngx_str_t name; >>> + ngx_http_upstream_rr_peers_t *peers; >>> + ngx_http_upstream_rr_peer_t *peer; >>> +} ngx_http_upstream_host_t; >>> + >>> +#endif >>> + >>> + >>> struct ngx_http_upstream_rr_peer_s { >>> struct sockaddr *sockaddr; >>> socklen_t socklen; >>> @@ -46,7 +61,12 @@ struct ngx_http_upstream_rr_peer_s { >>> #endif >>> >>> #if (NGX_HTTP_UPSTREAM_ZONE) >>> + unsigned zombie:1; >> >> I suggest declaring this as in other similar places: >> >> ngx_uint_t zombie; /* unsigned zombie:1; */ >> >>> + >>> ngx_atomic_t lock; >>> + ngx_uint_t id; >> >> This field is not used in open source nginx and should not be added or assigned. >> >>> + ngx_uint_t refs; >>> + ngx_http_upstream_host_t *host; >>> #endif >>> >>> ngx_http_upstream_rr_peer_t *next; > > [..] > > -- > Roman Arutyunyan > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From pluknet at nginx.com Thu Jul 11 16:51:54 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 11 Jul 2024 20:51:54 +0400 Subject: [PATCH]HTTP/2 connection not properly closing during graceful shutdown In-Reply-To: <20240711093343.vkxjj377f4th5y2c@N00W24XTQX> References: <20240430152746.bksaeztpdhlo22uv@N00W24XTQX> <5fe2f2ca476eab484ed54c00e02f321a2b84b1bc.camel@kasei.im> <20240711093343.vkxjj377f4th5y2c@N00W24XTQX> Message-ID: > On 11 Jul 2024, at 13:33, Roman Arutyunyan wrote: > > Hi, > > On Mon, May 06, 2024 at 11:14:24AM +0800, kasei at kasei.im wrote: >> Hello, >> >> Thanks for your confirmation and explanations. >> >> The following is a modified patch. 
In my test it would send GOAWAY(on >> stream id 0), same as the previous one, but call >> ngx_http_v2_finalize_connection instead of set c->close. >> >> # HG changeset patch >> # User Kasei Wang >> # Date 1714965008 -28800 >> # Mon May 06 11:10:08 2024 +0800 >> # Branch help >> # Node ID 70b6b6b69e6fd3a0d03de004acf45bad16b03a9c >> # Parent 8618e4d900cc71082fbe7dc72af087937d64faf5 >> HTTP/2: close http2 connections initialized during graceful shutdown. > > I'd remove 'http2' to fulfill the 67 character restriction. > >> In some rare cases, an HTTP/2 connection can be initialized during a >> graceful shutdown. Now close such a connection to avoid unexpected >> delays in the graceful shutdown. >> >> diff -r 8618e4d900cc -r 70b6b6b69e6f src/http/v2/ngx_http_v2.c >> --- a/src/http/v2/ngx_http_v2.c Tue Apr 16 18:27:50 2024 +0400 >> +++ b/src/http/v2/ngx_http_v2.c Mon May 06 11:10:08 2024 +0800 >> @@ -304,6 +304,11 @@ >> c->idle = 1; >> ngx_reusable_connection(c, 0); >> >> + if (ngx_exiting) { >> + ngx_http_v2_finalize_connection(h2c, NGX_HTTP_V2_NO_ERROR); >> + return; >> + } > > I'd move this up a few lines. I think the block can be moved between setting c->data, as this is required for lingering close processing, and setting read and write handlers, as they're useless because connection is switched to lingering close mode as part of finalization. > >> + >> if (c->buffer) { >> p = c->buffer->pos; >> end = c->buffer->last; > > Otherwise looks fine, thanks. And sorry for the delay. 
> -- Sergey Kandaurov From mat999 at gmail.com Thu Jul 11 19:15:26 2024 From: mat999 at gmail.com (Mathew Heard) Date: Fri, 12 Jul 2024 05:15:26 +1000 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> <20240709162209.jxzzleb6hnjlx3r5@N00W24XTQX> Message-ID: Do you happen to know if there remains any gap in obvious capability provided by a module like jdomain compared to this? Perhaps it would be worth checking to ensure nothing obvious is not implemented? The one that I see is the ability to control if a resolution is IPv4, IPv6 or mixed. Is this something that would be useful for this feature? On Fri, 12 Jul 2024 at 02:40, Aleksei Bavshin wrote: > On 7/9/2024 9:22 AM, Roman Arutyunyan wrote: > > Hi, > > > > On Mon, Jul 08, 2024 at 06:20:58PM +0400, Roman Arutyunyan wrote: > >> Hi, > >> > >> On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: > >>> # HG changeset patch > >>> # User Ruslan Ermilov > >>> # Date 1392462754 -14400 > >>> # Sat Feb 15 15:12:34 2014 +0400 > >>> # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 > >>> # Parent 02e9411009b987f408214ab4a8b6b6093f843bcd > >>> Upstream: re-resolvable servers. > >>> > >>> Specifying the upstream server by a hostname together with the > >>> "resolve" parameter will make the hostname to be periodically > >>> resolved, and upstream servers added/removed as necessary. > >>> > >>> This requires a "resolver" at the "http" configuration block. > >>> > >>> The "resolver_timeout" parameter also affects when the failed > >>> DNS requests will be attempted again. Responses with NXDOMAIN > >>> will be attempted again in 10 seconds. > >>> > >>> Upstream has a configuration generation number that is incremented each > >>> time servers are added/removed to the primary/backup list. 
This number > >>> is remembered by the peer.init method, and if peer.get detects a change > >>> in configuration, it returns NGX_BUSY. > >>> > >>> Each server has a reference counter. It is incremented by peer.get and > >>> decremented by peer.free. When a server is removed, it is removed from > >>> the list of servers and is marked as "zombie". The memory allocated by > >>> a zombie peer is freed only when its reference count becomes zero. > >>> > >>> Re-resolvable servers utilize timers that also hold a reference. A > >>> reference is also held while upstream keepalive caches an idle > >>> connection. > >>> > >>> Co-authored-by: Roman Arutyunyan > >>> Co-authored-by: Sergey Kandaurov > >>> Co-authored-by: Vladimir Homutov > > > > [..] > > > >>> diff --git a/src/http/ngx_http_upstream_round_robin.h > b/src/http/ngx_http_upstream_round_robin.h > >>> --- a/src/http/ngx_http_upstream_round_robin.h > >>> +++ b/src/http/ngx_http_upstream_round_robin.h > >>> @@ -14,8 +14,23 @@ > >>> #include > >>> > >>> > >>> +typedef struct ngx_http_upstream_rr_peers_s > ngx_http_upstream_rr_peers_t; > >>> typedef struct ngx_http_upstream_rr_peer_s > ngx_http_upstream_rr_peer_t; > >>> > >>> + > >>> +#if (NGX_HTTP_UPSTREAM_ZONE) > >>> + > >>> +typedef struct { > >>> + ngx_event_t event; /* must be first */ > >>> + ngx_uint_t worker; > > > > Missed this last time. This field should be removed since all resolving > is in > > worker #0. > > Unfortunately, that would break the ABI compatibility between OSS and > Plus. Replacing the field with yet another NGX_COMPAT_BEGIN isn't any > better than leaving it in the opensource code. 
> > > > >>> + ngx_str_t name; > >>> + ngx_http_upstream_rr_peers_t *peers; > >>> + ngx_http_upstream_rr_peer_t *peer; > >>> +} ngx_http_upstream_host_t; > >>> + > >>> +#endif > >>> + > >>> + > >>> struct ngx_http_upstream_rr_peer_s { > >>> struct sockaddr *sockaddr; > >>> socklen_t socklen; > >>> @@ -46,7 +61,12 @@ struct ngx_http_upstream_rr_peer_s { > >>> #endif > >>> > >>> #if (NGX_HTTP_UPSTREAM_ZONE) > >>> + unsigned zombie:1; > >> > >> I suggest declaring this as in other similar places: > >> > >> ngx_uint_t zombie; /* unsigned zombie:1; > */ > >> > >>> + > >>> ngx_atomic_t lock; > >>> + ngx_uint_t id; > >> > >> This field is not used in open source nginx and should not be added or > assigned. > >> > >>> + ngx_uint_t refs; > >>> + ngx_http_upstream_host_t *host; > >>> #endif > >>> > >>> ngx_http_upstream_rr_peer_t *next; > > > > [..] > > > > -- > > Roman Arutyunyan > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From a.bavshin at nginx.com Thu Jul 11 19:42:51 2024 From: a.bavshin at nginx.com (Aleksei Bavshin) Date: Thu, 11 Jul 2024 12:42:51 -0700 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> <20240709162209.jxzzleb6hnjlx3r5@N00W24XTQX> Message-ID: On 7/11/2024 12:15 PM, Mathew Heard wrote: > Do you happen to know if there remains any gap in obvious capability > provided by a module like jdomain compared to this? An obvious difference is that the jdomain directive performs resolution on demand instead of periodic polling, but that's a design choice rather than a feature gap. 
Other than that I don't see anything missing. > > Perhaps it would be worth checking to ensure nothing obvious is not > implemented? > > The one that I see is the ability to control if a resolution is IPv4, > IPv6 or mixed. Is this something that would be useful for this feature? That is already implemented in the resolver directive. If the http block scope is too large, one of the patches in the series allows configuring resolver per upstream. upstream backend { zone upstream_dynamic 64k; resolver 127.0.0.1 ipv4=off ipv6=on; server example.com resolve; }; > > On Fri, 12 Jul 2024 at 02:40, Aleksei Bavshin > wrote: > > On 7/9/2024 9:22 AM, Roman Arutyunyan wrote: > > Hi, > > > > On Mon, Jul 08, 2024 at 06:20:58PM +0400, Roman Arutyunyan wrote: > >> Hi, > >> > >> On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: > >>> # HG changeset patch > >>> # User Ruslan Ermilov > > >>> # Date 1392462754 -14400 > >>> #      Sat Feb 15 15:12:34 2014 +0400 > >>> # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 > >>> # Parent  02e9411009b987f408214ab4a8b6b6093f843bcd > >>> Upstream: re-resolvable servers. > >>> > >>> Specifying the upstream server by a hostname together with the > >>> "resolve" parameter will make the hostname to be periodically > >>> resolved, and upstream servers added/removed as necessary. > >>> > >>> This requires a "resolver" at the "http" configuration block. > >>> > >>> The "resolver_timeout" parameter also affects when the failed > >>> DNS requests will be attempted again.  Responses with NXDOMAIN > >>> will be attempted again in 10 seconds. > >>> > >>> Upstream has a configuration generation number that is > incremented each > >>> time servers are added/removed to the primary/backup list. > This number > >>> is remembered by the peer.init method, and if peer.get detects > a change > >>> in configuration, it returns NGX_BUSY. > >>> > >>> Each server has a reference counter.  It is incremented by > peer.get and > >>> decremented by peer.free. 
 When a server is removed, it is > removed from > >>> the list of servers and is marked as "zombie".  The memory > allocated by > >>> a zombie peer is freed only when its reference count becomes zero. > >>> > >>> Re-resolvable servers utilize timers that also hold a reference.  A > >>> reference is also held while upstream keepalive caches an idle > >>> connection. > >>> > >>> Co-authored-by: Roman Arutyunyan > > >>> Co-authored-by: Sergey Kandaurov > > >>> Co-authored-by: Vladimir Homutov > > > > > [..] > > > >>> diff --git a/src/http/ngx_http_upstream_round_robin.h > b/src/http/ngx_http_upstream_round_robin.h > >>> --- a/src/http/ngx_http_upstream_round_robin.h > >>> +++ b/src/http/ngx_http_upstream_round_robin.h > >>> @@ -14,8 +14,23 @@ > >>>   #include > >>> > >>> > >>> +typedef struct ngx_http_upstream_rr_peers_s > ngx_http_upstream_rr_peers_t; > >>>   typedef struct ngx_http_upstream_rr_peer_s >  ngx_http_upstream_rr_peer_t; > >>> > >>> + > >>> +#if (NGX_HTTP_UPSTREAM_ZONE) > >>> + > >>> +typedef struct { > >>> +    ngx_event_t                     event;         /* must be > first */ > >>> +    ngx_uint_t                      worker; > > > > Missed this last time.  This field should be removed since all > resolving is in > > worker #0. > > Unfortunately, that would break the ABI compatibility between OSS and > Plus. Replacing the field with yet another NGX_COMPAT_BEGIN isn't any > better than leaving it in the opensource code. 
> > > > >>> +    ngx_str_t                       name; > >>> +    ngx_http_upstream_rr_peers_t   *peers; > >>> +    ngx_http_upstream_rr_peer_t    *peer; > >>> +} ngx_http_upstream_host_t; > >>> + > >>> +#endif > >>> + > >>> + > >>>   struct ngx_http_upstream_rr_peer_s { > >>>       struct sockaddr                *sockaddr; > >>>       socklen_t                       socklen; > >>> @@ -46,7 +61,12 @@ struct ngx_http_upstream_rr_peer_s { > >>>   #endif > >>> > >>>   #if (NGX_HTTP_UPSTREAM_ZONE) > >>> +    unsigned                        zombie:1; > >> > >> I suggest declaring this as in other similar places: > >> > >>         ngx_uint_t                      zombie; /* unsigned > zombie:1; */ > >> > >>> + > >>>       ngx_atomic_t                    lock; > >>> +    ngx_uint_t                      id; > >> > >> This field is not used in open source nginx and should not be > added or assigned. > >> > >>> +    ngx_uint_t                      refs; > >>> +    ngx_http_upstream_host_t       *host; > >>>   #endif > >>> > >>>       ngx_http_upstream_rr_peer_t    *next; > > > > [..] 
> > > > -- > > Roman Arutyunyan > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From mat999 at gmail.com Thu Jul 11 19:58:02 2024 From: mat999 at gmail.com (Mathew Heard) Date: Fri, 12 Jul 2024 05:58:02 +1000 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> <20240709162209.jxzzleb6hnjlx3r5@N00W24XTQX> Message-ID: On Fri, 12 Jul 2024 at 05:42, Aleksei Bavshin wrote: > > On 7/11/2024 12:15 PM, Mathew Heard wrote: > > Do you happen to know if there remains any gap in obvious capability > > provided by a module like jdomain compared to this? > > An obvious difference is that the jdomain directive performs resolution > on demand instead of periodic polling, but that's a design choice rather > than a feature gap. Other that that I don't see anything missing. > Thinking about it thats actually a surprising feature (rather than minor implementation detail). We don't want the service to ever fail to start, or to take any longer than it has to start. For this reason we use jdomain (and a local DNS cache). By requesting on the first request jdomain allows the server to start without resolution delays in all cases. Imagine the configuration may contain xxxx.yyyy.com which takes a prolonged time to resolve (for any reason) we would rather that the server is able to start and for requests to that server block fail until the resolution of xxxx.yyyy.com becomes healthy. 
However if the resolution of xxxx.yyyy.com resolves quickly (healthy) no interruption on start occurs. Beyond the time it takes to resolve the name, which with the aid of a DNS cache is immediately available on server reload. AFAIK is this a feature gap with re-resolution? I guess this is not really a feature gap as when using domains in a static configuration within nginx the behaviour is already like this. > > > > Perhaps it would be worth checking to ensure nothing obvious is not > > implemented? > > > > The one that I see is the ability to control if a resolution is IPv4, > > IPv6 or mixed. Is this something that would be useful for this feature? > > That is already implemented in the resolver directive. If the http block > scope is too large, one of the patches in the series allows configuring > resolver per upstream. > > upstream backend { > zone upstream_dynamic 64k; > resolver 127.0.0.1 ipv4=off ipv6=on; > > server example.com resolve; > }; > Thank you for your eyes. I was unaware of upstream level resolver configuration. That does indeed look like it would provide the same capability. > > > > On Fri, 12 Jul 2024 at 02:40, Aleksei Bavshin > > wrote: > > > > On 7/9/2024 9:22 AM, Roman Arutyunyan wrote: > > > Hi, > > > > > > On Mon, Jul 08, 2024 at 06:20:58PM +0400, Roman Arutyunyan wrote: > > >> Hi, > > >> > > >> On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: > > >>> # HG changeset patch > > >>> # User Ruslan Ermilov > > > >>> # Date 1392462754 -14400 > > >>> # Sat Feb 15 15:12:34 2014 +0400 > > >>> # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 > > >>> # Parent 02e9411009b987f408214ab4a8b6b6093f843bcd > > >>> Upstream: re-resolvable servers. > > >>> > > >>> Specifying the upstream server by a hostname together with the > > >>> "resolve" parameter will make the hostname to be periodically > > >>> resolved, and upstream servers added/removed as necessary. > > >>> > > >>> This requires a "resolver" at the "http" configuration block. 
> > >>> > > >>> The "resolver_timeout" parameter also affects when the failed > > >>> DNS requests will be attempted again. Responses with NXDOMAIN > > >>> will be attempted again in 10 seconds. > > >>> > > >>> Upstream has a configuration generation number that is > > incremented each > > >>> time servers are added/removed to the primary/backup list. > > This number > > >>> is remembered by the peer.init method, and if peer.get detects > > a change > > >>> in configuration, it returns NGX_BUSY. > > >>> > > >>> Each server has a reference counter. It is incremented by > > peer.get and > > >>> decremented by peer.free. When a server is removed, it is > > removed from > > >>> the list of servers and is marked as "zombie". The memory > > allocated by > > >>> a zombie peer is freed only when its reference count becomes zero. > > >>> > > >>> Re-resolvable servers utilize timers that also hold a reference. A > > >>> reference is also held while upstream keepalive caches an idle > > >>> connection. > > >>> > > >>> Co-authored-by: Roman Arutyunyan > > > > >>> Co-authored-by: Sergey Kandaurov > > > > >>> Co-authored-by: Vladimir Homutov > > > > > > > > [..] > > > > > >>> diff --git a/src/http/ngx_http_upstream_round_robin.h > > b/src/http/ngx_http_upstream_round_robin.h > > >>> --- a/src/http/ngx_http_upstream_round_robin.h > > >>> +++ b/src/http/ngx_http_upstream_round_robin.h > > >>> @@ -14,8 +14,23 @@ > > >>> #include > > >>> > > >>> > > >>> +typedef struct ngx_http_upstream_rr_peers_s > > ngx_http_upstream_rr_peers_t; > > >>> typedef struct ngx_http_upstream_rr_peer_s > > ngx_http_upstream_rr_peer_t; > > >>> > > >>> + > > >>> +#if (NGX_HTTP_UPSTREAM_ZONE) > > >>> + > > >>> +typedef struct { > > >>> + ngx_event_t event; /* must be > > first */ > > >>> + ngx_uint_t worker; > > > > > > Missed this last time. This field should be removed since all > > resolving is in > > > worker #0. > > > > Unfortunately, that would break the ABI compatibility between OSS and > > Plus. 
Replacing the field with yet another NGX_COMPAT_BEGIN isn't any > > better than leaving it in the opensource code. > > > > > > > >>> + ngx_str_t name; > > >>> + ngx_http_upstream_rr_peers_t *peers; > > >>> + ngx_http_upstream_rr_peer_t *peer; > > >>> +} ngx_http_upstream_host_t; > > >>> + > > >>> +#endif > > >>> + > > >>> + > > >>> struct ngx_http_upstream_rr_peer_s { > > >>> struct sockaddr *sockaddr; > > >>> socklen_t socklen; > > >>> @@ -46,7 +61,12 @@ struct ngx_http_upstream_rr_peer_s { > > >>> #endif > > >>> > > >>> #if (NGX_HTTP_UPSTREAM_ZONE) > > >>> + unsigned zombie:1; > > >> > > >> I suggest declaring this as in other similar places: > > >> > > >> ngx_uint_t zombie; /* unsigned > > zombie:1; */ > > >> > > >>> + > > >>> ngx_atomic_t lock; > > >>> + ngx_uint_t id; > > >> > > >> This field is not used in open source nginx and should not be > > added or assigned. > > >> > > >>> + ngx_uint_t refs; > > >>> + ngx_http_upstream_host_t *host; > > >>> #endif > > >>> > > >>> ngx_http_upstream_rr_peer_t *next; > > > > > > [..] 
> > > > > > -- > > > Roman Arutyunyan > > > _______________________________________________ > > > nginx-devel mailing list > > > nginx-devel at nginx.org > > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > > > > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From a.bavshin at nginx.com Thu Jul 11 20:32:52 2024 From: a.bavshin at nginx.com (Aleksei Bavshin) Date: Thu, 11 Jul 2024 13:32:52 -0700 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> <20240709162209.jxzzleb6hnjlx3r5@N00W24XTQX> Message-ID: On 7/11/2024 12:58 PM, Mathew Heard wrote: > On Fri, 12 Jul 2024 at 05:42, Aleksei Bavshin wrote: >> >> On 7/11/2024 12:15 PM, Mathew Heard wrote: >>> Do you happen to know if there remains any gap in obvious capability >>> provided by a module like jdomain compared to this? >> >> An obvious difference is that the jdomain directive performs resolution >> on demand instead of periodic polling, but that's a design choice rather >> than a feature gap. Other that that I don't see anything missing. >> > > Thinking about it thats actually a surprising feature (rather than > minor implementation detail). We don't want the service to ever fail > to start, or to take any longer than it has to start. For this reason > we use jdomain (and a local DNS cache). By requesting on the first > request jdomain allows the server to start without resolution delays > in all cases. 
> > Imagine the configuration may contain xxxx.yyyy.com which takes a > prolonged time to resolve (for any reason) we would rather that the > server is able to start and for requests to that server block fail > until the resolution of xxxx.yyyy.com becomes healthy. > > However if the resolution of xxxx.yyyy.com resolves quickly (healthy) > no interruption on start occurs. Beyond the time it takes to resolve > the name, which with the aid of a DNS cache is immediately available > on server reload. > > AFAIK is this a feature gap with re-resolution? > > I guess this is not really a feature gap as when using domains in a > static configuration within nginx the behaviour is already like this. `server ... resolve;` actually behaves quite similar to what you describe: name resolution is deferred to runtime and doesn't block or delay the startup. The main difference is that the resolution starts as soon as the worker processes are up instead of waiting for the first request, and we update the expired entries in the background according to the response TTL. > >>> >>> Perhaps it would be worth checking to ensure nothing obvious is not >>> implemented? >>> >>> The one that I see is the ability to control if a resolution is IPv4, >>> IPv6 or mixed. Is this something that would be useful for this feature? >> >> That is already implemented in the resolver directive. If the http block >> scope is too large, one of the patches in the series allows configuring >> resolver per upstream. >> >> upstream backend { >> zone upstream_dynamic 64k; >> resolver 127.0.0.1 ipv4=off ipv6=on; >> >> server example.com resolve; >> }; >> > > Thank you for your eyes. I was unaware of upstream level resolver > configuration. That does indeed look like it would provide the same > capability. 
> > >>> >>> On Fri, 12 Jul 2024 at 02:40, Aleksei Bavshin >> > wrote: >>> >>> On 7/9/2024 9:22 AM, Roman Arutyunyan wrote: >>> > Hi, >>> > >>> > On Mon, Jul 08, 2024 at 06:20:58PM +0400, Roman Arutyunyan wrote: >>> >> Hi, >>> >> >>> >> On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: >>> >>> # HG changeset patch >>> >>> # User Ruslan Ermilov > >>> >>> # Date 1392462754 -14400 >>> >>> # Sat Feb 15 15:12:34 2014 +0400 >>> >>> # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 >>> >>> # Parent 02e9411009b987f408214ab4a8b6b6093f843bcd >>> >>> Upstream: re-resolvable servers. >>> >>> >>> >>> Specifying the upstream server by a hostname together with the >>> >>> "resolve" parameter will make the hostname to be periodically >>> >>> resolved, and upstream servers added/removed as necessary. >>> >>> >>> >>> This requires a "resolver" at the "http" configuration block. >>> >>> >>> >>> The "resolver_timeout" parameter also affects when the failed >>> >>> DNS requests will be attempted again. Responses with NXDOMAIN >>> >>> will be attempted again in 10 seconds. >>> >>> >>> >>> Upstream has a configuration generation number that is >>> incremented each >>> >>> time servers are added/removed to the primary/backup list. >>> This number >>> >>> is remembered by the peer.init method, and if peer.get detects >>> a change >>> >>> in configuration, it returns NGX_BUSY. >>> >>> >>> >>> Each server has a reference counter. It is incremented by >>> peer.get and >>> >>> decremented by peer.free. When a server is removed, it is >>> removed from >>> >>> the list of servers and is marked as "zombie". The memory >>> allocated by >>> >>> a zombie peer is freed only when its reference count becomes zero. >>> >>> >>> >>> Re-resolvable servers utilize timers that also hold a reference. A >>> >>> reference is also held while upstream keepalive caches an idle >>> >>> connection. 
>>> >>> >>> >>> Co-authored-by: Roman Arutyunyan >> > >>> >>> Co-authored-by: Sergey Kandaurov >> > >>> >>> Co-authored-by: Vladimir Homutov >> > >>> > >>> > [..] >>> > >>> >>> diff --git a/src/http/ngx_http_upstream_round_robin.h >>> b/src/http/ngx_http_upstream_round_robin.h >>> >>> --- a/src/http/ngx_http_upstream_round_robin.h >>> >>> +++ b/src/http/ngx_http_upstream_round_robin.h >>> >>> @@ -14,8 +14,23 @@ >>> >>> #include >>> >>> >>> >>> >>> >>> +typedef struct ngx_http_upstream_rr_peers_s >>> ngx_http_upstream_rr_peers_t; >>> >>> typedef struct ngx_http_upstream_rr_peer_s >>> ngx_http_upstream_rr_peer_t; >>> >>> >>> >>> + >>> >>> +#if (NGX_HTTP_UPSTREAM_ZONE) >>> >>> + >>> >>> +typedef struct { >>> >>> + ngx_event_t event; /* must be >>> first */ >>> >>> + ngx_uint_t worker; >>> > >>> > Missed this last time. This field should be removed since all >>> resolving is in >>> > worker #0. >>> >>> Unfortunately, that would break the ABI compatibility between OSS and >>> Plus. Replacing the field with yet another NGX_COMPAT_BEGIN isn't any >>> better than leaving it in the opensource code. >>> >>> > >>> >>> + ngx_str_t name; >>> >>> + ngx_http_upstream_rr_peers_t *peers; >>> >>> + ngx_http_upstream_rr_peer_t *peer; >>> >>> +} ngx_http_upstream_host_t; >>> >>> + >>> >>> +#endif >>> >>> + >>> >>> + >>> >>> struct ngx_http_upstream_rr_peer_s { >>> >>> struct sockaddr *sockaddr; >>> >>> socklen_t socklen; >>> >>> @@ -46,7 +61,12 @@ struct ngx_http_upstream_rr_peer_s { >>> >>> #endif >>> >>> >>> >>> #if (NGX_HTTP_UPSTREAM_ZONE) >>> >>> + unsigned zombie:1; >>> >> >>> >> I suggest declaring this as in other similar places: >>> >> >>> >> ngx_uint_t zombie; /* unsigned >>> zombie:1; */ >>> >> >>> >>> + >>> >>> ngx_atomic_t lock; >>> >>> + ngx_uint_t id; >>> >> >>> >> This field is not used in open source nginx and should not be >>> added or assigned. 
>>> >> >>> >>> + ngx_uint_t refs; >>> >>> + ngx_http_upstream_host_t *host; >>> >>> #endif >>> >>> >>> >>> ngx_http_upstream_rr_peer_t *next; >>> > >>> > [..] >>> > >>> > -- >>> > Roman Arutyunyan >>> > _______________________________________________ >>> > nginx-devel mailing list >>> > nginx-devel at nginx.org >>> > https://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> https://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> https://mailman.nginx.org/mailman/listinfo/nginx-devel >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> https://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From mat999 at gmail.com Thu Jul 11 21:03:44 2024 From: mat999 at gmail.com (Mathew Heard) Date: Fri, 12 Jul 2024 07:03:44 +1000 Subject: [PATCH 1 of 9] Upstream: re-resolvable servers In-Reply-To: References: <56aeae9355df8a2ee07e.1718317736@fedora-wsl.local> <20240708142058.g6h3c275ur6pgflu@N00W24XTQX> <20240709162209.jxzzleb6hnjlx3r5@N00W24XTQX> Message-ID: Thank you Aleksi, I think you are right. Thats the only one of our use cases using jdomain that I was concerned wouldn't be met by this feature. On Fri, 12 Jul 2024 at 06:32, Aleksei Bavshin wrote: > > On 7/11/2024 12:58 PM, Mathew Heard wrote: > > On Fri, 12 Jul 2024 at 05:42, Aleksei Bavshin wrote: > >> > >> On 7/11/2024 12:15 PM, Mathew Heard wrote: > >>> Do you happen to know if there remains any gap in obvious capability > >>> provided by a module like jdomain compared to this? 
> >> > >> An obvious difference is that the jdomain directive performs resolution > >> on demand instead of periodic polling, but that's a design choice rather > >> than a feature gap. Other that that I don't see anything missing. > >> > > > > Thinking about it thats actually a surprising feature (rather than > > minor implementation detail). We don't want the service to ever fail > > to start, or to take any longer than it has to start. For this reason > > we use jdomain (and a local DNS cache). By requesting on the first > > request jdomain allows the server to start without resolution delays > > in all cases. > > > > Imagine the configuration may contain xxxx.yyyy.com which takes a > > prolonged time to resolve (for any reason) we would rather that the > > server is able to start and for requests to that server block fail > > until the resolution of xxxx.yyyy.com becomes healthy. > > > > However if the resolution of xxxx.yyyy.com resolves quickly (healthy) > > no interruption on start occurs. Beyond the time it takes to resolve > > the name, which with the aid of a DNS cache is immediately available > > on server reload. > > > > AFAIK is this a feature gap with re-resolution? > > > > I guess this is not really a feature gap as when using domains in a > > static configuration within nginx the behaviour is already like this. > > `server ... resolve;` actually behaves quite similar to what you > describe: name resolution is deferred to runtime and doesn't block or > delay the startup. > The main difference is that the resolution starts as soon as the worker > processes are up instead of waiting for the first request, and we update > the expired entries in the background according to the response TTL. > > > > >>> > >>> Perhaps it would be worth checking to ensure nothing obvious is not > >>> implemented? > >>> > >>> The one that I see is the ability to control if a resolution is IPv4, > >>> IPv6 or mixed. Is this something that would be useful for this feature? 
> >> > >> That is already implemented in the resolver directive. If the http block > >> scope is too large, one of the patches in the series allows configuring > >> resolver per upstream. > >> > >> upstream backend { > >> zone upstream_dynamic 64k; > >> resolver 127.0.0.1 ipv4=off ipv6=on; > >> > >> server example.com resolve; > >> }; > >> > > > > Thank you for your eyes. I was unaware of upstream level resolver > > configuration. That does indeed look like it would provide the same > > capability. > > > > > >>> > >>> On Fri, 12 Jul 2024 at 02:40, Aleksei Bavshin >>> > wrote: > >>> > >>> On 7/9/2024 9:22 AM, Roman Arutyunyan wrote: > >>> > Hi, > >>> > > >>> > On Mon, Jul 08, 2024 at 06:20:58PM +0400, Roman Arutyunyan wrote: > >>> >> Hi, > >>> >> > >>> >> On Thu, Jun 13, 2024 at 03:28:56PM -0700, Aleksei Bavshin wrote: > >>> >>> # HG changeset patch > >>> >>> # User Ruslan Ermilov > > >>> >>> # Date 1392462754 -14400 > >>> >>> # Sat Feb 15 15:12:34 2014 +0400 > >>> >>> # Node ID 56aeae9355df8a2ee07e21b65b6869747dd9ee45 > >>> >>> # Parent 02e9411009b987f408214ab4a8b6b6093f843bcd > >>> >>> Upstream: re-resolvable servers. > >>> >>> > >>> >>> Specifying the upstream server by a hostname together with the > >>> >>> "resolve" parameter will make the hostname to be periodically > >>> >>> resolved, and upstream servers added/removed as necessary. > >>> >>> > >>> >>> This requires a "resolver" at the "http" configuration block. > >>> >>> > >>> >>> The "resolver_timeout" parameter also affects when the failed > >>> >>> DNS requests will be attempted again. Responses with NXDOMAIN > >>> >>> will be attempted again in 10 seconds. > >>> >>> > >>> >>> Upstream has a configuration generation number that is > >>> incremented each > >>> >>> time servers are added/removed to the primary/backup list. > >>> This number > >>> >>> is remembered by the peer.init method, and if peer.get detects > >>> a change > >>> >>> in configuration, it returns NGX_BUSY. 
> >>> >>> > >>> >>> Each server has a reference counter. It is incremented by > >>> peer.get and > >>> >>> decremented by peer.free. When a server is removed, it is > >>> removed from > >>> >>> the list of servers and is marked as "zombie". The memory > >>> allocated by > >>> >>> a zombie peer is freed only when its reference count becomes zero. > >>> >>> > >>> >>> Re-resolvable servers utilize timers that also hold a reference. A > >>> >>> reference is also held while upstream keepalive caches an idle > >>> >>> connection. > >>> >>> > >>> >>> Co-authored-by: Roman Arutyunyan >>> > > >>> >>> Co-authored-by: Sergey Kandaurov >>> > > >>> >>> Co-authored-by: Vladimir Homutov >>> > > >>> > > >>> > [..] > >>> > > >>> >>> diff --git a/src/http/ngx_http_upstream_round_robin.h > >>> b/src/http/ngx_http_upstream_round_robin.h > >>> >>> --- a/src/http/ngx_http_upstream_round_robin.h > >>> >>> +++ b/src/http/ngx_http_upstream_round_robin.h > >>> >>> @@ -14,8 +14,23 @@ > >>> >>> #include > >>> >>> > >>> >>> > >>> >>> +typedef struct ngx_http_upstream_rr_peers_s > >>> ngx_http_upstream_rr_peers_t; > >>> >>> typedef struct ngx_http_upstream_rr_peer_s > >>> ngx_http_upstream_rr_peer_t; > >>> >>> > >>> >>> + > >>> >>> +#if (NGX_HTTP_UPSTREAM_ZONE) > >>> >>> + > >>> >>> +typedef struct { > >>> >>> + ngx_event_t event; /* must be > >>> first */ > >>> >>> + ngx_uint_t worker; > >>> > > >>> > Missed this last time. This field should be removed since all > >>> resolving is in > >>> > worker #0. > >>> > >>> Unfortunately, that would break the ABI compatibility between OSS and > >>> Plus. Replacing the field with yet another NGX_COMPAT_BEGIN isn't any > >>> better than leaving it in the opensource code. 
> >>> > >>> > > >>> >>> + ngx_str_t name; > >>> >>> + ngx_http_upstream_rr_peers_t *peers; > >>> >>> + ngx_http_upstream_rr_peer_t *peer; > >>> >>> +} ngx_http_upstream_host_t; > >>> >>> + > >>> >>> +#endif > >>> >>> + > >>> >>> + > >>> >>> struct ngx_http_upstream_rr_peer_s { > >>> >>> struct sockaddr *sockaddr; > >>> >>> socklen_t socklen; > >>> >>> @@ -46,7 +61,12 @@ struct ngx_http_upstream_rr_peer_s { > >>> >>> #endif > >>> >>> > >>> >>> #if (NGX_HTTP_UPSTREAM_ZONE) > >>> >>> + unsigned zombie:1; > >>> >> > >>> >> I suggest declaring this as in other similar places: > >>> >> > >>> >> ngx_uint_t zombie; /* unsigned > >>> zombie:1; */ > >>> >> > >>> >>> + > >>> >>> ngx_atomic_t lock; > >>> >>> + ngx_uint_t id; > >>> >> > >>> >> This field is not used in open source nginx and should not be > >>> added or assigned. > >>> >> > >>> >>> + ngx_uint_t refs; > >>> >>> + ngx_http_upstream_host_t *host; > >>> >>> #endif > >>> >>> > >>> >>> ngx_http_upstream_rr_peer_t *next; > >>> > > >>> > [..] 
> >>> > > >>> > -- > >>> > Roman Arutyunyan > >>> > _______________________________________________ > >>> > nginx-devel mailing list > >>> > nginx-devel at nginx.org > >>> > https://mailman.nginx.org/mailman/listinfo/nginx-devel > >>> > >>> _______________________________________________ > >>> nginx-devel mailing list > >>> nginx-devel at nginx.org > >>> https://mailman.nginx.org/mailman/listinfo/nginx-devel > >>> > >>> > >>> > >>> _______________________________________________ > >>> nginx-devel mailing list > >>> nginx-devel at nginx.org > >>> https://mailman.nginx.org/mailman/listinfo/nginx-devel > >> _______________________________________________ > >> nginx-devel mailing list > >> nginx-devel at nginx.org > >> https://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From vbart at wbsrv.ru Fri Jul 12 15:28:54 2024 From: vbart at wbsrv.ru (=?iso-8859-1?q?Valentin_V=2E_Bartenev?=) Date: Fri, 12 Jul 2024 18:28:54 +0300 Subject: [PATCH] Avoided updating of parent PID value after daemonization Message-ID: # HG changeset patch # User Valentin Bartenev # Date 1720793182 -10800 # Fri Jul 12 17:06:22 2024 +0300 # Node ID d9b53b7e164560a44c9228dfd2c882b619004ed2 # Parent 072ca4906154effb64cbf8209dfe86742ff074d7 Avoided updating of parent PID value after daemonization. The "ngx_parent" value is only used to detect conditions where the daemon is in a binary upgrade procedure and the old master process hasn't exited yet. To do this, it's checked for equality with ngx_getppid(). 
As a result, if it's set to the parent process during daemonization, the binary upgrade signal is ignored with an error: "[crit] the changing binary signal is ignored: you should shutdown or terminate before either old or new binary's process" until the parent process is terminated, which may not happen immediately after forking under load on a slow system. On the other hand, it should be absolutely safe to upgrade the binary before the process left after daemonization has finally exited. Notably, this problem has been observed sporadically in the "binary_upgrade.t" test, as it sends a USR2 signal immediately after starting the daemon and checking its pid file. diff --git a/src/os/unix/ngx_daemon.c b/src/os/unix/ngx_daemon.c --- a/src/os/unix/ngx_daemon.c +++ b/src/os/unix/ngx_daemon.c @@ -26,7 +26,6 @@ ngx_daemon(ngx_log_t *log) exit(0); } - ngx_parent = ngx_pid; ngx_pid = ngx_getpid(); if (setsid() == -1) { From o.deeva at wbsrv.ru Mon Jul 15 12:01:15 2024 From: o.deeva at wbsrv.ru (=?iso-8859-1?q?Oksana_Deeva?=) Date: Mon, 15 Jul 2024 15:01:15 +0300 Subject: [PATCH] Tests: stream_udp_proxy_requests adjusted Message-ID: <50fc19b32628b1c5a2a4.1721044875@oksana-wbsrv> # HG changeset patch # User Oksana Deeva # Date 1721044584 -10800 # Mon Jul 15 14:56:24 2024 +0300 # Node ID 50fc19b32628b1c5a2a49c127b013e15e4083dd6 # Parent 0e9c1a8aa1d49e57b211e2c8ece94e00bf032ed7 Tests: stream_udp_proxy_requests adjusted. The test occasionally failed due to the fact that the order of packets could change. Now the order of packets will be ignored. diff -r 0e9c1a8aa1d4 -r 50fc19b32628 stream_udp_proxy_requests.t --- a/stream_udp_proxy_requests.t Fri Jul 12 01:19:15 2024 +0400 +++ b/stream_udp_proxy_requests.t Mon Jul 15 14:56:24 2024 +0300 @@ -160,14 +160,23 @@ $s = dgram('127.0.0.1:' . port(8985)); $s->write('1') for 1 .. 5; -$b = join ' ', map { $s->read() } (1 .. 10); + +my @parts = map { $s->read() } (1 .. 
10); + +my $res = {}; +for (my $i = 0; $i < scalar @parts; $i++) { + my $part = $parts[$i]; -SKIP: { -skip 'session could early terminate', 1 unless $ENV{TEST_NGINX_UNSAFE}; + if ($i % 2 == 0) { + $res->{$part} //= 0; + } else { + $res->{$parts[$i-1]} += $part; + } +} -like($b, qr/^(\d+ 1) \1 (?!\1)(\d+ 1) \2 (?!\2)\d+ 1$/, 'requests - responses'); +$b = join ' ', sort values %$res; -} +is($b, '1 2 2', 'requests - responses'); $t->stop(); From thorvaldur.thorvaldsson at gmail.com Tue Jul 16 22:04:10 2024 From: thorvaldur.thorvaldsson at gmail.com (Thorvaldur Thorvaldsson) Date: Tue, 16 Jul 2024 22:04:10 +0000 Subject: [PATCH] Tests: stream_udp_proxy_requests adjusted In-Reply-To: <50fc19b32628b1c5a2a4.1721044875@oksana-wbsrv> References: <50fc19b32628b1c5a2a4.1721044875@oksana-wbsrv> Message-ID: Unsubscribe. On Mon, Jul 15, 2024 at 12:02 PM Oksana Deeva wrote: > > # HG changeset patch > # User Oksana Deeva > # Date 1721044584 -10800 > # Mon Jul 15 14:56:24 2024 +0300 > # Node ID 50fc19b32628b1c5a2a49c127b013e15e4083dd6 > # Parent 0e9c1a8aa1d49e57b211e2c8ece94e00bf032ed7 > Tests: stream_udp_proxy_requests adjusted. > > The test occasionally failed due to the fact that the order of packets > could change. Now the order of packets will be ignored. > > diff -r 0e9c1a8aa1d4 -r 50fc19b32628 stream_udp_proxy_requests.t > --- a/stream_udp_proxy_requests.t Fri Jul 12 01:19:15 2024 +0400 > +++ b/stream_udp_proxy_requests.t Mon Jul 15 14:56:24 2024 +0300 > @@ -160,14 +160,23 @@ > > $s = dgram('127.0.0.1:' . port(8985)); > $s->write('1') for 1 .. 5; > -$b = join ' ', map { $s->read() } (1 .. 10); > + > +my @parts = map { $s->read() } (1 .. 
10); > + > +my $res = {}; > +for (my $i = 0; $i < scalar @parts; $i++) { > + my $part = $parts[$i]; > > -SKIP: { > -skip 'session could early terminate', 1 unless $ENV{TEST_NGINX_UNSAFE}; > + if ($i % 2 == 0) { > + $res->{$part} //= 0; > + } else { > + $res->{$parts[$i-1]} += $part; > + } > +} > > -like($b, qr/^(\d+ 1) \1 (?!\1)(\d+ 1) \2 (?!\2)\d+ 1$/, 'requests - responses'); > +$b = join ' ', sort values %$res; > > -} > +is($b, '1 2 2', 'requests - responses'); > > $t->stop(); > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel From sunggon.song at quantil.com Wed Jul 17 01:08:13 2024 From: sunggon.song at quantil.com (Sunggon Song) Date: Wed, 17 Jul 2024 10:08:13 +0900 Subject: [PATCH] Delayed closing of idle connections Message-ID: # HG changeset patch # User Sunggon Song # Date 1721178360 -32400 # Wed Jul 17 10:06:00 2024 +0900 # Node ID 3db2df01147dcbcc7bc63670207a0aa2439f6a8c # Parent 072ca4906154effb64cbf8209dfe86742ff074d7 [PATCH] Delayed closing of idle connections Delays processing of closing idle connection to read unread requests while nginx performs a graceful shutdown. This is the case where the client sent the request message through the keepalive(idle) connection, but nginx has not yet read it from the socket recv buffer. If there is no delay time and there are some unread requests in the socket recv buffer, the idle connection is immediately closed and a TCP reset is sent to the client by the TCP stack of the kernel. 
diff -r 072ca4906154 -r 3db2df01147d src/core/nginx.c --- a/src/core/nginx.c Thu Jun 27 17:29:56 2024 +0400 +++ b/src/core/nginx.c Wed Jul 17 10:06:00 2024 +0900 @@ -153,6 +153,13 @@ 0, NULL }, + { ngx_string("close_idle_connections_delay"), + NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1, + ngx_conf_set_close_idle_connecions_delay, + 0, + offsetof(ngx_core_conf_t, close_idle_connections_delay), + NULL }, + ngx_null_command }; @@ -1125,6 +1132,8 @@ ccf->user = (ngx_uid_t) NGX_CONF_UNSET_UINT; ccf->group = (ngx_gid_t) NGX_CONF_UNSET_UINT; + ccf->close_idle_connections_delay = NGX_CONF_UNSET; + if (ngx_array_init(&ccf->env, cycle->pool, 1, sizeof(ngx_str_t)) != NGX_OK) { @@ -1148,6 +1157,8 @@ ngx_conf_init_value(ccf->worker_processes, 1); ngx_conf_init_value(ccf->debug_points, 0); + ngx_conf_init_value(ccf->close_idle_connections_delay, 0); + #if (NGX_HAVE_CPU_AFFINITY) if (!ccf->cpu_affinity_auto diff -r 072ca4906154 -r 3db2df01147d src/core/ngx_conf_file.c --- a/src/core/ngx_conf_file.c Thu Jun 27 17:29:56 2024 +0400 +++ b/src/core/ngx_conf_file.c Wed Jul 17 10:06:00 2024 +0900 @@ -1484,3 +1484,31 @@ return NGX_CONF_ERROR; } + +char * +ngx_conf_set_close_idle_connecions_delay(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf) +{ + char *p = conf; + + time_t *sp; + ngx_str_t *value; + + sp = (time_t *) (p + cmd->offset); + if (*sp != NGX_CONF_UNSET) { + return "is duplicate"; + } + + value = cf->args->elts; + + *sp = ngx_parse_time(&value[1], 1); + + if (*sp == (time_t) NGX_ERROR) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid value \"%s\"", value[i].data); + + return NGX_CONF_ERROR; + } + + return NGX_CONF_OK; +} diff -r 072ca4906154 -r 3db2df01147d src/core/ngx_conf_file.h --- a/src/core/ngx_conf_file.h Thu Jun 27 17:29:56 2024 +0400 +++ b/src/core/ngx_conf_file.h Wed Jul 17 10:06:00 2024 +0900 @@ -290,6 +290,7 @@ char *ngx_conf_set_bufs_slot(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); char *ngx_conf_set_enum_slot(ngx_conf_t *cf, ngx_command_t *cmd, 
void *conf); char *ngx_conf_set_bitmask_slot(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); - +char *ngx_conf_set_close_idle_connecions_delay(ngx_conf_t *cf, + ngx_command_t *cmd, void *conf); #endif /* _NGX_CONF_FILE_H_INCLUDED_ */ diff -r 072ca4906154 -r 3db2df01147d src/core/ngx_cycle.h --- a/src/core/ngx_cycle.h Thu Jun 27 17:29:56 2024 +0400 +++ b/src/core/ngx_cycle.h Wed Jul 17 10:06:00 2024 +0900 @@ -118,6 +118,8 @@ ngx_array_t env; char **environment; + time_t close_idle_connections_delay; + ngx_uint_t transparent; /* unsigned transparent:1; */ } ngx_core_conf_t; diff -r 072ca4906154 -r 3db2df01147d src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c Thu Jun 27 17:29:56 2024 +0400 +++ b/src/os/unix/ngx_process_cycle.c Wed Jul 17 10:06:00 2024 +0900 @@ -51,6 +51,7 @@ sig_atomic_t ngx_noaccept; ngx_uint_t ngx_noaccepting; ngx_uint_t ngx_restart; +ngx_uint_t ngx_delayed_close_done; static u_char master_process[] = "master process"; @@ -698,6 +699,9 @@ static void ngx_worker_process_cycle(ngx_cycle_t *cycle, void *data) { + time_t now; + ngx_core_conf_t *ccf; + time_t delay_sec; ngx_int_t worker = (intptr_t) data; ngx_process = NGX_PROCESS_WORKER; @@ -706,10 +710,26 @@ ngx_worker_process_init(cycle, worker); ngx_setproctitle("worker process"); + delay_sec = 0; + ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module); for ( ;; ) { if (ngx_exiting) { + now = ngx_time(); + if (ccf->close_idle_connections_delay && + !ngx_delayed_close_done) + { + + if (!delay_sec) { + delay_sec = now + ccf->close_idle_connections_delay; + } else { + if (delay_sec < now) { + ngx_close_idle_connections(cycle); + ngx_delayed_close_done = 1; + } + } + } if (ngx_event_no_timers_left() == NGX_OK) { ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "exiting"); ngx_worker_process_exit(cycle); @@ -735,7 +755,9 @@ ngx_exiting = 1; ngx_set_shutdown_timer(cycle); ngx_close_listening_sockets(cycle); - ngx_close_idle_connections(cycle); + if 
(!ccf->close_idle_connections_delay) { + ngx_close_idle_connections(cycle); + } ngx_event_process_posted(cycle, &ngx_posted_events); } } @@ -941,6 +963,16 @@ { ngx_uint_t i; ngx_connection_t *c; + ngx_core_conf_t *ccf; + + ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module); + + if (ccf->close_idle_connections_delay && + !ngx_delayed_close_done) + { + ngx_close_idle_connections(cycle); + ngx_delayed_close_done = 1; + } for (i = 0; cycle->modules[i]; i++) { if (cycle->modules[i]->exit_process) { Best Regards and thanks, Sunggon --------------------------------------------------------------------- Sunggon Song I Principal Research Engineer Cache Team Office. 82-2-3441-0375 Cell. 82-10-8744-0133 E-mail. sunggon.song at cdnetworks.co.kr, sunggon.song at quantil.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Wed Jul 17 13:37:30 2024 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 17 Jul 2024 17:37:30 +0400 Subject: [PATCH] Tests: stream_udp_proxy_requests adjusted In-Reply-To: <50fc19b32628b1c5a2a4.1721044875@oksana-wbsrv> References: <50fc19b32628b1c5a2a4.1721044875@oksana-wbsrv> Message-ID: On Mon, Jul 15, 2024 at 03:01:15PM +0300, Oksana Deeva wrote: > # HG changeset patch > # User Oksana Deeva > # Date 1721044584 -10800 > # Mon Jul 15 14:56:24 2024 +0300 > # Node ID 50fc19b32628b1c5a2a49c127b013e15e4083dd6 > # Parent 0e9c1a8aa1d49e57b211e2c8ece94e00bf032ed7 > Tests: stream_udp_proxy_requests adjusted. > > The test occasionally failed due to the fact that the order of packets > could change. Now the order of packets will be ignored. > > diff -r 0e9c1a8aa1d4 -r 50fc19b32628 stream_udp_proxy_requests.t > --- a/stream_udp_proxy_requests.t Fri Jul 12 01:19:15 2024 +0400 > +++ b/stream_udp_proxy_requests.t Mon Jul 15 14:56:24 2024 +0300 > @@ -160,14 +160,23 @@ > > $s = dgram('127.0.0.1:' . port(8985)); > $s->write('1') for 1 .. 5; > -$b = join ' ', map { $s->read() } (1 .. 
10); > + > +my @parts = map { $s->read() } (1 .. 10); > + > +my $res = {}; > +for (my $i = 0; $i < scalar @parts; $i++) { > + my $part = $parts[$i]; > > -SKIP: { > -skip 'session could early terminate', 1 unless $ENV{TEST_NGINX_UNSAFE}; > + if ($i % 2 == 0) { > + $res->{$part} //= 0; > + } else { > + $res->{$parts[$i-1]} += $part; Due to use of the relative order, this won't solve a perfectly out-of-order sequence. Rather, it may be fixed by introducing sequence numbers: # HG changeset patch # User Sergey Kandaurov # Date 1721223063 -14400 # Wed Jul 17 17:31:03 2024 +0400 # Node ID 64d10bdcce5e4a8b6294c26fdcf4ff9f9a42cac2 # Parent b5ddbcf2fbb4d26712b5825ef660ebea30eacd8d Tests: stream_udp_proxy_requests adjusted. The test occasionally failed due to the fact that the order of packets could change. The fix is to introduce sequence numbers. Inspired by Oksana Deeva. diff --git a/stream_udp_proxy_requests.t b/stream_udp_proxy_requests.t --- a/stream_udp_proxy_requests.t +++ b/stream_udp_proxy_requests.t @@ -76,7 +76,7 @@ stream { server { listen 127.0.0.1:%%PORT_8985_UDP%% udp; - proxy_pass 127.0.0.1:%%PORT_8990_UDP%%; + proxy_pass 127.0.0.1:%%PORT_8991_UDP%%; proxy_requests 2; proxy_responses 2; @@ -150,24 +150,16 @@ is($s->read(), '1', 'requests unset foll # expects all packets proxied from backend, the last (uneven) session succeed $s = dgram('127.0.0.1:' . port(8984)); -$s->write('2') for 1 .. 5; -my $b = join ' ', map { $s->read() } (1 .. 15); -like($b, qr/^(\d+ 1 2) \1 (?!\1)(\d+ 1 2) \2 (?!\2)\d+ 1 2$/, 'slow backend'); +like(many($s, '2', 5, 15), + qr/^(\d+ 1 2) \1 (?!\1)(\d+ 1 2) \2 (?!\2)\d+ 1 2$/, 'slow backend'); # proxy_requests 2, proxy_responses 2 # client sends 5 packets, each responded with 2 packets # expects all packets proxied from backend, the last (uneven) session succeed $s = dgram('127.0.0.1:' . port(8985)); -$s->write('1') for 1 .. 5; -$b = join ' ', map { $s->read() } (1 .. 
10); - -SKIP: { -skip 'session could early terminate', 1 unless $ENV{TEST_NGINX_UNSAFE}; - -like($b, qr/^(\d+ 1) \1 (?!\1)(\d+ 1) \2 (?!\2)\d+ 1$/, 'requests - responses'); - -} +like(many($s, '1', 5, 10), + qr/^(\d+ 1) \1 (?!\1)(\d+ 1) \2 (?!\2)\d+ 1$/, 'requests - responses'); $t->stop(); @@ -185,6 +177,17 @@ EOF ############################################################################### +sub many { + my ($s, $buf, $wcount, $rcount) = @_; + + $s->write($buf) for 1 .. $wcount; + join ' ', map { $_->[1] } + sort { $a->[0] <=> $b->[0] } + map { [ $s->read() =~ /^(\d+) (.+)/ ] } 1 .. $rcount; +} + +############################################################################### + sub udp_daemon { my ($t, $port) = @_; @@ -200,14 +203,14 @@ sub udp_daemon { open my $fh, '>', $t->testdir() . "/$port"; close $fh; - my $slp = 1 if $port == port(8991); + my ($slp, $i) = (1, 1) if $port == port(8991); while (1) { $server->recv(my $buffer, 65536); sleep 1, $slp = 0 if $slp; - $server->send($server->peerport()); - $server->send($_) for (1 .. $buffer); + $server->send(($i ? "${\($i++)} " : "") . $server->peerport()); + $server->send(($i ? "${\($i++)} " : "") . $_) for (1..$buffer); } } > + } > +} > > -like($b, qr/^(\d+ 1) \1 (?!\1)(\d+ 1) \2 (?!\2)\d+ 1$/, 'requests - responses'); > +$b = join ' ', sort values %$res; > > -} > +is($b, '1 2 2', 'requests - responses'); > > $t->stop(); > From arut at nginx.com Thu Jul 18 13:49:07 2024 From: arut at nginx.com (=?utf-8?q?Roman_Arutyunyan?=) Date: Thu, 18 Jul 2024 13:49:07 +0000 Subject: [nginx] HTTP/2: close connections initialized during graceful shutdown. Message-ID: details: https://hg.nginx.org/nginx/rev/d1b8568f3042 branches: changeset: 9270:d1b8568f3042 user: Kasei Wang date: Thu Jul 18 17:43:25 2024 +0400 description: HTTP/2: close connections initialized during graceful shutdown. In some rare cases, graceful shutdown may happen while initializing an HTTP/2 connection. 
Previously, such a connection ignored the shutdown and remained active. Now it is gracefully closed prior to processing any streams to eliminate the shutdown delay. diffstat: src/http/v2/ngx_http_v2.c | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (15 lines): diff -r 072ca4906154 -r d1b8568f3042 src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Thu Jun 27 17:29:56 2024 +0400 +++ b/src/http/v2/ngx_http_v2.c Thu Jul 18 17:43:25 2024 +0400 @@ -292,6 +292,11 @@ ngx_http_v2_init(ngx_event_t *rev) c->data = h2c; + if (ngx_exiting) { + ngx_http_v2_finalize_connection(h2c, NGX_HTTP_V2_NO_ERROR); + return; + } + rev->handler = ngx_http_v2_read_handler; c->write->handler = ngx_http_v2_write_handler; From arut at nginx.com Thu Jul 18 13:50:45 2024 From: arut at nginx.com (Roman Arutyunyan) Date: Thu, 18 Jul 2024 17:50:45 +0400 Subject: [PATCH]HTTP/2 connection not properly closing during graceful shutdown In-Reply-To: References: <20240430152746.bksaeztpdhlo22uv@N00W24XTQX> <5fe2f2ca476eab484ed54c00e02f321a2b84b1bc.camel@kasei.im> <20240711093343.vkxjj377f4th5y2c@N00W24XTQX> Message-ID: <20240718135045.2q67ucrdyxx37yrn@N00W24XTQX> Hi, On Thu, Jul 11, 2024 at 08:51:54PM +0400, Sergey Kandaurov wrote: > > > On 11 Jul 2024, at 13:33, Roman Arutyunyan wrote: > > > > Hi, > > > > On Mon, May 06, 2024 at 11:14:24AM +0800, kasei at kasei.im wrote: > >> Hello, > >> > >> Thanks for your confirmation and explanations. > >> > >> The following is a modified patch. In my test it would send GOAWAY(on > >> stream id 0), same as the previous one, but call > >> ngx_http_v2_finalize_connection instead of set c->close. > >> > >> # HG changeset patch > >> # User Kasei Wang > >> # Date 1714965008 -28800 > >> # Mon May 06 11:10:08 2024 +0800 > >> # Branch help > >> # Node ID 70b6b6b69e6fd3a0d03de004acf45bad16b03a9c > >> # Parent 8618e4d900cc71082fbe7dc72af087937d64faf5 > >> HTTP/2: close http2 connections initialized during graceful shutdown. 
> > > > I'd remove 'http2' to fulfill the 67 character restriction. > > > >> In some rare cases, a HTTP/2 connections can be initialized during a > >> graceful shutdown. Now close such an connection to avoid unexcepted > >> delays in the graceful shutdown. > >> > >> diff -r 8618e4d900cc -r 70b6b6b69e6f src/http/v2/ngx_http_v2.c > >> --- a/src/http/v2/ngx_http_v2.c Tue Apr 16 18:27:50 2024 +0400 > >> +++ b/src/http/v2/ngx_http_v2.c Mon May 06 11:10:08 2024 +0800 > >> @@ -304,6 +304,11 @@ > >> c->idle = 1; > >> ngx_reusable_connection(c, 0); > >> > >> + if (ngx_exiting) { > >> + ngx_http_v2_finalize_connection(h2c, NGX_HTTP_V2_NO_ERROR); > >> + return; > >> + } > > > > I'd move this up a few lines. > > I think the block can be moved between setting c->data, > as this is required for lingering close processing, > and setting read and write handlers, as they're useless > because connection is switched to lingering close mode > as part of finalization. > > > > >> + > >> if (c->buffer) { > >> p = c->buffer->pos; > >> end = c->buffer->last; > > > > Otherwise looks fine, thanks. And sorry for the delay. > > > > -- > Sergey Kandaurov > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel Committed with minor changes: https://hg.nginx.org/nginx/rev/d1b8568f3042 -- Roman Arutyunyan From a.bavshin at nginx.com Thu Jul 18 18:20:37 2024 From: a.bavshin at nginx.com (=?iso-8859-1?q?Aleksei_Bavshin?=) Date: Thu, 18 Jul 2024 11:20:37 -0700 Subject: [PATCH 0 of 7] Upstream: re-resolvable servers. 
Message-ID: See https://mailman.nginx.org/pipermail/nginx-devel/2024-June/TSMKAHLFU3X4OBKLPRW5B2PXGBTUEHPU.html v3 changes: - Addressed comments from Roman - Removed the patch that disables re-resolve on Windows - Disabled reload tests on Windows From a.bavshin at nginx.com Thu Jul 18 18:20:38 2024 From: a.bavshin at nginx.com (=?iso-8859-1?q?Aleksei_Bavshin?=) Date: Thu, 18 Jul 2024 11:20:38 -0700 Subject: [PATCH 1 of 7] Upstream: re-resolvable servers In-Reply-To: References: Message-ID: <46d4c383cf3a72db5d57.1721326838@fedora-wsl.> # HG changeset patch # User Ruslan Ermilov # Date 1392462754 -14400 # Sat Feb 15 15:12:34 2014 +0400 # Node ID 46d4c383cf3a72db5d579092636a6be3cd907786 # Parent 072ca4906154effb64cbf8209dfe86742ff074d7 Upstream: re-resolvable servers. Specifying the upstream server by a hostname together with the "resolve" parameter will make the hostname to be periodically resolved, and upstream servers added/removed as necessary. This requires a "resolver" at the "http" configuration block. The "resolver_timeout" parameter also affects when the failed DNS requests will be attempted again. Responses with NXDOMAIN will be attempted again in 10 seconds. Upstream has a configuration generation number that is incremented each time servers are added/removed to the primary/backup list. This number is remembered by the peer.init method, and if peer.get detects a change in configuration, it returns NGX_BUSY. Each server has a reference counter. It is incremented by peer.get and decremented by peer.free. When a server is removed, it is removed from the list of servers and is marked as "zombie". The memory allocated by a zombie peer is freed only when its reference count becomes zero. 
Co-authored-by: Roman Arutyunyan Co-authored-by: Sergey Kandaurov Co-authored-by: Vladimir Homutov diff --git a/src/http/modules/ngx_http_upstream_hash_module.c b/src/http/modules/ngx_http_upstream_hash_module.c --- a/src/http/modules/ngx_http_upstream_hash_module.c +++ b/src/http/modules/ngx_http_upstream_hash_module.c @@ -24,6 +24,9 @@ typedef struct { typedef struct { ngx_http_complex_value_t key; +#if (NGX_HTTP_UPSTREAM_ZONE) + ngx_uint_t config; +#endif ngx_http_upstream_chash_points_t *points; } ngx_http_upstream_hash_srv_conf_t; @@ -49,6 +52,8 @@ static ngx_int_t ngx_http_upstream_get_h static ngx_int_t ngx_http_upstream_init_chash(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us); +static ngx_int_t ngx_http_upstream_update_chash(ngx_pool_t *pool, + ngx_http_upstream_srv_conf_t *us); static int ngx_libc_cdecl ngx_http_upstream_chash_cmp_points(const void *one, const void *two); static ngx_uint_t ngx_http_upstream_find_chash_point( @@ -178,11 +183,18 @@ ngx_http_upstream_get_hash_peer(ngx_peer ngx_http_upstream_rr_peers_rlock(hp->rrp.peers); - if (hp->tries > 20 || hp->rrp.peers->single || hp->key.len == 0) { + if (hp->tries > 20 || hp->rrp.peers->number < 2 || hp->key.len == 0) { ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); return hp->get_rr_peer(pc, &hp->rrp); } +#if (NGX_HTTP_UPSTREAM_ZONE) + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); + return hp->get_rr_peer(pc, &hp->rrp); + } +#endif + now = ngx_time(); pc->cached = 0; @@ -262,6 +274,7 @@ ngx_http_upstream_get_hash_peer(ngx_peer } hp->rrp.current = peer; + ngx_http_upstream_rr_peer_ref(hp->rrp.peers, peer); pc->sockaddr = peer->sockaddr; pc->socklen = peer->socklen; @@ -285,6 +298,26 @@ ngx_http_upstream_get_hash_peer(ngx_peer static ngx_int_t ngx_http_upstream_init_chash(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us) { + if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { + return NGX_ERROR; + } + + 
us->peer.init = ngx_http_upstream_init_chash_peer; + +#if (NGX_HTTP_UPSTREAM_ZONE) + if (us->shm_zone) { + return NGX_OK; + } +#endif + + return ngx_http_upstream_update_chash(cf->pool, us); +} + + +static ngx_int_t +ngx_http_upstream_update_chash(ngx_pool_t *pool, + ngx_http_upstream_srv_conf_t *us) +{ u_char *host, *port, c; size_t host_len, port_len, size; uint32_t hash, base_hash; @@ -299,25 +332,32 @@ ngx_http_upstream_init_chash(ngx_conf_t u_char byte[4]; } prev_hash; - if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { - return NGX_ERROR; + hcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_hash_module); + + if (hcf->points) { + ngx_free(hcf->points); + hcf->points = NULL; } - us->peer.init = ngx_http_upstream_init_chash_peer; - peers = us->peer.data; npoints = peers->total_weight * 160; size = sizeof(ngx_http_upstream_chash_points_t) - + sizeof(ngx_http_upstream_chash_point_t) * (npoints - 1); + - sizeof(ngx_http_upstream_chash_point_t) + + sizeof(ngx_http_upstream_chash_point_t) * npoints; - points = ngx_palloc(cf->pool, size); + points = pool ? 
ngx_palloc(pool, size) : ngx_alloc(size, ngx_cycle->log); if (points == NULL) { return NGX_ERROR; } points->number = 0; + if (npoints == 0) { + hcf->points = points; + return NGX_OK; + } + for (peer = peers->peer; peer; peer = peer->next) { server = &peer->server; @@ -401,7 +441,6 @@ ngx_http_upstream_init_chash(ngx_conf_t points->number = i + 1; - hcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_hash_module); hcf->points = points; return NGX_OK; @@ -481,7 +520,22 @@ ngx_http_upstream_init_chash_peer(ngx_ht ngx_http_upstream_rr_peers_rlock(hp->rrp.peers); - hp->hash = ngx_http_upstream_find_chash_point(hcf->points, hash); +#if (NGX_HTTP_UPSTREAM_ZONE) + if (hp->rrp.peers->config + && (hcf->points == NULL || hcf->config != *hp->rrp.peers->config)) + { + if (ngx_http_upstream_update_chash(NULL, us) != NGX_OK) { + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); + return NGX_ERROR; + } + + hcf->config = *hp->rrp.peers->config; + } +#endif + + if (hcf->points->number) { + hp->hash = ngx_http_upstream_find_chash_point(hcf->points, hash); + } ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); @@ -517,6 +571,20 @@ ngx_http_upstream_get_chash_peer(ngx_pee pc->cached = 0; pc->connection = NULL; + if (hp->rrp.peers->number == 0) { + pc->name = hp->rrp.peers->name; + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); + return NGX_BUSY; + } + +#if (NGX_HTTP_UPSTREAM_ZONE) + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { + pc->name = hp->rrp.peers->name; + ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); + return NGX_BUSY; + } +#endif + now = ngx_time(); hcf = hp->conf; @@ -597,6 +665,7 @@ ngx_http_upstream_get_chash_peer(ngx_pee found: hp->rrp.current = best; + ngx_http_upstream_rr_peer_ref(hp->rrp.peers, best); pc->sockaddr = best->sockaddr; pc->socklen = best->socklen; @@ -664,6 +733,7 @@ ngx_http_upstream_hash(ngx_conf_t *cf, n } uscf->flags = NGX_HTTP_UPSTREAM_CREATE + |NGX_HTTP_UPSTREAM_MODIFY |NGX_HTTP_UPSTREAM_WEIGHT 
|NGX_HTTP_UPSTREAM_MAX_CONNS |NGX_HTTP_UPSTREAM_MAX_FAILS diff --git a/src/http/modules/ngx_http_upstream_ip_hash_module.c b/src/http/modules/ngx_http_upstream_ip_hash_module.c --- a/src/http/modules/ngx_http_upstream_ip_hash_module.c +++ b/src/http/modules/ngx_http_upstream_ip_hash_module.c @@ -163,11 +163,19 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p ngx_http_upstream_rr_peers_rlock(iphp->rrp.peers); - if (iphp->tries > 20 || iphp->rrp.peers->single) { + if (iphp->tries > 20 || iphp->rrp.peers->number < 2) { ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); return iphp->get_rr_peer(pc, &iphp->rrp); } +#if (NGX_HTTP_UPSTREAM_ZONE) + if (iphp->rrp.peers->config && iphp->rrp.config != *iphp->rrp.peers->config) + { + ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); + return iphp->get_rr_peer(pc, &iphp->rrp); + } +#endif + now = ngx_time(); pc->cached = 0; @@ -232,6 +240,7 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p } iphp->rrp.current = peer; + ngx_http_upstream_rr_peer_ref(iphp->rrp.peers, peer); pc->sockaddr = peer->sockaddr; pc->socklen = peer->socklen; @@ -268,6 +277,7 @@ ngx_http_upstream_ip_hash(ngx_conf_t *cf uscf->peer.init_upstream = ngx_http_upstream_init_ip_hash; uscf->flags = NGX_HTTP_UPSTREAM_CREATE + |NGX_HTTP_UPSTREAM_MODIFY |NGX_HTTP_UPSTREAM_WEIGHT |NGX_HTTP_UPSTREAM_MAX_CONNS |NGX_HTTP_UPSTREAM_MAX_FAILS diff --git a/src/http/modules/ngx_http_upstream_least_conn_module.c b/src/http/modules/ngx_http_upstream_least_conn_module.c --- a/src/http/modules/ngx_http_upstream_least_conn_module.c +++ b/src/http/modules/ngx_http_upstream_least_conn_module.c @@ -124,6 +124,12 @@ ngx_http_upstream_get_least_conn_peer(ng ngx_http_upstream_rr_peers_wlock(peers); +#if (NGX_HTTP_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + goto busy; + } +#endif + best = NULL; total = 0; @@ -244,6 +250,7 @@ ngx_http_upstream_get_least_conn_peer(ng best->conns++; rrp->current = best; + ngx_http_upstream_rr_peer_ref(peers, best); n = p / (8 * 
sizeof(uintptr_t)); m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); @@ -278,8 +285,18 @@ failed: } ngx_http_upstream_rr_peers_wlock(peers); + +#if (NGX_HTTP_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + goto busy; + } +#endif } +#if (NGX_HTTP_UPSTREAM_ZONE) +busy: +#endif + ngx_http_upstream_rr_peers_unlock(peers); pc->name = peers->name; @@ -303,6 +320,7 @@ ngx_http_upstream_least_conn(ngx_conf_t uscf->peer.init_upstream = ngx_http_upstream_init_least_conn; uscf->flags = NGX_HTTP_UPSTREAM_CREATE + |NGX_HTTP_UPSTREAM_MODIFY |NGX_HTTP_UPSTREAM_WEIGHT |NGX_HTTP_UPSTREAM_MAX_CONNS |NGX_HTTP_UPSTREAM_MAX_FAILS diff --git a/src/http/modules/ngx_http_upstream_random_module.c b/src/http/modules/ngx_http_upstream_random_module.c --- a/src/http/modules/ngx_http_upstream_random_module.c +++ b/src/http/modules/ngx_http_upstream_random_module.c @@ -17,6 +17,9 @@ typedef struct { typedef struct { ngx_uint_t two; +#if (NGX_HTTP_UPSTREAM_ZONE) + ngx_uint_t config; +#endif ngx_http_upstream_random_range_t *ranges; } ngx_http_upstream_random_srv_conf_t; @@ -127,6 +130,11 @@ ngx_http_upstream_update_random(ngx_pool rcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_random_module); + if (rcf->ranges) { + ngx_free(rcf->ranges); + rcf->ranges = NULL; + } + peers = us->peer.data; size = peers->number * sizeof(ngx_http_upstream_random_range_t); @@ -186,11 +194,15 @@ ngx_http_upstream_init_random_peer(ngx_h ngx_http_upstream_rr_peers_rlock(rp->rrp.peers); #if (NGX_HTTP_UPSTREAM_ZONE) - if (rp->rrp.peers->shpool && rcf->ranges == NULL) { + if (rp->rrp.peers->config + && (rcf->ranges == NULL || rcf->config != *rp->rrp.peers->config)) + { if (ngx_http_upstream_update_random(NULL, us) != NGX_OK) { ngx_http_upstream_rr_peers_unlock(rp->rrp.peers); return NGX_ERROR; } + + rcf->config = *rp->rrp.peers->config; } #endif @@ -220,11 +232,18 @@ ngx_http_upstream_get_random_peer(ngx_pe ngx_http_upstream_rr_peers_rlock(peers); - if (rp->tries > 20 || 
peers->single) { + if (rp->tries > 20 || peers->number < 2) { ngx_http_upstream_rr_peers_unlock(peers); return ngx_http_upstream_get_round_robin_peer(pc, rrp); } +#if (NGX_HTTP_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + ngx_http_upstream_rr_peers_unlock(peers); + return ngx_http_upstream_get_round_robin_peer(pc, rrp); + } +#endif + pc->cached = 0; pc->connection = NULL; @@ -274,6 +293,7 @@ ngx_http_upstream_get_random_peer(ngx_pe } rrp->current = peer; + ngx_http_upstream_rr_peer_ref(peers, peer); if (now - peer->checked > peer->fail_timeout) { peer->checked = now; @@ -314,11 +334,18 @@ ngx_http_upstream_get_random2_peer(ngx_p ngx_http_upstream_rr_peers_wlock(peers); - if (rp->tries > 20 || peers->single) { + if (rp->tries > 20 || peers->number < 2) { ngx_http_upstream_rr_peers_unlock(peers); return ngx_http_upstream_get_round_robin_peer(pc, rrp); } +#if (NGX_HTTP_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + ngx_http_upstream_rr_peers_unlock(peers); + return ngx_http_upstream_get_round_robin_peer(pc, rrp); + } +#endif + pc->cached = 0; pc->connection = NULL; @@ -384,6 +411,7 @@ ngx_http_upstream_get_random2_peer(ngx_p } rrp->current = peer; + ngx_http_upstream_rr_peer_ref(peers, peer); if (now - peer->checked > peer->fail_timeout) { peer->checked = now; @@ -467,6 +495,7 @@ ngx_http_upstream_random(ngx_conf_t *cf, uscf->peer.init_upstream = ngx_http_upstream_init_random; uscf->flags = NGX_HTTP_UPSTREAM_CREATE + |NGX_HTTP_UPSTREAM_MODIFY |NGX_HTTP_UPSTREAM_WEIGHT |NGX_HTTP_UPSTREAM_MAX_CONNS |NGX_HTTP_UPSTREAM_MAX_FAILS diff --git a/src/http/modules/ngx_http_upstream_zone_module.c b/src/http/modules/ngx_http_upstream_zone_module.c --- a/src/http/modules/ngx_http_upstream_zone_module.c +++ b/src/http/modules/ngx_http_upstream_zone_module.c @@ -18,6 +18,13 @@ static ngx_http_upstream_rr_peers_t *ngx ngx_slab_pool_t *shpool, ngx_http_upstream_srv_conf_t *uscf); static ngx_http_upstream_rr_peer_t 
*ngx_http_upstream_zone_copy_peer( ngx_http_upstream_rr_peers_t *peers, ngx_http_upstream_rr_peer_t *src); +static void ngx_http_upstream_zone_set_single( + ngx_http_upstream_srv_conf_t *uscf); +static void ngx_http_upstream_zone_remove_peer_locked( + ngx_http_upstream_rr_peers_t *peers, ngx_http_upstream_rr_peer_t *peer); +static ngx_int_t ngx_http_upstream_zone_init_worker(ngx_cycle_t *cycle); +static void ngx_http_upstream_zone_resolve_timer(ngx_event_t *event); +static void ngx_http_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx); static ngx_command_t ngx_http_upstream_zone_commands[] = { @@ -55,7 +62,7 @@ ngx_module_t ngx_http_upstream_zone_mod NGX_HTTP_MODULE, /* module type */ NULL, /* init master */ NULL, /* init module */ - NULL, /* init process */ + ngx_http_upstream_zone_init_worker, /* init process */ NULL, /* init thread */ NULL, /* exit thread */ NULL, /* exit process */ @@ -188,9 +195,15 @@ ngx_http_upstream_zone_copy_peers(ngx_sl ngx_http_upstream_srv_conf_t *uscf) { ngx_str_t *name; + ngx_uint_t *config; ngx_http_upstream_rr_peer_t *peer, **peerp; ngx_http_upstream_rr_peers_t *peers, *backup; + config = ngx_slab_calloc(shpool, sizeof(ngx_uint_t)); + if (config == NULL) { + return NULL; + } + peers = ngx_slab_alloc(shpool, sizeof(ngx_http_upstream_rr_peers_t)); if (peers == NULL) { return NULL; @@ -214,6 +227,7 @@ ngx_http_upstream_zone_copy_peers(ngx_sl peers->name = name; peers->shpool = shpool; + peers->config = config; for (peerp = &peers->peer; *peerp; peerp = &peer->next) { /* pool is unlocked */ @@ -223,6 +237,17 @@ ngx_http_upstream_zone_copy_peers(ngx_sl } *peerp = peer; + (*peers->config)++; + } + + for (peerp = &peers->resolve; *peerp; peerp = &peer->next) { + peer = ngx_http_upstream_zone_copy_peer(peers, *peerp); + if (peer == NULL) { + return NULL; + } + + *peerp = peer; + (*peers->config)++; } if (peers->next == NULL) { @@ -239,6 +264,7 @@ ngx_http_upstream_zone_copy_peers(ngx_sl backup->name = name; backup->shpool = shpool; + 
backup->config = config; for (peerp = &backup->peer; *peerp; peerp = &peer->next) { /* pool is unlocked */ @@ -248,6 +274,17 @@ ngx_http_upstream_zone_copy_peers(ngx_sl } *peerp = peer; + (*backup->config)++; + } + + for (peerp = &backup->resolve; *peerp; peerp = &peer->next) { + peer = ngx_http_upstream_zone_copy_peer(backup, *peerp); + if (peer == NULL) { + return NULL; + } + + *peerp = peer; + (*backup->config)++; } peers->next = backup; @@ -279,6 +316,7 @@ ngx_http_upstream_zone_copy_peer(ngx_htt dst->sockaddr = NULL; dst->name.data = NULL; dst->server.data = NULL; + dst->host = NULL; } dst->sockaddr = ngx_slab_calloc_locked(pool, sizeof(ngx_sockaddr_t)); @@ -301,12 +339,37 @@ ngx_http_upstream_zone_copy_peer(ngx_htt } ngx_memcpy(dst->server.data, src->server.data, src->server.len); + + if (src->host) { + dst->host = ngx_slab_calloc_locked(pool, + sizeof(ngx_http_upstream_host_t)); + if (dst->host == NULL) { + goto failed; + } + + dst->host->name.data = ngx_slab_alloc_locked(pool, + src->host->name.len); + if (dst->host->name.data == NULL) { + goto failed; + } + + dst->host->peers = peers; + dst->host->peer = dst; + + dst->host->name.len = src->host->name.len; + ngx_memcpy(dst->host->name.data, src->host->name.data, + src->host->name.len); + } } return dst; failed: + if (dst->host) { + ngx_slab_free_locked(pool, dst->host); + } + if (dst->server.data) { ngx_slab_free_locked(pool, dst->server.data); } @@ -323,3 +386,296 @@ failed: return NULL; } + + +static void +ngx_http_upstream_zone_set_single(ngx_http_upstream_srv_conf_t *uscf) +{ + ngx_http_upstream_rr_peers_t *peers; + + peers = uscf->peer.data; + + if (peers->number == 1 + && (peers->next == NULL || peers->next->number == 0)) + { + peers->single = 1; + + } else { + peers->single = 0; + } +} + + +static void +ngx_http_upstream_zone_remove_peer_locked(ngx_http_upstream_rr_peers_t *peers, + ngx_http_upstream_rr_peer_t *peer) +{ + peers->total_weight -= peer->weight; + peers->number--; + peers->tries -= 
(peer->down == 0); + (*peers->config)++; + peers->weighted = (peers->total_weight != peers->number); + + ngx_http_upstream_rr_peer_free(peers, peer); +} + + +static ngx_int_t +ngx_http_upstream_zone_init_worker(ngx_cycle_t *cycle) +{ + ngx_uint_t i; + ngx_event_t *event; + ngx_http_upstream_rr_peer_t *peer; + ngx_http_upstream_rr_peers_t *peers; + ngx_http_upstream_srv_conf_t *uscf, **uscfp; + ngx_http_upstream_main_conf_t *umcf; + + if ((ngx_process != NGX_PROCESS_WORKER || ngx_worker != 0) + && ngx_process != NGX_PROCESS_SINGLE) + { + return NGX_OK; + } + + umcf = ngx_http_cycle_get_module_main_conf(cycle, ngx_http_upstream_module); + + if (umcf == NULL) { + return NGX_OK; + } + + uscfp = umcf->upstreams.elts; + + for (i = 0; i < umcf->upstreams.nelts; i++) { + + uscf = uscfp[i]; + + if (uscf->shm_zone == NULL) { + continue; + } + + peers = uscf->peer.data; + + do { + ngx_http_upstream_rr_peers_wlock(peers); + + for (peer = peers->resolve; peer; peer = peer->next) { + + event = &peer->host->event; + ngx_memzero(event, sizeof(ngx_event_t)); + + event->data = uscf; + event->handler = ngx_http_upstream_zone_resolve_timer; + event->log = cycle->log; + event->cancelable = 1; + + ngx_add_timer(event, 1); + } + + ngx_http_upstream_rr_peers_unlock(peers); + + peers = peers->next; + + } while (peers); + } + + return NGX_OK; +} + + +static void +ngx_http_upstream_zone_resolve_timer(ngx_event_t *event) +{ + ngx_resolver_ctx_t *ctx; + ngx_http_upstream_host_t *host; + ngx_http_upstream_srv_conf_t *uscf; + + host = (ngx_http_upstream_host_t *) event; + uscf = event->data; + + ctx = ngx_resolve_start(uscf->resolver, NULL); + if (ctx == NULL) { + goto retry; + } + + if (ctx == NGX_NO_RESOLVER) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "no resolver defined to resolve %V", &host->name); + return; + } + + ctx->name = host->name; + ctx->handler = ngx_http_upstream_zone_resolve_handler; + ctx->data = host; + ctx->timeout = uscf->resolver_timeout; + ctx->cancelable = 1; + + if 
(ngx_resolve_name(ctx) == NGX_OK) { + return; + } + +retry: + + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); +} + + +static void +ngx_http_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx) +{ + time_t now; + in_port_t port; + ngx_msec_t timer; + ngx_uint_t i, j; + ngx_event_t *event; + ngx_resolver_addr_t *addr; + ngx_http_upstream_host_t *host; + ngx_http_upstream_rr_peer_t *peer, *template, **peerp; + ngx_http_upstream_rr_peers_t *peers; + ngx_http_upstream_srv_conf_t *uscf; + + host = ctx->data; + event = &host->event; + uscf = event->data; + peers = host->peers; + template = host->peer; + + ngx_http_upstream_rr_peers_wlock(peers); + + now = ngx_time(); + + if (ctx->state) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "%V could not be resolved (%i: %s)", + &ctx->name, ctx->state, + ngx_resolver_strerror(ctx->state)); + + if (ctx->state != NGX_RESOLVE_NXDOMAIN) { + ngx_http_upstream_rr_peers_unlock(peers); + + ngx_resolve_name_done(ctx); + + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); + return; + } + + /* NGX_RESOLVE_NXDOMAIN */ + + ctx->naddrs = 0; + } + +#if (NGX_DEBUG) + { + u_char text[NGX_SOCKADDR_STRLEN]; + size_t len; + + for (i = 0; i < ctx->naddrs; i++) { + len = ngx_sock_ntop(ctx->addrs[i].sockaddr, ctx->addrs[i].socklen, + text, NGX_SOCKADDR_STRLEN, 0); + + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, event->log, 0, + "name %V was resolved to %*s", &host->name, len, text); + } + } +#endif + + for (peerp = &peers->peer; *peerp; /* void */ ) { + peer = *peerp; + + if (peer->host != host) { + goto next; + } + + for (j = 0; j < ctx->naddrs; j++) { + + addr = &ctx->addrs[j]; + + if (addr->name.len == 0 + && ngx_cmp_sockaddr(peer->sockaddr, peer->socklen, + addr->sockaddr, addr->socklen, 0) + == NGX_OK) + { + addr->name.len = 1; + goto next; + } + } + + *peerp = peer->next; + ngx_http_upstream_zone_remove_peer_locked(peers, peer); + + ngx_http_upstream_zone_set_single(uscf); + + continue; + + next: + + peerp = &peer->next; + } + 
+ for (i = 0; i < ctx->naddrs; i++) { + + addr = &ctx->addrs[i]; + + if (addr->name.len == 1) { + addr->name.len = 0; + continue; + } + + ngx_shmtx_lock(&peers->shpool->mutex); + peer = ngx_http_upstream_zone_copy_peer(peers, NULL); + ngx_shmtx_unlock(&peers->shpool->mutex); + + if (peer == NULL) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "cannot add new server to upstream \"%V\", " + "memory exhausted", peers->name); + break; + } + + ngx_memcpy(peer->sockaddr, addr->sockaddr, addr->socklen); + + port = ((struct sockaddr_in *) template->sockaddr)->sin_port; + + switch (peer->sockaddr->sa_family) { +#if (NGX_HAVE_INET6) + case AF_INET6: + ((struct sockaddr_in6 *) peer->sockaddr)->sin6_port = port; + break; +#endif + default: /* AF_INET */ + ((struct sockaddr_in *) peer->sockaddr)->sin_port = port; + } + + peer->socklen = addr->socklen; + + peer->name.len = ngx_sock_ntop(peer->sockaddr, peer->socklen, + peer->name.data, NGX_SOCKADDR_STRLEN, 1); + + peer->host = template->host; + peer->server = template->server; + + peer->weight = template->weight; + peer->effective_weight = peer->weight; + peer->max_conns = template->max_conns; + peer->max_fails = template->max_fails; + peer->fail_timeout = template->fail_timeout; + peer->down = template->down; + + *peerp = peer; + peerp = &peer->next; + + peers->number++; + peers->tries += (peer->down == 0); + peers->total_weight += peer->weight; + peers->weighted = (peers->total_weight != peers->number); + (*peers->config)++; + + ngx_http_upstream_zone_set_single(uscf); + } + + ngx_http_upstream_rr_peers_unlock(peers); + + timer = (ngx_msec_t) 1000 * (ctx->valid > now ? 
ctx->valid - now + 1 : 1); + + ngx_resolve_name_done(ctx); + + ngx_add_timer(event, timer); +} diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -1565,6 +1565,26 @@ ngx_http_upstream_connect(ngx_http_reque u->state->peer = u->peer.name; +#if (NGX_HTTP_UPSTREAM_ZONE) + if (u->upstream && u->upstream->shm_zone + && (u->upstream->flags & NGX_HTTP_UPSTREAM_MODIFY)) + { + u->state->peer = ngx_palloc(r->pool, + sizeof(ngx_str_t) + u->peer.name->len); + if (u->state->peer == NULL) { + ngx_http_upstream_finalize_request(r, u, + NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + + u->state->peer->len = u->peer.name->len; + u->state->peer->data = (u_char *) (u->state->peer + 1); + ngx_memcpy(u->state->peer->data, u->peer.name->data, u->peer.name->len); + + u->peer.name = u->state->peer; + } +#endif + if (rc == NGX_BUSY) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "no live upstreams"); ngx_http_upstream_next(r, u, NGX_HTTP_UPSTREAM_FT_NOLIVE); @@ -6066,6 +6086,7 @@ ngx_http_upstream(ngx_conf_t *cf, ngx_co u.no_port = 1; uscf = ngx_http_upstream_add(cf, &u, NGX_HTTP_UPSTREAM_CREATE + |NGX_HTTP_UPSTREAM_MODIFY |NGX_HTTP_UPSTREAM_WEIGHT |NGX_HTTP_UPSTREAM_MAX_CONNS |NGX_HTTP_UPSTREAM_MAX_FAILS @@ -6171,6 +6192,9 @@ ngx_http_upstream_server(ngx_conf_t *cf, ngx_url_t u; ngx_int_t weight, max_conns, max_fails; ngx_uint_t i; +#if (NGX_HTTP_UPSTREAM_ZONE) + ngx_uint_t resolve; +#endif ngx_http_upstream_server_t *us; us = ngx_array_push(uscf->servers); @@ -6186,6 +6210,9 @@ ngx_http_upstream_server(ngx_conf_t *cf, max_conns = 0; max_fails = 1; fail_timeout = 10; +#if (NGX_HTTP_UPSTREAM_ZONE) + resolve = 0; +#endif for (i = 2; i < cf->args->nelts; i++) { @@ -6274,6 +6301,13 @@ ngx_http_upstream_server(ngx_conf_t *cf, continue; } +#if (NGX_HTTP_UPSTREAM_ZONE) + if (ngx_strcmp(value[i].data, "resolve") == 0) { + resolve = 1; + continue; + } +#endif + goto invalid; } @@ -6282,6 +6316,13 
@@ ngx_http_upstream_server(ngx_conf_t *cf, u.url = value[1]; u.default_port = 80; +#if (NGX_HTTP_UPSTREAM_ZONE) + if (resolve) { + /* resolve at run time */ + u.no_resolve = 1; + } +#endif + if (ngx_parse_url(cf->pool, &u) != NGX_OK) { if (u.err) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, @@ -6292,8 +6333,45 @@ ngx_http_upstream_server(ngx_conf_t *cf, } us->name = u.url; + +#if (NGX_HTTP_UPSTREAM_ZONE) + + if (resolve && u.naddrs == 0) { + ngx_addr_t *addr; + + /* save port */ + + addr = ngx_pcalloc(cf->pool, sizeof(ngx_addr_t)); + if (addr == NULL) { + return NGX_CONF_ERROR; + } + + addr->sockaddr = ngx_palloc(cf->pool, u.socklen); + if (addr->sockaddr == NULL) { + return NGX_CONF_ERROR; + } + + ngx_memcpy(addr->sockaddr, &u.sockaddr, u.socklen); + + addr->socklen = u.socklen; + + us->addrs = addr; + us->naddrs = 1; + + us->host = u.host; + + } else { + us->addrs = u.addrs; + us->naddrs = u.naddrs; + } + +#else + us->addrs = u.addrs; us->naddrs = u.naddrs; + +#endif + us->weight = weight; us->max_conns = max_conns; us->max_fails = max_fails; diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h --- a/src/http/ngx_http_upstream.h +++ b/src/http/ngx_http_upstream.h @@ -104,7 +104,11 @@ typedef struct { unsigned backup:1; - NGX_COMPAT_BEGIN(6) +#if (NGX_HTTP_UPSTREAM_ZONE) + ngx_str_t host; +#endif + + NGX_COMPAT_BEGIN(4) NGX_COMPAT_END } ngx_http_upstream_server_t; @@ -115,6 +119,7 @@ typedef struct { #define NGX_HTTP_UPSTREAM_FAIL_TIMEOUT 0x0008 #define NGX_HTTP_UPSTREAM_DOWN 0x0010 #define NGX_HTTP_UPSTREAM_BACKUP 0x0020 +#define NGX_HTTP_UPSTREAM_MODIFY 0x0040 #define NGX_HTTP_UPSTREAM_MAX_CONNS 0x0100 @@ -133,6 +138,8 @@ struct ngx_http_upstream_srv_conf_s { #if (NGX_HTTP_UPSTREAM_ZONE) ngx_shm_zone_t *shm_zone; + ngx_resolver_t *resolver; + ngx_msec_t resolver_timeout; #endif }; diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c --- a/src/http/ngx_http_upstream_round_robin.c +++ 
b/src/http/ngx_http_upstream_round_robin.c @@ -32,10 +32,15 @@ ngx_http_upstream_init_round_robin(ngx_c ngx_http_upstream_srv_conf_t *us) { ngx_url_t u; - ngx_uint_t i, j, n, w, t; + ngx_uint_t i, j, n, r, w, t; ngx_http_upstream_server_t *server; ngx_http_upstream_rr_peer_t *peer, **peerp; ngx_http_upstream_rr_peers_t *peers, *backup; +#if (NGX_HTTP_UPSTREAM_ZONE) + ngx_uint_t resolve; + ngx_http_core_loc_conf_t *clcf; + ngx_http_upstream_rr_peer_t **rpeerp; +#endif us->peer.init = ngx_http_upstream_init_round_robin_peer; @@ -43,14 +48,33 @@ ngx_http_upstream_init_round_robin(ngx_c server = us->servers->elts; n = 0; + r = 0; w = 0; t = 0; +#if (NGX_HTTP_UPSTREAM_ZONE) + resolve = 0; +#endif + for (i = 0; i < us->servers->nelts; i++) { + +#if (NGX_HTTP_UPSTREAM_ZONE) + if (server[i].host.len) { + resolve = 1; + } +#endif + if (server[i].backup) { continue; } +#if (NGX_HTTP_UPSTREAM_ZONE) + if (server[i].host.len) { + r++; + continue; + } +#endif + n += server[i].naddrs; w += server[i].naddrs * server[i].weight; @@ -59,7 +83,53 @@ ngx_http_upstream_init_round_robin(ngx_c } } - if (n == 0) { +#if (NGX_HTTP_UPSTREAM_ZONE) + if (us->shm_zone) { + + if (resolve && !(us->flags & NGX_HTTP_UPSTREAM_MODIFY)) { + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "load balancing method does not support" + " resolving names at run time in" + " upstream \"%V\" in %s:%ui", + &us->host, us->file_name, us->line); + return NGX_ERROR; + } + + clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module); + + us->resolver = clcf->resolver; + us->resolver_timeout = clcf->resolver_timeout; + + /* + * Without "resolver_timeout" in http{}, the value is unset. + * Even if we set it in ngx_http_core_merge_loc_conf(), it's + * still dependent on the module order and unreliable. 
+ */ + ngx_conf_init_msec_value(us->resolver_timeout, 30000); + + if (resolve + && (us->resolver == NULL + || us->resolver->connections.nelts == 0)) + { + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "no resolver defined to resolve names" + " at run time in upstream \"%V\" in %s:%ui", + &us->host, us->file_name, us->line); + return NGX_ERROR; + } + + } else if (resolve) { + + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "resolving names at run time requires" + " upstream \"%V\" in %s:%ui" + " to be in shared memory", + &us->host, us->file_name, us->line); + return NGX_ERROR; + } +#endif + + if (n + r == 0) { ngx_log_error(NGX_LOG_EMERG, cf->log, 0, "no servers in upstream \"%V\" in %s:%ui", &us->host, us->file_name, us->line); @@ -71,7 +141,8 @@ ngx_http_upstream_init_round_robin(ngx_c return NGX_ERROR; } - peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) * n); + peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) + * (n + r)); if (peer == NULL) { return NGX_ERROR; } @@ -86,11 +157,46 @@ ngx_http_upstream_init_round_robin(ngx_c n = 0; peerp = &peers->peer; +#if (NGX_HTTP_UPSTREAM_ZONE) + rpeerp = &peers->resolve; +#endif + for (i = 0; i < us->servers->nelts; i++) { if (server[i].backup) { continue; } +#if (NGX_HTTP_UPSTREAM_ZONE) + if (server[i].host.len) { + + peer[n].host = ngx_pcalloc(cf->pool, + sizeof(ngx_http_upstream_host_t)); + if (peer[n].host == NULL) { + return NGX_ERROR; + } + + peer[n].host->name = server[i].host; + + peer[n].sockaddr = server[i].addrs[0].sockaddr; + peer[n].socklen = server[i].addrs[0].socklen; + peer[n].name = server[i].addrs[0].name; + peer[n].weight = server[i].weight; + peer[n].effective_weight = server[i].weight; + peer[n].current_weight = 0; + peer[n].max_conns = server[i].max_conns; + peer[n].max_fails = server[i].max_fails; + peer[n].fail_timeout = server[i].fail_timeout; + peer[n].down = server[i].down; + peer[n].server = server[i].name; + + *rpeerp = &peer[n]; + rpeerp = &peer[n].next; + n++; + + 
continue; + } +#endif + for (j = 0; j < server[i].naddrs; j++) { peer[n].sockaddr = server[i].addrs[j].sockaddr; peer[n].socklen = server[i].addrs[j].socklen; @@ -115,6 +221,7 @@ ngx_http_upstream_init_round_robin(ngx_c /* backup servers */ n = 0; + r = 0; w = 0; t = 0; @@ -123,6 +230,13 @@ ngx_http_upstream_init_round_robin(ngx_c continue; } +#if (NGX_HTTP_UPSTREAM_ZONE) + if (server[i].host.len) { + r++; + continue; + } +#endif + n += server[i].naddrs; w += server[i].naddrs * server[i].weight; @@ -131,7 +245,7 @@ ngx_http_upstream_init_round_robin(ngx_c } } - if (n == 0) { + if (n + r == 0) { return NGX_OK; } @@ -140,12 +254,16 @@ ngx_http_upstream_init_round_robin(ngx_c return NGX_ERROR; } - peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) * n); + peer = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_rr_peer_t) + * (n + r)); if (peer == NULL) { return NGX_ERROR; } - peers->single = 0; + if (n > 0) { + peers->single = 0; + } + backup->single = 0; backup->number = n; backup->weighted = (w != n); @@ -156,11 +274,46 @@ ngx_http_upstream_init_round_robin(ngx_c n = 0; peerp = &backup->peer; +#if (NGX_HTTP_UPSTREAM_ZONE) + rpeerp = &backup->resolve; +#endif + for (i = 0; i < us->servers->nelts; i++) { if (!server[i].backup) { continue; } +#if (NGX_HTTP_UPSTREAM_ZONE) + if (server[i].host.len) { + + peer[n].host = ngx_pcalloc(cf->pool, + sizeof(ngx_http_upstream_host_t)); + if (peer[n].host == NULL) { + return NGX_ERROR; + } + + peer[n].host->name = server[i].host; + + peer[n].sockaddr = server[i].addrs[0].sockaddr; + peer[n].socklen = server[i].addrs[0].socklen; + peer[n].name = server[i].addrs[0].name; + peer[n].weight = server[i].weight; + peer[n].effective_weight = server[i].weight; + peer[n].current_weight = 0; + peer[n].max_conns = server[i].max_conns; + peer[n].max_fails = server[i].max_fails; + peer[n].fail_timeout = server[i].fail_timeout; + peer[n].down = server[i].down; + peer[n].server = server[i].name; + + *rpeerp = &peer[n]; + rpeerp = 
&peer[n].next; + n++; + + continue; + } +#endif + for (j = 0; j < server[i].naddrs; j++) { peer[n].sockaddr = server[i].addrs[j].sockaddr; peer[n].socklen = server[i].addrs[j].socklen; @@ -273,7 +426,12 @@ ngx_http_upstream_init_round_robin_peer( rrp->peers = us->peer.data; rrp->current = NULL; - rrp->config = 0; + + ngx_http_upstream_rr_peers_rlock(rrp->peers); + +#if (NGX_HTTP_UPSTREAM_ZONE) + rrp->config = rrp->peers->config ? *rrp->peers->config : 0; +#endif n = rrp->peers->number; @@ -281,6 +439,10 @@ ngx_http_upstream_init_round_robin_peer( n = rrp->peers->next->number; } + r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers); + + ngx_http_upstream_rr_peers_unlock(rrp->peers); + if (n <= 8 * sizeof(uintptr_t)) { rrp->tried = &rrp->data; rrp->data = 0; @@ -296,7 +458,6 @@ ngx_http_upstream_init_round_robin_peer( r->upstream->peer.get = ngx_http_upstream_get_round_robin_peer; r->upstream->peer.free = ngx_http_upstream_free_round_robin_peer; - r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers); #if (NGX_HTTP_SSL) r->upstream->peer.set_session = ngx_http_upstream_set_round_robin_peer_session; @@ -446,6 +607,12 @@ ngx_http_upstream_get_round_robin_peer(n peers = rrp->peers; ngx_http_upstream_rr_peers_wlock(peers); +#if (NGX_HTTP_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + goto busy; + } +#endif + if (peers->single) { peer = peers->peer; @@ -458,6 +625,7 @@ ngx_http_upstream_get_round_robin_peer(n } rrp->current = peer; + ngx_http_upstream_rr_peer_ref(peers, peer); } else { @@ -510,6 +678,10 @@ failed: ngx_http_upstream_rr_peers_wlock(peers); } +#if (NGX_HTTP_UPSTREAM_ZONE) +busy: +#endif + ngx_http_upstream_rr_peers_unlock(peers); pc->name = peers->name; @@ -580,6 +752,7 @@ ngx_http_upstream_get_peer(ngx_http_upst } rrp->current = best; + ngx_http_upstream_rr_peer_ref(rrp->peers, best); n = p / (8 * sizeof(uintptr_t)); m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); @@ -617,9 +790,16 @@ 
ngx_http_upstream_free_round_robin_peer( if (rrp->peers->single) { + if (peer->fails) { + peer->fails = 0; + } + peer->conns--; - ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); + if (ngx_http_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { + ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); + } + ngx_http_upstream_rr_peers_unlock(rrp->peers); pc->tries = 0; @@ -661,7 +841,10 @@ ngx_http_upstream_free_round_robin_peer( peer->conns--; - ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); + if (ngx_http_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { + ngx_http_upstream_rr_peer_unlock(rrp->peers, peer); + } + ngx_http_upstream_rr_peers_unlock(rrp->peers); if (pc->tries) { diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h --- a/src/http/ngx_http_upstream_round_robin.h +++ b/src/http/ngx_http_upstream_round_robin.h @@ -14,8 +14,23 @@ #include +typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; typedef struct ngx_http_upstream_rr_peer_s ngx_http_upstream_rr_peer_t; + +#if (NGX_HTTP_UPSTREAM_ZONE) + +typedef struct { + ngx_event_t event; /* must be first */ + ngx_uint_t worker; + ngx_str_t name; + ngx_http_upstream_rr_peers_t *peers; + ngx_http_upstream_rr_peer_t *peer; +} ngx_http_upstream_host_t; + +#endif + + struct ngx_http_upstream_rr_peer_s { struct sockaddr *sockaddr; socklen_t socklen; @@ -46,18 +61,20 @@ struct ngx_http_upstream_rr_peer_s { #endif #if (NGX_HTTP_UPSTREAM_ZONE) + unsigned zombie:1; + ngx_atomic_t lock; + ngx_uint_t refs; + ngx_http_upstream_host_t *host; #endif ngx_http_upstream_rr_peer_t *next; - NGX_COMPAT_BEGIN(32) + NGX_COMPAT_BEGIN(15) NGX_COMPAT_END }; -typedef struct ngx_http_upstream_rr_peers_s ngx_http_upstream_rr_peers_t; - struct ngx_http_upstream_rr_peers_s { ngx_uint_t number; @@ -78,6 +95,11 @@ struct ngx_http_upstream_rr_peers_s { ngx_http_upstream_rr_peers_t *next; ngx_http_upstream_rr_peer_t *peer; + +#if (NGX_HTTP_UPSTREAM_ZONE) + ngx_uint_t 
*config; + ngx_http_upstream_rr_peer_t *resolve; +#endif }; @@ -114,6 +136,65 @@ struct ngx_http_upstream_rr_peers_s { ngx_rwlock_unlock(&peer->lock); \ } + +#define ngx_http_upstream_rr_peer_ref(peers, peer) \ + (peer)->refs++; + + +static ngx_inline void +ngx_http_upstream_rr_peer_free_locked(ngx_http_upstream_rr_peers_t *peers, + ngx_http_upstream_rr_peer_t *peer) +{ + if (peer->refs) { + peer->zombie = 1; + return; + } + + ngx_slab_free_locked(peers->shpool, peer->sockaddr); + ngx_slab_free_locked(peers->shpool, peer->name.data); + + if (peer->server.data && (peer->host == NULL || peer->host->peer == peer)) { + ngx_slab_free_locked(peers->shpool, peer->server.data); + } + +#if (NGX_HTTP_SSL) + if (peer->ssl_session) { + ngx_slab_free_locked(peers->shpool, peer->ssl_session); + } +#endif + + ngx_slab_free_locked(peers->shpool, peer); +} + + +static ngx_inline void +ngx_http_upstream_rr_peer_free(ngx_http_upstream_rr_peers_t *peers, + ngx_http_upstream_rr_peer_t *peer) +{ + ngx_shmtx_lock(&peers->shpool->mutex); + ngx_http_upstream_rr_peer_free_locked(peers, peer); + ngx_shmtx_unlock(&peers->shpool->mutex); +} + + +static ngx_inline ngx_int_t +ngx_http_upstream_rr_peer_unref(ngx_http_upstream_rr_peers_t *peers, + ngx_http_upstream_rr_peer_t *peer) +{ + peer->refs--; + + if (peers->shpool == NULL) { + return NGX_OK; + } + + if (peer->refs == 0 && peer->zombie) { + ngx_http_upstream_rr_peer_free(peers, peer); + return NGX_DONE; + } + + return NGX_OK; +} + #else #define ngx_http_upstream_rr_peers_rlock(peers) @@ -121,6 +202,8 @@ struct ngx_http_upstream_rr_peers_s { #define ngx_http_upstream_rr_peers_unlock(peers) #define ngx_http_upstream_rr_peer_lock(peers, peer) #define ngx_http_upstream_rr_peer_unlock(peers, peer) +#define ngx_http_upstream_rr_peer_ref(peers, peer) +#define ngx_http_upstream_rr_peer_unref(peers, peer) NGX_OK #endif diff --git a/src/stream/ngx_stream_proxy_module.c b/src/stream/ngx_stream_proxy_module.c --- a/src/stream/ngx_stream_proxy_module.c 
+++ b/src/stream/ngx_stream_proxy_module.c @@ -742,6 +742,25 @@ ngx_stream_proxy_connect(ngx_stream_sess u->state->peer = u->peer.name; +#if (NGX_STREAM_UPSTREAM_ZONE) + if (u->upstream && u->upstream->shm_zone + && (u->upstream->flags & NGX_STREAM_UPSTREAM_MODIFY) + ) { + u->state->peer = ngx_palloc(s->connection->pool, + sizeof(ngx_str_t) + u->peer.name->len); + if (u->state->peer == NULL) { + ngx_stream_proxy_finalize(s, NGX_STREAM_INTERNAL_SERVER_ERROR); + return; + } + + u->state->peer->len = u->peer.name->len; + u->state->peer->data = (u_char *) (u->state->peer + 1); + ngx_memcpy(u->state->peer->data, u->peer.name->data, u->peer.name->len); + + u->peer.name = u->state->peer; + } +#endif + if (rc == NGX_BUSY) { ngx_log_error(NGX_LOG_ERR, c->log, 0, "no live upstreams"); ngx_stream_proxy_finalize(s, NGX_STREAM_BAD_GATEWAY); diff --git a/src/stream/ngx_stream_upstream.c b/src/stream/ngx_stream_upstream.c --- a/src/stream/ngx_stream_upstream.c +++ b/src/stream/ngx_stream_upstream.c @@ -319,6 +319,7 @@ ngx_stream_upstream(ngx_conf_t *cf, ngx_ u.no_port = 1; uscf = ngx_stream_upstream_add(cf, &u, NGX_STREAM_UPSTREAM_CREATE + |NGX_STREAM_UPSTREAM_MODIFY |NGX_STREAM_UPSTREAM_WEIGHT |NGX_STREAM_UPSTREAM_MAX_CONNS |NGX_STREAM_UPSTREAM_MAX_FAILS @@ -408,6 +409,9 @@ ngx_stream_upstream_server(ngx_conf_t *c ngx_url_t u; ngx_int_t weight, max_conns, max_fails; ngx_uint_t i; +#if (NGX_STREAM_UPSTREAM_ZONE) + ngx_uint_t resolve; +#endif ngx_stream_upstream_server_t *us; us = ngx_array_push(uscf->servers); @@ -423,6 +427,9 @@ ngx_stream_upstream_server(ngx_conf_t *c max_conns = 0; max_fails = 1; fail_timeout = 10; +#if (NGX_STREAM_UPSTREAM_ZONE) + resolve = 0; +#endif for (i = 2; i < cf->args->nelts; i++) { @@ -511,6 +518,13 @@ ngx_stream_upstream_server(ngx_conf_t *c continue; } +#if (NGX_STREAM_UPSTREAM_ZONE) + if (ngx_strcmp(value[i].data, "resolve") == 0) { + resolve = 1; + continue; + } +#endif + goto invalid; } @@ -518,6 +532,13 @@ ngx_stream_upstream_server(ngx_conf_t 
*c u.url = value[1]; +#if (NGX_STREAM_UPSTREAM_ZONE) + if (resolve) { + /* resolve at run time */ + u.no_resolve = 1; + } +#endif + if (ngx_parse_url(cf->pool, &u) != NGX_OK) { if (u.err) { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, @@ -534,8 +555,45 @@ ngx_stream_upstream_server(ngx_conf_t *c } us->name = u.url; + +#if (NGX_STREAM_UPSTREAM_ZONE) + + if (resolve && u.naddrs == 0) { + ngx_addr_t *addr; + + /* save port */ + + addr = ngx_pcalloc(cf->pool, sizeof(ngx_addr_t)); + if (addr == NULL) { + return NGX_CONF_ERROR; + } + + addr->sockaddr = ngx_palloc(cf->pool, u.socklen); + if (addr->sockaddr == NULL) { + return NGX_CONF_ERROR; + } + + ngx_memcpy(addr->sockaddr, &u.sockaddr, u.socklen); + + addr->socklen = u.socklen; + + us->addrs = addr; + us->naddrs = 1; + + us->host = u.host; + + } else { + us->addrs = u.addrs; + us->naddrs = u.naddrs; + } + +#else + us->addrs = u.addrs; us->naddrs = u.naddrs; + +#endif + us->weight = weight; us->max_conns = max_conns; us->max_fails = max_fails; diff --git a/src/stream/ngx_stream_upstream.h b/src/stream/ngx_stream_upstream.h --- a/src/stream/ngx_stream_upstream.h +++ b/src/stream/ngx_stream_upstream.h @@ -21,6 +21,7 @@ #define NGX_STREAM_UPSTREAM_FAIL_TIMEOUT 0x0008 #define NGX_STREAM_UPSTREAM_DOWN 0x0010 #define NGX_STREAM_UPSTREAM_BACKUP 0x0020 +#define NGX_STREAM_UPSTREAM_MODIFY 0x0040 #define NGX_STREAM_UPSTREAM_MAX_CONNS 0x0100 @@ -62,7 +63,11 @@ typedef struct { unsigned backup:1; - NGX_COMPAT_BEGIN(4) +#if (NGX_STREAM_UPSTREAM_ZONE) + ngx_str_t host; +#endif + + NGX_COMPAT_BEGIN(2) NGX_COMPAT_END } ngx_stream_upstream_server_t; @@ -83,6 +88,8 @@ struct ngx_stream_upstream_srv_conf_s { #if (NGX_STREAM_UPSTREAM_ZONE) ngx_shm_zone_t *shm_zone; + ngx_resolver_t *resolver; + ngx_msec_t resolver_timeout; #endif }; diff --git a/src/stream/ngx_stream_upstream_hash_module.c b/src/stream/ngx_stream_upstream_hash_module.c --- a/src/stream/ngx_stream_upstream_hash_module.c +++ b/src/stream/ngx_stream_upstream_hash_module.c @@ 
-23,6 +23,9 @@ typedef struct { typedef struct { +#if (NGX_STREAM_UPSTREAM_ZONE) + ngx_uint_t config; +#endif ngx_stream_complex_value_t key; ngx_stream_upstream_chash_points_t *points; } ngx_stream_upstream_hash_srv_conf_t; @@ -49,6 +52,8 @@ static ngx_int_t ngx_stream_upstream_get static ngx_int_t ngx_stream_upstream_init_chash(ngx_conf_t *cf, ngx_stream_upstream_srv_conf_t *us); +static ngx_int_t ngx_stream_upstream_update_chash(ngx_pool_t *pool, + ngx_stream_upstream_srv_conf_t *us); static int ngx_libc_cdecl ngx_stream_upstream_chash_cmp_points(const void *one, const void *two); static ngx_uint_t ngx_stream_upstream_find_chash_point( @@ -178,11 +183,18 @@ ngx_stream_upstream_get_hash_peer(ngx_pe ngx_stream_upstream_rr_peers_rlock(hp->rrp.peers); - if (hp->tries > 20 || hp->rrp.peers->single || hp->key.len == 0) { + if (hp->tries > 20 || hp->rrp.peers->number < 2 || hp->key.len == 0) { ngx_stream_upstream_rr_peers_unlock(hp->rrp.peers); return hp->get_rr_peer(pc, &hp->rrp); } +#if (NGX_STREAM_UPSTREAM_ZONE) + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { + ngx_stream_upstream_rr_peers_unlock(hp->rrp.peers); + return hp->get_rr_peer(pc, &hp->rrp); + } +#endif + now = ngx_time(); pc->connection = NULL; @@ -261,6 +273,7 @@ ngx_stream_upstream_get_hash_peer(ngx_pe } hp->rrp.current = peer; + ngx_stream_upstream_rr_peer_ref(hp->rrp.peers, peer); pc->sockaddr = peer->sockaddr; pc->socklen = peer->socklen; @@ -285,6 +298,26 @@ static ngx_int_t ngx_stream_upstream_init_chash(ngx_conf_t *cf, ngx_stream_upstream_srv_conf_t *us) { + if (ngx_stream_upstream_init_round_robin(cf, us) != NGX_OK) { + return NGX_ERROR; + } + + us->peer.init = ngx_stream_upstream_init_chash_peer; + +#if (NGX_STREAM_UPSTREAM_ZONE) + if (us->shm_zone) { + return NGX_OK; + } +#endif + + return ngx_stream_upstream_update_chash(cf->pool, us); +} + + +static ngx_int_t +ngx_stream_upstream_update_chash(ngx_pool_t *pool, + ngx_stream_upstream_srv_conf_t *us) +{ u_char *host, 
*port, c; size_t host_len, port_len, size; uint32_t hash, base_hash; @@ -299,25 +332,33 @@ ngx_stream_upstream_init_chash(ngx_conf_ u_char byte[4]; } prev_hash; - if (ngx_stream_upstream_init_round_robin(cf, us) != NGX_OK) { - return NGX_ERROR; + hcf = ngx_stream_conf_upstream_srv_conf(us, + ngx_stream_upstream_hash_module); + + if (hcf->points) { + ngx_free(hcf->points); + hcf->points = NULL; } - us->peer.init = ngx_stream_upstream_init_chash_peer; - peers = us->peer.data; npoints = peers->total_weight * 160; size = sizeof(ngx_stream_upstream_chash_points_t) - + sizeof(ngx_stream_upstream_chash_point_t) * (npoints - 1); + - sizeof(ngx_stream_upstream_chash_point_t) + + sizeof(ngx_stream_upstream_chash_point_t) * npoints; - points = ngx_palloc(cf->pool, size); + points = pool ? ngx_palloc(pool, size) : ngx_alloc(size, ngx_cycle->log); if (points == NULL) { return NGX_ERROR; } points->number = 0; + if (npoints == 0) { + hcf->points = points; + return NGX_OK; + } + for (peer = peers->peer; peer; peer = peer->next) { server = &peer->server; @@ -401,8 +442,6 @@ ngx_stream_upstream_init_chash(ngx_conf_ points->number = i + 1; - hcf = ngx_stream_conf_upstream_srv_conf(us, - ngx_stream_upstream_hash_module); hcf->points = points; return NGX_OK; @@ -483,7 +522,22 @@ ngx_stream_upstream_init_chash_peer(ngx_ ngx_stream_upstream_rr_peers_rlock(hp->rrp.peers); - hp->hash = ngx_stream_upstream_find_chash_point(hcf->points, hash); +#if (NGX_STREAM_UPSTREAM_ZONE) + if (hp->rrp.peers->config + && (hcf->points == NULL || hcf->config != *hp->rrp.peers->config)) + { + if (ngx_stream_upstream_update_chash(NULL, us) != NGX_OK) { + ngx_stream_upstream_rr_peers_unlock(hp->rrp.peers); + return NGX_ERROR; + } + + hcf->config = *hp->rrp.peers->config; + } +#endif + + if (hcf->points->number) { + hp->hash = ngx_stream_upstream_find_chash_point(hcf->points, hash); + } ngx_stream_upstream_rr_peers_unlock(hp->rrp.peers); @@ -518,6 +572,20 @@ ngx_stream_upstream_get_chash_peer(ngx_p 
pc->connection = NULL; + if (hp->rrp.peers->number == 0) { + pc->name = hp->rrp.peers->name; + ngx_stream_upstream_rr_peers_unlock(hp->rrp.peers); + return NGX_BUSY; + } + +#if (NGX_STREAM_UPSTREAM_ZONE) + if (hp->rrp.peers->config && hp->rrp.config != *hp->rrp.peers->config) { + pc->name = hp->rrp.peers->name; + ngx_stream_upstream_rr_peers_unlock(hp->rrp.peers); + return NGX_BUSY; + } +#endif + now = ngx_time(); hcf = hp->conf; @@ -596,6 +664,7 @@ ngx_stream_upstream_get_chash_peer(ngx_p } hp->rrp.current = best; + ngx_stream_upstream_rr_peer_ref(hp->rrp.peers, best); pc->sockaddr = best->sockaddr; pc->socklen = best->socklen; @@ -663,6 +732,7 @@ ngx_stream_upstream_hash(ngx_conf_t *cf, } uscf->flags = NGX_STREAM_UPSTREAM_CREATE + |NGX_STREAM_UPSTREAM_MODIFY |NGX_STREAM_UPSTREAM_WEIGHT |NGX_STREAM_UPSTREAM_MAX_CONNS |NGX_STREAM_UPSTREAM_MAX_FAILS diff --git a/src/stream/ngx_stream_upstream_least_conn_module.c b/src/stream/ngx_stream_upstream_least_conn_module.c --- a/src/stream/ngx_stream_upstream_least_conn_module.c +++ b/src/stream/ngx_stream_upstream_least_conn_module.c @@ -120,6 +120,12 @@ ngx_stream_upstream_get_least_conn_peer( ngx_stream_upstream_rr_peers_wlock(peers); +#if (NGX_STREAM_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + goto busy; + } +#endif + best = NULL; total = 0; @@ -240,6 +246,7 @@ ngx_stream_upstream_get_least_conn_peer( best->conns++; rrp->current = best; + ngx_stream_upstream_rr_peer_ref(peers, best); n = p / (8 * sizeof(uintptr_t)); m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); @@ -274,8 +281,18 @@ failed: } ngx_stream_upstream_rr_peers_wlock(peers); + +#if (NGX_STREAM_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + goto busy; + } +#endif } +#if (NGX_STREAM_UPSTREAM_ZONE) +busy: +#endif + ngx_stream_upstream_rr_peers_unlock(peers); pc->name = peers->name; @@ -299,6 +316,7 @@ ngx_stream_upstream_least_conn(ngx_conf_ uscf->peer.init_upstream = ngx_stream_upstream_init_least_conn; 
uscf->flags = NGX_STREAM_UPSTREAM_CREATE + |NGX_STREAM_UPSTREAM_MODIFY |NGX_STREAM_UPSTREAM_WEIGHT |NGX_STREAM_UPSTREAM_MAX_CONNS |NGX_STREAM_UPSTREAM_MAX_FAILS diff --git a/src/stream/ngx_stream_upstream_random_module.c b/src/stream/ngx_stream_upstream_random_module.c --- a/src/stream/ngx_stream_upstream_random_module.c +++ b/src/stream/ngx_stream_upstream_random_module.c @@ -17,6 +17,9 @@ typedef struct { typedef struct { ngx_uint_t two; +#if (NGX_STREAM_UPSTREAM_ZONE) + ngx_uint_t config; +#endif ngx_stream_upstream_random_range_t *ranges; } ngx_stream_upstream_random_srv_conf_t; @@ -125,6 +128,11 @@ ngx_stream_upstream_update_random(ngx_po rcf = ngx_stream_conf_upstream_srv_conf(us, ngx_stream_upstream_random_module); + if (rcf->ranges) { + ngx_free(rcf->ranges); + rcf->ranges = NULL; + } + peers = us->peer.data; size = peers->number * sizeof(ngx_stream_upstream_random_range_t); @@ -186,11 +194,15 @@ ngx_stream_upstream_init_random_peer(ngx ngx_stream_upstream_rr_peers_rlock(rp->rrp.peers); #if (NGX_STREAM_UPSTREAM_ZONE) - if (rp->rrp.peers->shpool && rcf->ranges == NULL) { + if (rp->rrp.peers->config + && (rcf->ranges == NULL || rcf->config != *rp->rrp.peers->config)) + { if (ngx_stream_upstream_update_random(NULL, us) != NGX_OK) { ngx_stream_upstream_rr_peers_unlock(rp->rrp.peers); return NGX_ERROR; } + + rcf->config = *rp->rrp.peers->config; } #endif @@ -220,11 +232,18 @@ ngx_stream_upstream_get_random_peer(ngx_ ngx_stream_upstream_rr_peers_rlock(peers); - if (rp->tries > 20 || peers->single) { + if (rp->tries > 20 || peers->number < 2) { ngx_stream_upstream_rr_peers_unlock(peers); return ngx_stream_upstream_get_round_robin_peer(pc, rrp); } +#if (NGX_STREAM_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + ngx_stream_upstream_rr_peers_unlock(peers); + return ngx_stream_upstream_get_round_robin_peer(pc, rrp); + } +#endif + pc->cached = 0; pc->connection = NULL; @@ -274,6 +293,7 @@ ngx_stream_upstream_get_random_peer(ngx_ } rrp->current 
= peer; + ngx_stream_upstream_rr_peer_ref(peers, peer); if (now - peer->checked > peer->fail_timeout) { peer->checked = now; @@ -314,11 +334,18 @@ ngx_stream_upstream_get_random2_peer(ngx ngx_stream_upstream_rr_peers_wlock(peers); - if (rp->tries > 20 || peers->single) { + if (rp->tries > 20 || peers->number < 2) { ngx_stream_upstream_rr_peers_unlock(peers); return ngx_stream_upstream_get_round_robin_peer(pc, rrp); } +#if (NGX_STREAM_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + ngx_stream_upstream_rr_peers_unlock(peers); + return ngx_stream_upstream_get_round_robin_peer(pc, rrp); + } +#endif + pc->cached = 0; pc->connection = NULL; @@ -384,6 +411,7 @@ ngx_stream_upstream_get_random2_peer(ngx } rrp->current = peer; + ngx_stream_upstream_rr_peer_ref(peers, peer); if (now - peer->checked > peer->fail_timeout) { peer->checked = now; @@ -467,6 +495,7 @@ ngx_stream_upstream_random(ngx_conf_t *c uscf->peer.init_upstream = ngx_stream_upstream_init_random; uscf->flags = NGX_STREAM_UPSTREAM_CREATE + |NGX_STREAM_UPSTREAM_MODIFY |NGX_STREAM_UPSTREAM_WEIGHT |NGX_STREAM_UPSTREAM_MAX_CONNS |NGX_STREAM_UPSTREAM_MAX_FAILS diff --git a/src/stream/ngx_stream_upstream_round_robin.c b/src/stream/ngx_stream_upstream_round_robin.c --- a/src/stream/ngx_stream_upstream_round_robin.c +++ b/src/stream/ngx_stream_upstream_round_robin.c @@ -38,10 +38,15 @@ ngx_stream_upstream_init_round_robin(ngx ngx_stream_upstream_srv_conf_t *us) { ngx_url_t u; - ngx_uint_t i, j, n, w, t; + ngx_uint_t i, j, n, r, w, t; ngx_stream_upstream_server_t *server; ngx_stream_upstream_rr_peer_t *peer, **peerp; ngx_stream_upstream_rr_peers_t *peers, *backup; +#if (NGX_STREAM_UPSTREAM_ZONE) + ngx_uint_t resolve; + ngx_stream_core_srv_conf_t *cscf; + ngx_stream_upstream_rr_peer_t **rpeerp; +#endif us->peer.init = ngx_stream_upstream_init_round_robin_peer; @@ -49,14 +54,33 @@ ngx_stream_upstream_init_round_robin(ngx server = us->servers->elts; n = 0; + r = 0; w = 0; t = 0; +#if 
(NGX_STREAM_UPSTREAM_ZONE) + resolve = 0; +#endif + for (i = 0; i < us->servers->nelts; i++) { + +#if (NGX_STREAM_UPSTREAM_ZONE) + if (server[i].host.len) { + resolve = 1; + } +#endif + if (server[i].backup) { continue; } +#if (NGX_STREAM_UPSTREAM_ZONE) + if (server[i].host.len) { + r++; + continue; + } +#endif + n += server[i].naddrs; w += server[i].naddrs * server[i].weight; @@ -65,7 +89,54 @@ ngx_stream_upstream_init_round_robin(ngx } } - if (n == 0) { +#if (NGX_STREAM_UPSTREAM_ZONE) + if (us->shm_zone) { + + if (resolve && !(us->flags & NGX_STREAM_UPSTREAM_MODIFY)) { + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "load balancing method does not support" + " resolving names at run time in" + " upstream \"%V\" in %s:%ui", + &us->host, us->file_name, us->line); + return NGX_ERROR; + } + + cscf = ngx_stream_conf_get_module_srv_conf(cf, + ngx_stream_core_module); + + us->resolver = cscf->resolver; + us->resolver_timeout = cscf->resolver_timeout; + + /* + * Without "resolver_timeout" in stream{}, the value is unset. + * Even if we set it in ngx_stream_core_merge_srv_conf(), it's + * still dependent on the module order and unreliable. 
+ */ + ngx_conf_init_msec_value(us->resolver_timeout, 30000); + + if (resolve + && (us->resolver == NULL + || us->resolver->connections.nelts == 0)) + { + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "no resolver defined to resolve names" + " at run time in upstream \"%V\" in %s:%ui", + &us->host, us->file_name, us->line); + return NGX_ERROR; + } + + } else if (resolve) { + + ngx_log_error(NGX_LOG_EMERG, cf->log, 0, + "resolving names at run time requires" + " upstream \"%V\" in %s:%ui" + " to be in shared memory", + &us->host, us->file_name, us->line); + return NGX_ERROR; + } +#endif + + if (n + r == 0) { ngx_log_error(NGX_LOG_EMERG, cf->log, 0, "no servers in upstream \"%V\" in %s:%ui", &us->host, us->file_name, us->line); @@ -77,7 +148,8 @@ ngx_stream_upstream_init_round_robin(ngx return NGX_ERROR; } - peer = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peer_t) * n); + peer = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peer_t) + * (n + r)); if (peer == NULL) { return NGX_ERROR; } @@ -92,11 +164,45 @@ ngx_stream_upstream_init_round_robin(ngx n = 0; peerp = &peers->peer; +#if (NGX_STREAM_UPSTREAM_ZONE) + rpeerp = &peers->resolve; +#endif + for (i = 0; i < us->servers->nelts; i++) { if (server[i].backup) { continue; } +#if (NGX_STREAM_UPSTREAM_ZONE) + if (server[i].host.len) { + + peer[n].host = ngx_pcalloc(cf->pool, + sizeof(ngx_stream_upstream_host_t)); + if (peer[n].host == NULL) { + return NGX_ERROR; + } + + peer[n].host->name = server[i].host; + + peer[n].sockaddr = server[i].addrs[0].sockaddr; + peer[n].socklen = server[i].addrs[0].socklen; + peer[n].name = server[i].addrs[0].name; + peer[n].weight = server[i].weight; + peer[n].effective_weight = server[i].weight; + peer[n].current_weight = 0; + peer[n].max_conns = server[i].max_conns; + peer[n].max_fails = server[i].max_fails; + peer[n].fail_timeout = server[i].fail_timeout; + peer[n].down = server[i].down; + peer[n].server = server[i].name; + *rpeerp = &peer[n]; + rpeerp = &peer[n].next; + n++; 
+ + continue; + } +#endif + for (j = 0; j < server[i].naddrs; j++) { peer[n].sockaddr = server[i].addrs[j].sockaddr; peer[n].socklen = server[i].addrs[j].socklen; @@ -121,6 +227,7 @@ ngx_stream_upstream_init_round_robin(ngx /* backup servers */ n = 0; + r = 0; w = 0; t = 0; @@ -129,6 +236,13 @@ ngx_stream_upstream_init_round_robin(ngx continue; } +#if (NGX_STREAM_UPSTREAM_ZONE) + if (server[i].host.len) { + r++; + continue; + } +#endif + n += server[i].naddrs; w += server[i].naddrs * server[i].weight; @@ -137,7 +251,7 @@ ngx_stream_upstream_init_round_robin(ngx } } - if (n == 0) { + if (n + r == 0) { return NGX_OK; } @@ -146,12 +260,16 @@ ngx_stream_upstream_init_round_robin(ngx return NGX_ERROR; } - peer = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peer_t) * n); + peer = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peer_t) + * (n + r)); if (peer == NULL) { return NGX_ERROR; } - peers->single = 0; + if (n > 0) { + peers->single = 0; + } + backup->single = 0; backup->number = n; backup->weighted = (w != n); @@ -162,11 +280,45 @@ ngx_stream_upstream_init_round_robin(ngx n = 0; peerp = &backup->peer; +#if (NGX_STREAM_UPSTREAM_ZONE) + rpeerp = &backup->resolve; +#endif + for (i = 0; i < us->servers->nelts; i++) { if (!server[i].backup) { continue; } +#if (NGX_STREAM_UPSTREAM_ZONE) + if (server[i].host.len) { + + peer[n].host = ngx_pcalloc(cf->pool, + sizeof(ngx_stream_upstream_host_t)); + if (peer[n].host == NULL) { + return NGX_ERROR; + } + + peer[n].host->name = server[i].host; + + peer[n].sockaddr = server[i].addrs[0].sockaddr; + peer[n].socklen = server[i].addrs[0].socklen; + peer[n].name = server[i].addrs[0].name; + peer[n].weight = server[i].weight; + peer[n].effective_weight = server[i].weight; + peer[n].current_weight = 0; + peer[n].max_conns = server[i].max_conns; + peer[n].max_fails = server[i].max_fails; + peer[n].fail_timeout = server[i].fail_timeout; + peer[n].down = server[i].down; + peer[n].server = server[i].name; + *rpeerp = &peer[n]; + 
rpeerp = &peer[n].next; + n++; + + continue; + } +#endif + for (j = 0; j < server[i].naddrs; j++) { peer[n].sockaddr = server[i].addrs[j].sockaddr; peer[n].socklen = server[i].addrs[j].socklen; @@ -280,7 +432,12 @@ ngx_stream_upstream_init_round_robin_pee rrp->peers = us->peer.data; rrp->current = NULL; - rrp->config = 0; + + ngx_stream_upstream_rr_peers_rlock(rrp->peers); + +#if (NGX_STREAM_UPSTREAM_ZONE) + rrp->config = rrp->peers->config ? *rrp->peers->config : 0; +#endif n = rrp->peers->number; @@ -288,6 +445,10 @@ ngx_stream_upstream_init_round_robin_pee n = rrp->peers->next->number; } + s->upstream->peer.tries = ngx_stream_upstream_tries(rrp->peers); + + ngx_stream_upstream_rr_peers_unlock(rrp->peers); + if (n <= 8 * sizeof(uintptr_t)) { rrp->tried = &rrp->data; rrp->data = 0; @@ -304,7 +465,7 @@ ngx_stream_upstream_init_round_robin_pee s->upstream->peer.get = ngx_stream_upstream_get_round_robin_peer; s->upstream->peer.free = ngx_stream_upstream_free_round_robin_peer; s->upstream->peer.notify = ngx_stream_upstream_notify_round_robin_peer; - s->upstream->peer.tries = ngx_stream_upstream_tries(rrp->peers); + #if (NGX_STREAM_SSL) s->upstream->peer.set_session = ngx_stream_upstream_set_round_robin_peer_session; @@ -455,6 +616,12 @@ ngx_stream_upstream_get_round_robin_peer peers = rrp->peers; ngx_stream_upstream_rr_peers_wlock(peers); +#if (NGX_STREAM_UPSTREAM_ZONE) + if (peers->config && rrp->config != *peers->config) { + goto busy; + } +#endif + if (peers->single) { peer = peers->peer; @@ -467,6 +634,7 @@ ngx_stream_upstream_get_round_robin_peer } rrp->current = peer; + ngx_stream_upstream_rr_peer_ref(peers, peer); } else { @@ -519,6 +687,10 @@ failed: ngx_stream_upstream_rr_peers_wlock(peers); } +#if (NGX_STREAM_UPSTREAM_ZONE) +busy: +#endif + ngx_stream_upstream_rr_peers_unlock(peers); pc->name = peers->name; @@ -589,6 +761,7 @@ ngx_stream_upstream_get_peer(ngx_stream_ } rrp->current = best; + ngx_stream_upstream_rr_peer_ref(rrp->peers, best); n = p / (8 * 
sizeof(uintptr_t)); m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); @@ -623,9 +796,17 @@ ngx_stream_upstream_free_round_robin_pee ngx_stream_upstream_rr_peer_lock(rrp->peers, peer); if (rrp->peers->single) { + + if (peer->fails) { + peer->fails = 0; + } + peer->conns--; - ngx_stream_upstream_rr_peer_unlock(rrp->peers, peer); + if (ngx_stream_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { + ngx_stream_upstream_rr_peer_unlock(rrp->peers, peer); + } + ngx_stream_upstream_rr_peers_unlock(rrp->peers); pc->tries = 0; @@ -667,7 +848,10 @@ ngx_stream_upstream_free_round_robin_pee peer->conns--; - ngx_stream_upstream_rr_peer_unlock(rrp->peers, peer); + if (ngx_stream_upstream_rr_peer_unref(rrp->peers, peer) == NGX_OK) { + ngx_stream_upstream_rr_peer_unlock(rrp->peers, peer); + } + ngx_stream_upstream_rr_peers_unlock(rrp->peers); if (pc->tries) { diff --git a/src/stream/ngx_stream_upstream_round_robin.h b/src/stream/ngx_stream_upstream_round_robin.h --- a/src/stream/ngx_stream_upstream_round_robin.h +++ b/src/stream/ngx_stream_upstream_round_robin.h @@ -14,8 +14,23 @@ #include +typedef struct ngx_stream_upstream_rr_peers_s ngx_stream_upstream_rr_peers_t; typedef struct ngx_stream_upstream_rr_peer_s ngx_stream_upstream_rr_peer_t; + +#if (NGX_STREAM_UPSTREAM_ZONE) + +typedef struct { + ngx_event_t event; /* must be first */ + ngx_uint_t worker; + ngx_str_t name; + ngx_stream_upstream_rr_peers_t *peers; + ngx_stream_upstream_rr_peer_t *peer; +} ngx_stream_upstream_host_t; + +#endif + + struct ngx_stream_upstream_rr_peer_s { struct sockaddr *sockaddr; socklen_t socklen; @@ -44,18 +59,20 @@ struct ngx_stream_upstream_rr_peer_s { int ssl_session_len; #if (NGX_STREAM_UPSTREAM_ZONE) + unsigned zombie:1; + ngx_atomic_t lock; + ngx_uint_t refs; + ngx_stream_upstream_host_t *host; #endif ngx_stream_upstream_rr_peer_t *next; - NGX_COMPAT_BEGIN(25) + NGX_COMPAT_BEGIN(14) NGX_COMPAT_END }; -typedef struct ngx_stream_upstream_rr_peers_s ngx_stream_upstream_rr_peers_t; - struct 
ngx_stream_upstream_rr_peers_s { ngx_uint_t number; @@ -76,6 +93,11 @@ struct ngx_stream_upstream_rr_peers_s { ngx_stream_upstream_rr_peers_t *next; ngx_stream_upstream_rr_peer_t *peer; + +#if (NGX_STREAM_UPSTREAM_ZONE) + ngx_uint_t *config; + ngx_stream_upstream_rr_peer_t *resolve; +#endif }; @@ -112,6 +134,65 @@ struct ngx_stream_upstream_rr_peers_s { ngx_rwlock_unlock(&peer->lock); \ } + +#define ngx_stream_upstream_rr_peer_ref(peers, peer) \ + (peer)->refs++; + + +static ngx_inline void +ngx_stream_upstream_rr_peer_free_locked(ngx_stream_upstream_rr_peers_t *peers, + ngx_stream_upstream_rr_peer_t *peer) +{ + if (peer->refs) { + peer->zombie = 1; + return; + } + + ngx_slab_free_locked(peers->shpool, peer->sockaddr); + ngx_slab_free_locked(peers->shpool, peer->name.data); + + if (peer->server.data && (peer->host == NULL || peer->host->peer == peer)) { + ngx_slab_free_locked(peers->shpool, peer->server.data); + } + +#if (NGX_STREAM_SSL) + if (peer->ssl_session) { + ngx_slab_free_locked(peers->shpool, peer->ssl_session); + } +#endif + + ngx_slab_free_locked(peers->shpool, peer); +} + + +static ngx_inline void +ngx_stream_upstream_rr_peer_free(ngx_stream_upstream_rr_peers_t *peers, + ngx_stream_upstream_rr_peer_t *peer) +{ + ngx_shmtx_lock(&peers->shpool->mutex); + ngx_stream_upstream_rr_peer_free_locked(peers, peer); + ngx_shmtx_unlock(&peers->shpool->mutex); +} + + +static ngx_inline ngx_int_t +ngx_stream_upstream_rr_peer_unref(ngx_stream_upstream_rr_peers_t *peers, + ngx_stream_upstream_rr_peer_t *peer) +{ + peer->refs--; + + if (peers->shpool == NULL) { + return NGX_OK; + } + + if (peer->refs == 0 && peer->zombie) { + ngx_stream_upstream_rr_peer_free(peers, peer); + return NGX_DONE; + } + + return NGX_OK; +} + #else #define ngx_stream_upstream_rr_peers_rlock(peers) @@ -119,6 +200,8 @@ struct ngx_stream_upstream_rr_peers_s { #define ngx_stream_upstream_rr_peers_unlock(peers) #define ngx_stream_upstream_rr_peer_lock(peers, peer) #define 
ngx_stream_upstream_rr_peer_unlock(peers, peer) +#define ngx_stream_upstream_rr_peer_ref(peers, peer) +#define ngx_stream_upstream_rr_peer_unref(peers, peer) NGX_OK #endif diff --git a/src/stream/ngx_stream_upstream_zone_module.c b/src/stream/ngx_stream_upstream_zone_module.c --- a/src/stream/ngx_stream_upstream_zone_module.c +++ b/src/stream/ngx_stream_upstream_zone_module.c @@ -18,6 +18,13 @@ static ngx_stream_upstream_rr_peers_t *n ngx_slab_pool_t *shpool, ngx_stream_upstream_srv_conf_t *uscf); static ngx_stream_upstream_rr_peer_t *ngx_stream_upstream_zone_copy_peer( ngx_stream_upstream_rr_peers_t *peers, ngx_stream_upstream_rr_peer_t *src); +static void ngx_stream_upstream_zone_set_single( + ngx_stream_upstream_srv_conf_t *uscf); +static void ngx_stream_upstream_zone_remove_peer_locked( + ngx_stream_upstream_rr_peers_t *peers, ngx_stream_upstream_rr_peer_t *peer); +static ngx_int_t ngx_stream_upstream_zone_init_worker(ngx_cycle_t *cycle); +static void ngx_stream_upstream_zone_resolve_timer(ngx_event_t *event); +static void ngx_stream_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx); static ngx_command_t ngx_stream_upstream_zone_commands[] = { @@ -52,7 +59,7 @@ ngx_module_t ngx_stream_upstream_zone_m NGX_STREAM_MODULE, /* module type */ NULL, /* init master */ NULL, /* init module */ - NULL, /* init process */ + ngx_stream_upstream_zone_init_worker, /* init process */ NULL, /* init thread */ NULL, /* exit thread */ NULL, /* exit process */ @@ -185,9 +192,15 @@ ngx_stream_upstream_zone_copy_peers(ngx_ ngx_stream_upstream_srv_conf_t *uscf) { ngx_str_t *name; + ngx_uint_t *config; ngx_stream_upstream_rr_peer_t *peer, **peerp; ngx_stream_upstream_rr_peers_t *peers, *backup; + config = ngx_slab_calloc(shpool, sizeof(ngx_uint_t)); + if (config == NULL) { + return NULL; + } + peers = ngx_slab_alloc(shpool, sizeof(ngx_stream_upstream_rr_peers_t)); if (peers == NULL) { return NULL; @@ -211,6 +224,7 @@ ngx_stream_upstream_zone_copy_peers(ngx_ peers->name = name; 
peers->shpool = shpool; + peers->config = config; for (peerp = &peers->peer; *peerp; peerp = &peer->next) { /* pool is unlocked */ @@ -220,6 +234,17 @@ ngx_stream_upstream_zone_copy_peers(ngx_ } *peerp = peer; + (*peers->config)++; + } + + for (peerp = &peers->resolve; *peerp; peerp = &peer->next) { + peer = ngx_stream_upstream_zone_copy_peer(peers, *peerp); + if (peer == NULL) { + return NULL; + } + + *peerp = peer; + (*peers->config)++; } if (peers->next == NULL) { @@ -236,6 +261,7 @@ ngx_stream_upstream_zone_copy_peers(ngx_ backup->name = name; backup->shpool = shpool; + backup->config = config; for (peerp = &backup->peer; *peerp; peerp = &peer->next) { /* pool is unlocked */ @@ -245,6 +271,17 @@ ngx_stream_upstream_zone_copy_peers(ngx_ } *peerp = peer; + (*backup->config)++; + } + + for (peerp = &backup->resolve; *peerp; peerp = &peer->next) { + peer = ngx_stream_upstream_zone_copy_peer(backup, *peerp); + if (peer == NULL) { + return NULL; + } + + *peerp = peer; + (*backup->config)++; } peers->next = backup; @@ -276,6 +313,7 @@ ngx_stream_upstream_zone_copy_peer(ngx_s dst->sockaddr = NULL; dst->name.data = NULL; dst->server.data = NULL; + dst->host = NULL; } dst->sockaddr = ngx_slab_calloc_locked(pool, sizeof(ngx_sockaddr_t)); @@ -298,12 +336,37 @@ ngx_stream_upstream_zone_copy_peer(ngx_s } ngx_memcpy(dst->server.data, src->server.data, src->server.len); + + if (src->host) { + dst->host = ngx_slab_calloc_locked(pool, + sizeof(ngx_stream_upstream_host_t)); + if (dst->host == NULL) { + goto failed; + } + + dst->host->name.data = ngx_slab_alloc_locked(pool, + src->host->name.len); + if (dst->host->name.data == NULL) { + goto failed; + } + + dst->host->peers = peers; + dst->host->peer = dst; + + dst->host->name.len = src->host->name.len; + ngx_memcpy(dst->host->name.data, src->host->name.data, + src->host->name.len); + } } return dst; failed: + if (dst->host) { + ngx_slab_free_locked(pool, dst->host); + } + if (dst->server.data) { ngx_slab_free_locked(pool, 
dst->server.data); } @@ -320,3 +383,296 @@ failed: return NULL; } + + +static void +ngx_stream_upstream_zone_set_single(ngx_stream_upstream_srv_conf_t *uscf) +{ + ngx_stream_upstream_rr_peers_t *peers; + + peers = uscf->peer.data; + + if (peers->number == 1 + && (peers->next == NULL || peers->next->number == 0)) + { + peers->single = 1; + + } else { + peers->single = 0; + } +} + + +static void +ngx_stream_upstream_zone_remove_peer_locked( + ngx_stream_upstream_rr_peers_t *peers, ngx_stream_upstream_rr_peer_t *peer) +{ + peers->total_weight -= peer->weight; + peers->number--; + peers->tries -= (peer->down == 0); + (*peers->config)++; + peers->weighted = (peers->total_weight != peers->number); + + ngx_stream_upstream_rr_peer_free(peers, peer); +} + + +static ngx_int_t +ngx_stream_upstream_zone_init_worker(ngx_cycle_t *cycle) +{ + ngx_uint_t i; + ngx_event_t *event; + ngx_stream_upstream_rr_peer_t *peer; + ngx_stream_upstream_rr_peers_t *peers; + ngx_stream_upstream_srv_conf_t *uscf, **uscfp; + ngx_stream_upstream_main_conf_t *umcf; + + if ((ngx_process != NGX_PROCESS_WORKER || ngx_worker != 0) + && ngx_process != NGX_PROCESS_SINGLE) + { + return NGX_OK; + } + + umcf = ngx_stream_cycle_get_module_main_conf(cycle, + ngx_stream_upstream_module); + + if (umcf == NULL) { + return NGX_OK; + } + + uscfp = umcf->upstreams.elts; + + for (i = 0; i < umcf->upstreams.nelts; i++) { + + uscf = uscfp[i]; + + if (uscf->shm_zone == NULL) { + continue; + } + + peers = uscf->peer.data; + + do { + ngx_stream_upstream_rr_peers_wlock(peers); + + for (peer = peers->resolve; peer; peer = peer->next) { + + event = &peer->host->event; + ngx_memzero(event, sizeof(ngx_event_t)); + + event->data = uscf; + event->handler = ngx_stream_upstream_zone_resolve_timer; + event->log = cycle->log; + event->cancelable = 1; + + ngx_add_timer(event, 1); + } + + ngx_stream_upstream_rr_peers_unlock(peers); + + peers = peers->next; + + } while (peers); + } + + return NGX_OK; +} + + +static void 
+ngx_stream_upstream_zone_resolve_timer(ngx_event_t *event) +{ + ngx_resolver_ctx_t *ctx; + ngx_stream_upstream_host_t *host; + ngx_stream_upstream_srv_conf_t *uscf; + + host = (ngx_stream_upstream_host_t *) event; + uscf = event->data; + + ctx = ngx_resolve_start(uscf->resolver, NULL); + if (ctx == NULL) { + goto retry; + } + + if (ctx == NGX_NO_RESOLVER) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "no resolver defined to resolve %V", &host->name); + return; + } + + ctx->name = host->name; + ctx->handler = ngx_stream_upstream_zone_resolve_handler; + ctx->data = host; + ctx->timeout = uscf->resolver_timeout; + ctx->cancelable = 1; + + if (ngx_resolve_name(ctx) == NGX_OK) { + return; + } + +retry: + + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); +} + + +static void +ngx_stream_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx) +{ + time_t now; + in_port_t port; + ngx_msec_t timer; + ngx_uint_t i, j; + ngx_event_t *event; + ngx_resolver_addr_t *addr; + ngx_stream_upstream_host_t *host; + ngx_stream_upstream_rr_peer_t *peer, *template, **peerp; + ngx_stream_upstream_rr_peers_t *peers; + ngx_stream_upstream_srv_conf_t *uscf; + + host = ctx->data; + event = &host->event; + uscf = event->data; + peers = host->peers; + template = host->peer; + + ngx_stream_upstream_rr_peers_wlock(peers); + + now = ngx_time(); + + if (ctx->state) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "%V could not be resolved (%i: %s)", + &ctx->name, ctx->state, + ngx_resolver_strerror(ctx->state)); + + if (ctx->state != NGX_RESOLVE_NXDOMAIN) { + ngx_stream_upstream_rr_peers_unlock(peers); + + ngx_resolve_name_done(ctx); + + ngx_add_timer(event, ngx_max(uscf->resolver_timeout, 1000)); + return; + } + + /* NGX_RESOLVE_NXDOMAIN */ + + ctx->naddrs = 0; + } + +#if (NGX_DEBUG) + { + u_char text[NGX_SOCKADDR_STRLEN]; + size_t len; + + for (i = 0; i < ctx->naddrs; i++) { + len = ngx_sock_ntop(ctx->addrs[i].sockaddr, ctx->addrs[i].socklen, + text, NGX_SOCKADDR_STRLEN, 0); + + 
ngx_log_debug3(NGX_LOG_DEBUG_STREAM, event->log, 0, + "name %V was resolved to %*s", &host->name, len, text); + } + } +#endif + + for (peerp = &peers->peer; *peerp; /* void */ ) { + peer = *peerp; + + if (peer->host != host) { + goto next; + } + + for (j = 0; j < ctx->naddrs; j++) { + + addr = &ctx->addrs[j]; + + if (addr->name.len == 0 + && ngx_cmp_sockaddr(peer->sockaddr, peer->socklen, + addr->sockaddr, addr->socklen, 0) + == NGX_OK) + { + addr->name.len = 1; + goto next; + } + } + + *peerp = peer->next; + ngx_stream_upstream_zone_remove_peer_locked(peers, peer); + + ngx_stream_upstream_zone_set_single(uscf); + + continue; + + next: + + peerp = &peer->next; + } + + for (i = 0; i < ctx->naddrs; i++) { + + addr = &ctx->addrs[i]; + + if (addr->name.len == 1) { + addr->name.len = 0; + continue; + } + + ngx_shmtx_lock(&peers->shpool->mutex); + peer = ngx_stream_upstream_zone_copy_peer(peers, NULL); + ngx_shmtx_unlock(&peers->shpool->mutex); + if (peer == NULL) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "cannot add new server to upstream \"%V\", " + "memory exhausted", peers->name); + break; + } + + ngx_memcpy(peer->sockaddr, addr->sockaddr, addr->socklen); + + port = ((struct sockaddr_in *) template->sockaddr)->sin_port; + + switch (peer->sockaddr->sa_family) { +#if (NGX_HAVE_INET6) + case AF_INET6: + ((struct sockaddr_in6 *) peer->sockaddr)->sin6_port = port; + break; +#endif + default: /* AF_INET */ + ((struct sockaddr_in *) peer->sockaddr)->sin_port = port; + } + + peer->socklen = addr->socklen; + + peer->name.len = ngx_sock_ntop(peer->sockaddr, peer->socklen, + peer->name.data, NGX_SOCKADDR_STRLEN, 1); + + peer->host = template->host; + peer->server = template->server; + + peer->weight = template->weight; + peer->effective_weight = peer->weight; + peer->max_conns = template->max_conns; + peer->max_fails = template->max_fails; + peer->fail_timeout = template->fail_timeout; + peer->down = template->down; + + *peerp = peer; + peerp = &peer->next; + + 
peers->number++; + peers->tries += (peer->down == 0); + peers->total_weight += peer->weight; + peers->weighted = (peers->total_weight != peers->number); + (*peers->config)++; + + ngx_stream_upstream_zone_set_single(uscf); + } + + ngx_stream_upstream_rr_peers_unlock(peers); + + timer = (ngx_msec_t) 1000 * (ctx->valid > now ? ctx->valid - now + 1 : 1); + + ngx_resolve_name_done(ctx); + + ngx_add_timer(event, timer); +} From a.bavshin at nginx.com Thu Jul 18 18:20:39 2024 From: a.bavshin at nginx.com (=?iso-8859-1?q?Aleksei_Bavshin?=) Date: Thu, 18 Jul 2024 11:20:39 -0700 Subject: [PATCH 2 of 7] Upstream: construct upstream peers from DNS SRV records In-Reply-To: References: Message-ID: <5fc7ae429476db5f747e.1721326839@fedora-wsl.> # HG changeset patch # User Dmitry Volyntsev # Date 1458229351 -10800 # Thu Mar 17 18:42:31 2016 +0300 # Node ID 5fc7ae429476db5f747e3b600aab0493da1d52fd # Parent 46d4c383cf3a72db5d579092636a6be3cd907786 Upstream: construct upstream peers from DNS SRV records. diff --git a/src/http/modules/ngx_http_upstream_zone_module.c b/src/http/modules/ngx_http_upstream_zone_module.c --- a/src/http/modules/ngx_http_upstream_zone_module.c +++ b/src/http/modules/ngx_http_upstream_zone_module.c @@ -359,6 +359,18 @@ ngx_http_upstream_zone_copy_peer(ngx_htt dst->host->name.len = src->host->name.len; ngx_memcpy(dst->host->name.data, src->host->name.data, src->host->name.len); + + if (src->host->service.len) { + dst->host->service.data = ngx_slab_alloc_locked(pool, + src->host->service.len); + if (dst->host->service.data == NULL) { + goto failed; + } + + dst->host->service.len = src->host->service.len; + ngx_memcpy(dst->host->service.data, src->host->service.data, + src->host->service.len); + } } } @@ -367,6 +379,10 @@ ngx_http_upstream_zone_copy_peer(ngx_htt failed: if (dst->host) { + if (dst->host->name.data) { + ngx_slab_free_locked(pool, dst->host->name.data); + } + ngx_slab_free_locked(pool, dst->host); } @@ -506,6 +522,7 @@ 
ngx_http_upstream_zone_resolve_timer(ngx ctx->handler = ngx_http_upstream_zone_resolve_handler; ctx->data = host; ctx->timeout = uscf->resolver_timeout; + ctx->service = host->service; ctx->cancelable = 1; if (ngx_resolve_name(ctx) == NGX_OK) { @@ -518,15 +535,28 @@ retry: } +#define ngx_http_upstream_zone_addr_marked(addr) \ + ((uintptr_t) (addr)->sockaddr & 1) + +#define ngx_http_upstream_zone_mark_addr(addr) \ + (addr)->sockaddr = (struct sockaddr *) ((uintptr_t) (addr)->sockaddr | 1) + +#define ngx_http_upstream_zone_unmark_addr(addr) \ + (addr)->sockaddr = \ + (struct sockaddr *) ((uintptr_t) (addr)->sockaddr & ~((uintptr_t) 1)) + static void ngx_http_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx) { time_t now; + u_short min_priority; in_port_t port; + ngx_str_t *server; ngx_msec_t timer; - ngx_uint_t i, j; + ngx_uint_t i, j, backup, addr_backup; ngx_event_t *event; ngx_resolver_addr_t *addr; + ngx_resolver_srv_name_t *srv; ngx_http_upstream_host_t *host; ngx_http_upstream_rr_peer_t *peer, *template, **peerp; ngx_http_upstream_rr_peers_t *peers; @@ -542,11 +572,32 @@ ngx_http_upstream_zone_resolve_handler(n now = ngx_time(); + for (i = 0; i < ctx->nsrvs; i++) { + srv = &ctx->srvs[i]; + + if (srv->state) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "%V could not be resolved (%i: %s) " + "while resolving service %V of %V", + &srv->name, srv->state, + ngx_resolver_strerror(srv->state), &ctx->service, + &ctx->name); + } + } + if (ctx->state) { - ngx_log_error(NGX_LOG_ERR, event->log, 0, - "%V could not be resolved (%i: %s)", - &ctx->name, ctx->state, - ngx_resolver_strerror(ctx->state)); + if (ctx->service.len) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "service %V of %V could not be resolved (%i: %s)", + &ctx->service, &ctx->name, ctx->state, + ngx_resolver_strerror(ctx->state)); + + } else { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "%V could not be resolved (%i: %s)", + &ctx->name, ctx->state, + ngx_resolver_strerror(ctx->state)); + } if 
(ctx->state != NGX_RESOLVE_NXDOMAIN) { ngx_http_upstream_rr_peers_unlock(peers); @@ -562,6 +613,13 @@ ngx_http_upstream_zone_resolve_handler(n ctx->naddrs = 0; } + backup = 0; + min_priority = 65535; + + for (i = 0; i < ctx->naddrs; i++) { + min_priority = ngx_min(ctx->addrs[i].priority, min_priority); + } + #if (NGX_DEBUG) { u_char text[NGX_SOCKADDR_STRLEN]; @@ -569,14 +627,20 @@ ngx_http_upstream_zone_resolve_handler(n for (i = 0; i < ctx->naddrs; i++) { len = ngx_sock_ntop(ctx->addrs[i].sockaddr, ctx->addrs[i].socklen, - text, NGX_SOCKADDR_STRLEN, 0); + text, NGX_SOCKADDR_STRLEN, 1); - ngx_log_debug3(NGX_LOG_DEBUG_HTTP, event->log, 0, - "name %V was resolved to %*s", &host->name, len, text); + ngx_log_debug7(NGX_LOG_DEBUG_HTTP, event->log, 0, + "name %V was resolved to %*s " + "s:\"%V\" n:\"%V\" w:%d %s", + &host->name, len, text, &host->service, + &ctx->addrs[i].name, ctx->addrs[i].weight, + ctx->addrs[i].priority != min_priority ? "backup" : ""); } } #endif +again: + for (peerp = &peers->peer; *peerp; /* void */ ) { peer = *peerp; @@ -588,14 +652,39 @@ ngx_http_upstream_zone_resolve_handler(n addr = &ctx->addrs[j]; - if (addr->name.len == 0 - && ngx_cmp_sockaddr(peer->sockaddr, peer->socklen, - addr->sockaddr, addr->socklen, 0) - == NGX_OK) + addr_backup = (addr->priority != min_priority); + if (addr_backup != backup) { + continue; + } + + if (ngx_http_upstream_zone_addr_marked(addr)) { + continue; + } + + if (ngx_cmp_sockaddr(peer->sockaddr, peer->socklen, + addr->sockaddr, addr->socklen, + host->service.len != 0) + != NGX_OK) { - addr->name.len = 1; - goto next; + continue; } + + if (host->service.len) { + if (addr->name.len != peer->server.len + || ngx_strncmp(addr->name.data, peer->server.data, + addr->name.len)) + { + continue; + } + + if (template->weight == 1 && addr->weight != peer->weight) { + continue; + } + } + + ngx_http_upstream_zone_mark_addr(addr); + + goto next; } *peerp = peer->next; @@ -614,8 +703,13 @@ 
ngx_http_upstream_zone_resolve_handler(n addr = &ctx->addrs[i]; - if (addr->name.len == 1) { - addr->name.len = 0; + addr_backup = (addr->priority != min_priority); + if (addr_backup != backup) { + continue; + } + + if (ngx_http_upstream_zone_addr_marked(addr)) { + ngx_http_upstream_zone_unmark_addr(addr); continue; } @@ -627,21 +721,14 @@ ngx_http_upstream_zone_resolve_handler(n ngx_log_error(NGX_LOG_ERR, event->log, 0, "cannot add new server to upstream \"%V\", " "memory exhausted", peers->name); - break; + goto done; } ngx_memcpy(peer->sockaddr, addr->sockaddr, addr->socklen); - port = ((struct sockaddr_in *) template->sockaddr)->sin_port; - - switch (peer->sockaddr->sa_family) { -#if (NGX_HAVE_INET6) - case AF_INET6: - ((struct sockaddr_in6 *) peer->sockaddr)->sin6_port = port; - break; -#endif - default: /* AF_INET */ - ((struct sockaddr_in *) peer->sockaddr)->sin_port = port; + if (host->service.len == 0) { + port = ngx_inet_get_port(template->sockaddr); + ngx_inet_set_port(peer->sockaddr, port); } peer->socklen = addr->socklen; @@ -650,9 +737,30 @@ ngx_http_upstream_zone_resolve_handler(n peer->name.data, NGX_SOCKADDR_STRLEN, 1); peer->host = template->host; - peer->server = template->server; + + server = host->service.len ? &addr->name : &template->server; + + peer->server.data = ngx_slab_alloc(peers->shpool, server->len); + if (peer->server.data == NULL) { + ngx_http_upstream_rr_peer_free(peers, peer); - peer->weight = template->weight; + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "cannot add new server to upstream \"%V\", " + "memory exhausted", peers->name); + goto done; + } + + peer->server.len = server->len; + ngx_memcpy(peer->server.data, server->data, server->len); + + if (host->service.len == 0) { + peer->weight = template->weight; + + } else { + peer->weight = (template->weight != 1 ? 
template->weight + : addr->weight); + } + peer->effective_weight = peer->weight; peer->max_conns = template->max_conns; peer->max_fails = template->max_fails; @@ -671,8 +779,25 @@ ngx_http_upstream_zone_resolve_handler(n ngx_http_upstream_zone_set_single(uscf); } + if (host->service.len && peers->next) { + ngx_http_upstream_rr_peers_unlock(peers); + + peers = peers->next; + backup = 1; + + ngx_http_upstream_rr_peers_wlock(peers); + + goto again; + } + +done: + ngx_http_upstream_rr_peers_unlock(peers); + while (++i < ctx->naddrs) { + ngx_http_upstream_zone_unmark_addr(&ctx->addrs[i]); + } + timer = (ngx_msec_t) 1000 * (ctx->valid > now ? ctx->valid - now + 1 : 1); ngx_resolve_name_done(ctx); diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -6306,6 +6306,19 @@ ngx_http_upstream_server(ngx_conf_t *cf, resolve = 1; continue; } + + if (ngx_strncmp(value[i].data, "service=", 8) == 0) { + + us->service.len = value[i].len - 8; + us->service.data = &value[i].data[8]; + + if (us->service.len == 0) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "service is empty"); + return NGX_CONF_ERROR; + } + + continue; + } #endif goto invalid; @@ -6321,6 +6334,15 @@ ngx_http_upstream_server(ngx_conf_t *cf, /* resolve at run time */ u.no_resolve = 1; } + + if (us->service.len && !resolve) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "service upstream \"%V\" requires " + "\"resolve\" parameter", + &u.url); + return NGX_CONF_ERROR; + } + #endif if (ngx_parse_url(cf->pool, &u) != NGX_OK) { @@ -6336,6 +6358,22 @@ ngx_http_upstream_server(ngx_conf_t *cf, #if (NGX_HTTP_UPSTREAM_ZONE) + if (us->service.len && !u.no_port) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "service upstream \"%V\" may not have port", + &us->name); + + return NGX_CONF_ERROR; + } + + if (us->service.len && u.naddrs) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "service upstream \"%V\" requires domain name", + &us->name); + 
+ return NGX_CONF_ERROR; + } + if (resolve && u.naddrs == 0) { ngx_addr_t *addr; diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h --- a/src/http/ngx_http_upstream.h +++ b/src/http/ngx_http_upstream.h @@ -106,9 +106,10 @@ typedef struct { #if (NGX_HTTP_UPSTREAM_ZONE) ngx_str_t host; + ngx_str_t service; #endif - NGX_COMPAT_BEGIN(4) + NGX_COMPAT_BEGIN(2) NGX_COMPAT_END } ngx_http_upstream_server_t; diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c --- a/src/http/ngx_http_upstream_round_robin.c +++ b/src/http/ngx_http_upstream_round_robin.c @@ -176,6 +176,7 @@ ngx_http_upstream_init_round_robin(ngx_c } peer[n].host->name = server[i].host; + peer[n].host->service = server[i].service; peer[n].sockaddr = server[i].addrs[0].sockaddr; peer[n].socklen = server[i].addrs[0].socklen; @@ -245,7 +246,15 @@ ngx_http_upstream_init_round_robin(ngx_c } } - if (n + r == 0) { + if (n == 0 +#if (NGX_HTTP_UPSTREAM_ZONE) + && !resolve +#endif + ) { + return NGX_OK; + } + + if (n + r == 0 && !(us->flags & NGX_HTTP_UPSTREAM_BACKUP)) { return NGX_OK; } @@ -293,6 +302,7 @@ ngx_http_upstream_init_round_robin(ngx_c } peer[n].host->name = server[i].host; + peer[n].host->service = server[i].service; peer[n].sockaddr = server[i].addrs[0].sockaddr; peer[n].socklen = server[i].addrs[0].socklen; diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h --- a/src/http/ngx_http_upstream_round_robin.h +++ b/src/http/ngx_http_upstream_round_robin.h @@ -24,6 +24,7 @@ typedef struct { ngx_event_t event; /* must be first */ ngx_uint_t worker; ngx_str_t name; + ngx_str_t service; ngx_http_upstream_rr_peers_t *peers; ngx_http_upstream_rr_peer_t *peer; } ngx_http_upstream_host_t; @@ -153,7 +154,7 @@ ngx_http_upstream_rr_peer_free_locked(ng ngx_slab_free_locked(peers->shpool, peer->sockaddr); ngx_slab_free_locked(peers->shpool, peer->name.data); - if (peer->server.data && (peer->host == NULL || 
peer->host->peer == peer)) { + if (peer->server.data) { ngx_slab_free_locked(peers->shpool, peer->server.data); } diff --git a/src/stream/ngx_stream_upstream.c b/src/stream/ngx_stream_upstream.c --- a/src/stream/ngx_stream_upstream.c +++ b/src/stream/ngx_stream_upstream.c @@ -523,6 +523,19 @@ ngx_stream_upstream_server(ngx_conf_t *c resolve = 1; continue; } + + if (ngx_strncmp(value[i].data, "service=", 8) == 0) { + + us->service.len = value[i].len - 8; + us->service.data = &value[i].data[8]; + + if (us->service.len == 0) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "service is empty"); + return NGX_CONF_ERROR; + } + + continue; + } #endif goto invalid; @@ -537,6 +550,15 @@ ngx_stream_upstream_server(ngx_conf_t *c /* resolve at run time */ u.no_resolve = 1; } + + if (us->service.len && !resolve) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "service upstream \"%V\" requires " + "\"resolve\" parameter", + &u.url); + return NGX_CONF_ERROR; + } + #endif if (ngx_parse_url(cf->pool, &u) != NGX_OK) { @@ -548,7 +570,12 @@ ngx_stream_upstream_server(ngx_conf_t *c return NGX_CONF_ERROR; } - if (u.no_port) { + if (u.no_port +#if (NGX_STREAM_UPSTREAM_ZONE) + && us->service.len == 0 +#endif + ) + { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "no port in upstream \"%V\"", &u.url); return NGX_CONF_ERROR; @@ -558,6 +585,22 @@ ngx_stream_upstream_server(ngx_conf_t *c #if (NGX_STREAM_UPSTREAM_ZONE) + if (us->service.len && !u.no_port) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "service upstream \"%V\" may not have port", + &us->name); + + return NGX_CONF_ERROR; + } + + if (us->service.len && u.naddrs) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "service upstream \"%V\" requires domain name", + &us->name); + + return NGX_CONF_ERROR; + } + if (resolve && u.naddrs == 0) { ngx_addr_t *addr; diff --git a/src/stream/ngx_stream_upstream.h b/src/stream/ngx_stream_upstream.h --- a/src/stream/ngx_stream_upstream.h +++ b/src/stream/ngx_stream_upstream.h @@ -65,10 +65,8 @@ typedef struct 
{ #if (NGX_STREAM_UPSTREAM_ZONE) ngx_str_t host; + ngx_str_t service; #endif - - NGX_COMPAT_BEGIN(2) - NGX_COMPAT_END } ngx_stream_upstream_server_t; diff --git a/src/stream/ngx_stream_upstream_round_robin.c b/src/stream/ngx_stream_upstream_round_robin.c --- a/src/stream/ngx_stream_upstream_round_robin.c +++ b/src/stream/ngx_stream_upstream_round_robin.c @@ -183,6 +183,7 @@ ngx_stream_upstream_init_round_robin(ngx } peer[n].host->name = server[i].host; + peer[n].host->service = server[i].service; peer[n].sockaddr = server[i].addrs[0].sockaddr; peer[n].socklen = server[i].addrs[0].socklen; @@ -251,7 +252,15 @@ ngx_stream_upstream_init_round_robin(ngx } } - if (n + r == 0) { + if (n == 0 +#if (NGX_STREAM_UPSTREAM_ZONE) + && !resolve +#endif + ) { + return NGX_OK; + } + + if (n + r == 0 && !(us->flags & NGX_STREAM_UPSTREAM_BACKUP)) { return NGX_OK; } @@ -299,6 +308,7 @@ ngx_stream_upstream_init_round_robin(ngx } peer[n].host->name = server[i].host; + peer[n].host->service = server[i].service; peer[n].sockaddr = server[i].addrs[0].sockaddr; peer[n].socklen = server[i].addrs[0].socklen; diff --git a/src/stream/ngx_stream_upstream_round_robin.h b/src/stream/ngx_stream_upstream_round_robin.h --- a/src/stream/ngx_stream_upstream_round_robin.h +++ b/src/stream/ngx_stream_upstream_round_robin.h @@ -24,6 +24,7 @@ typedef struct { ngx_event_t event; /* must be first */ ngx_uint_t worker; ngx_str_t name; + ngx_str_t service; ngx_stream_upstream_rr_peers_t *peers; ngx_stream_upstream_rr_peer_t *peer; } ngx_stream_upstream_host_t; @@ -151,7 +152,7 @@ ngx_stream_upstream_rr_peer_free_locked( ngx_slab_free_locked(peers->shpool, peer->sockaddr); ngx_slab_free_locked(peers->shpool, peer->name.data); - if (peer->server.data && (peer->host == NULL || peer->host->peer == peer)) { + if (peer->server.data) { ngx_slab_free_locked(peers->shpool, peer->server.data); } diff --git a/src/stream/ngx_stream_upstream_zone_module.c b/src/stream/ngx_stream_upstream_zone_module.c --- 
a/src/stream/ngx_stream_upstream_zone_module.c +++ b/src/stream/ngx_stream_upstream_zone_module.c @@ -356,6 +356,18 @@ ngx_stream_upstream_zone_copy_peer(ngx_s dst->host->name.len = src->host->name.len; ngx_memcpy(dst->host->name.data, src->host->name.data, src->host->name.len); + + if (src->host->service.len) { + dst->host->service.data = ngx_slab_alloc_locked(pool, + src->host->service.len); + if (dst->host->service.data == NULL) { + goto failed; + } + + dst->host->service.len = src->host->service.len; + ngx_memcpy(dst->host->service.data, src->host->service.data, + src->host->service.len); + } } } @@ -364,6 +376,10 @@ ngx_stream_upstream_zone_copy_peer(ngx_s failed: if (dst->host) { + if (dst->host->name.data) { + ngx_slab_free_locked(pool, dst->host->name.data); + } + ngx_slab_free_locked(pool, dst->host); } @@ -504,6 +520,7 @@ ngx_stream_upstream_zone_resolve_timer(n ctx->handler = ngx_stream_upstream_zone_resolve_handler; ctx->data = host; ctx->timeout = uscf->resolver_timeout; + ctx->service = host->service; ctx->cancelable = 1; if (ngx_resolve_name(ctx) == NGX_OK) { @@ -516,15 +533,28 @@ retry: } +#define ngx_stream_upstream_zone_addr_marked(addr) \ + ((uintptr_t) (addr)->sockaddr & 1) + +#define ngx_stream_upstream_zone_mark_addr(addr) \ + (addr)->sockaddr = (struct sockaddr *) ((uintptr_t) (addr)->sockaddr | 1) + +#define ngx_stream_upstream_zone_unmark_addr(addr) \ + (addr)->sockaddr = \ + (struct sockaddr *) ((uintptr_t) (addr)->sockaddr & ~((uintptr_t) 1)) + static void ngx_stream_upstream_zone_resolve_handler(ngx_resolver_ctx_t *ctx) { time_t now; + u_short min_priority; in_port_t port; + ngx_str_t *server; ngx_msec_t timer; - ngx_uint_t i, j; + ngx_uint_t i, j, backup, addr_backup; ngx_event_t *event; ngx_resolver_addr_t *addr; + ngx_resolver_srv_name_t *srv; ngx_stream_upstream_host_t *host; ngx_stream_upstream_rr_peer_t *peer, *template, **peerp; ngx_stream_upstream_rr_peers_t *peers; @@ -540,11 +570,32 @@ ngx_stream_upstream_zone_resolve_handler 
now = ngx_time(); + for (i = 0; i < ctx->nsrvs; i++) { + srv = &ctx->srvs[i]; + + if (srv->state) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "%V could not be resolved (%i: %s) " + "while resolving service %V of %V", + &srv->name, srv->state, + ngx_resolver_strerror(srv->state), &ctx->service, + &ctx->name); + } + } + if (ctx->state) { - ngx_log_error(NGX_LOG_ERR, event->log, 0, - "%V could not be resolved (%i: %s)", - &ctx->name, ctx->state, - ngx_resolver_strerror(ctx->state)); + if (ctx->service.len) { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "service %V of %V could not be resolved (%i: %s)", + &ctx->service, &ctx->name, ctx->state, + ngx_resolver_strerror(ctx->state)); + + } else { + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "%V could not be resolved (%i: %s)", + &ctx->name, ctx->state, + ngx_resolver_strerror(ctx->state)); + } if (ctx->state != NGX_RESOLVE_NXDOMAIN) { ngx_stream_upstream_rr_peers_unlock(peers); @@ -560,6 +611,13 @@ ngx_stream_upstream_zone_resolve_handler ctx->naddrs = 0; } + backup = 0; + min_priority = 65535; + + for (i = 0; i < ctx->naddrs; i++) { + min_priority = ngx_min(ctx->addrs[i].priority, min_priority); + } + #if (NGX_DEBUG) { u_char text[NGX_SOCKADDR_STRLEN]; @@ -567,14 +625,20 @@ ngx_stream_upstream_zone_resolve_handler for (i = 0; i < ctx->naddrs; i++) { len = ngx_sock_ntop(ctx->addrs[i].sockaddr, ctx->addrs[i].socklen, - text, NGX_SOCKADDR_STRLEN, 0); + text, NGX_SOCKADDR_STRLEN, 1); - ngx_log_debug3(NGX_LOG_DEBUG_STREAM, event->log, 0, - "name %V was resolved to %*s", &host->name, len, text); + ngx_log_debug7(NGX_LOG_DEBUG_STREAM, event->log, 0, + "name %V was resolved to %*s " + "s:\"%V\" n:\"%V\" w:%d %s", + &host->name, len, text, &host->service, + &ctx->addrs[i].name, ctx->addrs[i].weight, + ctx->addrs[i].priority != min_priority ? 
"backup" : ""); } } #endif +again: + for (peerp = &peers->peer; *peerp; /* void */ ) { peer = *peerp; @@ -586,14 +650,39 @@ ngx_stream_upstream_zone_resolve_handler addr = &ctx->addrs[j]; - if (addr->name.len == 0 - && ngx_cmp_sockaddr(peer->sockaddr, peer->socklen, - addr->sockaddr, addr->socklen, 0) - == NGX_OK) + addr_backup = (addr->priority != min_priority); + if (addr_backup != backup) { + continue; + } + + if (ngx_stream_upstream_zone_addr_marked(addr)) { + continue; + } + + if (ngx_cmp_sockaddr(peer->sockaddr, peer->socklen, + addr->sockaddr, addr->socklen, + host->service.len != 0) + != NGX_OK) { - addr->name.len = 1; - goto next; + continue; } + + if (host->service.len) { + if (addr->name.len != peer->server.len + || ngx_strncmp(addr->name.data, peer->server.data, + addr->name.len)) + { + continue; + } + + if (template->weight == 1 && addr->weight != peer->weight) { + continue; + } + } + + ngx_stream_upstream_zone_mark_addr(addr); + + goto next; } *peerp = peer->next; @@ -612,33 +701,32 @@ ngx_stream_upstream_zone_resolve_handler addr = &ctx->addrs[i]; - if (addr->name.len == 1) { - addr->name.len = 0; + addr_backup = (addr->priority != min_priority); + if (addr_backup != backup) { + continue; + } + + if (ngx_stream_upstream_zone_addr_marked(addr)) { + ngx_stream_upstream_zone_unmark_addr(addr); continue; } ngx_shmtx_lock(&peers->shpool->mutex); peer = ngx_stream_upstream_zone_copy_peer(peers, NULL); ngx_shmtx_unlock(&peers->shpool->mutex); + if (peer == NULL) { ngx_log_error(NGX_LOG_ERR, event->log, 0, "cannot add new server to upstream \"%V\", " "memory exhausted", peers->name); - break; + goto done; } ngx_memcpy(peer->sockaddr, addr->sockaddr, addr->socklen); - port = ((struct sockaddr_in *) template->sockaddr)->sin_port; - - switch (peer->sockaddr->sa_family) { -#if (NGX_HAVE_INET6) - case AF_INET6: - ((struct sockaddr_in6 *) peer->sockaddr)->sin6_port = port; - break; -#endif - default: /* AF_INET */ - ((struct sockaddr_in *) 
peer->sockaddr)->sin_port = port; + if (host->service.len == 0) { + port = ngx_inet_get_port(template->sockaddr); + ngx_inet_set_port(peer->sockaddr, port); } peer->socklen = addr->socklen; @@ -647,9 +735,30 @@ ngx_stream_upstream_zone_resolve_handler peer->name.data, NGX_SOCKADDR_STRLEN, 1); peer->host = template->host; - peer->server = template->server; + + server = host->service.len ? &addr->name : &template->server; + + peer->server.data = ngx_slab_alloc(peers->shpool, server->len); + if (peer->server.data == NULL) { + ngx_stream_upstream_rr_peer_free(peers, peer); - peer->weight = template->weight; + ngx_log_error(NGX_LOG_ERR, event->log, 0, + "cannot add new server to upstream \"%V\", " + "memory exhausted", peers->name); + goto done; + } + + peer->server.len = server->len; + ngx_memcpy(peer->server.data, server->data, server->len); + + if (host->service.len == 0) { + peer->weight = template->weight; + + } else { + peer->weight = (template->weight != 1 ? template->weight + : addr->weight); + } + peer->effective_weight = peer->weight; peer->max_conns = template->max_conns; peer->max_fails = template->max_fails; @@ -668,8 +777,25 @@ ngx_stream_upstream_zone_resolve_handler ngx_stream_upstream_zone_set_single(uscf); } + if (host->service.len && peers->next) { + ngx_stream_upstream_rr_peers_unlock(peers); + + peers = peers->next; + backup = 1; + + ngx_stream_upstream_rr_peers_wlock(peers); + + goto again; + } + +done: + ngx_stream_upstream_rr_peers_unlock(peers); + while (++i < ctx->naddrs) { + ngx_stream_upstream_zone_unmark_addr(&ctx->addrs[i]); + } + timer = (ngx_msec_t) 1000 * (ctx->valid > now ? 
ctx->valid - now + 1 : 1); ngx_resolve_name_done(ctx); From a.bavshin at nginx.com Thu Jul 18 18:20:40 2024 From: a.bavshin at nginx.com (=?iso-8859-1?q?Aleksei_Bavshin?=) Date: Thu, 18 Jul 2024 11:20:40 -0700 Subject: [PATCH 3 of 7] Core: inheritance of non-reusable shared memory zones In-Reply-To: References: Message-ID: # HG changeset patch # User Ruslan Ermilov # Date 1509736941 -10800 # Fri Nov 03 22:22:21 2017 +0300 # Node ID d2f58530125445e0a436e923b368a5879aaaf1ea # Parent 5fc7ae429476db5f747e3b600aab0493da1d52fd Core: inheritance of non-reusable shared memory zones. When re-creating a non-reusable zone, make the pointer to the old zone available during the new zone initialization. diff --git a/src/core/ngx_cycle.c b/src/core/ngx_cycle.c --- a/src/core/ngx_cycle.c +++ b/src/core/ngx_cycle.c @@ -38,7 +38,7 @@ static ngx_connection_t dumb; ngx_cycle_t * ngx_init_cycle(ngx_cycle_t *old_cycle) { - void *rv; + void *rv, *data; char **senv; ngx_uint_t i, n; ngx_log_t *log; @@ -438,6 +438,8 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) opart = &old_cycle->shared_memory.part; oshm_zone = opart->elts; + data = NULL; + for (n = 0; /* void */ ; n++) { if (n >= opart->nelts) { @@ -461,9 +463,13 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) continue; } + if (shm_zone[i].tag == oshm_zone[n].tag && shm_zone[i].noreuse) { + data = oshm_zone[n].data; + break; + } + if (shm_zone[i].tag == oshm_zone[n].tag - && shm_zone[i].shm.size == oshm_zone[n].shm.size - && !shm_zone[i].noreuse) + && shm_zone[i].shm.size == oshm_zone[n].shm.size) { shm_zone[i].shm.addr = oshm_zone[n].shm.addr; #if (NGX_WIN32) @@ -490,7 +496,7 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) goto failed; } - if (shm_zone[i].init(&shm_zone[i], NULL) != NGX_OK) { + if (shm_zone[i].init(&shm_zone[i], data) != NGX_OK) { goto failed; } From a.bavshin at nginx.com Thu Jul 18 18:20:41 2024 From: a.bavshin at nginx.com (=?iso-8859-1?q?Aleksei_Bavshin?=) Date: Thu, 18 Jul 2024 11:20:41 -0700 Subject: [PATCH 4 of 7] Upstream: 
pre-resolve servers on reload In-Reply-To: References: Message-ID: # HG changeset patch # User Ruslan Ermilov # Date 1509736943 -10800 # Fri Nov 03 22:22:23 2017 +0300 # Node ID e52530ef6851c088ebbe9f400224ef0758583cf0 # Parent d2f58530125445e0a436e923b368a5879aaaf1ea Upstream: pre-resolve servers on reload. After configuration is reloaded, it may take some time for the re-resolvable upstream servers to resolve and become available as peers. During this time, client requests might get dropped. Such servers are now pre-resolved using the "cache" of already resolved peers from the old shared memory zone. diff --git a/src/http/modules/ngx_http_upstream_zone_module.c b/src/http/modules/ngx_http_upstream_zone_module.c --- a/src/http/modules/ngx_http_upstream_zone_module.c +++ b/src/http/modules/ngx_http_upstream_zone_module.c @@ -15,9 +15,15 @@ static char *ngx_http_upstream_zone(ngx_ static ngx_int_t ngx_http_upstream_init_zone(ngx_shm_zone_t *shm_zone, void *data); static ngx_http_upstream_rr_peers_t *ngx_http_upstream_zone_copy_peers( - ngx_slab_pool_t *shpool, ngx_http_upstream_srv_conf_t *uscf); + ngx_slab_pool_t *shpool, ngx_http_upstream_srv_conf_t *uscf, + ngx_http_upstream_srv_conf_t *ouscf); static ngx_http_upstream_rr_peer_t *ngx_http_upstream_zone_copy_peer( ngx_http_upstream_rr_peers_t *peers, ngx_http_upstream_rr_peer_t *src); +static ngx_int_t ngx_http_upstream_zone_preresolve( + ngx_http_upstream_rr_peer_t *resolve, + ngx_http_upstream_rr_peers_t *peers, + ngx_http_upstream_rr_peer_t *oresolve, + ngx_http_upstream_rr_peers_t *opeers); static void ngx_http_upstream_zone_set_single( ngx_http_upstream_srv_conf_t *uscf); static void ngx_http_upstream_zone_remove_peer_locked( @@ -128,11 +134,11 @@ static ngx_int_t ngx_http_upstream_init_zone(ngx_shm_zone_t *shm_zone, void *data) { size_t len; - ngx_uint_t i; + ngx_uint_t i, j; ngx_slab_pool_t *shpool; ngx_http_upstream_rr_peers_t *peers, **peersp; - ngx_http_upstream_srv_conf_t *uscf, **uscfp; - 
ngx_http_upstream_main_conf_t *umcf; + ngx_http_upstream_srv_conf_t *uscf, *ouscf, **uscfp, **ouscfp; + ngx_http_upstream_main_conf_t *umcf, *oumcf; shpool = (ngx_slab_pool_t *) shm_zone->shm.addr; umcf = shm_zone->data; @@ -169,6 +175,7 @@ ngx_http_upstream_init_zone(ngx_shm_zone /* copy peers to shared memory */ peersp = (ngx_http_upstream_rr_peers_t **) (void *) &shpool->data; + oumcf = data; for (i = 0; i < umcf->upstreams.nelts; i++) { uscf = uscfp[i]; @@ -177,7 +184,38 @@ ngx_http_upstream_init_zone(ngx_shm_zone continue; } - peers = ngx_http_upstream_zone_copy_peers(shpool, uscf); + ouscf = NULL; + + if (oumcf) { + ouscfp = oumcf->upstreams.elts; + + for (j = 0; j < oumcf->upstreams.nelts; j++) { + + if (ouscfp[j]->shm_zone == NULL) { + continue; + } + + if (ouscfp[j]->shm_zone->shm.name.len != shm_zone->shm.name.len + || ngx_memcmp(ouscfp[j]->shm_zone->shm.name.data, + shm_zone->shm.name.data, + shm_zone->shm.name.len) + != 0) + { + continue; + } + + if (ouscfp[j]->host.len == uscf->host.len + && ngx_memcmp(ouscfp[j]->host.data, uscf->host.data, + uscf->host.len) + == 0) + { + ouscf = ouscfp[j]; + break; + } + } + } + + peers = ngx_http_upstream_zone_copy_peers(shpool, uscf, ouscf); if (peers == NULL) { return NGX_ERROR; } @@ -192,12 +230,14 @@ ngx_http_upstream_init_zone(ngx_shm_zone static ngx_http_upstream_rr_peers_t * ngx_http_upstream_zone_copy_peers(ngx_slab_pool_t *shpool, - ngx_http_upstream_srv_conf_t *uscf) + ngx_http_upstream_srv_conf_t *uscf, ngx_http_upstream_srv_conf_t *ouscf) { ngx_str_t *name; ngx_uint_t *config; ngx_http_upstream_rr_peer_t *peer, **peerp; - ngx_http_upstream_rr_peers_t *peers, *backup; + ngx_http_upstream_rr_peers_t *peers, *opeers, *backup; + + opeers = (ouscf ? 
ouscf->peer.data : NULL); config = ngx_slab_calloc(shpool, sizeof(ngx_uint_t)); if (config == NULL) { @@ -250,6 +290,16 @@ ngx_http_upstream_zone_copy_peers(ngx_sl (*peers->config)++; } + if (opeers) { + + if (ngx_http_upstream_zone_preresolve(peers->resolve, peers, + opeers->resolve, opeers) + != NGX_OK) + { + return NULL; + } + } + if (peers->next == NULL) { goto done; } @@ -289,10 +339,30 @@ ngx_http_upstream_zone_copy_peers(ngx_sl peers->next = backup; + if (opeers && opeers->next) { + + if (ngx_http_upstream_zone_preresolve(peers->resolve, backup, + opeers->resolve, opeers->next) + != NGX_OK) + { + return NULL; + } + + if (ngx_http_upstream_zone_preresolve(backup->resolve, backup, + opeers->next->resolve, + opeers->next) + != NGX_OK) + { + return NULL; + } + } + done: uscf->peer.data = peers; + ngx_http_upstream_zone_set_single(uscf); + return peers; } @@ -404,6 +474,123 @@ failed: } +static ngx_int_t +ngx_http_upstream_zone_preresolve(ngx_http_upstream_rr_peer_t *resolve, + ngx_http_upstream_rr_peers_t *peers, + ngx_http_upstream_rr_peer_t *oresolve, + ngx_http_upstream_rr_peers_t *opeers) +{ + in_port_t port; + ngx_str_t *server; + ngx_http_upstream_host_t *host; + ngx_http_upstream_rr_peer_t *peer, *template, *opeer, **peerp; + + if (resolve == NULL || oresolve == NULL) { + return NGX_OK; + } + + for (peerp = &peers->peer; *peerp; peerp = &(*peerp)->next) { + /* void */ + } + + ngx_http_upstream_rr_peers_rlock(opeers); + + for (template = resolve; template; template = template->next) { + for (opeer = oresolve; opeer; opeer = opeer->next) { + + if (opeer->host->name.len != template->host->name.len + || ngx_memcmp(opeer->host->name.data, + template->host->name.data, + template->host->name.len) + != 0) + { + continue; + } + + if (opeer->host->service.len != template->host->service.len + || ngx_memcmp(opeer->host->service.data, + template->host->service.data, + template->host->service.len) + != 0) + { + continue; + } + + host = opeer->host; + + for (opeer = 
opeers->peer; opeer; opeer = opeer->next) { + + if (opeer->host != host) { + continue; + } + + peer = ngx_http_upstream_zone_copy_peer(peers, NULL); + if (peer == NULL) { + ngx_http_upstream_rr_peers_unlock(opeers); + return NGX_ERROR; + } + + ngx_memcpy(peer->sockaddr, opeer->sockaddr, opeer->socklen); + + if (template->host->service.len == 0) { + port = ngx_inet_get_port(template->sockaddr); + ngx_inet_set_port(peer->sockaddr, port); + } + + peer->socklen = opeer->socklen; + + peer->name.len = ngx_sock_ntop(peer->sockaddr, peer->socklen, + peer->name.data, + NGX_SOCKADDR_STRLEN, 1); + + peer->host = template->host; + + server = template->host->service.len ? &opeer->server + : &template->server; + + peer->server.data = ngx_slab_alloc(peers->shpool, server->len); + if (peer->server.data == NULL) { + ngx_http_upstream_rr_peers_unlock(opeers); + return NGX_ERROR; + } + + ngx_memcpy(peer->server.data, server->data, server->len); + peer->server.len = server->len; + + if (host->service.len == 0) { + peer->weight = template->weight; + + } else { + peer->weight = (template->weight != 1 ? 
template->weight + : opeer->weight); + } + + peer->effective_weight = peer->weight; + peer->max_conns = template->max_conns; + peer->max_fails = template->max_fails; + peer->fail_timeout = template->fail_timeout; + peer->down = template->down; + + (*peers->config)++; + + *peerp = peer; + peerp = &peer->next; + + peers->number++; + peers->tries += (peer->down == 0); + peers->total_weight += peer->weight; + peers->weighted = (peers->total_weight != peers->number); + } + + break; + } + } + + ngx_http_upstream_rr_peers_unlock(opeers); + return NGX_OK; +} + + static void ngx_http_upstream_zone_set_single(ngx_http_upstream_srv_conf_t *uscf) { diff --git a/src/stream/ngx_stream_upstream_zone_module.c b/src/stream/ngx_stream_upstream_zone_module.c --- a/src/stream/ngx_stream_upstream_zone_module.c +++ b/src/stream/ngx_stream_upstream_zone_module.c @@ -15,9 +15,15 @@ static char *ngx_stream_upstream_zone(ng static ngx_int_t ngx_stream_upstream_init_zone(ngx_shm_zone_t *shm_zone, void *data); static ngx_stream_upstream_rr_peers_t *ngx_stream_upstream_zone_copy_peers( - ngx_slab_pool_t *shpool, ngx_stream_upstream_srv_conf_t *uscf); + ngx_slab_pool_t *shpool, ngx_stream_upstream_srv_conf_t *uscf, + ngx_stream_upstream_srv_conf_t *ouscf); static ngx_stream_upstream_rr_peer_t *ngx_stream_upstream_zone_copy_peer( ngx_stream_upstream_rr_peers_t *peers, ngx_stream_upstream_rr_peer_t *src); +static ngx_int_t ngx_stream_upstream_zone_preresolve( + ngx_stream_upstream_rr_peer_t *resolve, + ngx_stream_upstream_rr_peers_t *peers, + ngx_stream_upstream_rr_peer_t *oresolve, + ngx_stream_upstream_rr_peers_t *opeers); static void ngx_stream_upstream_zone_set_single( ngx_stream_upstream_srv_conf_t *uscf); static void ngx_stream_upstream_zone_remove_peer_locked( @@ -125,11 +131,11 @@ static ngx_int_t ngx_stream_upstream_init_zone(ngx_shm_zone_t *shm_zone, void *data) { size_t len; - ngx_uint_t i; + ngx_uint_t i, j; ngx_slab_pool_t *shpool; ngx_stream_upstream_rr_peers_t *peers, **peersp; - 
ngx_stream_upstream_srv_conf_t *uscf, **uscfp; - ngx_stream_upstream_main_conf_t *umcf; + ngx_stream_upstream_srv_conf_t *uscf, *ouscf, **uscfp, **ouscfp; + ngx_stream_upstream_main_conf_t *umcf, *oumcf; shpool = (ngx_slab_pool_t *) shm_zone->shm.addr; umcf = shm_zone->data; @@ -166,6 +172,7 @@ ngx_stream_upstream_init_zone(ngx_shm_zo /* copy peers to shared memory */ peersp = (ngx_stream_upstream_rr_peers_t **) (void *) &shpool->data; + oumcf = data; for (i = 0; i < umcf->upstreams.nelts; i++) { uscf = uscfp[i]; @@ -174,7 +181,38 @@ ngx_stream_upstream_init_zone(ngx_shm_zo continue; } - peers = ngx_stream_upstream_zone_copy_peers(shpool, uscf); + ouscf = NULL; + + if (oumcf) { + ouscfp = oumcf->upstreams.elts; + + for (j = 0; j < oumcf->upstreams.nelts; j++) { + + if (ouscfp[j]->shm_zone == NULL) { + continue; + } + + if (ouscfp[j]->shm_zone->shm.name.len != shm_zone->shm.name.len + || ngx_memcmp(ouscfp[j]->shm_zone->shm.name.data, + shm_zone->shm.name.data, + shm_zone->shm.name.len) + != 0) + { + continue; + } + + if (ouscfp[j]->host.len == uscf->host.len + && ngx_memcmp(ouscfp[j]->host.data, uscf->host.data, + uscf->host.len) + == 0) + { + ouscf = ouscfp[j]; + break; + } + } + } + + peers = ngx_stream_upstream_zone_copy_peers(shpool, uscf, ouscf); if (peers == NULL) { return NGX_ERROR; } @@ -189,12 +227,14 @@ ngx_stream_upstream_init_zone(ngx_shm_zo static ngx_stream_upstream_rr_peers_t * ngx_stream_upstream_zone_copy_peers(ngx_slab_pool_t *shpool, - ngx_stream_upstream_srv_conf_t *uscf) + ngx_stream_upstream_srv_conf_t *uscf, ngx_stream_upstream_srv_conf_t *ouscf) { ngx_str_t *name; ngx_uint_t *config; ngx_stream_upstream_rr_peer_t *peer, **peerp; - ngx_stream_upstream_rr_peers_t *peers, *backup; + ngx_stream_upstream_rr_peers_t *peers, *opeers, *backup; + + opeers = (ouscf ? 
ouscf->peer.data : NULL); config = ngx_slab_calloc(shpool, sizeof(ngx_uint_t)); if (config == NULL) { @@ -247,6 +287,16 @@ ngx_stream_upstream_zone_copy_peers(ngx_ (*peers->config)++; } + if (opeers) { + + if (ngx_stream_upstream_zone_preresolve(peers->resolve, peers, + opeers->resolve, opeers) + != NGX_OK) + { + return NULL; + } + } + if (peers->next == NULL) { goto done; } @@ -286,10 +336,30 @@ ngx_stream_upstream_zone_copy_peers(ngx_ peers->next = backup; + if (opeers && opeers->next) { + + if (ngx_stream_upstream_zone_preresolve(peers->resolve, backup, + opeers->resolve, opeers->next) + != NGX_OK) + { + return NULL; + } + + if (ngx_stream_upstream_zone_preresolve(backup->resolve, backup, + opeers->next->resolve, + opeers->next) + != NGX_OK) + { + return NULL; + } + } + done: uscf->peer.data = peers; + ngx_stream_upstream_zone_set_single(uscf); + return peers; } @@ -401,6 +471,123 @@ failed: } +static ngx_int_t +ngx_stream_upstream_zone_preresolve(ngx_stream_upstream_rr_peer_t *resolve, + ngx_stream_upstream_rr_peers_t *peers, + ngx_stream_upstream_rr_peer_t *oresolve, + ngx_stream_upstream_rr_peers_t *opeers) +{ + in_port_t port; + ngx_str_t *server; + ngx_stream_upstream_host_t *host; + ngx_stream_upstream_rr_peer_t *peer, *template, *opeer, **peerp; + + if (resolve == NULL || oresolve == NULL) { + return NGX_OK; + } + + for (peerp = &peers->peer; *peerp; peerp = &(*peerp)->next) { + /* void */ + } + + ngx_stream_upstream_rr_peers_rlock(opeers); + + for (template = resolve; template; template = template->next) { + for (opeer = oresolve; opeer; opeer = opeer->next) { + + if (opeer->host->name.len != template->host->name.len + || ngx_memcmp(opeer->host->name.data, + template->host->name.data, + template->host->name.len) + != 0) + { + continue; + } + + if (opeer->host->service.len != template->host->service.len + || ngx_memcmp(opeer->host->service.data, + template->host->service.data, + template->host->service.len) + != 0) + { + continue; + } + + host = 
opeer->host; + + for (opeer = opeers->peer; opeer; opeer = opeer->next) { + + if (opeer->host != host) { + continue; + } + + peer = ngx_stream_upstream_zone_copy_peer(peers, NULL); + if (peer == NULL) { + ngx_stream_upstream_rr_peers_unlock(opeers); + return NGX_ERROR; + } + + ngx_memcpy(peer->sockaddr, opeer->sockaddr, opeer->socklen); + + if (template->host->service.len == 0) { + port = ngx_inet_get_port(template->sockaddr); + ngx_inet_set_port(peer->sockaddr, port); + } + + peer->socklen = opeer->socklen; + + peer->name.len = ngx_sock_ntop(peer->sockaddr, peer->socklen, + peer->name.data, + NGX_SOCKADDR_STRLEN, 1); + + peer->host = template->host; + + server = template->host->service.len ? &opeer->server + : &template->server; + + peer->server.data = ngx_slab_alloc(peers->shpool, server->len); + if (peer->server.data == NULL) { + ngx_stream_upstream_rr_peers_unlock(opeers); + return NGX_ERROR; + } + + ngx_memcpy(peer->server.data, server->data, server->len); + peer->server.len = server->len; + + if (host->service.len == 0) { + peer->weight = template->weight; + + } else { + peer->weight = (template->weight != 1 ? 
template->weight + : opeer->weight); + } + + peer->effective_weight = peer->weight; + peer->max_conns = template->max_conns; + peer->max_fails = template->max_fails; + peer->fail_timeout = template->fail_timeout; + peer->down = template->down; + + (*peers->config)++; + + *peerp = peer; + peerp = &peer->next; + + peers->number++; + peers->tries += (peer->down == 0); + peers->total_weight += peer->weight; + peers->weighted = (peers->total_weight != peers->number); + } + + break; + } + } + + ngx_stream_upstream_rr_peers_unlock(opeers); + return NGX_OK; +} + + static void ngx_stream_upstream_zone_set_single(ngx_stream_upstream_srv_conf_t *uscf) { From a.bavshin at nginx.com Thu Jul 18 18:20:42 2024 From: a.bavshin at nginx.com (=?iso-8859-1?q?Aleksei_Bavshin?=) Date: Thu, 18 Jul 2024 11:20:42 -0700 Subject: [PATCH 5 of 7] Upstream: per-upstream resolver In-Reply-To: References: Message-ID: <20cc5e474a0bff2bc1fa.1721326842@fedora-wsl.> # HG changeset patch # User Vladimir Homutov # Date 1571405595 -10800 # Fri Oct 18 16:33:15 2019 +0300 # Node ID 20cc5e474a0bff2bc1fadcc73aae61384f8eefbd # Parent e52530ef6851c088ebbe9f400224ef0758583cf0 Upstream: per-upstream resolver. The "resolver" and "resolver_timeout" directives can now be specified directly in the "upstream" block. 
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -169,6 +169,10 @@ static ngx_int_t ngx_http_upstream_cooki static char *ngx_http_upstream(ngx_conf_t *cf, ngx_command_t *cmd, void *dummy); static char *ngx_http_upstream_server(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +#if (NGX_HTTP_UPSTREAM_ZONE) +static char *ngx_http_upstream_resolver(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); +#endif static ngx_int_t ngx_http_upstream_set_local(ngx_http_request_t *r, ngx_http_upstream_t *u, ngx_http_upstream_local_t *local); @@ -339,6 +343,24 @@ static ngx_command_t ngx_http_upstream_ 0, NULL }, +#if (NGX_HTTP_UPSTREAM_ZONE) + + { ngx_string("resolver"), + NGX_HTTP_UPS_CONF|NGX_CONF_1MORE, + ngx_http_upstream_resolver, + NGX_HTTP_SRV_CONF_OFFSET, + 0, + NULL }, + + { ngx_string("resolver_timeout"), + NGX_HTTP_UPS_CONF|NGX_CONF_TAKE1, + ngx_conf_set_msec_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_upstream_srv_conf_t, resolver_timeout), + NULL }, + +#endif + ngx_null_command }; @@ -6434,6 +6456,32 @@ not_supported: } +#if (NGX_HTTP_UPSTREAM_ZONE) + +static char * +ngx_http_upstream_resolver(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_http_upstream_srv_conf_t *uscf = conf; + + ngx_str_t *value; + + if (uscf->resolver) { + return "is duplicate"; + } + + value = cf->args->elts; + + uscf->resolver = ngx_resolver_create(cf, &value[1], cf->args->nelts - 1); + if (uscf->resolver == NULL) { + return NGX_CONF_ERROR; + } + + return NGX_CONF_OK; +} + +#endif + + ngx_http_upstream_srv_conf_t * ngx_http_upstream_add(ngx_conf_t *cf, ngx_url_t *u, ngx_uint_t flags) { @@ -6515,6 +6563,9 @@ ngx_http_upstream_add(ngx_conf_t *cf, ng uscf->line = cf->conf_file->line; uscf->port = u->port; uscf->no_port = u->no_port; +#if (NGX_HTTP_UPSTREAM_ZONE) + uscf->resolver_timeout = NGX_CONF_UNSET_MSEC; +#endif if (u->naddrs == 1 && (u->port || u->family == AF_UNIX)) { 
uscf->servers = ngx_array_create(cf->pool, 1, diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c --- a/src/http/ngx_http_upstream_round_robin.c +++ b/src/http/ngx_http_upstream_round_robin.c @@ -97,15 +97,15 @@ ngx_http_upstream_init_round_robin(ngx_c clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module); - us->resolver = clcf->resolver; - us->resolver_timeout = clcf->resolver_timeout; + if (us->resolver == NULL) { + us->resolver = clcf->resolver; + } /* - * Without "resolver_timeout" in http{}, the value is unset. - * Even if we set it in ngx_http_core_merge_loc_conf(), it's - * still dependent on the module order and unreliable. + * Without "resolver_timeout" in http{} the merged value is unset. */ - ngx_conf_init_msec_value(us->resolver_timeout, 30000); + ngx_conf_merge_msec_value(us->resolver_timeout, + clcf->resolver_timeout, 30000); if (resolve && (us->resolver == NULL diff --git a/src/stream/ngx_stream_upstream.c b/src/stream/ngx_stream_upstream.c --- a/src/stream/ngx_stream_upstream.c +++ b/src/stream/ngx_stream_upstream.c @@ -22,6 +22,11 @@ static char *ngx_stream_upstream(ngx_con void *dummy); static char *ngx_stream_upstream_server(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +#if (NGX_STREAM_UPSTREAM_ZONE) +static char *ngx_stream_upstream_resolver(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); +#endif + static void *ngx_stream_upstream_create_main_conf(ngx_conf_t *cf); static char *ngx_stream_upstream_init_main_conf(ngx_conf_t *cf, void *conf); @@ -42,6 +47,24 @@ static ngx_command_t ngx_stream_upstrea 0, NULL }, +#if (NGX_STREAM_UPSTREAM_ZONE) + + { ngx_string("resolver"), + NGX_STREAM_UPS_CONF|NGX_CONF_1MORE, + ngx_stream_upstream_resolver, + NGX_STREAM_SRV_CONF_OFFSET, + 0, + NULL }, + + { ngx_string("resolver_timeout"), + NGX_STREAM_UPS_CONF|NGX_CONF_TAKE1, + ngx_conf_set_msec_slot, + NGX_STREAM_SRV_CONF_OFFSET, + offsetof(ngx_stream_upstream_srv_conf_t, resolver_timeout), + NULL }, 
+ +#endif + ngx_null_command }; @@ -661,6 +684,32 @@ not_supported: } +#if (NGX_STREAM_UPSTREAM_ZONE) + +static char * +ngx_stream_upstream_resolver(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_stream_upstream_srv_conf_t *uscf = conf; + + ngx_str_t *value; + + if (uscf->resolver) { + return "is duplicate"; + } + + value = cf->args->elts; + + uscf->resolver = ngx_resolver_create(cf, &value[1], cf->args->nelts - 1); + if (uscf->resolver == NULL) { + return NGX_CONF_ERROR; + } + + return NGX_CONF_OK; +} + +#endif + + ngx_stream_upstream_srv_conf_t * ngx_stream_upstream_add(ngx_conf_t *cf, ngx_url_t *u, ngx_uint_t flags) { @@ -739,6 +788,9 @@ ngx_stream_upstream_add(ngx_conf_t *cf, uscf->line = cf->conf_file->line; uscf->port = u->port; uscf->no_port = u->no_port; +#if (NGX_STREAM_UPSTREAM_ZONE) + uscf->resolver_timeout = NGX_CONF_UNSET_MSEC; +#endif if (u->naddrs == 1 && (u->port || u->family == AF_UNIX)) { uscf->servers = ngx_array_create(cf->pool, 1, diff --git a/src/stream/ngx_stream_upstream_round_robin.c b/src/stream/ngx_stream_upstream_round_robin.c --- a/src/stream/ngx_stream_upstream_round_robin.c +++ b/src/stream/ngx_stream_upstream_round_robin.c @@ -104,15 +104,15 @@ ngx_stream_upstream_init_round_robin(ngx cscf = ngx_stream_conf_get_module_srv_conf(cf, ngx_stream_core_module); - us->resolver = cscf->resolver; - us->resolver_timeout = cscf->resolver_timeout; + if (us->resolver == NULL) { + us->resolver = cscf->resolver; + } /* - * Without "resolver_timeout" in stream{}, the value is unset. - * Even if we set it in ngx_stream_core_merge_srv_conf(), it's - * still dependent on the module order and unreliable. + * Without "resolver_timeout" in stream{} the merged value is unset. 
*/ - ngx_conf_init_msec_value(us->resolver_timeout, 30000); + ngx_conf_merge_msec_value(us->resolver_timeout, + cscf->resolver_timeout, 30000); if (resolve && (us->resolver == NULL From a.bavshin at nginx.com Thu Jul 18 18:20:43 2024 From: a.bavshin at nginx.com (=?iso-8859-1?q?Aleksei_Bavshin?=) Date: Thu, 18 Jul 2024 11:20:43 -0700 Subject: [PATCH 6 of 7] Upstream: copy upstream zone DNS valid time during config reload In-Reply-To: References: Message-ID: # HG changeset patch # User Mini Hawthorne # Date 1689189645 25200 # Wed Jul 12 12:20:45 2023 -0700 # Node ID b396d0c2c62c796c76921dcf20960f8ba2515aba # Parent 20cc5e474a0bff2bc1fadcc73aae61384f8eefbd Upstream: copy upstream zone DNS valid time during config reload. Previously, all upstream DNS entries would be immediately re-resolved on config reload. With a large number of upstreams, this creates a spike of DNS resolution requests. These spikes can overwhelm the DNS server or cause drops on the network. This patch retains the TTL of previous resolutions across reloads by copying each upstream's name's expiry time across configuration cycles. As a result, no additional resolutions are needed. diff --git a/src/http/modules/ngx_http_upstream_zone_module.c b/src/http/modules/ngx_http_upstream_zone_module.c --- a/src/http/modules/ngx_http_upstream_zone_module.c +++ b/src/http/modules/ngx_http_upstream_zone_module.c @@ -545,6 +545,8 @@ ngx_http_upstream_zone_preresolve(ngx_ht peer->host = template->host; + template->host->valid = host->valid; + server = template->host->service.len ? 
&opeer->server : &template->server; @@ -626,6 +628,8 @@ ngx_http_upstream_zone_remove_peer_locke static ngx_int_t ngx_http_upstream_zone_init_worker(ngx_cycle_t *cycle) { + time_t now; + ngx_msec_t timer; ngx_uint_t i; ngx_event_t *event; ngx_http_upstream_rr_peer_t *peer; @@ -639,6 +643,7 @@ ngx_http_upstream_zone_init_worker(ngx_c return NGX_OK; } + now = ngx_time(); umcf = ngx_http_cycle_get_module_main_conf(cycle, ngx_http_upstream_module); if (umcf == NULL) { @@ -670,7 +675,10 @@ ngx_http_upstream_zone_init_worker(ngx_c event->log = cycle->log; event->cancelable = 1; - ngx_add_timer(event, 1); + timer = (peer->host->valid > now + ? (ngx_msec_t) 1000 * (peer->host->valid - now) : 1); + + ngx_add_timer(event, timer); } ngx_http_upstream_rr_peers_unlock(peers); @@ -979,6 +987,8 @@ again: done: + host->valid = ctx->valid; + ngx_http_upstream_rr_peers_unlock(peers); while (++i < ctx->naddrs) { diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h --- a/src/http/ngx_http_upstream_round_robin.h +++ b/src/http/ngx_http_upstream_round_robin.h @@ -25,6 +25,7 @@ typedef struct { ngx_uint_t worker; ngx_str_t name; ngx_str_t service; + time_t valid; ngx_http_upstream_rr_peers_t *peers; ngx_http_upstream_rr_peer_t *peer; } ngx_http_upstream_host_t; diff --git a/src/stream/ngx_stream_upstream_round_robin.h b/src/stream/ngx_stream_upstream_round_robin.h --- a/src/stream/ngx_stream_upstream_round_robin.h +++ b/src/stream/ngx_stream_upstream_round_robin.h @@ -25,6 +25,7 @@ typedef struct { ngx_uint_t worker; ngx_str_t name; ngx_str_t service; + time_t valid; ngx_stream_upstream_rr_peers_t *peers; ngx_stream_upstream_rr_peer_t *peer; } ngx_stream_upstream_host_t; diff --git a/src/stream/ngx_stream_upstream_zone_module.c b/src/stream/ngx_stream_upstream_zone_module.c --- a/src/stream/ngx_stream_upstream_zone_module.c +++ b/src/stream/ngx_stream_upstream_zone_module.c @@ -542,6 +542,8 @@ ngx_stream_upstream_zone_preresolve(ngx_ peer->host 
= template->host; + template->host->valid = host->valid; + server = template->host->service.len ? &opeer->server : &template->server; @@ -623,6 +625,8 @@ ngx_stream_upstream_zone_remove_peer_loc static ngx_int_t ngx_stream_upstream_zone_init_worker(ngx_cycle_t *cycle) { + time_t now; + ngx_msec_t timer; ngx_uint_t i; ngx_event_t *event; ngx_stream_upstream_rr_peer_t *peer; @@ -636,6 +640,7 @@ ngx_stream_upstream_zone_init_worker(ngx return NGX_OK; } + now = ngx_time(); umcf = ngx_stream_cycle_get_module_main_conf(cycle, ngx_stream_upstream_module); @@ -668,7 +673,10 @@ ngx_stream_upstream_zone_init_worker(ngx event->log = cycle->log; event->cancelable = 1; - ngx_add_timer(event, 1); + timer = (peer->host->valid > now + ? (ngx_msec_t) 1000 * (peer->host->valid - now) : 1); + + ngx_add_timer(event, timer); } ngx_stream_upstream_rr_peers_unlock(peers); @@ -977,6 +985,8 @@ again: done: + host->valid = ctx->valid; + ngx_stream_upstream_rr_peers_unlock(peers); while (++i < ctx->naddrs) { From a.bavshin at nginx.com Thu Jul 18 18:20:44 2024 From: a.bavshin at nginx.com (=?iso-8859-1?q?Aleksei_Bavshin?=) Date: Thu, 18 Jul 2024 11:20:44 -0700 Subject: [PATCH 7 of 7] Tests: upstream configuration tests with re-resolvable servers In-Reply-To: References: Message-ID: # HG changeset patch # User Aleksei Bavshin # Date 1712098324 25200 # Tue Apr 02 15:52:04 2024 -0700 # Node ID cf291bf98576d5ddd36ead2b7eacc0808120e66b # Parent b396d0c2c62c796c76921dcf20960f8ba2515aba Tests: upstream configuration tests with re-resolvable servers. Based on the NGINX Plus tests authored by Sergey Kandaurov. diff --git a/stream_upstream_resolve.t b/stream_upstream_resolve.t new file mode 100644 --- /dev/null +++ b/stream_upstream_resolve.t @@ -0,0 +1,379 @@ +#!/usr/bin/perl + +# (C) Sergey Kandaurov +# (C) Nginx, Inc. + +# Stream tests for dynamic upstream configuration with re-resolvable servers. 
+ +############################################################################### + +use warnings; +use strict; + +use Test::More; + +use IO::Select; +use Socket qw/ CRLF /; + +BEGIN { use FindBin; chdir($FindBin::Bin); } + +use lib 'lib'; +use Test::Nginx; +use Test::Nginx::Stream qw/ stream /; + +############################################################################### + +select STDERR; $| = 1; +select STDOUT; $| = 1; + +my $t = Test::Nginx->new()->has(qw/stream stream_upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +stream { + %%TEST_GLOBALS_STREAM%% + + upstream u { + zone z 1m; + server example.net:%%PORT_8080%% max_fails=0 resolve; + } + + # lower the retry timeout after empty reply + resolver 127.0.0.1:%%PORT_8983_UDP%% valid=1s; + # retry query shortly after DNS is started + resolver_timeout 1s; + + log_format test $upstream_addr; + + server { + listen 127.0.0.1:8082; + proxy_pass u; + access_log %%TESTDIR%%/cc.log test; + proxy_next_upstream on; + proxy_connect_timeout 50ms; + } +} + +EOF + +port(8084); + +$t->run_daemon(\&dns_daemon, port(8983), $t) + ->waitforfile($t->testdir . '/' . port(8983)); +$t->try_run('no resolve in upstream server')->plan(11); + +############################################################################### + +my $p0 = port(8080); + +update_name({A => '127.0.0.201'}); +stream('127.0.0.1:' . port(8082))->read(); + +# A changed + +update_name({A => '127.0.0.202'}); +stream('127.0.0.1:' . port(8082))->read(); + +# 1 more A added + +update_name({A => '127.0.0.201 127.0.0.202'}); +stream('127.0.0.1:' . port(8082))->read(); + +# 1 A removed, 2 AAAA added + +update_name({A => '127.0.0.201', AAAA => 'fe80::1 fe80::2'}); +stream('127.0.0.1:' . port(8082))->read(); + +# all records removed + +update_name(); +stream('127.0.0.1:' . port(8082))->read(); + +# A added after empty + +update_name({A => '127.0.0.201'}); +stream('127.0.0.1:' . 
port(8082))->read(); + +# changed to CNAME + +update_name({CNAME => 'alias'}, 4); +stream('127.0.0.1:' . port(8082))->read(); + +# bad DNS reply should not affect existing upstream configuration + +update_name({ERROR => 'SERVFAIL'}); +stream('127.0.0.1:' . port(8082))->read(); + +$t->stop(); + +Test::Nginx::log_core('||', $t->read_file('cc.log')); + +open my $f, '<', "${\($t->testdir())}/cc.log" or die "Can't open cc.log: $!"; +my $line; + +like($f->getline(), qr/127.0.0.201:$p0/, 'log - A'); + +# A changed + +like($f->getline(), qr/127.0.0.202:$p0/, 'log - A changed'); + +# 1 more A added + +$line = $f->getline(); +like($line, qr/127.0.0.201:$p0/, 'log - A A 1'); +like($line, qr/127.0.0.202:$p0/, 'log - A A 2'); + +# 1 A removed, 2 AAAA added + +$line = $f->getline(); +like($line, qr/127.0.0.201:$p0/, 'log - A AAAA AAAA 1'); +like($line, qr/\[fe80::1\]:$p0/, 'log - A AAAA AAAA 2'); +like($line, qr/\[fe80::2\]:$p0/, 'log - A AAAA AAAA 3'); + +# all records removed + +like($f->getline(), qr/^u$/, 'log - empty response'); + +# A added after empty + +like($f->getline(), qr/127.0.0.201:$p0/, 'log - A added 1'); + +# changed to CNAME + +like($f->getline(), qr/127.0.0.203:$p0/, 'log - CNAME 1'); + +# bad DNS reply should not affect existing upstream configuration + +like($f->getline(), qr/127.0.0.203:$p0/, 'log - ERROR 1'); + +############################################################################### + +sub update_name { + my ($name, $plan) = @_; + + $plan = 2 if !defined $plan; + + sub sock { + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . port(8084) + ) + or die "Can't connect to nginx: $!\n"; + } + + $name->{A} = '' unless $name->{A}; + $name->{AAAA} = '' unless $name->{AAAA}; + $name->{CNAME} = '' unless $name->{CNAME}; + $name->{ERROR} = '' unless $name->{ERROR}; + + my $req =<{A} +X-AAAA: $name->{AAAA} +X-CNAME: $name->{CNAME} +X-ERROR: $name->{ERROR} + +EOF + + my ($gen) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + for (1 .. 
10) { + my ($gen2) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + + # let resolver cache expire to finish upstream reconfiguration + select undef, undef, undef, 0.5; + last unless ($gen + $plan > $gen2); + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $h) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant SERVFAIL => 2; + use constant NXDOMAIN => 3; + + use constant A => 1; + use constant CNAME => 5; + use constant AAAA => 28; + use constant DNAME => 39; + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl) = (0x8180, NOERROR, 1); + $h = {A => [ "127.0.0.201" ]} unless defined $h; + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + my $name = join('.', @name); + + if ($h->{ERROR}) { + $rcode = SERVFAIL; + goto bad; + } + + if ($name eq 'example.net') { + if ($type == A && $h->{A}) { + map { push @rdata, rd_addr($ttl, $_) } @{$h->{A}}; + } + if ($type == AAAA && $h->{AAAA}) { + map { push @rdata, rd_addr6($ttl, $_) } @{$h->{AAAA}}; + } + my $cname = defined $h->{CNAME} ? $h->{CNAME} : 0; + if ($cname) { + push @rdata, pack("n3N nCa5n", 0xc00c, CNAME, IN, $ttl, + 8, 5, $cname, 0xc00c); + } + + } elsif ($name eq 'alias.example.net') { + if ($type == A) { + push @rdata, rd_addr($ttl, '127.0.0.203'); + } + } + +bad: + + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . 
join('', @rdata); +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub expand_ip6 { + my ($addr) = @_; + + substr ($addr, index($addr, "::"), 2) = + join "0", map { ":" } (0 .. 8 - (split /:/, $addr) + 1); + map { hex "0" x (4 - length $_) . "$_" } split /:/, $addr; +} + +sub rd_addr6 { + my ($ttl, $addr) = @_; + + pack 'n3N nn8', 0xc00c, AAAA, IN, $ttl, 16, expand_ip6($addr); +} + +sub dns_daemon { + my ($port, $t) = @_; + my ($data, $recv_data, $h); + + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => $port, + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalHost => '127.0.0.1:' . port(8084), + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $sel = IO::Select->new($socket, $control); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . 
$port; + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $h); + $fh->send($data); + $cnt++; + + } else { + $h = process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + $headers =~ /X-A: (.*)$/m; + map { push @{$h{A}}, $_ } split(/ /, $1); + $headers =~ /X-AAAA: (.*)$/m; + map { push @{$h{AAAA}}, $_ } split(/ /, $1); + $headers =~ /X-CNAME: (.*)$/m; + $h{CNAME} = $1; + $headers =~ /X-ERROR: (.*)$/m; + $h{ERROR} = $1; + + Test::Nginx::log_core('||', "$port: response, 200"); + print $client < 'win32') if $^O eq 'MSWin32'; + +my $t = Test::Nginx->new()->has(qw/stream stream_upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +stream { + %%TEST_GLOBALS_STREAM%% + + upstream u { + zone z 1m; + server example.net:%%PORT_8081%% resolve; + } + + upstream u2 { + zone z 1m; + server 127.0.0.203:%%PORT_8081%% max_fails=0; + server example.net:%%PORT_8081%% resolve max_fails=0; + } + + # lower the retry timeout after empty reply + resolver 127.0.0.1:%%PORT_8980_UDP%% valid=1s; + # retry query shortly after DNS is started + resolver_timeout 1s; + + log_format test $upstream_addr; + + server { + listen 127.0.0.1:8082; + proxy_pass u; + proxy_connect_timeout 50ms; + access_log %%TESTDIR%%/cc.log test; + } + + server { + listen 127.0.0.1:8083; + proxy_pass u2; + proxy_connect_timeout 50ms; + access_log %%TESTDIR%%/cc2.log test; + } 
+} + +EOF + +$t->run_daemon(\&dns_daemon, $t)->waitforfile($t->testdir . '/' . port(8980)); +$t->try_run('no resolve in upstream server')->plan(9); + +############################################################################### + +my $p = port(8081); + +update_name({A => '127.0.0.201'}); +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8083))->read(); + +update_name({ERROR => 'SERVFAIL'}, 0); + +my $conf = $t->read_file('nginx.conf'); +$conf =~ s/$p/port(8082)/gmse; +$t->write_file('nginx.conf', $conf); + +$t->reload(); +waitforworker($t); + +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8083))->read(); + +update_name({A => '127.0.0.202'}); +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8083))->read(); + +$t->stop(); + +Test::Nginx::log_core('||', $t->read_file('cc.log')); + +open my $f, '<', "${\($t->testdir())}/cc.log" or die "Can't open cc.log: $!"; + +like($f->getline(), qr/127.0.0.201:$p/, 'log - before'); +like($f->getline(), qr/127.0.0.201:$p/, 'log - before 2'); + +$p = port(8082); + +like($f->getline(), qr/127.0.0.201:$p/, 'log - preresolve'); +like($f->getline(), qr/127.0.0.201:$p/, 'log - preresolve 2'); + +like($f->getline(), qr/127.0.0.202:$p/, 'log - update'); +like($f->getline(), qr/127.0.0.202:$p/, 'log - update 2'); + +Test::Nginx::log_core('||', $t->read_file('cc2.log')); + +$p = port(8081); + +open $f, '<', "${\($t->testdir())}/cc2.log" or die "Can't open cc2.log: $!"; + +like($f->getline(), qr/127.0.0.(201:$p, 127.0.0.203|203:$p, 127.0.0.201):$p/, + 'log many - before'); + +$p = port(8082); + +like($f->getline(), qr/127.0.0.(201:$p, 127.0.0.203|203:$p, 127.0.0.201):$p/, + 'log many - preresolve'); + +like($f->getline(), qr/127.0.0.(202:$p, 127.0.0.203|203:$p, 127.0.0.202):$p/, + 'log many - update'); + 
+############################################################################### + +sub waitforworker { + my ($t) = @_; + + for (1 .. 30) { + last if $t->read_file('error.log') =~ /exited with code/; + select undef, undef, undef, 0.2; + } +} + +sub update_name { + my ($name, $plan) = @_; + + $plan = 2 if !defined $plan; + + sub sock { + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . port(8081) + ) + or die "Can't connect to nginx: $!\n"; + } + + $name->{A} = '' unless $name->{A}; + $name->{ERROR} = '' unless $name->{ERROR}; + + my $req =<{A} +X-ERROR: $name->{ERROR} + +EOF + + my ($gen) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + for (1 .. 10) { + my ($gen2) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + + # let resolver cache expire to finish upstream reconfiguration + select undef, undef, undef, 0.5; + last unless ($gen + $plan > $gen2); + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $h) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant SERVFAIL => 2; + use constant NXDOMAIN => 3; + + use constant A => 1; + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl) = (0x8180, NOERROR, 1); + $h = {A => [ "127.0.0.201" ]} unless defined $h; + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + my $name = join('.', @name); + + if ($h->{ERROR}) { + $rcode = SERVFAIL; + goto bad; + } + + if ($name eq 'example.net' && $type == A && $h->{A}) { + map { push @rdata, rd_addr($ttl, $_) } @{$h->{A}}; + } + +bad: + + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, 
$type, $class) . join('', @rdata); +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub dns_daemon { + my ($t) = @_; + my ($data, $recv_data, $h); + + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => port(8980), + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalHost => "127.0.0.1:" . port(8081), + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $sel = IO::Select->new($socket, $control); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . port(8980); + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $h); + $fh->send($data); + $cnt++; + + } else { + $h = process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + $headers =~ /X-A: (.*)$/m; + map { push @{$h{A}}, $_ } split(/ /, $1); + $headers =~ /X-ERROR: (.*)$/m; + $h{ERROR} = $1; + + Test::Nginx::log_core('||', "$port: response, 200"); + print $client <new()->has(qw/stream stream_upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +stream { + %%TEST_GLOBALS_STREAM%% + + resolver 127.0.0.1:%%PORT_8980_UDP%%; + + upstream u { + zone z 1m; + server 
example.net:%%PORT_8080%% resolve; + } + + upstream u1 { + zone z 1m; + server example.net:%%PORT_8080%% resolve; + resolver 127.0.0.1:%%PORT_8981_UDP%%; + } + + upstream u2 { + zone z 1m; + server example.net:%%PORT_8080%% resolve; + resolver 127.0.0.1:%%PORT_8982_UDP%%; + resolver_timeout 200s; # for coverage + } + + log_format test $upstream_addr; + + proxy_connect_timeout 50ms; + + server { + listen 127.0.0.1:8081; + proxy_pass u; + + access_log %%TESTDIR%%/access.log test; + } + + server { + listen 127.0.0.1:8082; + proxy_pass u1; + + access_log %%TESTDIR%%/access1.log test; + } + + server { + listen 127.0.0.1:8083; + proxy_pass u2; + + access_log %%TESTDIR%%/access2.log test; + } +} + +EOF + +$t->run_daemon(\&dns_daemon, $t, port($_), port($_ + 10)) for (8980 .. 8982); +$t->waitforfile($t->testdir . '/' . port($_)) for (8980 .. 8982); + +$t->try_run('no resolver in upstream')->plan(6); + +############################################################################### + +ok(waitfordns(8980), 'resolved'); +ok(waitfordns(8981), 'resolved in upstream 1'); +ok(waitfordns(8982), 'resolved in upstream 2'); + +stream('127.0.0.1:' . port(8081))->read(); +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8083))->read(); + +$t->stop(); + +like($t->read_file('access.log'), qr/127.0.0.200/, 'resolver'); +like($t->read_file('access1.log'), qr/127.0.0.201/, 'resolver upstream 1'); +like($t->read_file('access2.log'), qr/127.0.0.202/, 'resolver upstream 2'); + +############################################################################### + +sub waitfordns { + my ($port, $plan) = @_; + + $plan = 1 if !defined $plan; + + sub sock { + my ($port) = @_; + + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . 
port($port + 10) + ) + or die "Can't connect to dns control socket: $!\n"; + } + + my $req =< sock($port)) =~ /X-Gen: (\d+)/; + select undef, undef, undef, 0.5; + return 1 if $gen >= $plan; + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $port) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant A => 1; + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl) = (0x8180, NOERROR, 1); + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + + my $name = join('.', @name); + if ($name eq 'example.net' && $type == A) { + if ($port == port(8980)) { + push @rdata, rd_addr($ttl, "127.0.0.200"); + } + + if ($port == port(8981)) { + push @rdata, rd_addr($ttl, "127.0.0.201"); + } + + if ($port == port(8982)) { + push @rdata, rd_addr($ttl, "127.0.0.202"); + } + } + + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . 
join('', @rdata); +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub dns_daemon { + my ($t, $port, $control_port) = @_; + + my ($data, $recv_data); + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => $port, + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalAddr => '127.0.0.1', + LocalPort => $control_port, + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $sel = IO::Select->new($socket, $control); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . $port; + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $port); + $fh->send($data); + $cnt++; + + } else { + process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + Test::Nginx::log_core('||', "$port: response, 200"); + print $client <new()->has(qw/stream stream_upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +stream { + %%TEST_GLOBALS_STREAM%% + + upstream u { + zone z 1m; + server example.net max_fails=0 resolve service=http; + } + + upstream u2 { + zone z2 1m; + server example.net max_fails=0 resolve service=_http._tcp; + } + + # lower the retry 
timeout after empty reply + resolver 127.0.0.1:%%PORT_8981_UDP%% valid=1s; + # retry query shortly after DNS is started + resolver_timeout 1s; + + log_format test $upstream_addr; + + server { + listen 127.0.0.1:8081; + proxy_pass u; + proxy_next_upstream on; + proxy_connect_timeout 50ms; + access_log %%TESTDIR%%/cc.log test; + } + + server { + listen 127.0.0.1:8082; + proxy_pass u2; + proxy_next_upstream on; + proxy_connect_timeout 50ms; + access_log %%TESTDIR%%/cc.log test; + } +} + +EOF + +port(8080); +port(8084); + +$t->write_file('t', ''); + +$t->run_daemon(\&dns_daemon, $t)->waitforfile($t->testdir . '/' . port(8981)); +port(8981, socket => 1)->close(); +$t->try_run('no resolve in upstream server')->plan(20); + +############################################################################### + +my ($p0, $p2, $p3) = (port(8080), port(8082), port(8083)); + +update_name({A => '127.0.0.201', SRV => "1 5 $p0 example.net"}); +stream('127.0.0.1:' . port(8081))->read(); + +# fully specified service + +stream('127.0.0.1:' . port(8082))->read(); + +# A changed + +update_name({A => '127.0.0.202', SRV => "1 5 $p0 example.net"}); +stream('127.0.0.1:' . port(8081))->read(); + +# 1 more A added + +update_name({A => '127.0.0.201 127.0.0.202', SRV => "1 5 $p0 example.net"}); +stream('127.0.0.1:' . port(8081))->read(); + +# 1 A removed, 2 AAAA added + +update_name({A => '127.0.0.201', AAAA => 'fe80::1 fe80::2', + SRV => "1 5 $p0 example.net"}); +stream('127.0.0.1:' . port(8081))->read(); + +# all records removed + +update_name({SRV => "1 5 $p0 example.net"}); +stream('127.0.0.1:' . port(8081))->read(); + +# all SRV records removed + +update_name(); +stream('127.0.0.1:' . port(8081))->read(); + +# A added after empty + +update_name({A => '127.0.0.201', SRV => "1 5 $p0 example.net"}); +stream('127.0.0.1:' . port(8081))->read(); + +# SRV changed its weight + +update_name({A => '127.0.0.201', SRV => "1 6 $p0 example.net"}); +stream('127.0.0.1:' . 
port(8081))->read(); + +# changed to CNAME + +update_name({CNAME => 'alias'}, 2, 2); +stream('127.0.0.1:' . port(8081))->read(); + +# bad SRV reply should not affect existing upstream configuration + +update_name({CNAME => 'alias', ERROR => 'SERVFAIL'}, 1, 0); +stream('127.0.0.1:' . port(8081))->read(); +update_name({ERROR => ''}, 1, 0); + +# 2 equal SRV RR + +update_name({A => '127.0.0.201', + SRV => "1 5 $p0 example.net;1 5 $p0 example.net"}); +stream('127.0.0.1:' . port(8081))->read(); + +# all equal records removed + +update_name(); +stream('127.0.0.1:' . port(8081))->read(); + +# 2 different SRV RR + +update_name({A => '127.0.0.201', + SRV => "1 5 $p2 example.net;2 6 $p3 alias.example.net"}, 1, 2); +stream('127.0.0.1:' . port(8081))->read(); + +# all different records removed + +update_name(); +stream('127.0.0.1:' . port(8081))->read(); + +# bad subordinate reply should not affect existing upstream configuration + +update_name({A => '127.0.0.201', + SRV => "1 5 $p0 example.net;1 5 $p0 example.net"}); +stream('127.0.0.1:' . port(8081))->read(); + +update_name({A => '127.0.0.201', SERROR => 'SERVFAIL', + SRV => "1 5 $p0 example.net;1 5 $p0 example.net"}); +stream('127.0.0.1:' . 
port(8081))->read(); + +$t->stop(); + +Test::Nginx::log_core('||', $t->read_file('cc.log')); + +open my $f, '<', "${\($t->testdir())}/cc.log" or die "Can't open cc.log: $!"; +my $line; + +like($f->getline(), qr/127.0.0.201:$p0/, 'log - A'); + +# fully specified service + +like($f->getline(), qr/127.0.0.201:$p0/, 'log - A full'); + +# A changed + +like($f->getline(), qr/127.0.0.202:$p0/, 'log - A changed'); + +# 1 more A added + +$line = $f->getline(); +like($line, qr/127.0.0.201:$p0/, 'log - A A 1'); +like($line, qr/127.0.0.202:$p0/, 'log - A A 2'); + +# 1 A removed, 2 AAAA added + +$line = $f->getline(); +like($line, qr/127.0.0.201:$p0/, 'log - A AAAA AAAA 1'); +like($line, qr/\[fe80::1\]:$p0/, 'log - A AAAA AAAA 2'); +like($line, qr/\[fe80::2\]:$p0/, 'log - A AAAA AAAA 3'); + +# all records removed + +like($f->getline(), qr/^u$/, 'log - empty response'); + +# all SRV records removed + +like($f->getline(), qr/^u$/, 'log - empty response'); + +# A added after empty + +like($f->getline(), qr/127.0.0.201:$p0/, 'log - A added 1'); + +# SRV changed its weight + +like($f->getline(), qr/127.0.0.201:$p0/, 'log - SRV weight'); + +# changed to CNAME + +like($f->getline(), qr/127.0.0.203:$p0/, 'log - CNAME'); + +# bad SRV reply should not affect existing upstream configuration + +like($f->getline(), qr/127.0.0.203:$p0/, 'log - ERROR'); + +# 2 equal SRV RR + +like($f->getline(), qr/127.0.0.201:$p0, 127.0.0.201:$p0/, 'log - SRV same'); + +# all equal records removed + +like($f->getline(), qr/^u$/, 'log - SRV same removed'); + +# 2 different SRV RR + +$line = $f->getline(); +like($line, qr/127.0.0.201:$p2, 127.0.0.203:$p3/, 'log - SRV diff'); + +# all different records removed + +like($f->getline(), qr/^u$/, 'log - SRV diff removed'); + +# bad subordinate reply should not affect existing upstream configuration + +like($f->getline(), qr/, /, 'log - subordinate good'); +like($f->getline(), qr/, /, 'log - subordinate error'); + 
+############################################################################### + +sub update_name { + my ($name, $plan, $plan6) = @_; + + $plan = 1, $plan6 = 0 if !defined $name; + $plan = $plan6 = 1 if !defined $plan; + $plan += $plan6 + $plan6; + + sub sock { + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . port(8084) + ) + or die "Can't connect to nginx: $!\n"; + } + + $name->{A} = '' unless $name->{A}; + $name->{AAAA} = '' unless $name->{AAAA}; + $name->{CNAME} = '' unless $name->{CNAME}; + $name->{ERROR} = '' unless $name->{ERROR}; + $name->{SERROR} = '' unless $name->{SERROR}; + $name->{SRV} = '' unless $name->{SRV}; + + my $req =<{A} +X-AAAA: $name->{AAAA} +X-CNAME: $name->{CNAME} +X-ERROR: $name->{ERROR} +X-SERROR: $name->{SERROR} +X-SRV: $name->{SRV} + +EOF + + my ($gen) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + for (1 .. 10) { + my ($gen2) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + + # let resolver cache expire to finish upstream reconfiguration + select undef, undef, undef, 0.5; + last unless ($gen + $plan > $gen2); + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $h, $cnt, $tcp) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant FORMERR => 1; + use constant SERVFAIL => 2; + use constant NXDOMAIN => 3; + + use constant A => 1; + use constant CNAME => 5; + use constant AAAA => 28; + use constant SRV => 33; + + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl, $port) = (0x8180, NOERROR, 3600, port(8080)); + $h = {A => [ "127.0.0.1" ], SRV => [ "1 5 $port example.net" ]} + unless defined $h; + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", 
$recv_data); + my $name = join('.', @name); + + if ($h->{ERROR} && $type == SRV) { + $rcode = SERVFAIL; + goto bad; + } + + # subordinate error + + if ($h->{SERROR} && $type != SRV) { + $rcode = SERVFAIL; + goto bad; + } + + if ($name eq '_http._tcp.example.net') { + if ($type == SRV && $h->{SRV}) { + map { push @rdata, rd_srv($ttl, (split ' ', $_)) } + @{$h->{SRV}}; + } + + my $cname = defined $h->{CNAME} ? $h->{CNAME} : 0; + if ($cname) { + push @rdata, pack("n3N nCa5n", 0xc00c, CNAME, IN, $ttl, + 8, 5, "alias", 0xc00c + length("_http._tcp ")); + } + + } elsif ($name eq '_http._tcp.trunc.example.net' && $type == SRV) { + push @rdata, $tcp + ? rd_srv($ttl, 1, 1, $port, 'tcp.example.net') + : rd_srv($ttl, 1, 1, $port, 'example.net'); + + $hdr |= 0x0300 if $name eq '_http._tcp.trunc.example.net' + and !$tcp; + + } elsif ($name eq 'example.net' || $name eq 'tcp.example.net') { + if ($type == A && $h->{A}) { + map { push @rdata, rd_addr($ttl, $_) } @{$h->{A}}; + } + if ($type == AAAA && $h->{AAAA}) { + map { push @rdata, rd_addr6($ttl, $_) } @{$h->{AAAA}}; + } + my $cname = defined $h->{CNAME} ? $h->{CNAME} : 0; + if ($cname) { + push @rdata, pack("n3N nCa5n", 0xc00c, CNAME, IN, $ttl, + 8, 5, $cname, 0xc00c); + } + + } elsif ($name eq 'alias.example.net') { + if ($type == SRV) { + push @rdata, rd_srv($ttl, 1, 5, $port, 'example.net'); + } + if ($type == A) { + push @rdata, rd_addr($ttl, '127.0.0.203'); + } + } + +bad: + + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $$cnt++ if $type == SRV || keys %$h; + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . 
join('', @rdata); +} + +sub rd_srv { + my ($ttl, $pri, $w, $port, $name) = @_; + my @rdname = split /\./, $name; + my $rdlen = length(join '', @rdname) + @rdname + 7; # pri w port x + + pack 'n3N n n3 (C/a*)* x', + 0xc00c, SRV, IN, $ttl, $rdlen, $pri, $w, $port, @rdname; +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub expand_ip6 { + my ($addr) = @_; + + substr ($addr, index($addr, "::"), 2) = + join "0", map { ":" } (0 .. 8 - (split /:/, $addr) + 1); + map { hex "0" x (4 - length $_) . "$_" } split /:/, $addr; +} + +sub rd_addr6 { + my ($ttl, $addr) = @_; + + pack 'n3N nn8', 0xc00c, AAAA, IN, $ttl, 16, expand_ip6($addr); +} + +sub dns_daemon { + my ($t) = @_; + my ($data, $recv_data, $h); + + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => port(8981), + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalHost => '127.0.0.1:' . port(8084), + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $tcp = port(8981, socket => 1); + my $sel = IO::Select->new($socket, $control, $tcp); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . 
port(8981); + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh || $tcp == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $h, \$cnt); + $fh->send($data); + + } elsif ($fh->sockport() == port(8084)) { + $h = process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + + } elsif ($fh->sockport() == port(8981)) { + $fh->recv($recv_data, 65536); + unless (length $recv_data) { + $sel->remove($fh); + $fh->close; + next; + } + +again: + my $len = unpack("n", $recv_data); + my $data = substr $recv_data, 2, $len; + $data = reply_handler($data, $h, \$cnt, 1); + $data = pack("n", length $data) . $data; + $fh->send($data); + $recv_data = substr $recv_data, 2 + $len; + goto again if length $recv_data; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + $headers =~ /X-A: (.*)$/m; + map { push @{$h{A}}, $_ } split(/ /, $1); + $headers =~ /X-AAAA: (.*)$/m; + map { push @{$h{AAAA}}, $_ } split(/ /, $1); + $headers =~ /X-SRV: (.*)$/m; + map { push @{$h{SRV}}, $_ } split(/;/, $1); + $headers =~ /X-CNAME: (.+)$/m and $h{CNAME} = $1; + $headers =~ /X-ERROR: (.+)$/m and $h{ERROR} = $1; + $headers =~ /X-SERROR: (.+)$/m and $h{SERROR} = $1; + + Test::Nginx::log_core('||', "$port: response, 200"); + print $client < 'win32') if $^O eq 'MSWin32'; + +my $t = Test::Nginx->new()->has(qw/stream stream_upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +stream { + %%TEST_GLOBALS_STREAM%% + + upstream u { + zone z 1m; + server example.net 
resolve service=http; + } + + # lower the retry timeout after empty reply + resolver 127.0.0.1:%%PORT_8980_UDP%% valid=1s; + # retry query shortly after DNS is started + resolver_timeout 1s; + + log_format test $upstream_addr; + + server { + listen 127.0.0.1:8082; + proxy_pass u; + proxy_connect_timeout 50ms; + access_log %%TESTDIR%%/cc.log test; + } +} + +EOF + +port(8081); + +$t->run_daemon(\&dns_daemon, $t)->waitforfile($t->testdir . '/' . port(8980)); +$t->try_run('no resolve in upstream server')->plan(6); + +############################################################################### + +update_name({A => '127.0.0.201', SRV => "1 5 8080 example.net"}); +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8082))->read(); + +update_name({ERROR => 'SERVFAIL'}, 0); + +$t->reload(); +waitforworker($t); + +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8082))->read(); + +update_name({A => '127.0.0.202', SRV => "1 5 8080 example.net"}); +stream('127.0.0.1:' . port(8082))->read(); +stream('127.0.0.1:' . port(8082))->read(); + +$t->stop(); + +Test::Nginx::log_core('||', $t->read_file('cc.log')); + +open my $f, '<', "${\($t->testdir())}/cc.log" or die "Can't open cc.log: $!"; + +like($f->getline(), qr/127.0.0.201:8080/, 'log - before'); +like($f->getline(), qr/127.0.0.201:8080/, 'log - before 2'); + +like($f->getline(), qr/127.0.0.201:8080/, 'log - preresolve'); +like($f->getline(), qr/127.0.0.201:8080/, 'log - preresolve 2'); + +like($f->getline(), qr/127.0.0.202:8080/, 'log - update'); +like($f->getline(), qr/127.0.0.202:8080/, 'log - update 2'); + +############################################################################### + +sub waitforworker { + my ($t) = @_; + + for (1 .. 
30) { + last if $t->read_file('error.log') =~ /exited with code/; + select undef, undef, undef, 0.2; + } +} + +sub update_name { + my ($name, $plan) = @_; + + $plan = 3 if !defined $plan; + + sub sock { + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . port(8081) + ) + or die "Can't connect to nginx: $!\n"; + } + + $name->{A} = '' unless $name->{A}; + $name->{ERROR} = '' unless $name->{ERROR}; + $name->{SRV} = '' unless $name->{SRV}; + + my $req =<{A} +X-ERROR: $name->{ERROR} +X-SRV: $name->{SRV} + +EOF + + my ($gen) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + for (1 .. 10) { + my ($gen2) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + + # let resolver cache expire to finish upstream reconfiguration + select undef, undef, undef, 0.5; + last unless ($gen + $plan > $gen2); + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $h) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant SERVFAIL => 2; + use constant NXDOMAIN => 3; + + use constant A => 1; + use constant SRV => 33; + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl, $port) = (0x8180, NOERROR, 3600, port(8080)); + $h = {A => [ "127.0.0.201" ], SRV => [ "1 5 $port example.net" ]} + unless defined $h; + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + my $name = join('.', @name); + + if ($h->{ERROR}) { + $rcode = SERVFAIL; + goto bad; + } + + if ($name eq 'example.net' && $type == A && $h->{A}) { + map { push @rdata, rd_addr($ttl, $_) } @{$h->{A}}; + + } + if ($name eq '_http._tcp.example.net' && $type == SRV && $h->{SRV}) { + map { push @rdata, rd_srv($ttl, (split ' ', $_)) } + @{$h->{SRV}}; + } + +bad: 
+ + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . join('', @rdata); +} + +sub rd_srv { + my ($ttl, $pri, $w, $port, $name) = @_; + my @rdname = split /\./, $name; + my $rdlen = length(join '', @rdname) + @rdname + 7; # pri w port x + + pack 'n3N n n3 (C/a*)* x', + 0xc00c, SRV, IN, $ttl, $rdlen, $pri, $w, $port, @rdname; +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub dns_daemon { + my ($t) = @_; + my ($data, $recv_data, $h); + + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => port(8980), + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalHost => "127.0.0.1:" . port(8081), + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $sel = IO::Select->new($socket, $control); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . 
port(8980); + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $h); + $fh->send($data); + $cnt++; + + } else { + $h = process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + $headers =~ /X-A: (.*)$/m; + map { push @{$h{A}}, $_ } split(/ /, $1); + $headers =~ /X-SRV: (.*)$/m; + map { push @{$h{SRV}}, $_ } split(/;/, $1); + $headers =~ /X-ERROR: (.*)$/m; + $h{ERROR} = $1; + + Test::Nginx::log_core('||', "$port: response, 200"); + print $client <new()->has(qw/http proxy upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +http { + %%TEST_GLOBALS_HTTP%% + + upstream u { + zone z 1m; + server example.net:%%PORT_8080%% resolve max_fails=0; + } + + # lower the retry timeout after empty reply + resolver 127.0.0.1:%%PORT_8982_UDP%% valid=1s; + # retry query shortly after DNS is started + resolver_timeout 1s; + + server { + listen 127.0.0.1:8080; + listen [::1]:%%PORT_8080%%; + server_name localhost; + + location / { + proxy_pass http://u/t; + proxy_connect_timeout 50ms; + add_header X-IP $upstream_addr; + error_page 502 504 redirect; + } + + location /2 { + proxy_pass http://u/t; + add_header X-IP $upstream_addr; + } + + location /t { } + } +} + +EOF + +port(8083); + +$t->write_file('t', ''); + +$t->run_daemon(\&dns_daemon, $t)->waitforfile($t->testdir . '/' . 
port(8982)); +$t->try_run('no resolve in upstream server')->plan(18); + +############################################################################### + +my ($r, @n); +my $p0 = port(8080); + +update_name({A => '127.0.0.201'}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'A'); +like($r, qr/127.0.0.201:$p0/, 'A 1'); + +# A changed + +update_name({A => '127.0.0.202'}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'A changed'); +like($r, qr/127.0.0.202:$p0/, 'A changed 1'); + +# 1 more A added + +update_name({A => '127.0.0.201 127.0.0.202'}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 2, 'A A'); +like($r, qr/127.0.0.201:$p0/, 'A A 1'); +like($r, qr/127.0.0.202:$p0/, 'A A 2'); + +# 1 A removed, 2 AAAA added + +update_name({A => '127.0.0.201', AAAA => 'fe80::1 fe80::2'}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 3, 'A AAAA AAAA responses'); +like($r, qr/127.0.0.201:$p0/, 'A AAAA AAAA 1'); +like($r, qr/\[fe80::1\]:$p0/, 'A AAAA AAAA 2'); +like($r, qr/\[fe80::1\]:$p0/, 'A AAAA AAAA 3'); + +# all records removed + +update_name(); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 0, 'empty response'); + +# A added after empty + +update_name({A => '127.0.0.201'}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'A added'); +like($r, qr/127.0.0.201:$p0/, 'A added 1'); + +# changed to CNAME + +update_name({CNAME => 'alias'}, 4); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'CNAME'); +like($r, qr/127.0.0.203:$p0/, 'CNAME 1'); + +# bad DNS reply should not affect existing upstream configuration + +update_name({ERROR => 'SERVFAIL'}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'ERROR'); +like($r, qr/127.0.0.203:$p0/, 'ERROR 1'); +update_name({A => '127.0.0.1'}); + +############################################################################### + +sub update_name { + my ($name, $plan) = @_; + + $plan = 2 if !defined $plan; + + sub sock { + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . 
port(8083) + ) + or die "Can't connect to nginx: $!\n"; + } + + $name->{A} = '' unless $name->{A}; + $name->{AAAA} = '' unless $name->{AAAA}; + $name->{CNAME} = '' unless $name->{CNAME}; + $name->{ERROR} = '' unless $name->{ERROR}; + + my $req =<{A} +X-AAAA: $name->{AAAA} +X-CNAME: $name->{CNAME} +X-ERROR: $name->{ERROR} + +EOF + + my ($gen) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + for (1 .. 10) { + my ($gen2) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + + # let resolver cache expire to finish upstream reconfiguration + select undef, undef, undef, 0.5; + last unless ($gen + $plan > $gen2); + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $h) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant SERVFAIL => 2; + use constant NXDOMAIN => 3; + + use constant A => 1; + use constant CNAME => 5; + use constant AAAA => 28; + use constant DNAME => 39; + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl) = (0x8180, NOERROR, 1); + $h = {A => [ "127.0.0.201" ]} unless defined $h; + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + my $name = join('.', @name); + + if ($h->{ERROR}) { + $rcode = SERVFAIL; + goto bad; + } + + if ($name eq 'example.net') { + if ($type == A && $h->{A}) { + map { push @rdata, rd_addr($ttl, $_) } @{$h->{A}}; + } + if ($type == AAAA && $h->{AAAA}) { + map { push @rdata, rd_addr6($ttl, $_) } @{$h->{AAAA}}; + } + my $cname = defined $h->{CNAME} ? 
$h->{CNAME} : 0; + if ($cname) { + push @rdata, pack("n3N nCa5n", 0xc00c, CNAME, IN, $ttl, + 8, 5, $cname, 0xc00c); + } + + } elsif ($name eq 'alias.example.net') { + if ($type == A) { + push @rdata, rd_addr($ttl, '127.0.0.203'); + } + } + +bad: + + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . join('', @rdata); +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub expand_ip6 { + my ($addr) = @_; + + substr ($addr, index($addr, "::"), 2) = + join "0", map { ":" } (0 .. 8 - (split /:/, $addr) + 1); + map { hex "0" x (4 - length $_) . "$_" } split /:/, $addr; +} + +sub rd_addr6 { + my ($ttl, $addr) = @_; + + pack 'n3N nn8', 0xc00c, AAAA, IN, $ttl, 16, expand_ip6($addr); +} + +sub dns_daemon { + my ($t) = @_; + my ($data, $recv_data, $h); + + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => port(8982), + Proto=> 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalHost => "127.0.0.1:" . port(8083), + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $sel = IO::Select->new($socket, $control); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . 
port(8982); + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $h); + $fh->send($data); + $cnt++; + + } else { + $h = process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + $headers =~ /X-A: (.*)$/m; + map { push @{$h{A}}, $_ } split(/ /, $1); + $headers =~ /X-AAAA: (.*)$/m; + map { push @{$h{AAAA}}, $_ } split(/ /, $1); + $headers =~ /X-CNAME: (.*)$/m; + $h{CNAME} = $1; + $headers =~ /X-ERROR: (.*)$/m; + $h{ERROR} = $1; + + Test::Nginx::log_core('||', "$port: response, 200"); + print $client < 'win32') if $^O eq 'MSWin32'; + +my $t = Test::Nginx->new()->has(qw/http proxy upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +http { + %%TEST_GLOBALS_HTTP%% + + upstream u { + zone z 1m; + server example.net:%%PORT_8081%% resolve; + } + + upstream u2 { + zone z 1m; + server 127.0.0.203:%%PORT_8081%% max_fails=0; + server example.net:%%PORT_8081%% resolve max_fails=0; + } + + # lower the retry timeout after empty reply + resolver 127.0.0.1:%%PORT_8980_UDP%% valid=1s; + # retry query shortly after DNS is started + resolver_timeout 1s; + + server { + listen 127.0.0.1:8080; + server_name localhost; + + location / { + proxy_pass http://u; + proxy_connect_timeout 50ms; + add_header X-IP $upstream_addr always; + } + + location /2 { + proxy_pass http://u2; + proxy_connect_timeout 50ms; + add_header X-IP $upstream_addr always; 
+ } + } +} + +EOF + +$t->run_daemon(\&dns_daemon, $t)->waitforfile($t->testdir . '/' . port(8980)); +$t->try_run('no resolve in upstream server')->plan(9); + +############################################################################### + +my $p = port(8081); + +update_name({A => '127.0.0.201'}); +like(http_get('/'), qr/X-IP: 127.0.0.201:$p/, 'reload - before - request'); +like(http_get('/'), qr/X-IP: 127.0.0.201:$p/, 'reload - before - request 2'); +like(http_get('/2'), qr/127.0.0.(201:$p, 127.0.0.203|203:$p, 127.0.0.201):$p/, + 'reload - before - many'); + +update_name({ERROR => 'SERVFAIL'}, 0); + +my $conf = $t->read_file('nginx.conf'); +$conf =~ s/$p/port(8082)/gmse; +$p = port(8082); +$t->write_file('nginx.conf', $conf); + +$t->reload(); +waitforworker($t); + +like(http_get('/'), qr/X-IP: 127.0.0.201:$p/, 'reload - preresolve - request'); +like(http_get('/'), qr/X-IP: 127.0.0.201:$p/, 'reload - preresolve - request 2'); +like(http_get('/2'), qr/127.0.0.(201:$p, 127.0.0.203|203:$p, 127.0.0.201):$p/, + 'reload - preresolve - many'); + +update_name({A => '127.0.0.202'}); +like(http_get('/'), qr/X-IP: 127.0.0.202:$p/, 'reload - update - request'); +like(http_get('/'), qr/X-IP: 127.0.0.202:$p/, 'reload - update - request 2'); +like(http_get('/2'), qr/127.0.0.(202:$p, 127.0.0.203|203:$p, 127.0.0.202):$p/, + 'reload - update - many'); + +############################################################################### + +sub waitforworker { + my ($t) = @_; + + for (1 .. 30) { + last if $t->read_file('error.log') =~ /exited with code/; + select undef, undef, undef, 0.2; + } +} + +sub update_name { + my ($name, $plan) = @_; + + $plan = 2 if !defined $plan; + + sub sock { + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . 
port(8081) + ) + or die "Can't connect to nginx: $!\n"; + } + + $name->{A} = '' unless $name->{A}; + $name->{ERROR} = '' unless $name->{ERROR}; + + my $req =<{A} +X-ERROR: $name->{ERROR} + +EOF + + my ($gen) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + for (1 .. 10) { + my ($gen2) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + + # let resolver cache expire to finish upstream reconfiguration + select undef, undef, undef, 0.5; + last unless ($gen + $plan > $gen2); + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $h) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant SERVFAIL => 2; + use constant NXDOMAIN => 3; + + use constant A => 1; + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl) = (0x8180, NOERROR, 1); + $h = {A => [ "127.0.0.201" ]} unless defined $h; + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + my $name = join('.', @name); + + if ($h->{ERROR}) { + $rcode = SERVFAIL; + goto bad; + } + + if ($name eq 'example.net' && $type == A && $h->{A}) { + map { push @rdata, rd_addr($ttl, $_) } @{$h->{A}}; + } + +bad: + + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . 
join('', @rdata); +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub dns_daemon { + my ($t) = @_; + my ($data, $recv_data, $h); + + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => port(8980), + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalHost => "127.0.0.1:" . port(8081), + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $sel = IO::Select->new($socket, $control); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . port(8980); + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $h); + $fh->send($data); + $cnt++; + + } else { + $h = process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + $headers =~ /X-A: (.*)$/m; + map { push @{$h{A}}, $_ } split(/ /, $1); + $headers =~ /X-ERROR: (.*)$/m; + $h{ERROR} = $1; + + Test::Nginx::log_core('||', "$port: response, 200"); + print $client <new()->has(qw/http proxy upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +http { + %%TEST_GLOBALS_HTTP%% + + resolver 127.0.0.1:%%PORT_8980_UDP%%; + + upstream u { + zone z 1m; + server example.net:%%PORT_8080%% resolve; 
+ } + + upstream u1 { + zone z 1m; + server example.net:%%PORT_8080%% resolve; + resolver 127.0.0.1:%%PORT_8981_UDP%%; + } + + upstream u2 { + zone z 1m; + server example.net:%%PORT_8080%% resolve; + resolver 127.0.0.1:%%PORT_8982_UDP%%; + resolver_timeout 200s; # for coverage + } + + server { + listen 127.0.0.1:8080; + server_name localhost; + + location / { + proxy_pass http://$args/t; + proxy_connect_timeout 50ms; + add_header X-IP $upstream_addr; + error_page 502 504 redirect; + } + + } +} + +EOF + +$t->run_daemon(\&dns_daemon, $t, port($_), port($_ + 10)) for (8980 .. 8982); +$t->waitforfile($t->testdir . '/' . port($_)) for (8980 .. 8982); + +$t->try_run('no resolver in upstream')->plan(6); + +############################################################################### + +ok(waitfordns(8980), 'resolved'); +ok(waitfordns(8981), 'resolved in upstream 1'); +ok(waitfordns(8982), 'resolved in upstream 2'); + +like(http_get('/?u'), qr/127.0.0.200/, 'resolver'); +like(http_get('/?u1'), qr/127.0.0.201/, 'resolver upstream 1'); +like(http_get('/?u2'), qr/127.0.0.202/, 'resolver upstream 2'); + +############################################################################### + +sub waitfordns { + my ($port, $plan) = @_; + + $plan = 1 if !defined $plan; + + sub sock { + my ($port) = @_; + + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . 
port($port + 10) + ) + or die "Can't connect to dns control socket: $!\n"; + } + + my $req =< sock($port)) =~ /X-Gen: (\d+)/; + select undef, undef, undef, 0.5; + return 1 if $gen >= $plan; + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $port) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant A => 1; + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl) = (0x8180, NOERROR, 1); + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + + my $name = join('.', @name); + if ($name eq 'example.net' && $type == A) { + if ($port == port(8980)) { + push @rdata, rd_addr($ttl, "127.0.0.200"); + } + + if ($port == port(8981)) { + push @rdata, rd_addr($ttl, "127.0.0.201"); + } + + if ($port == port(8982)) { + push @rdata, rd_addr($ttl, "127.0.0.202"); + } + } + + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . 
join('', @rdata); +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub dns_daemon { + my ($t, $port, $control_port) = @_; + + my ($data, $recv_data); + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => $port, + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalAddr => '127.0.0.1', + LocalPort => $control_port, + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $sel = IO::Select->new($socket, $control); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . $port; + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $port); + $fh->send($data); + $cnt++; + + } else { + process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + Test::Nginx::log_core('||', "$port: response, 200"); + print $client <new()->has(qw/http proxy upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +http { + %%TEST_GLOBALS_HTTP%% + + upstream u { + zone z 1m; + server example.net resolve service=http max_fails=0; + } + + upstream u2 { + zone z2 1m; + server example.net resolve service=_http._tcp; + } + + # lower the retry timeout after empty 
reply + resolver 127.0.0.1:%%PORT_8981_UDP%% valid=1s; + # retry query shortly after DNS is started + resolver_timeout 1s; + + server { + listen 127.0.0.1:8080; + server_name localhost; + + add_header X-IP $upstream_addr; + error_page 502 504 redirect; + proxy_connect_timeout 50ms; + + location / { + proxy_pass http://u/t; + } + + location /full { + proxy_pass http://u2/t; + } + + location /t { } + } +} + +EOF + +port(8084); + +$t->write_file('t', ''); + +$t->run_daemon(\&dns_daemon, $t)->waitforfile($t->testdir . '/' . port(8981)); +port(8981, socket => 1)->close(); +$t->try_run('no service in upstream server')->plan(30); + +############################################################################### + +my ($r, @n); +my ($p0, $p2, $p3) = (port(8080), port(8082), port(8083)); + +update_name({A => '127.0.0.201', SRV => "1 5 $p0 example.net"}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'A'); +like($r, qr/127.0.0.201:$p0/, 'A 1'); + +# fully specified service + +$r = http_get('/full'); +is(@n = $r =~ /:$p0/g, 1, 'A full'); +like($r, qr/127.0.0.201:$p0/, 'A full 1'); + +# A changed + +update_name({A => '127.0.0.202', SRV => "1 5 $p0 example.net"}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'A changed'); +like($r, qr/127.0.0.202:$p0/, 'A changed 1'); + +# 1 more A added + +update_name({A => '127.0.0.201 127.0.0.202', SRV => "1 5 $p0 example.net"}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 2, 'A A'); +like($r, qr/127.0.0.201:$p0/, 'A A 1'); +like($r, qr/127.0.0.202:$p0/, 'A A 2'); + +# 1 A removed, 2 AAAA added + +update_name({A => '127.0.0.201', AAAA => 'fe80::1 fe80::2', + SRV => "1 5 $p0 example.net"}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 3, 'A AAAA AAAA responses'); +like($r, qr/127.0.0.201:$p0/, 'A AAAA AAAA 1'); +like($r, qr/\[fe80::1\]:$p0/, 'A AAAA AAAA 2'); +like($r, qr/\[fe80::1\]:$p0/, 'A AAAA AAAA 3'); + +# all records removed + +update_name({SRV => "1 5 $p0 example.net"}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 0, 'empty SRV 
response'); + +# all SRV records removed + +update_name(); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 0, 'empty response'); + +# A added after empty + +update_name({A => '127.0.0.201', SRV => "1 5 $p0 example.net"}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'A added'); +like($r, qr/127.0.0.201:$p0/, 'A added 1'); + +# SRV changed its weight + +update_name({A => '127.0.0.201', SRV => "1 6 $p0 example.net"}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'SRV weight'); +like($r, qr/127.0.0.201:$p0/, 'SRV weight 1'); + +# changed to CNAME + +update_name({CNAME => 'alias'}, 2, 2); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'CNAME'); +like($r, qr/127.0.0.203:$p0/, 'CNAME 1'); + +# bad SRV reply should not affect existing upstream configuration + +update_name({CNAME => 'alias', ERROR => 'SERVFAIL'}, 1, 0); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 1, 'ERROR'); +like($r, qr/127.0.0.203:$p0/, 'ERROR 1'); +update_name({ERROR => ''}, 1, 0); + +# 2 equal SRV RR + +update_name({A => '127.0.0.201', + SRV => "1 5 $p0 example.net;1 5 $p0 example.net"}); +$r = http_get('/'); +is(@n = $r =~ /:$p0/g, 2, 'SRV same'); +like($r, qr/127.0.0.201:$p0, 127.0.0.201:$p0/, 'SRV same peers'); + +# all equal records removed + +update_name(); +$r = http_get('/'); +is(@n = $r =~ /:($p0|$p2|$p3)/g, 0, 'SRV same removed'); + +# 2 different SRV RR + +update_name({A => '127.0.0.201', + SRV => "1 5 $p2 example.net;2 6 $p3 alias.example.net"}, 1, 2); +$r = http_get('/'); +is(@n = $r =~ /:($p2|$p3)/g, 2, 'SRV diff'); +like($r, qr/127.0.0.201:$p2/, 'SRV diff 1'); +like($r, qr/127.0.0.203:$p3/, 'SRV diff 2'); + +# all different records removed + +update_name(); +$r = http_get('/'); +is(@n = $r =~ /:($p0|$p2|$p3)/g, 0, 'SRV diff removed'); + +############################################################################### + +sub update_name { + my ($name, $plan, $plan6) = @_; + + $plan = 1, $plan6 = 0 if !defined $name; + $plan = $plan6 = 1 if !defined $plan; + $plan += $plan6 + $plan6; + 
+ sub sock { + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . port(8084) + ) + or die "Can't connect to nginx: $!\n"; + } + + $name->{A} = '' unless $name->{A}; + $name->{AAAA} = '' unless $name->{AAAA}; + $name->{CNAME} = '' unless $name->{CNAME}; + $name->{ERROR} = '' unless $name->{ERROR}; + $name->{SERROR} = '' unless $name->{SERROR}; + $name->{SRV} = '' unless $name->{SRV}; + + my $req =<{A} +X-AAAA: $name->{AAAA} +X-CNAME: $name->{CNAME} +X-ERROR: $name->{ERROR} +X-SERROR: $name->{SERROR} +X-SRV: $name->{SRV} + +EOF + + my ($gen) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + for (1 .. 10) { + my ($gen2) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + + # let resolver cache expire to finish upstream reconfiguration + select undef, undef, undef, 0.5; + last unless ($gen + $plan > $gen2); + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $h, $cnt, $tcp) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant FORMERR => 1; + use constant SERVFAIL => 2; + use constant NXDOMAIN => 3; + + use constant A => 1; + use constant CNAME => 5; + use constant AAAA => 28; + use constant SRV => 33; + + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl, $port) = (0x8180, NOERROR, 3600, port(8080)); + $h = {A => [ "127.0.0.1" ], SRV => [ "1 5 $port example.net" ]} + unless defined $h; + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + my $name = join('.', @name); + + if ($h->{ERROR} && $type == SRV) { + $rcode = SERVFAIL; + goto bad; + } + + # subordinate error + + if ($h->{SERROR} && $type != SRV) { + $rcode = SERVFAIL; + goto bad; + } + + if ($name eq 
'_http._tcp.example.net') { + if ($type == SRV && $h->{SRV}) { + map { push @rdata, rd_srv($ttl, (split ' ', $_)) } + @{$h->{SRV}}; + } + + my $cname = defined $h->{CNAME} ? $h->{CNAME} : 0; + if ($cname) { + push @rdata, pack("n3N nCa5n", 0xc00c, CNAME, IN, $ttl, + 8, 5, "alias", 0xc00c + length("_http._tcp ")); + } + + } elsif ($name eq '_http._tcp.trunc.example.net' && $type == SRV) { + push @rdata, $tcp + ? rd_srv($ttl, 1, 1, $port, 'tcp.example.net') + : rd_srv($ttl, 1, 1, $port, 'example.net'); + + $hdr |= 0x0300 if $name eq '_http._tcp.trunc.example.net' + and !$tcp; + + } elsif ($name eq 'example.net' || $name eq 'tcp.example.net') { + if ($type == A && $h->{A}) { + map { push @rdata, rd_addr($ttl, $_) } @{$h->{A}}; + } + if ($type == AAAA && $h->{AAAA}) { + map { push @rdata, rd_addr6($ttl, $_) } @{$h->{AAAA}}; + } + my $cname = defined $h->{CNAME} ? $h->{CNAME} : 0; + if ($cname) { + push @rdata, pack("n3N nCa5n", 0xc00c, CNAME, IN, $ttl, + 8, 5, $cname, 0xc00c); + } + + } elsif ($name eq 'alias.example.net') { + if ($type == SRV) { + push @rdata, rd_srv($ttl, 1, 5, $port, 'example.net'); + } + if ($type == A) { + push @rdata, rd_addr($ttl, '127.0.0.203'); + } + } + +bad: + + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $$cnt++ if $type == SRV || keys %$h; + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . join('', @rdata); +} + +sub rd_srv { + my ($ttl, $pri, $w, $port, $name) = @_; + my @rdname = split /\./, $name; + my $rdlen = length(join '', @rdname) + @rdname + 7; # pri w port x + + pack 'n3N n n3 (C/a*)* x', + 0xc00c, SRV, IN, $ttl, $rdlen, $pri, $w, $port, @rdname; +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub expand_ip6 { + my ($addr) = @_; + + substr ($addr, index($addr, "::"), 2) = + join "0", map { ":" } (0 .. 
8 - (split /:/, $addr) + 1); + map { hex "0" x (4 - length $_) . "$_" } split /:/, $addr; +} + +sub rd_addr6 { + my ($ttl, $addr) = @_; + + pack 'n3N nn8', 0xc00c, AAAA, IN, $ttl, 16, expand_ip6($addr); +} + +sub dns_daemon { + my ($t) = @_; + my ($data, $recv_data, $h); + + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => port(8981), + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalHost => '127.0.0.1:' . port(8084), + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $tcp = port(8981, socket => 1); + my $sel = IO::Select->new($socket, $control, $tcp); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . port(8981); + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh || $tcp == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $h, \$cnt); + $fh->send($data); + + } elsif ($fh->sockport() == port(8084)) { + $h = process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + + } elsif ($fh->sockport() == port(8981)) { + $fh->recv($recv_data, 65536); + unless (length $recv_data) { + $sel->remove($fh); + $fh->close; + next; + } + +again: + my $len = unpack("n", $recv_data); + my $data = substr $recv_data, 2, $len; + $data = reply_handler($data, $h, \$cnt, 1); + $data = pack("n", length $data) . 
$data; + $fh->send($data); + $recv_data = substr $recv_data, 2 + $len; + goto again if length $recv_data; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + $headers =~ /X-A: (.*)$/m; + map { push @{$h{A}}, $_ } split(/ /, $1); + $headers =~ /X-AAAA: (.*)$/m; + map { push @{$h{AAAA}}, $_ } split(/ /, $1); + $headers =~ /X-SRV: (.*)$/m; + map { push @{$h{SRV}}, $_ } split(/;/, $1); + $headers =~ /X-CNAME: (.+)$/m and $h{CNAME} = $1; + $headers =~ /X-ERROR: (.+)$/m and $h{ERROR} = $1; + $headers =~ /X-SERROR: (.+)$/m and $h{SERROR} = $1; + + Test::Nginx::log_core('||', "$port: response, 200"); + print $client < 'win32') if $^O eq 'MSWin32'; + +my $t = Test::Nginx->new()->has(qw/http upstream_zone/); + +$t->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +events { +} + +http { + %%TEST_GLOBALS_HTTP%% + + upstream u { + zone z 1m; + server example.net resolve service=http; + } + + # lower the retry timeout after empty reply + resolver 127.0.0.1:%%PORT_8980_UDP%% valid=1s; + # retry query shortly after DNS is started + resolver_timeout 1s; + + server { + listen 127.0.0.1:8080; + server_name localhost; + + location / { + proxy_pass http://u; + proxy_connect_timeout 50ms; + add_header X-IP $upstream_addr always; + } + } +} + +EOF + +port(8081); + +$t->run_daemon(\&dns_daemon, $t)->waitforfile($t->testdir . '/' . 
port(8980)); +$t->try_run('no resolve in upstream server')->plan(6); + +############################################################################### + +update_name({A => '127.0.0.201', SRV => "1 5 42 example.net"}); +like(http_get('/'), qr/X-IP: 127.0.0.201:42/, 'reload - before - request'); +like(http_get('/'), qr/X-IP: 127.0.0.201:42/, 'reload - before - request 2'); + +update_name({ERROR => 'SERVFAIL'}, 0); + +$t->reload(); +waitforworker($t); + +like(http_get('/'), qr/X-IP: 127.0.0.201:42/, 'reload - preresolve - request'); +like(http_get('/'), qr/X-IP: 127.0.0.201:42/, 'reload - preresolve - request 2'); + +update_name({A => '127.0.0.202', SRV => "1 5 42 example.net"}); +like(http_get('/'), qr/X-IP: 127.0.0.202:42/, 'reload - update - request'); +like(http_get('/'), qr/X-IP: 127.0.0.202:42/, 'reload - update - request 2'); + +############################################################################### + +sub waitforworker { + my ($t) = @_; + + for (1 .. 30) { + last if $t->read_file('error.log') =~ /exited with code/; + select undef, undef, undef, 0.2; + } +} + +sub update_name { + my ($name, $plan) = @_; + + $plan = 3 if !defined $plan; + + sub sock { + IO::Socket::INET->new( + Proto => 'tcp', + PeerAddr => '127.0.0.1:' . port(8081) + ) + or die "Can't connect to nginx: $!\n"; + } + + $name->{A} = '' unless $name->{A}; + $name->{ERROR} = '' unless $name->{ERROR}; + $name->{SRV} = '' unless $name->{SRV}; + + my $req =<{A} +X-ERROR: $name->{ERROR} +X-SRV: $name->{SRV} + +EOF + + my ($gen) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + for (1 .. 
10) { + my ($gen2) = http($req, socket => sock()) =~ /X-Gen: (\d+)/; + + # let resolver cache expire to finish upstream reconfiguration + select undef, undef, undef, 0.5; + last unless ($gen + $plan > $gen2); + } +} + +############################################################################### + +sub reply_handler { + my ($recv_data, $h) = @_; + + my (@name, @rdata); + + use constant NOERROR => 0; + use constant SERVFAIL => 2; + use constant NXDOMAIN => 3; + + use constant A => 1; + use constant SRV => 33; + use constant IN => 1; + + # default values + + my ($hdr, $rcode, $ttl, $port) = (0x8180, NOERROR, 3600, port(8080)); + $h = {A => [ "127.0.0.201" ], SRV => [ "1 5 $port example.net" ]} + unless defined $h; + + # decode name + + my ($len, $offset) = (undef, 12); + while (1) { + $len = unpack("\@$offset C", $recv_data); + last if $len == 0; + $offset++; + push @name, unpack("\@$offset A$len", $recv_data); + $offset += $len; + } + + $offset -= 1; + my ($id, $type, $class) = unpack("n x$offset n2", $recv_data); + my $name = join('.', @name); + + if ($h->{ERROR}) { + $rcode = SERVFAIL; + goto bad; + } + + if ($name eq 'example.net' && $type == A && $h->{A}) { + map { push @rdata, rd_addr($ttl, $_) } @{$h->{A}}; + + } + if ($name eq '_http._tcp.example.net' && $type == SRV && $h->{SRV}) { + map { push @rdata, rd_srv($ttl, (split ' ', $_)) } + @{$h->{SRV}}; + } + +bad: + + Test::Nginx::log_core('||', "DNS: $name $type $rcode"); + + $len = @name; + pack("n6 (C/a*)$len x n2", $id, $hdr | $rcode, 1, scalar @rdata, + 0, 0, @name, $type, $class) . 
join('', @rdata); +} + +sub rd_srv { + my ($ttl, $pri, $w, $port, $name) = @_; + my @rdname = split /\./, $name; + my $rdlen = length(join '', @rdname) + @rdname + 7; # pri w port x + + pack 'n3N n n3 (C/a*)* x', + 0xc00c, SRV, IN, $ttl, $rdlen, $pri, $w, $port, @rdname; +} + +sub rd_addr { + my ($ttl, $addr) = @_; + + my $code = 'split(/\./, $addr)'; + + pack 'n3N nC4', 0xc00c, A, IN, $ttl, eval "scalar $code", eval($code); +} + +sub dns_daemon { + my ($t) = @_; + my ($data, $recv_data, $h); + + my $socket = IO::Socket::INET->new( + LocalAddr => '127.0.0.1', + LocalPort => port(8980), + Proto => 'udp', + ) + or die "Can't create listening socket: $!\n"; + + my $control = IO::Socket::INET->new( + Proto => 'tcp', + LocalHost => "127.0.0.1:" . port(8081), + Listen => 5, + Reuse => 1 + ) + or die "Can't create listening socket: $!\n"; + + my $sel = IO::Select->new($socket, $control); + + local $SIG{PIPE} = 'IGNORE'; + + # signal we are ready + + open my $fh, '>', $t->testdir() . '/' . port(8980); + close $fh; + my $cnt = 0; + + while (my @ready = $sel->can_read) { + foreach my $fh (@ready) { + if ($control == $fh) { + my $new = $fh->accept; + $new->autoflush(1); + $sel->add($new); + + } elsif ($socket == $fh) { + $fh->recv($recv_data, 65536); + $data = reply_handler($recv_data, $h); + $fh->send($data); + $cnt++; + + } else { + $h = process_name($fh, $cnt); + $sel->remove($fh); + $fh->close; + } + } + } +} + +# parse dns update + +sub process_name { + my ($client, $cnt) = @_; + my $port = $client->sockport(); + + my $headers = ''; + my $uri = ''; + my %h; + + while (<$client>) { + $headers .= $_; + last if (/^\x0d?\x0a?$/); + } + return 1 if $headers eq ''; + + $uri = $1 if $headers =~ /^\S+\s+([^ ]+)\s+HTTP/i; + return 1 if $uri eq ''; + + $headers =~ /X-A: (.*)$/m; + map { push @{$h{A}}, $_ } split(/ /, $1); + $headers =~ /X-SRV: (.*)$/m; + map { push @{$h{SRV}}, $_ } split(/;/, $1); + $headers =~ /X-ERROR: (.*)$/m; + $h{ERROR} = $1; + + Test::Nginx::log_core('||', 
"$port: response, 200"); + print $client < details: https://github.com/nginx/njs/commit/15c66f2a23a42143d43e6ffedfc33a1128428ccb branches: master commit: 15c66f2a23a42143d43e6ffedfc33a1128428ccb user: Dmitry Volyntsev date: Thu, 18 Jul 2024 18:01:57 -0700 description: Version bump. --- src/njs.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/njs.h b/src/njs.h index 2e16ce4e..8950a0db 100644 --- a/src/njs.h +++ b/src/njs.h @@ -11,8 +11,8 @@ #include -#define NJS_VERSION "0.8.5" -#define NJS_VERSION_NUMBER 0x000805 +#define NJS_VERSION "0.8.6" +#define NJS_VERSION_NUMBER 0x000806 #include From noreply at nginx.com Fri Jul 19 01:44:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 19 Jul 2024 01:44:02 +0000 (UTC) Subject: [njs] Tests: ignoring subrequest execution order in js_subrequest.t. Message-ID: <20240719014402.D455C46C92@pubserv1.nginx> details: https://github.com/nginx/njs/commit/2197bf316372232f8305b71c9dde8f95a0b7b486 branches: master commit: 2197bf316372232f8305b71c9dde8f95a0b7b486 user: Dmitry Volyntsev date: Thu, 18 Jul 2024 16:24:04 -0700 description: Tests: ignoring subrequest execution order in js_subrequest.t. --- nginx/t/js_subrequests.t | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/nginx/t/js_subrequests.t b/nginx/t/js_subrequests.t index 4a58d6f3..d38573ba 100644 --- a/nginx/t/js_subrequests.t +++ b/nginx/t/js_subrequests.t @@ -280,7 +280,7 @@ $t->write_file('test.js', <write_file('test.js', <write_file('test.js', < {rep[p] = reply[p]}); replies.push(rep); if (replies.length == total) { + replies.sort((a, b) => a.uri < b.uri ? -1 : 1); reply.parent.return(200, JSON.stringify(replies)); } } @@ -546,9 +548,9 @@ is(get_json('/sr_js_in_subrequest'), '["JS-SUB"]', 'sr_js_in_subrequest'); is(get_json('/sr_unavail'), '[{"status":502,"uri":"/unavail"}]', 'sr_unavail'); is(get_json('/sr_out_of_order'), - '[{"status":404,"uri":"/unknown"},' . 
+ '[{"status":200,"uri":"/p/delayed"},' . '{"status":206,"uri":"/p/sub1"},' . - '{"status":200,"uri":"/p/delayed"}]', + '{"status":404,"uri":"/unknown"}]', 'sr_multi'); is(get_json('/sr_pr'), '{"h":"xxx"}', 'sr_promise'); From noreply at nginx.com Fri Jul 19 01:44:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 19 Jul 2024 01:44:02 +0000 (UTC) Subject: [njs] Tests: removed njs specific code from js_headers.t. Message-ID: <20240719014402.D86D046C94@pubserv1.nginx> details: https://github.com/nginx/njs/commit/22b2300d9c1b86e29894fba5e32eaa73e678d11e branches: master commit: 22b2300d9c1b86e29894fba5e32eaa73e678d11e user: Dmitry Volyntsev date: Wed, 17 Jul 2024 22:07:12 -0700 description: Tests: removed njs specific code from js_headers.t. --- nginx/t/js_headers.t | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nginx/t/js_headers.t b/nginx/t/js_headers.t index 787b5520..2cb8c660 100644 --- a/nginx/t/js_headers.t +++ b/nginx/t/js_headers.t @@ -388,12 +388,12 @@ $t->write_file('test.js', <write_file('test.js', < details: https://github.com/nginx/njs/commit/e45429fe88d3c9c3a61fa6c122157147138bb516 branches: master commit: e45429fe88d3c9c3a61fa6c122157147138bb516 user: Dmitry Volyntsev date: Tue, 16 Jul 2024 22:02:06 -0700 description: HTTP: making ngx_http_js_header_t handler type generic. So it can be reused by QuickJS code. 
--- nginx/ngx_http_js_module.c | 130 ++++++++++++++++++++++++--------------------- 1 file changed, 68 insertions(+), 62 deletions(-) diff --git a/nginx/ngx_http_js_module.c b/nginx/ngx_http_js_module.c index 0c8a3b11..f2dbffbc 100644 --- a/nginx/ngx_http_js_module.c +++ b/nginx/ngx_http_js_module.c @@ -71,27 +71,20 @@ typedef struct { typedef struct { - ngx_http_request_t *request; - njs_opaque_value_t callbacks[2]; -} ngx_http_js_cb_t; - - -typedef struct { - njs_str_t name; -#if defined(nginx_version) && (nginx_version >= 1023000) + ngx_str_t name; unsigned flags; - njs_int_t (*handler)(njs_vm_t *vm, ngx_http_request_t *r, - unsigned flags, njs_str_t *name, - njs_value_t *setval, njs_value_t *retval); -#else - njs_int_t (*handler)(njs_vm_t *vm, ngx_http_request_t *r, - ngx_list_t *headers, njs_str_t *name, - njs_value_t *setval, njs_value_t *retval); - -#endif + uintptr_t handler; } ngx_http_js_header_t; +typedef njs_int_t (*njs_http_js_header_handler_t)(njs_vm_t *vm, + ngx_http_request_t *r, unsigned flags, njs_str_t *name, njs_value_t *setval, + njs_value_t *retval); +typedef njs_int_t (*njs_http_js_header_handler122_t)(njs_vm_t *vm, + ngx_http_request_t *r, ngx_list_t *headers, njs_str_t *name, + njs_value_t *setval, njs_value_t *retval); + + static ngx_int_t ngx_http_js_content_handler(ngx_http_request_t *r); static void ngx_http_js_content_event_handler(ngx_http_request_t *r); static void ngx_http_js_content_write_event_handler(ngx_http_request_t *r); @@ -1613,33 +1606,41 @@ ngx_http_js_ext_header_out(njs_vm_t *vm, njs_object_prop_t *prop, static ngx_http_js_header_t headers_out[] = { #if defined(nginx_version) && (nginx_version < 1023000) - { njs_str("Age"), ngx_http_js_header_single }, - { njs_str("Content-Type"), ngx_http_js_content_type122 }, - { njs_str("Content-Length"), ngx_http_js_content_length122 }, - { njs_str("Content-Encoding"), ngx_http_js_content_encoding122 }, - { njs_str("Date"), ngx_http_js_date122 }, - { njs_str("Etag"), 
ngx_http_js_header_single }, - { njs_str("Expires"), ngx_http_js_header_single }, - { njs_str("Last-Modified"), ngx_http_js_last_modified122 }, - { njs_str("Location"), ngx_http_js_location122 }, - { njs_str("Server"), ngx_http_js_server122 }, - { njs_str("Set-Cookie"), ngx_http_js_header_array }, - { njs_str("Retry-After"), ngx_http_js_header_single }, - { njs_str(""), ngx_http_js_header_generic }, + +#define header(name, h) { njs_str(name), 0, (uintptr_t) h } + header("Age", ngx_http_js_header_single), + header("Content-Type", ngx_http_js_content_type122), + header("Content-Length", ngx_http_js_content_length122), + header("Content-Encoding", ngx_http_js_content_encoding122), + header("Date", ngx_http_js_date122), + header("Etag", ngx_http_js_header_single), + header("Expires", ngx_http_js_header_single), + header("Last-Modified", ngx_http_js_last_modified122), + header("Location", ngx_http_js_location122), + header("Server", ngx_http_js_server122), + header("Set-Cookie", ngx_http_js_header_array), + header("Retry-After", ngx_http_js_header_single), + header("", ngx_http_js_header_generic), +#undef header + #else - { njs_str("Age"), NJS_HEADER_SINGLE, ngx_http_js_header_out }, - { njs_str("Content-Encoding"), 0, ngx_http_js_content_encoding }, - { njs_str("Content-Length"), 0, ngx_http_js_content_length }, - { njs_str("Content-Type"), 0, ngx_http_js_content_type }, - { njs_str("Date"), 0, ngx_http_js_date }, - { njs_str("Etag"), NJS_HEADER_SINGLE, ngx_http_js_header_out }, - { njs_str("Expires"), NJS_HEADER_SINGLE, ngx_http_js_header_out }, - { njs_str("Last-Modified"), 0, ngx_http_js_last_modified }, - { njs_str("Location"), 0, ngx_http_js_location }, - { njs_str("Server"), 0, ngx_http_js_server }, - { njs_str("Set-Cookie"), NJS_HEADER_ARRAY, ngx_http_js_header_out }, - { njs_str("Retry-After"), NJS_HEADER_SINGLE, ngx_http_js_header_out }, - { njs_str(""), 0, ngx_http_js_header_out }, + +#define header(name, fl, h) { njs_str(name), fl, (uintptr_t) h } + 
header("Age", NJS_HEADER_SINGLE, ngx_http_js_header_out), + header("Content-Encoding", 0, ngx_http_js_content_encoding), + header("Content-Length", 0, ngx_http_js_content_length), + header("Content-Type", 0, ngx_http_js_content_type), + header("Date", 0, ngx_http_js_date), + header("Etag", NJS_HEADER_SINGLE, ngx_http_js_header_out), + header("Expires", NJS_HEADER_SINGLE, ngx_http_js_header_out), + header("Last-Modified", 0, ngx_http_js_last_modified), + header("Location", 0, ngx_http_js_location), + header("Server", 0, ngx_http_js_server), + header("Set-Cookie", NJS_HEADER_ARRAY, ngx_http_js_header_out), + header("Retry-After", NJS_HEADER_SINGLE, ngx_http_js_header_out), + header("", 0, ngx_http_js_header_out), +#undef header + #endif }; @@ -1667,18 +1668,20 @@ ngx_http_js_ext_header_out(njs_vm_t *vm, njs_object_prop_t *prop, " headers were already sent", &name); } - for (h = headers_out; h->name.length > 0; h++) { - if (h->name.length == name.length - && ngx_strncasecmp(h->name.start, name.start, name.length) == 0) + for (h = headers_out; h->name.len > 0; h++) { + if (h->name.len == name.length + && ngx_strncasecmp(h->name.data, name.start, name.length) == 0) { break; } } #if defined(nginx_version) && (nginx_version < 1023000) - return h->handler(vm, r, &r->headers_out.headers, &name, setval, retval); + return ((njs_http_js_header_handler122_t) h->handler)(vm, r, + &r->headers_out.headers, &name, setval, retval); #else - return h->handler(vm, r, h->flags, &name, setval, retval); + return ((njs_http_js_header_handler_t) h->handler)(vm, r, h->flags, &name, + setval, retval); #endif } @@ -2746,18 +2749,20 @@ ngx_http_js_ext_header_in(njs_vm_t *vm, njs_object_prop_t *prop, ngx_http_js_header_t *h; static ngx_http_js_header_t headers_in[] = { - { njs_str("Content-Type"), ngx_http_js_header_single }, - { njs_str("Cookie"), ngx_http_js_header_cookie }, - { njs_str("ETag"), ngx_http_js_header_single }, - { njs_str("From"), ngx_http_js_header_single }, - { 
njs_str("Max-Forwards"), ngx_http_js_header_single }, - { njs_str("Referer"), ngx_http_js_header_single }, - { njs_str("Proxy-Authorization"), ngx_http_js_header_single }, - { njs_str("User-Agent"), ngx_http_js_header_single }, +#define header(name, h) { njs_str(name), 0, (uintptr_t) h } + header("Content-Type", ngx_http_js_header_single), + header("Cookie", ngx_http_js_header_cookie), + header("ETag", ngx_http_js_header_single), + header("From", ngx_http_js_header_single), + header("Max-Forwards", ngx_http_js_header_single), + header("Referer", ngx_http_js_header_single), + header("Proxy-Authorization", ngx_http_js_header_single), + header("User-Agent", ngx_http_js_header_single), #if (NGX_HTTP_X_FORWARDED_FOR) - { njs_str("X-Forwarded-For"), ngx_http_js_header_x_forwarded_for }, + header("X-Forwarded-For", ngx_http_js_header_x_forwarded_for), #endif - { njs_str(""), ngx_http_js_header_generic }, + header("", ngx_http_js_header_generic), +#undef header }; r = njs_vm_external(vm, ngx_http_js_request_proto_id, value); @@ -2778,15 +2783,16 @@ ngx_http_js_ext_header_in(njs_vm_t *vm, njs_object_prop_t *prop, return NJS_DECLINED; } - for (h = headers_in; h->name.length > 0; h++) { - if (h->name.length == name.length - && ngx_strncasecmp(h->name.start, name.start, name.length) == 0) + for (h = headers_in; h->name.len > 0; h++) { + if (h->name.len == name.length + && ngx_strncasecmp(h->name.data, name.start, name.length) == 0) { break; } } - return h->handler(vm, r, &r->headers_in.headers, &name, setval, retval); + return ((njs_http_js_header_handler122_t) h->handler)(vm, r, + &r->headers_in.headers, &name, setval, retval); } From noreply at nginx.com Fri Jul 19 01:44:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 19 Jul 2024 01:44:02 +0000 (UTC) Subject: [njs] HTTP: moving ngx_http_methods table out of subrequest() method. 
Message-ID: <20240719014402.E4B3646C96@pubserv1.nginx> details: https://github.com/nginx/njs/commit/62b800295f3aa8ec13a06b0c903d41f1e706f78f branches: master commit: 62b800295f3aa8ec13a06b0c903d41f1e706f78f user: Dmitry Volyntsev date: Thu, 18 Jul 2024 17:55:20 -0700 description: HTTP: moving ngx_http_methods table out of subrequest() method. So it can be reused by QuickJS code. --- nginx/ngx_http_js_module.c | 57 +++++++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/nginx/ngx_http_js_module.c b/nginx/ngx_http_js_module.c index f2dbffbc..a38d32cf 100644 --- a/nginx/ngx_http_js_module.c +++ b/nginx/ngx_http_js_module.c @@ -85,6 +85,12 @@ typedef njs_int_t (*njs_http_js_header_handler122_t)(njs_vm_t *vm, njs_value_t *setval, njs_value_t *retval); +typedef struct { + ngx_str_t name; + ngx_uint_t value; +} ngx_http_js_entry_t; + + static ngx_int_t ngx_http_js_content_handler(ngx_http_request_t *r); static void ngx_http_js_content_event_handler(ngx_http_request_t *r); static void ngx_http_js_content_write_event_handler(ngx_http_request_t *r); @@ -888,6 +894,25 @@ njs_module_t *njs_http_js_addon_modules[] = { }; +static ngx_http_js_entry_t ngx_http_methods[] = { + { ngx_string("GET"), NGX_HTTP_GET }, + { ngx_string("POST"), NGX_HTTP_POST }, + { ngx_string("HEAD"), NGX_HTTP_HEAD }, + { ngx_string("OPTIONS"), NGX_HTTP_OPTIONS }, + { ngx_string("PROPFIND"), NGX_HTTP_PROPFIND }, + { ngx_string("PUT"), NGX_HTTP_PUT }, + { ngx_string("MKCOL"), NGX_HTTP_MKCOL }, + { ngx_string("DELETE"), NGX_HTTP_DELETE }, + { ngx_string("COPY"), NGX_HTTP_COPY }, + { ngx_string("MOVE"), NGX_HTTP_MOVE }, + { ngx_string("PROPPATCH"), NGX_HTTP_PROPPATCH }, + { ngx_string("LOCK"), NGX_HTTP_LOCK }, + { ngx_string("UNLOCK"), NGX_HTTP_UNLOCK }, + { ngx_string("PATCH"), NGX_HTTP_PATCH }, + { ngx_string("TRACE"), NGX_HTTP_TRACE }, +}; + + static ngx_int_t ngx_http_js_content_handler(ngx_http_request_t *r) { @@ -3069,27 +3094,6 @@ 
ngx_http_js_ext_subrequest(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, ngx_http_request_body_t *rb; ngx_http_post_subrequest_t *ps; - static const struct { - ngx_str_t name; - ngx_uint_t value; - } methods[] = { - { ngx_string("GET"), NGX_HTTP_GET }, - { ngx_string("POST"), NGX_HTTP_POST }, - { ngx_string("HEAD"), NGX_HTTP_HEAD }, - { ngx_string("OPTIONS"), NGX_HTTP_OPTIONS }, - { ngx_string("PROPFIND"), NGX_HTTP_PROPFIND }, - { ngx_string("PUT"), NGX_HTTP_PUT }, - { ngx_string("MKCOL"), NGX_HTTP_MKCOL }, - { ngx_string("DELETE"), NGX_HTTP_DELETE }, - { ngx_string("COPY"), NGX_HTTP_COPY }, - { ngx_string("MOVE"), NGX_HTTP_MOVE }, - { ngx_string("PROPPATCH"), NGX_HTTP_PROPPATCH }, - { ngx_string("LOCK"), NGX_HTTP_LOCK }, - { ngx_string("UNLOCK"), NGX_HTTP_UNLOCK }, - { ngx_string("PATCH"), NGX_HTTP_PATCH }, - { ngx_string("TRACE"), NGX_HTTP_TRACE }, - }; - static const njs_str_t args_key = njs_str("args"); static const njs_str_t method_key = njs_str("method"); static const njs_str_t body_key = njs_str("body"); @@ -3124,7 +3128,7 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, callback = NULL; method = 0; - methods_max = sizeof(methods) / sizeof(methods[0]); + methods_max = sizeof(ngx_http_methods) / sizeof(ngx_http_methods[0]); args_arg.length = 0; args_arg.start = NULL; @@ -3172,8 +3176,9 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, } while (method < methods_max) { - if (method_name.length == methods[method].name.len - && ngx_memcmp(method_name.start, methods[method].name.data, + if (method_name.length == ngx_http_methods[method].name.len + && ngx_memcmp(method_name.start, + ngx_http_methods[method].name.data, method_name.length) == 0) { @@ -3283,8 +3288,8 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, } if (method != methods_max) { - sr->method = methods[method].value; - sr->method_name = methods[method].name; + sr->method = ngx_http_methods[method].value; 
+ sr->method_name = ngx_http_methods[method].name; } else { sr->method = NGX_HTTP_UNKNOWN; From thorvaldur.thorvaldsson at gmail.com Fri Jul 19 13:05:47 2024 From: thorvaldur.thorvaldsson at gmail.com (Thorvaldur Thorvaldsson) Date: Fri, 19 Jul 2024 13:05:47 +0000 Subject: [PATCH 0 of 7] Upstream: re-resolvable servers. In-Reply-To: <66995d5a.170a0220.40702.7ff6SMTPIN_ADDED_BROKEN@mx.google.com> References: <66995d5a.170a0220.40702.7ff6SMTPIN_ADDED_BROKEN@mx.google.com> Message-ID: Unsubscribe On Thu, 18 Jul 2024 at 21:22, Aleksei Bavshin wrote: > See > https://mailman.nginx.org/pipermail/nginx-devel/2024-June/TSMKAHLFU3X4OBKLPRW5B2PXGBTUEHPU.html > > v3 changes: > > - Addressed comments from Roman > - Removed the patch that disables re-resolve on Windows > - Disabled reload tests on Windows > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > https://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mini at nginx.com Tue Jul 23 19:30:24 2024 From: mini at nginx.com (=?iso-8859-1?q?Mini_Hawthorne?=) Date: Tue, 23 Jul 2024 19:30:24 +0000 Subject: [PATCH 1 of 6] SSL: moved certificate storage out of SSL_CTX exdata Message-ID: <59ac183dfee8e9641563.1721763024@linux> # HG changeset patch # User Mini Hawthorne # Date 1721762810 0 # Tue Jul 23 19:26:50 2024 +0000 # Node ID 59ac183dfee8e9641563e043eb19480d91dd7cc0 # Parent d1b8568f3042f6019a2302dda4afbadd051fe54b SSL: moved certificate storage out of SSL_CTX exdata. Certain SSL objects (certificates, certificate names, and OCSP staples) are now accessed with ngx_array_t and ngx_rbtree_t instead of cross-linking the objects using SSL_CTX exdata. This allows sharing these objects between SSL contexts. 
diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -131,10 +131,7 @@ int ngx_ssl_server_conf_index; int ngx_ssl_session_cache_index; int ngx_ssl_ticket_keys_index; int ngx_ssl_ocsp_index; -int ngx_ssl_certificate_index; -int ngx_ssl_next_certificate_index; -int ngx_ssl_certificate_name_index; -int ngx_ssl_stapling_index; +int ngx_ssl_index; ngx_int_t @@ -258,34 +255,11 @@ ngx_ssl_init(ngx_log_t *log) return NGX_ERROR; } - ngx_ssl_certificate_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, - NULL); - if (ngx_ssl_certificate_index == -1) { - ngx_ssl_error(NGX_LOG_ALERT, log, 0, - "SSL_CTX_get_ex_new_index() failed"); - return NGX_ERROR; - } - - ngx_ssl_next_certificate_index = X509_get_ex_new_index(0, NULL, NULL, NULL, - NULL); - if (ngx_ssl_next_certificate_index == -1) { - ngx_ssl_error(NGX_LOG_ALERT, log, 0, "X509_get_ex_new_index() failed"); - return NGX_ERROR; - } - - ngx_ssl_certificate_name_index = X509_get_ex_new_index(0, NULL, NULL, NULL, - NULL); - - if (ngx_ssl_certificate_name_index == -1) { - ngx_ssl_error(NGX_LOG_ALERT, log, 0, "X509_get_ex_new_index() failed"); - return NGX_ERROR; - } - - ngx_ssl_stapling_index = X509_get_ex_new_index(0, NULL, NULL, NULL, NULL); - - if (ngx_ssl_stapling_index == -1) { - ngx_ssl_error(NGX_LOG_ALERT, log, 0, "X509_get_ex_new_index() failed"); - return NGX_ERROR; + ngx_ssl_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, NULL); + if (ngx_ssl_index == -1) { + ngx_ssl_error(NGX_LOG_ALERT, log, 0, + "SSL_CTX_get_ex_new_index() failed"); + return NGX_ERROR; } return NGX_OK; @@ -308,12 +282,18 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_ return NGX_ERROR; } - if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_certificate_index, NULL) == 0) { + if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_index, ssl) == 0) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "SSL_CTX_set_ex_data() failed"); return NGX_ERROR; } + 
ngx_rbtree_init(&ssl->name_rbtree, &ssl->name_sentinel, + ngx_rbtree_insert_value); + + ngx_rbtree_init(&ssl->staple_rbtree, &ssl->staple_sentinel, + ngx_rbtree_insert_value); + ssl->buffer_size = NGX_SSL_BUFSIZE; /* client side options */ @@ -458,9 +438,10 @@ ngx_ssl_certificate(ngx_conf_t *cf, ngx_ ngx_str_t *key, ngx_array_t *passwords) { char *err; - X509 *x509; + X509 *x509, **elm; EVP_PKEY *pkey; STACK_OF(X509) *chain; + ngx_ssl_name_t *name; x509 = ngx_ssl_load_certificate(cf->pool, &err, cert, &chain); if (x509 == NULL) { @@ -481,42 +462,46 @@ ngx_ssl_certificate(ngx_conf_t *cf, ngx_ return NGX_ERROR; } - if (X509_set_ex_data(x509, ngx_ssl_certificate_name_index, cert->data) - == 0) - { - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "X509_set_ex_data() failed"); + name = ngx_pcalloc(cf->pool, sizeof(ngx_ssl_name_t)); + if (name == NULL) { X509_free(x509); sk_X509_pop_free(chain, X509_free); return NGX_ERROR; } - if (X509_set_ex_data(x509, ngx_ssl_next_certificate_index, - SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_certificate_index)) - == 0) - { - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "X509_set_ex_data() failed"); + if (ssl->certs.elts == NULL) { + if (ngx_array_init(&ssl->certs, cf->pool, 16, sizeof(X509 *)) + != NGX_OK) + { + X509_free(x509); + sk_X509_pop_free(chain, X509_free); + return NGX_ERROR; + } + } + + elm = ngx_array_push(&ssl->certs); + if (elm == NULL) { X509_free(x509); sk_X509_pop_free(chain, X509_free); return NGX_ERROR; } - if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_certificate_index, x509) == 0) { - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, - "SSL_CTX_set_ex_data() failed"); - X509_free(x509); - sk_X509_pop_free(chain, X509_free); - return NGX_ERROR; - } + name->node.key = (ngx_rbtree_key_t) x509; + name->name.len = cert->len; + name->name.data = cert->data; + + ngx_rbtree_insert(&ssl->name_rbtree, &name->node); /* * Note that x509 is not freed here, but will be instead freed in * ngx_ssl_cleanup_ctx(). 
This is because we need to preserve all - * certificates to be able to iterate all of them through exdata - * (ngx_ssl_certificate_index, ngx_ssl_next_certificate_index), + * certificates to be able to iterate all of them through ssl->certs, * while OpenSSL can free a certificate if it is replaced with another * certificate of the same type. */ + *elm = x509; + #ifdef SSL_CTX_set0_chain if (SSL_CTX_set0_chain(ssl->ctx, chain) == 0) { @@ -3820,10 +3805,9 @@ ngx_ssl_session_id_context(ngx_ssl_t *ss goto failed; } - for (cert = SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_certificate_index); - cert; - cert = X509_get_ex_data(cert, ngx_ssl_next_certificate_index)) - { + for (k = 0; k < ssl->certs.nelts; k++) { + cert = ((X509 **) ssl->certs.elts)[k]; + if (X509_digest(cert, EVP_sha1(), buf, &len) == 0) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "X509_digest() failed"); @@ -3837,9 +3821,7 @@ ngx_ssl_session_id_context(ngx_ssl_t *ss } } - if (SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_certificate_index) == NULL - && certificates != NULL) - { + if (ssl->certs.nelts == 0 && certificates != NULL) { /* * If certificates are loaded dynamically, we use certificate * names as specified in the configuration (with variables). 
@@ -4851,14 +4833,12 @@ ngx_ssl_cleanup_ctx(void *data) { ngx_ssl_t *ssl = data; - X509 *cert, *next; - - cert = SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_certificate_index); - - while (cert) { - next = X509_get_ex_data(cert, ngx_ssl_next_certificate_index); - X509_free(cert); - cert = next; + X509 *cert; + ngx_uint_t i; + + for (i = 0; i < ssl->certs.nelts; i++) { + cert = ((X509 **) ssl->certs.elts)[i]; + X509_free(cert); } SSL_CTX_free(ssl->ctx); diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -86,10 +86,24 @@ typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t; +typedef struct { + ngx_rbtree_node_t node; + ngx_str_t name; +} ngx_ssl_name_t; + + struct ngx_ssl_s { SSL_CTX *ctx; ngx_log_t *log; size_t buffer_size; + + ngx_array_t certs; + + ngx_rbtree_t name_rbtree; + ngx_rbtree_node_t name_sentinel; + + ngx_rbtree_t staple_rbtree; + ngx_rbtree_node_t staple_sentinel; }; @@ -330,10 +344,7 @@ extern int ngx_ssl_server_conf_index; extern int ngx_ssl_session_cache_index; extern int ngx_ssl_ticket_keys_index; extern int ngx_ssl_ocsp_index; -extern int ngx_ssl_certificate_index; -extern int ngx_ssl_next_certificate_index; -extern int ngx_ssl_certificate_name_index; -extern int ngx_ssl_stapling_index; +extern int ngx_ssl_index; #endif /* _NGX_EVENT_OPENSSL_H_INCLUDED_ */ diff --git a/src/event/ngx_event_openssl_stapling.c b/src/event/ngx_event_openssl_stapling.c --- a/src/event/ngx_event_openssl_stapling.c +++ b/src/event/ngx_event_openssl_stapling.c @@ -15,6 +15,8 @@ typedef struct { + ngx_rbtree_node_t node; + ngx_str_t staple; ngx_msec_t timeout; @@ -151,9 +153,13 @@ static ngx_int_t ngx_ssl_stapling_issuer ngx_ssl_stapling_t *staple); static ngx_int_t ngx_ssl_stapling_responder(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_ssl_stapling_t *staple, ngx_str_t *responder); +static ngx_ssl_name_t *ngx_ssl_stapling_lookup_name(ngx_rbtree_t *rbtree, + X509 *cert); static int 
ngx_ssl_certificate_status_callback(ngx_ssl_conn_t *ssl_conn, void *data); +static ngx_ssl_stapling_t *ngx_ssl_stapling_lookup_staple(ngx_rbtree_t *rbtree, + X509 *cert); static void ngx_ssl_stapling_update(ngx_ssl_stapling_t *staple); static void ngx_ssl_stapling_ocsp_handler(ngx_ssl_ocsp_ctx_t *ctx); @@ -195,12 +201,12 @@ ngx_int_t ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file, ngx_str_t *responder, ngx_uint_t verify) { - X509 *cert; - - for (cert = SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_certificate_index); - cert; - cert = X509_get_ex_data(cert, ngx_ssl_next_certificate_index)) - { + X509 *cert; + ngx_uint_t k; + + for (k = 0; k < ssl->certs.nelts; k++) { + cert = ((X509 **) ssl->certs.elts)[k]; + if (ngx_ssl_stapling_certificate(cf, ssl, cert, file, responder, verify) != NGX_OK) { @@ -219,6 +225,7 @@ ngx_ssl_stapling_certificate(ngx_conf_t ngx_str_t *file, ngx_str_t *responder, ngx_uint_t verify) { ngx_int_t rc; + ngx_ssl_name_t *name; ngx_pool_cleanup_t *cln; ngx_ssl_stapling_t *staple; @@ -235,10 +242,8 @@ ngx_ssl_stapling_certificate(ngx_conf_t cln->handler = ngx_ssl_stapling_cleanup; cln->data = staple; - if (X509_set_ex_data(cert, ngx_ssl_stapling_index, staple) == 0) { - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "X509_set_ex_data() failed"); - return NGX_ERROR; - } + staple->node.key = (ngx_rbtree_key_t) cert; + ngx_rbtree_insert(&ssl->staple_rbtree, &staple->node); #ifdef SSL_CTRL_SELECT_CURRENT_CERT /* OpenSSL 1.0.2+ */ @@ -256,8 +261,13 @@ ngx_ssl_stapling_certificate(ngx_conf_t staple->timeout = 60000; staple->verify = verify; staple->cert = cert; - staple->name = X509_get_ex_data(staple->cert, - ngx_ssl_certificate_name_index); + + name = ngx_ssl_stapling_lookup_name(&ssl->name_rbtree, cert); + if (name == NULL) { + return NGX_ERROR; + } + + staple->name = name->name.data; if (file->len) { /* use OCSP response from the file */ @@ -541,18 +551,52 @@ ngx_ssl_stapling_responder(ngx_conf_t *c } +static ngx_ssl_name_t * 
+ngx_ssl_stapling_lookup_name(ngx_rbtree_t *rbtree, X509 *cert) +{ + ngx_ssl_name_t *n; + ngx_rbtree_key_t key; + ngx_rbtree_node_t *node, *sentinel; + + node = rbtree->root; + sentinel = rbtree->sentinel; + key = (ngx_rbtree_key_t) cert; + + while (node != sentinel) { + + n = ngx_rbtree_data(node, ngx_ssl_name_t, node); + + if (key != node->key) { + node = (key < node->key) ? node->left : node->right; + continue; + } + + return n; + } + + return NULL; +} + + ngx_int_t ngx_ssl_stapling_resolver(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_resolver_t *resolver, ngx_msec_t resolver_timeout) { - X509 *cert; + ngx_rbtree_t *tree; + ngx_rbtree_node_t *node; ngx_ssl_stapling_t *staple; - for (cert = SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_certificate_index); - cert; - cert = X509_get_ex_data(cert, ngx_ssl_next_certificate_index)) + tree = &ssl->staple_rbtree; + + if (tree->root == tree->sentinel) { + return NGX_OK; + } + + for (node = ngx_rbtree_min(tree->root, tree->sentinel); + node; + node = ngx_rbtree_next(tree, node)) { - staple = X509_get_ex_data(cert, ngx_ssl_stapling_index); + staple = (ngx_ssl_stapling_t *) node; staple->resolver = resolver; staple->resolver_timeout = resolver_timeout; } @@ -567,6 +611,8 @@ ngx_ssl_certificate_status_callback(ngx_ int rc; X509 *cert; u_char *p; + SSL_CTX *ssl_ctx; + ngx_ssl_t *ssl; ngx_connection_t *c; ngx_ssl_stapling_t *staple; @@ -583,7 +629,19 @@ ngx_ssl_certificate_status_callback(ngx_ return rc; } - staple = X509_get_ex_data(cert, ngx_ssl_stapling_index); + ssl_ctx = SSL_get_SSL_CTX(ssl_conn); + + if (ssl_ctx == NULL) { + return rc; + } + + ssl = SSL_CTX_get_ex_data(ssl_ctx, ngx_ssl_index); + + if (ssl == NULL) { + return rc; + } + + staple = ngx_ssl_stapling_lookup_staple(&ssl->staple_rbtree, cert); if (staple == NULL) { return rc; @@ -613,6 +671,33 @@ ngx_ssl_certificate_status_callback(ngx_ } +static ngx_ssl_stapling_t * +ngx_ssl_stapling_lookup_staple(ngx_rbtree_t *rbtree, X509 *cert) +{ + ngx_rbtree_key_t key; + 
ngx_rbtree_node_t *node, *sentinel; + ngx_ssl_stapling_t *n; + + node = rbtree->root; + sentinel = rbtree->sentinel; + key = (ngx_rbtree_key_t) cert; + + while (node != sentinel) { + + n = ngx_rbtree_data(node, ngx_ssl_stapling_t, node); + + if (key != node->key) { + node = (key < node->key) ? node->left : node->right; + continue; + } + + return n; + } + + return NULL; +} + + static void ngx_ssl_stapling_update(ngx_ssl_stapling_t *staple) { From mini at nginx.com Tue Jul 23 19:30:25 2024 From: mini at nginx.com (=?iso-8859-1?q?Mini_Hawthorne?=) Date: Tue, 23 Jul 2024 19:30:25 +0000 Subject: [PATCH 2 of 6] SSL: object caching In-Reply-To: <59ac183dfee8e9641563.1721763024@linux> References: <59ac183dfee8e9641563.1721763024@linux> Message-ID: <8eee61e223bb7cb7475e.1721763025@linux> # HG changeset patch # User Mini Hawthorne # Date 1721762842 0 # Tue Jul 23 19:27:22 2024 +0000 # Node ID 8eee61e223bb7cb7475e50b866fd6b9a83fa5fa0 # Parent 59ac183dfee8e9641563e043eb19480d91dd7cc0 SSL: object caching. Added ngx_openssl_cache_module, which indexes a type-aware object cache. It maps an id to a unique instance, and provides references to it, which are dropped when the cycle's pool is destroyed. Also, for those objects that can be cached, valid references may be pulled from cycle->old_cycle. The cache will be used in subsequent patches. 
diff --git a/auto/modules b/auto/modules --- a/auto/modules +++ b/auto/modules @@ -1307,10 +1307,11 @@ fi if [ $USE_OPENSSL = YES ]; then ngx_module_type=CORE - ngx_module_name=ngx_openssl_module + ngx_module_name="ngx_openssl_module ngx_openssl_cache_module" ngx_module_incs= ngx_module_deps=src/event/ngx_event_openssl.h ngx_module_srcs="src/event/ngx_event_openssl.c + src/event/ngx_event_openssl_cache.c src/event/ngx_event_openssl_stapling.c" ngx_module_libs= ngx_module_link=YES diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -83,7 +83,8 @@ #endif -typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t; +typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t; +typedef struct ngx_ssl_cache_type_s ngx_ssl_cache_type_t; typedef struct { @@ -233,6 +234,9 @@ ngx_int_t ngx_ssl_ocsp_get_status(ngx_co void ngx_ssl_ocsp_cleanup(ngx_connection_t *c); ngx_int_t ngx_ssl_ocsp_cache_init(ngx_shm_zone_t *shm_zone, void *data); +void *ngx_ssl_cache_fetch(ngx_cycle_t *cycle, ngx_pool_t *pool, + ngx_ssl_cache_type_t *type, char **err, ngx_str_t *id, void *data); + ngx_array_t *ngx_ssl_read_password_file(ngx_conf_t *cf, ngx_str_t *file); ngx_array_t *ngx_ssl_preserve_passwords(ngx_conf_t *cf, ngx_array_t *passwords); diff --git a/src/event/ngx_event_openssl_cache.c b/src/event/ngx_event_openssl_cache.c new file mode 100644 --- /dev/null +++ b/src/event/ngx_event_openssl_cache.c @@ -0,0 +1,277 @@ + +/* + * Copyright (C) Nginx, Inc. 
+ */ + + +#include +#include +#include + + +typedef struct { + ngx_rbtree_node_t node; + ngx_str_t id; + + ngx_ssl_cache_type_t *type; + void *value; +} ngx_ssl_cache_node_t; + + +typedef void *(*ngx_ssl_cache_create_pt)(ngx_str_t *id, char **err, void *data); +typedef void (*ngx_ssl_cache_free_pt)(void *data); +typedef void *(*ngx_ssl_cache_ref_pt)(char **err, void *data); + + +struct ngx_ssl_cache_type_s { + const char *name; + + ngx_ssl_cache_create_pt create; + ngx_ssl_cache_free_pt free; + ngx_ssl_cache_ref_pt ref; +}; + + +typedef struct { + ngx_rbtree_t rbtree; + ngx_rbtree_node_t sentinel; +} ngx_ssl_cache_t; + + +static void *ngx_openssl_cache_create_conf(ngx_cycle_t *cycle); +static void ngx_ssl_cache_cleanup(void *data); +static void ngx_ssl_cache_node_insert(ngx_rbtree_node_t *temp, + ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel); + +static ngx_ssl_cache_node_t *ngx_ssl_cache_lookup(ngx_ssl_cache_t *cache, + ngx_ssl_cache_type_t *type, ngx_str_t *id, uint32_t hash); + + +static ngx_core_module_t ngx_openssl_cache_module_ctx = { + ngx_string("openssl_cache"), + ngx_openssl_cache_create_conf, + NULL +}; + + +ngx_module_t ngx_openssl_cache_module = { + NGX_MODULE_V1, + &ngx_openssl_cache_module_ctx, /* module context */ + NULL, /* module directives */ + NGX_CORE_MODULE, /* module type */ + NULL, /* init master */ + NULL, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + + +static void * +ngx_openssl_cache_create_conf(ngx_cycle_t *cycle) +{ + ngx_ssl_cache_t *cache; + ngx_pool_cleanup_t *cln; + + cache = ngx_pcalloc(cycle->pool, sizeof(ngx_ssl_cache_t)); + if (cache == NULL) { + return NULL; + } + + cln = ngx_pool_cleanup_add(cycle->pool, 0); + if (cln == NULL) { + return NULL; + } + + cln->handler = ngx_ssl_cache_cleanup; + cln->data = cache; + + ngx_rbtree_init(&cache->rbtree, &cache->sentinel, + 
ngx_ssl_cache_node_insert); + + return cache; +} + + +static void +ngx_ssl_cache_cleanup(void *data) +{ + ngx_ssl_cache_t *cache = data; + + ngx_rbtree_t *tree; + ngx_rbtree_node_t *node; + ngx_ssl_cache_node_t *cn; + + tree = &cache->rbtree; + + if (tree->root == tree->sentinel) { + return; + } + + for (node = ngx_rbtree_min(tree->root, tree->sentinel); + node; + node = ngx_rbtree_next(tree, node)) + { + cn = ngx_rbtree_data(node, ngx_ssl_cache_node_t, node); + + if (cn->type != NULL && cn->value != NULL) { + cn->type->free(cn->value); + } + } +} + + +static void +ngx_ssl_cache_node_insert(ngx_rbtree_node_t *temp, + ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel) +{ + ngx_rbtree_node_t **p; + ngx_ssl_cache_node_t *n, *t; + + for ( ;; ) { + + n = ngx_rbtree_data(node, ngx_ssl_cache_node_t, node); + t = ngx_rbtree_data(temp, ngx_ssl_cache_node_t, node); + + if (node->key != temp->key) { + + p = (node->key < temp->key) ? &temp->left : &temp->right; + + } else if (n->type != t->type) { + + p = (n->type < t->type) ? &temp->left : &temp->right; + + } else { + + p = (ngx_memn2cmp(n->id.data, t->id.data, n->id.len, t->id.len) + < 0) ? 
&temp->left : &temp->right; + } + + if (*p == sentinel) { + break; + } + + temp = *p; + } + + *p = node; + node->parent = temp; + node->left = sentinel; + node->right = sentinel; + ngx_rbt_red(node); +} + + +void * +ngx_ssl_cache_fetch(ngx_cycle_t *cycle, ngx_pool_t *pool, + ngx_ssl_cache_type_t *type, char **err, ngx_str_t *id, void *data) +{ + void *value; + uint32_t hash; + ngx_ssl_cache_t *cache; + ngx_ssl_cache_node_t *cn; + + value = NULL; + + hash = ngx_murmur_hash2(id->data, id->len); + + cache = (ngx_ssl_cache_t *) ngx_get_conf(cycle->conf_ctx, + ngx_openssl_cache_module); + + if (ngx_process == NGX_PROCESS_WORKER + || ngx_process == NGX_PROCESS_SINGLE) + { + return type->create(id, err, data); + } + + cn = ngx_ssl_cache_lookup(cache, type, id, hash); + + if (cn == NULL) { + cn = ngx_palloc(pool, sizeof(ngx_ssl_cache_node_t) + id->len + 1); + if (cn == NULL) { + return NULL; + } + + cn->node.key = hash; + cn->id.data = (u_char *)(cn + 1); + cn->id.len = id->len; + cn->type = type; + cn->value = NULL; + + ngx_cpystrn(cn->id.data, id->data, id->len + 1); + + ngx_rbtree_insert(&cache->rbtree, &cn->node); + } + + /* try to use a reference from the cache */ + + if (cn->value != NULL) { + value = type->ref(err, cn->value); + } + + if (value == NULL) { + value = type->create(id, err, data); + } + + if (value != NULL && cn->value == NULL) { + /* we have a value and the node needs one; try to reference it */ + cn->value = type->ref(err, value); + } + + return value; +} + + +static ngx_ssl_cache_node_t * +ngx_ssl_cache_lookup(ngx_ssl_cache_t *cache, ngx_ssl_cache_type_t *type, + ngx_str_t *id, uint32_t hash) +{ + ngx_int_t rc; + ngx_rbtree_node_t *node, *sentinel; + ngx_ssl_cache_node_t *cn; + + node = cache->rbtree.root; + sentinel = cache->rbtree.sentinel; + + while (node != sentinel) { + + if (hash < node->key) { + node = node->left; + continue; + } + + if (hash > node->key) { + node = node->right; + continue; + } + + /* hash == node->key */ + + cn = 
(ngx_ssl_cache_node_t *) node; + + if ((ngx_uint_t) type < (ngx_uint_t) cn->type) { + node = node->left; + continue; + } + + if ((ngx_uint_t) type > (ngx_uint_t) cn->type) { + node = node->right; + continue; + } + + /* type == cn->type */ + + rc = ngx_memn2cmp(id->data, cn->id.data, id->len, cn->id.len); + + if (rc == 0) { + return cn; + } + + node = (rc < 0) ? node->left : node->right; + } + + return NULL; +} From mini at nginx.com Tue Jul 23 19:30:26 2024 From: mini at nginx.com (=?iso-8859-1?q?Mini_Hawthorne?=) Date: Tue, 23 Jul 2024 19:30:26 +0000 Subject: [PATCH 3 of 6] SSL: caching certificates In-Reply-To: <59ac183dfee8e9641563.1721763024@linux> References: <59ac183dfee8e9641563.1721763024@linux> Message-ID: <42e86c051200bf00d9ae.1721763026@linux> # HG changeset patch # User Mini Hawthorne # Date 1721762857 0 # Tue Jul 23 19:27:37 2024 +0000 # Node ID 42e86c051200bf00d9ae6e38d6c87a916391b642 # Parent 8eee61e223bb7cb7475e50b866fd6b9a83fa5fa0 SSL: caching certificates. Added ngx_ssl_cache_cert, which loads certificate chains once via BIO's created by ngx_ssl_cache_create_bio() which will be used by the following patches as well. The certificate cache provides each chain as a unique stack of shared references. This shallow copy is required because OpenSSL's stacks aren't reference counted; instead they contain a unique array of referenced entries. Also note that callers must pop the first certificate off of the stack due to awkwardness in OpenSSL certificate API. 
diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -18,8 +18,6 @@ typedef struct { } ngx_openssl_conf_t; -static X509 *ngx_ssl_load_certificate(ngx_pool_t *pool, char **err, - ngx_str_t *cert, STACK_OF(X509) **chain); static EVP_PKEY *ngx_ssl_load_certificate_key(ngx_pool_t *pool, char **err, ngx_str_t *key, ngx_array_t *passwords); static int ngx_ssl_password_callback(char *buf, int size, int rwflag, @@ -443,8 +441,9 @@ ngx_ssl_certificate(ngx_conf_t *cf, ngx_ STACK_OF(X509) *chain; ngx_ssl_name_t *name; - x509 = ngx_ssl_load_certificate(cf->pool, &err, cert, &chain); - if (x509 == NULL) { + chain = ngx_ssl_cache_fetch(cf->cycle, cf->pool, &ngx_ssl_cache_cert, + &err, cert, NULL); + if (chain == NULL) { if (err != NULL) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "cannot load certificate \"%s\": %s", @@ -454,6 +453,8 @@ ngx_ssl_certificate(ngx_conf_t *cf, ngx_ return NGX_ERROR; } + x509 = sk_X509_shift(chain); + if (SSL_CTX_use_certificate(ssl->ctx, x509) == 0) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "SSL_CTX_use_certificate(\"%s\") failed", cert->data); @@ -568,8 +569,9 @@ ngx_ssl_connection_certificate(ngx_conne EVP_PKEY *pkey; STACK_OF(X509) *chain; - x509 = ngx_ssl_load_certificate(pool, &err, cert, &chain); - if (x509 == NULL) { + chain = ngx_ssl_cache_fetch((ngx_cycle_t *) ngx_cycle, c->pool, + &ngx_ssl_cache_cert, &err, cert, NULL); + if (chain == NULL) { if (err != NULL) { ngx_ssl_error(NGX_LOG_ERR, c->log, 0, "cannot load certificate \"%s\": %s", @@ -579,6 +581,8 @@ ngx_ssl_connection_certificate(ngx_conne return NGX_ERROR; } + x509 = sk_X509_shift(chain); + if (SSL_use_certificate(c->ssl->connection, x509) == 0) { ngx_ssl_error(NGX_LOG_ERR, c->log, 0, "SSL_use_certificate(\"%s\") failed", cert->data); @@ -630,96 +634,6 @@ ngx_ssl_connection_certificate(ngx_conne } -static X509 * -ngx_ssl_load_certificate(ngx_pool_t *pool, char **err, ngx_str_t *cert, - 
STACK_OF(X509) **chain) -{ - BIO *bio; - X509 *x509, *temp; - u_long n; - - if (ngx_strncmp(cert->data, "data:", sizeof("data:") - 1) == 0) { - - bio = BIO_new_mem_buf(cert->data + sizeof("data:") - 1, - cert->len - (sizeof("data:") - 1)); - if (bio == NULL) { - *err = "BIO_new_mem_buf() failed"; - return NULL; - } - - } else { - - if (ngx_get_full_name(pool, (ngx_str_t *) &ngx_cycle->conf_prefix, cert) - != NGX_OK) - { - *err = NULL; - return NULL; - } - - bio = BIO_new_file((char *) cert->data, "r"); - if (bio == NULL) { - *err = "BIO_new_file() failed"; - return NULL; - } - } - - /* certificate itself */ - - x509 = PEM_read_bio_X509_AUX(bio, NULL, NULL, NULL); - if (x509 == NULL) { - *err = "PEM_read_bio_X509_AUX() failed"; - BIO_free(bio); - return NULL; - } - - /* rest of the chain */ - - *chain = sk_X509_new_null(); - if (*chain == NULL) { - *err = "sk_X509_new_null() failed"; - BIO_free(bio); - X509_free(x509); - return NULL; - } - - for ( ;; ) { - - temp = PEM_read_bio_X509(bio, NULL, NULL, NULL); - if (temp == NULL) { - n = ERR_peek_last_error(); - - if (ERR_GET_LIB(n) == ERR_LIB_PEM - && ERR_GET_REASON(n) == PEM_R_NO_START_LINE) - { - /* end of file */ - ERR_clear_error(); - break; - } - - /* some real error */ - - *err = "PEM_read_bio_X509() failed"; - BIO_free(bio); - X509_free(x509); - sk_X509_pop_free(*chain, X509_free); - return NULL; - } - - if (sk_X509_push(*chain, temp) == 0) { - *err = "sk_X509_push() failed"; - BIO_free(bio); - X509_free(x509); - sk_X509_pop_free(*chain, X509_free); - return NULL; - } - } - - BIO_free(bio); - - return x509; -} - - static EVP_PKEY * ngx_ssl_load_certificate_key(ngx_pool_t *pool, char **err, ngx_str_t *key, ngx_array_t *passwords) diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -351,4 +351,7 @@ extern int ngx_ssl_ocsp_index; extern int ngx_ssl_index; +extern ngx_ssl_cache_type_t ngx_ssl_cache_cert; + + #endif /* 
_NGX_EVENT_OPENSSL_H_INCLUDED_ */ diff --git a/src/event/ngx_event_openssl_cache.c b/src/event/ngx_event_openssl_cache.c --- a/src/event/ngx_event_openssl_cache.c +++ b/src/event/ngx_event_openssl_cache.c @@ -46,6 +46,12 @@ static void ngx_ssl_cache_node_insert(ng static ngx_ssl_cache_node_t *ngx_ssl_cache_lookup(ngx_ssl_cache_t *cache, ngx_ssl_cache_type_t *type, ngx_str_t *id, uint32_t hash); +static void *ngx_ssl_cache_cert_create(ngx_str_t *id, char **err, void *data); +static void ngx_ssl_cache_cert_free(void *data); +static void *ngx_ssl_cache_cert_ref(char **err, void *data); + +static BIO *ngx_ssl_cache_create_bio(ngx_str_t *id, char **err); + static ngx_core_module_t ngx_openssl_cache_module_ctx = { ngx_string("openssl_cache"), @@ -54,6 +60,15 @@ static ngx_core_module_t ngx_openssl_ca }; +ngx_ssl_cache_type_t ngx_ssl_cache_cert = { + "certificate chain", + + ngx_ssl_cache_cert_create, + ngx_ssl_cache_cert_free, + ngx_ssl_cache_cert_ref, +}; + + ngx_module_t ngx_openssl_cache_module = { NGX_MODULE_V1, &ngx_openssl_cache_module_ctx, /* module context */ @@ -275,3 +290,165 @@ ngx_ssl_cache_lookup(ngx_ssl_cache_t *ca return NULL; } + + +static void * +ngx_ssl_cache_cert_create(ngx_str_t *id, char **err, void *data) +{ + BIO *bio; + X509 *x; + u_long n; + STACK_OF(X509) *sk; + + /* start with an empty certificate chain */ + sk = sk_X509_new_null(); + if (sk == NULL) { + *err = "sk_X509_new_null() failed"; + return NULL; + } + + /* figure out where to load from */ + bio = ngx_ssl_cache_create_bio(id, err); + if (bio == NULL) { + sk_X509_pop_free(sk, X509_free); + return NULL; + } + + /* certificate itself */ + x = PEM_read_bio_X509_AUX(bio, NULL, NULL, NULL); + if (x == NULL) { + *err = "PEM_read_bio_X509_AUX() failed"; + BIO_free(bio); + sk_X509_pop_free(sk, X509_free); + return NULL; + } + + if (sk_X509_push(sk, x) <= 0) { + *err = "sk_X509_push() failed"; + BIO_free(bio); + X509_free(x); + sk_X509_pop_free(sk, X509_free); + return NULL; + } + + /* rest of 
the chain */ + for ( ;; ) { + + x = PEM_read_bio_X509(bio, NULL, NULL, NULL); + if (x == NULL) { + n = ERR_peek_last_error(); + + if (ERR_GET_LIB(n) == ERR_LIB_PEM + && ERR_GET_REASON(n) == PEM_R_NO_START_LINE) + { + /* end of file */ + ERR_clear_error(); + break; + } + + /* some real error */ + *err = "PEM_read_bio_X509() failed"; + BIO_free(bio); + sk_X509_pop_free(sk, X509_free); + return NULL; + } + + if (sk_X509_push(sk, x) <= 0) { + /* memory allocation failed */ + *err = "sk_X509_push() failed"; + BIO_free(bio); + sk_X509_pop_free(sk, X509_free); + return NULL; + } + } + + BIO_free(bio); + return sk; +} + + +static void +ngx_ssl_cache_cert_free(void *data) +{ + sk_X509_pop_free(data, X509_free); +} + + +static void * +ngx_ssl_cache_cert_ref(char **err, void *data) +{ + int n, i; + X509 *x; + STACK_OF(X509) *sk; + + /* stacks aren't reference-counted, so shallow copy into a new stack */ + sk = sk_X509_dup(data); + if (sk == NULL) { + *err = "sk_X509_dup() failed"; + return NULL; + } + + /* bump the certificates' reference counts */ + n = sk_X509_num(sk); + + for (i = 0; i < n; i++) { + x = sk_X509_value(sk, i); + +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) + X509_up_ref(x); +#else + CRYPTO_add(&x->references, 1, CRYPTO_LOCK_X509); +#endif + } + + return sk; +} + + +static BIO * +ngx_ssl_cache_create_bio(ngx_str_t *id, char **err) +{ + BIO *bio; + ngx_str_t path; + ngx_pool_t *temp_pool; + + if (ngx_strncmp(id->data, "data:", sizeof("data:") - 1) == 0) { + bio = BIO_new_mem_buf(id->data + sizeof("data:") - 1, + id->len - (sizeof("data:") - 1)); + + if (bio == NULL) { + *err = "BIO_new_mem_buf() failed"; + } + + return bio; + } + + /* generate a translated path */ + temp_pool = ngx_create_pool(NGX_MAX_PATH, ngx_cycle->log); + if (temp_pool == NULL) { + *err = NULL; + return NULL; + } + + ngx_memcpy(&path, id, sizeof(ngx_str_t)); + + if (ngx_get_full_name(temp_pool, + (ngx_str_t *) &ngx_cycle->conf_prefix, + &path) + != NGX_OK) + { + *err = NULL; + 
ngx_destroy_pool(temp_pool); + return NULL; + } + + bio = BIO_new_file((char *) path.data, "r"); + + if (bio == NULL) { + *err = "BIO_new_file() failed"; + } + + ngx_destroy_pool(temp_pool); + + return bio; +} From mini at nginx.com Tue Jul 23 19:30:27 2024 From: mini at nginx.com (=?iso-8859-1?q?Mini_Hawthorne?=) Date: Tue, 23 Jul 2024 19:30:27 +0000 Subject: [PATCH 4 of 6] SSL: caching certificate revocation lists In-Reply-To: <59ac183dfee8e9641563.1721763024@linux> References: <59ac183dfee8e9641563.1721763024@linux> Message-ID: <867e05f555e6f593589a.1721763027@linux> # HG changeset patch # User Mini Hawthorne # Date 1721762914 0 # Tue Jul 23 19:28:34 2024 +0000 # Node ID 867e05f555e6f593589a0278c865e7dcffe597f4 # Parent 42e86c051200bf00d9ae6e38d6c87a916391b642 SSL: caching certificate revocation lists. Added ngx_ssl_cache_crl which is similar to certificate caching. It basically calls X509_CRL versions of APIs instead of X509 versions. diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -886,17 +886,16 @@ ngx_ssl_trusted_certificate(ngx_conf_t * ngx_int_t ngx_ssl_crl(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *crl) { - X509_STORE *store; - X509_LOOKUP *lookup; + int n, i; + char *err; + X509_CRL *xc; + X509_STORE *store; + STACK_OF(X509_CRL) *xcsk; if (crl->len == 0) { return NGX_OK; } - if (ngx_conf_full_name(cf->cycle, crl, 1) != NGX_OK) { - return NGX_ERROR; - } - store = SSL_CTX_get_cert_store(ssl->ctx); if (store == NULL) { @@ -905,20 +904,44 @@ ngx_ssl_crl(ngx_conf_t *cf, ngx_ssl_t *s return NGX_ERROR; } - lookup = X509_STORE_add_lookup(store, X509_LOOKUP_file()); - - if (lookup == NULL) { - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, - "X509_STORE_add_lookup() failed"); + xcsk = ngx_ssl_cache_fetch(cf->cycle, cf->pool, &ngx_ssl_cache_crl, + &err, crl, NULL); + if (xcsk == NULL) { + if (err != NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "failed to load 
\"%s\": %s", crl->data, err); + } + return NGX_ERROR; } - if (X509_LOOKUP_load_file(lookup, (char *) crl->data, X509_FILETYPE_PEM) - == 0) - { - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, - "X509_LOOKUP_load_file(\"%s\") failed", crl->data); - return NGX_ERROR; + n = sk_X509_CRL_num(xcsk); + + for (i = 0; i < n; i++) { + xc = sk_X509_CRL_value(xcsk, i); + + if (X509_STORE_add_crl(store, xc) != 1) { + +#if !(OPENSSL_VERSION_NUMBER >= 0x1010009fL \ + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL) + u_long error; + + /* not reported in OpenSSL 1.1.0i+ */ + + error = ERR_peek_last_error(); + + if (ERR_GET_LIB(error) == ERR_LIB_X509 + && ERR_GET_REASON(error) == X509_R_CERT_ALREADY_IN_HASH_TABLE) + { + ERR_clear_error(); + continue; + } +#endif + + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "X509_STORE_add_crl() failed"); + return NGX_ERROR; + } } X509_STORE_set_flags(store, diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -352,6 +352,7 @@ extern int ngx_ssl_index; extern ngx_ssl_cache_type_t ngx_ssl_cache_cert; +extern ngx_ssl_cache_type_t ngx_ssl_cache_crl; #endif /* _NGX_EVENT_OPENSSL_H_INCLUDED_ */ diff --git a/src/event/ngx_event_openssl_cache.c b/src/event/ngx_event_openssl_cache.c --- a/src/event/ngx_event_openssl_cache.c +++ b/src/event/ngx_event_openssl_cache.c @@ -50,6 +50,10 @@ static void *ngx_ssl_cache_cert_create(n static void ngx_ssl_cache_cert_free(void *data); static void *ngx_ssl_cache_cert_ref(char **err, void *data); +static void *ngx_ssl_cache_crl_create(ngx_str_t *id, char **err, void *data); +static void ngx_ssl_cache_crl_free(void *data); +static void *ngx_ssl_cache_crl_ref(char **err, void *data); + static BIO *ngx_ssl_cache_create_bio(ngx_str_t *id, char **err); @@ -69,6 +73,15 @@ ngx_ssl_cache_type_t ngx_ssl_cache_cert }; +ngx_ssl_cache_type_t ngx_ssl_cache_crl = { + "certificate revocation list", + + ngx_ssl_cache_crl_create, + 
ngx_ssl_cache_crl_free, + ngx_ssl_cache_crl_ref, +}; + + ngx_module_t ngx_openssl_cache_module = { NGX_MODULE_V1, &ngx_openssl_cache_module_ctx, /* module context */ @@ -405,6 +418,96 @@ ngx_ssl_cache_cert_ref(char **err, void } +static void * +ngx_ssl_cache_crl_create(ngx_str_t *id, char **err, void *data) +{ + BIO *bio; + u_long n; + X509_CRL *xc; + STACK_OF(X509_CRL) *sk; + + /* start with an empty revocation list */ + sk = sk_X509_CRL_new_null(); + if (sk == NULL) { + *err = "sk_X509_CRL_new_null() failed"; + return NULL; + } + + /* figure out where to load from */ + bio = ngx_ssl_cache_create_bio(id, err); + if (bio == NULL) { + sk_X509_CRL_pop_free(sk, X509_CRL_free); + return NULL; + } + + /* read all of the revocations */ + while ((xc = PEM_read_bio_X509_CRL(bio, NULL, NULL, NULL)) != NULL) { + if (sk_X509_CRL_push(sk, xc) <= 0) { + *err = "sk_X509_CRL_push() failed"; + BIO_free(bio); + sk_X509_CRL_pop_free(sk, X509_CRL_free); + return NULL; + } + } + + BIO_free(bio); + + n = ERR_peek_last_error(); + if (sk_X509_CRL_num(sk) == 0 + || ERR_GET_LIB(n) != ERR_LIB_PEM + || ERR_GET_REASON(n) != PEM_R_NO_START_LINE) + { + /* the failure wasn't "no more revocations to load" */ + *err = "PEM_read_bio_X509_CRL() failed"; + sk_X509_CRL_pop_free(sk, X509_CRL_free); + return NULL; + } + + /* success leaves errors on the error stack */ + ERR_clear_error(); + + return sk; +} + + +static void +ngx_ssl_cache_crl_free(void *data) +{ + sk_X509_CRL_pop_free(data, X509_CRL_free); +} + + +static void * +ngx_ssl_cache_crl_ref(char **err, void *data) +{ + int n, i; + X509_CRL *xc; + STACK_OF(X509_CRL) *sk; + + /* stacks aren't reference-counted, so shallow copy into a new stack */ + sk = sk_X509_CRL_dup(data); + if (sk == NULL) { + *err = "sk_X509_CRL_dup() failed"; + return NULL; + } + + /* bump the revocations' reference counts */ + n = sk_X509_CRL_num(sk); + + for (i = 0; i < n; i++) { + xc = sk_X509_CRL_value(sk, i); + +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) + 
X509_CRL_up_ref(xc); +#else + CRYPTO_add(&xc->references, 1, CRYPTO_LOCK_X509_CRL); +#endif + } + + return sk; +} + + static BIO * ngx_ssl_cache_create_bio(ngx_str_t *id, char **err) { From mini at nginx.com Tue Jul 23 19:30:28 2024 From: mini at nginx.com (=?iso-8859-1?q?Mini_Hawthorne?=) Date: Tue, 23 Jul 2024 19:30:28 +0000 Subject: [PATCH 5 of 6] SSL: caching private keys In-Reply-To: <59ac183dfee8e9641563.1721763024@linux> References: <59ac183dfee8e9641563.1721763024@linux> Message-ID: <298a9eaa59d2a16f85b6.1721763028@linux> # HG changeset patch # User Mini Hawthorne # Date 1721762945 0 # Tue Jul 23 19:29:05 2024 +0000 # Node ID 298a9eaa59d2a16f85b6aa3584eb5f8298e6c9bc # Parent 867e05f555e6f593589a0278c865e7dcffe597f4 SSL: caching private keys. Added ngx_ssl_cache_key which caches private keys. Special support is included for "engine:..." keys. EVP_KEY objects are a reference-counted container for key material, so shallow copies and OpenSSL stack management aren't needed as with certificates. 
diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -18,10 +18,6 @@ typedef struct { } ngx_openssl_conf_t; -static EVP_PKEY *ngx_ssl_load_certificate_key(ngx_pool_t *pool, char **err, - ngx_str_t *key, ngx_array_t *passwords); -static int ngx_ssl_password_callback(char *buf, int size, int rwflag, - void *userdata); static int ngx_ssl_verify_callback(int ok, X509_STORE_CTX *x509_store); static void ngx_ssl_info_callback(const ngx_ssl_conn_t *ssl_conn, int where, int ret); @@ -536,7 +532,8 @@ ngx_ssl_certificate(ngx_conf_t *cf, ngx_ } #endif - pkey = ngx_ssl_load_certificate_key(cf->pool, &err, key, passwords); + pkey = ngx_ssl_cache_fetch(cf->cycle, cf->pool, &ngx_ssl_cache_key, &err, + key, passwords); if (pkey == NULL) { if (err != NULL) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, @@ -610,10 +607,11 @@ ngx_ssl_connection_certificate(ngx_conne #endif - pkey = ngx_ssl_load_certificate_key(pool, &err, key, passwords); + pkey = ngx_ssl_cache_fetch((ngx_cycle_t *) ngx_cycle, c->pool, + &ngx_ssl_cache_key, &err, key, passwords); if (pkey == NULL) { if (err != NULL) { - ngx_ssl_error(NGX_LOG_ERR, c->log, 0, + ngx_ssl_error(NGX_LOG_EMERG, c->log, 0, "cannot load certificate key \"%s\": %s", key->data, err); } @@ -634,151 +632,6 @@ ngx_ssl_connection_certificate(ngx_conne } -static EVP_PKEY * -ngx_ssl_load_certificate_key(ngx_pool_t *pool, char **err, - ngx_str_t *key, ngx_array_t *passwords) -{ - BIO *bio; - EVP_PKEY *pkey; - ngx_str_t *pwd; - ngx_uint_t tries; - pem_password_cb *cb; - - if (ngx_strncmp(key->data, "engine:", sizeof("engine:") - 1) == 0) { - -#ifndef OPENSSL_NO_ENGINE - - u_char *p, *last; - ENGINE *engine; - - p = key->data + sizeof("engine:") - 1; - last = (u_char *) ngx_strchr(p, ':'); - - if (last == NULL) { - *err = "invalid syntax"; - return NULL; - } - - *last = '\0'; - - engine = ENGINE_by_id((char *) p); - - *last++ = ':'; - - if (engine == NULL) { - 
*err = "ENGINE_by_id() failed"; - return NULL; - } - - pkey = ENGINE_load_private_key(engine, (char *) last, 0, 0); - - if (pkey == NULL) { - *err = "ENGINE_load_private_key() failed"; - ENGINE_free(engine); - return NULL; - } - - ENGINE_free(engine); - - return pkey; - -#else - - *err = "loading \"engine:...\" certificate keys is not supported"; - return NULL; - -#endif - } - - if (ngx_strncmp(key->data, "data:", sizeof("data:") - 1) == 0) { - - bio = BIO_new_mem_buf(key->data + sizeof("data:") - 1, - key->len - (sizeof("data:") - 1)); - if (bio == NULL) { - *err = "BIO_new_mem_buf() failed"; - return NULL; - } - - } else { - - if (ngx_get_full_name(pool, (ngx_str_t *) &ngx_cycle->conf_prefix, key) - != NGX_OK) - { - *err = NULL; - return NULL; - } - - bio = BIO_new_file((char *) key->data, "r"); - if (bio == NULL) { - *err = "BIO_new_file() failed"; - return NULL; - } - } - - if (passwords) { - tries = passwords->nelts; - pwd = passwords->elts; - cb = ngx_ssl_password_callback; - - } else { - tries = 1; - pwd = NULL; - cb = NULL; - } - - for ( ;; ) { - - pkey = PEM_read_bio_PrivateKey(bio, NULL, cb, pwd); - if (pkey != NULL) { - break; - } - - if (tries-- > 1) { - ERR_clear_error(); - (void) BIO_reset(bio); - pwd++; - continue; - } - - *err = "PEM_read_bio_PrivateKey() failed"; - BIO_free(bio); - return NULL; - } - - BIO_free(bio); - - return pkey; -} - - -static int -ngx_ssl_password_callback(char *buf, int size, int rwflag, void *userdata) -{ - ngx_str_t *pwd = userdata; - - if (rwflag) { - ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0, - "ngx_ssl_password_callback() is called for encryption"); - return 0; - } - - if (pwd == NULL) { - return 0; - } - - if (pwd->len > (size_t) size) { - ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0, - "password is truncated to %d bytes", size); - } else { - size = pwd->len; - } - - ngx_memcpy(buf, pwd->data, size); - - return size; -} - - ngx_int_t ngx_ssl_ciphers(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *ciphers, ngx_uint_t 
prefer_server_ciphers) diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -353,6 +353,7 @@ extern int ngx_ssl_index; extern ngx_ssl_cache_type_t ngx_ssl_cache_cert; extern ngx_ssl_cache_type_t ngx_ssl_cache_crl; +extern ngx_ssl_cache_type_t ngx_ssl_cache_key; #endif /* _NGX_EVENT_OPENSSL_H_INCLUDED_ */ diff --git a/src/event/ngx_event_openssl_cache.c b/src/event/ngx_event_openssl_cache.c --- a/src/event/ngx_event_openssl_cache.c +++ b/src/event/ngx_event_openssl_cache.c @@ -54,6 +54,12 @@ static void *ngx_ssl_cache_crl_create(ng static void ngx_ssl_cache_crl_free(void *data); static void *ngx_ssl_cache_crl_ref(char **err, void *data); +static void *ngx_ssl_cache_key_create(ngx_str_t *id, char **err, void *data); +static int ngx_ssl_cache_key_password_callback(char *buf, int size, int rwflag, + void *userdata); +static void ngx_ssl_cache_key_free(void *data); +static void *ngx_ssl_cache_key_ref(char **err, void *data); + static BIO *ngx_ssl_cache_create_bio(ngx_str_t *id, char **err); @@ -82,6 +88,15 @@ ngx_ssl_cache_type_t ngx_ssl_cache_crl }; +ngx_ssl_cache_type_t ngx_ssl_cache_key = { + "private key", + + ngx_ssl_cache_key_create, + ngx_ssl_cache_key_free, + ngx_ssl_cache_key_ref, +}; + + ngx_module_t ngx_openssl_cache_module = { NGX_MODULE_V1, &ngx_openssl_cache_module_ctx, /* module context */ @@ -508,6 +523,154 @@ ngx_ssl_cache_crl_ref(char **err, void * } +static void * +ngx_ssl_cache_key_create(ngx_str_t *id, char **err, void *data) +{ + ngx_array_t *passwords = data; + + BIO *bio; + EVP_PKEY *pkey; + ngx_str_t *pwd; + ngx_uint_t tries; + pem_password_cb *cb; + + if (ngx_strncmp(id->data, "engine:", sizeof("engine:") - 1) == 0) { + +#ifndef OPENSSL_NO_ENGINE + + u_char *p, *last; + ENGINE *engine; + + p = id->data + sizeof("engine:") - 1; + last = (u_char *) ngx_strchr(p, ':'); + + if (last == NULL) { + *err = "invalid syntax"; + return NULL; + } + + *last = 
'\0'; + + engine = ENGINE_by_id((char *) p); + + *last++ = ':'; + + if (engine == NULL) { + *err = "ENGINE_by_id() failed"; + return NULL; + } + + pkey = ENGINE_load_private_key(engine, (char *) last, 0, 0); + + if (pkey == NULL) { + *err = "ENGINE_load_private_key() failed"; + ENGINE_free(engine); + return NULL; + } + + ENGINE_free(engine); + + return pkey; +#else + *err = "loading \"engine:...\" certificate keys is not supported"; + return NULL; +#endif + } + + /* figure out where to load from */ + bio = ngx_ssl_cache_create_bio(id, err); + if (bio == NULL) { + return NULL; + } + + if (passwords) { + tries = passwords->nelts; + pwd = passwords->elts; + cb = ngx_ssl_cache_key_password_callback; + + } else { + tries = 1; + pwd = NULL; + cb = NULL; + } + + for ( ;; ) { + + pkey = PEM_read_bio_PrivateKey(bio, NULL, cb, pwd); + if (pkey != NULL) { + break; + } + + if (tries-- > 1) { + ERR_clear_error(); + (void) BIO_reset(bio); + pwd++; + continue; + } + + *err = "PEM_read_bio_PrivateKey() failed"; + BIO_free(bio); + return NULL; + } + + BIO_free(bio); + + return pkey; +} + + +static int +ngx_ssl_cache_key_password_callback(char *buf, int size, int rwflag, + void *userdata) +{ + ngx_str_t *pwd = userdata; + + if (rwflag) { + ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0, + "ngx_ssl_cache_key_password_callback() is called " + "for encryption"); + return 0; + } + + if (pwd == NULL) { + return 0; + } + + if (pwd->len > (size_t) size) { + ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0, + "password is truncated to %d bytes", size); + } else { + size = pwd->len; + } + + ngx_memcpy(buf, pwd->data, size); + + return size; +} + + +static void +ngx_ssl_cache_key_free(void *data) +{ + EVP_PKEY_free(data); +} + + +static void * +ngx_ssl_cache_key_ref(char **err, void *data) +{ + EVP_PKEY *pkey = data; + +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) + EVP_PKEY_up_ref(pkey); +#else + CRYPTO_add(&pkey->references, 1, CRYPTO_LOCK_EVP_PKEY); +#endif + + return data; +} + + static BIO * 
ngx_ssl_cache_create_bio(ngx_str_t *id, char **err) { From mini at nginx.com Tue Jul 23 19:30:29 2024 From: mini at nginx.com (=?iso-8859-1?q?Mini_Hawthorne?=) Date: Tue, 23 Jul 2024 19:30:29 +0000 Subject: [PATCH 6 of 6] SSL: caching CA certificates In-Reply-To: <59ac183dfee8e9641563.1721763024@linux> References: <59ac183dfee8e9641563.1721763024@linux> Message-ID: # HG changeset patch # User Mini Hawthorne # Date 1721762968 0 # Tue Jul 23 19:29:28 2024 +0000 # Node ID c4a90845888cfa20a4f622eb97954dfbd54af5c6 # Parent 298a9eaa59d2a16f85b6aa3584eb5f8298e6c9bc SSL: caching CA certificates. This can potentially provide a large amount of savings, because CA certificates can be quite large. diff --git a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -18,6 +18,8 @@ typedef struct { } ngx_openssl_conf_t; +static int ngx_ssl_x509_name_cmp(const X509_NAME *const *a, + const X509_NAME *const *b); static int ngx_ssl_verify_callback(int ok, X509_STORE_CTX *x509_store); static void ngx_ssl_info_callback(const ngx_ssl_conn_t *ssl_conn, int where, int ret); @@ -651,10 +653,23 @@ ngx_ssl_ciphers(ngx_conf_t *cf, ngx_ssl_ } +static int +ngx_ssl_x509_name_cmp(const X509_NAME *const *a, const X509_NAME *const *b) +{ + return (X509_NAME_cmp(*a, *b)); +} + + ngx_int_t ngx_ssl_client_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, ngx_int_t depth) { + int n, i; + char *err; + X509 *x; + X509_NAME *xn; + X509_STORE *xs; + STACK_OF(X509) *xsk; STACK_OF(X509_NAME) *list; SSL_CTX_set_verify(ssl->ctx, SSL_VERIFY_PEER, ngx_ssl_verify_callback); @@ -665,33 +680,91 @@ ngx_ssl_client_certificate(ngx_conf_t *c return NGX_OK; } - if (ngx_conf_full_name(cf->cycle, cert, 1) != NGX_OK) { + list = sk_X509_NAME_new(ngx_ssl_x509_name_cmp); + if (list == NULL) { return NGX_ERROR; } - if (SSL_CTX_load_verify_locations(ssl->ctx, (char *) cert->data, NULL) - == 0) - { - ngx_ssl_error(NGX_LOG_EMERG, 
ssl->log, 0, - "SSL_CTX_load_verify_locations(\"%s\") failed", - cert->data); + xs = SSL_CTX_get_cert_store(ssl->ctx); + xsk = ngx_ssl_cache_fetch(cf->cycle, cf->pool, &ngx_ssl_cache_ca, &err, + cert, NULL); + if (xsk == NULL) { + if (err != NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "failed to load \"%s\": %s", cert->data, err); + } + + sk_X509_NAME_pop_free(list, X509_NAME_free); return NGX_ERROR; } - /* - * SSL_CTX_load_verify_locations() may leave errors in the error queue - * while returning success - */ - - ERR_clear_error(); - - list = SSL_load_client_CA_file((char *) cert->data); - - if (list == NULL) { - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, - "SSL_load_client_CA_file(\"%s\") failed", cert->data); - return NGX_ERROR; - } + n = sk_X509_num(xsk); + + for (i = 0; i < n; i++) { + x = sk_X509_value(xsk, i); + + xn = X509_get_subject_name(x); + if (xn == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "X509_get_subject_name() failed"); + sk_X509_NAME_pop_free(list, X509_NAME_free); + sk_X509_pop_free(xsk, X509_free); + return NGX_ERROR; + } + + xn = X509_NAME_dup(xn); + if (xn == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "X509_NAME_dup() failed"); + sk_X509_NAME_pop_free(list, X509_NAME_free); + sk_X509_pop_free(xsk, X509_free); + return NGX_ERROR; + } + +#ifdef OPENSSL_IS_BORINGSSL + if (sk_X509_NAME_find(list, NULL, xn) > 0) { +#else + if (sk_X509_NAME_find(list, xn) >= 0) { +#endif + X509_NAME_free(xn); + continue; + } + + if (!sk_X509_NAME_push(list, xn)) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "sk_X509_NAME_push() failed"); + sk_X509_NAME_pop_free(list, X509_NAME_free); + sk_X509_pop_free(xsk, X509_free); + X509_NAME_free(xn); + return NGX_ERROR; + } + + if (X509_STORE_add_cert(xs, x) != 1) { + +#if !(OPENSSL_VERSION_NUMBER >= 0x1010009fL \ + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL) + u_long error; + + /* not reported in OpenSSL 1.1.0i+ */ + + error = ERR_peek_last_error(); + + if (ERR_GET_LIB(error) == ERR_LIB_X509 
+ && ERR_GET_REASON(error) == X509_R_CERT_ALREADY_IN_HASH_TABLE) + { + ERR_clear_error(); + continue; + } +#endif + + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "X509_STORE_add_cert() failed"); + sk_X509_NAME_pop_free(list, X509_NAME_free); + sk_X509_pop_free(xsk, X509_free); + return NGX_ERROR; + } + } + + sk_X509_pop_free(xsk, X509_free); SSL_CTX_set_client_CA_list(ssl->ctx, list); @@ -703,6 +776,12 @@ ngx_int_t ngx_ssl_trusted_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, ngx_int_t depth) { + int i, n; + char *err; + X509 *x; + X509_STORE *xs; + STACK_OF(X509) *xsk; + SSL_CTX_set_verify(ssl->ctx, SSL_CTX_get_verify_mode(ssl->ctx), ngx_ssl_verify_callback); @@ -712,25 +791,49 @@ ngx_ssl_trusted_certificate(ngx_conf_t * return NGX_OK; } - if (ngx_conf_full_name(cf->cycle, cert, 1) != NGX_OK) { + xs = SSL_CTX_get_cert_store(ssl->ctx); + xsk = ngx_ssl_cache_fetch(cf->cycle, cf->pool, &ngx_ssl_cache_ca, &err, + cert, NULL); + if (xsk == NULL) { + if (err != NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "failed to load \"%s\": %s", cert->data, err); + } + return NGX_ERROR; } - if (SSL_CTX_load_verify_locations(ssl->ctx, (char *) cert->data, NULL) - == 0) - { - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, - "SSL_CTX_load_verify_locations(\"%s\") failed", - cert->data); - return NGX_ERROR; - } - - /* - * SSL_CTX_load_verify_locations() may leave errors in the error queue - * while returning success - */ - - ERR_clear_error(); + n = sk_X509_num(xsk); + + for (i = 0; i < n; i++) { + x = sk_X509_value(xsk, i); + + if (X509_STORE_add_cert(xs, x) != 1) { + +#if !(OPENSSL_VERSION_NUMBER >= 0x1010009fL \ + || LIBRESSL_VERSION_NUMBER >= 0x3050000fL) + u_long error; + + /* not reported in OpenSSL 1.1.0i+ */ + + error = ERR_peek_last_error(); + + if (ERR_GET_LIB(error) == ERR_LIB_X509 + && ERR_GET_REASON(error) == X509_R_CERT_ALREADY_IN_HASH_TABLE) + { + ERR_clear_error(); + continue; + } +#endif + + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + 
"X509_STORE_add_cert() failed"); + sk_X509_pop_free(xsk, X509_free); + return NGX_ERROR; + } + } + + sk_X509_pop_free(xsk, X509_free); return NGX_OK; } diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h +++ b/src/event/ngx_event_openssl.h @@ -351,6 +351,7 @@ extern int ngx_ssl_ocsp_index; extern int ngx_ssl_index; +extern ngx_ssl_cache_type_t ngx_ssl_cache_ca; extern ngx_ssl_cache_type_t ngx_ssl_cache_cert; extern ngx_ssl_cache_type_t ngx_ssl_cache_crl; extern ngx_ssl_cache_type_t ngx_ssl_cache_key; diff --git a/src/event/ngx_event_openssl_cache.c b/src/event/ngx_event_openssl_cache.c --- a/src/event/ngx_event_openssl_cache.c +++ b/src/event/ngx_event_openssl_cache.c @@ -46,6 +46,7 @@ static void ngx_ssl_cache_node_insert(ng static ngx_ssl_cache_node_t *ngx_ssl_cache_lookup(ngx_ssl_cache_t *cache, ngx_ssl_cache_type_t *type, ngx_str_t *id, uint32_t hash); +static void *ngx_ssl_cache_ca_create(ngx_str_t *id, char **err, void *data); static void *ngx_ssl_cache_cert_create(ngx_str_t *id, char **err, void *data); static void ngx_ssl_cache_cert_free(void *data); static void *ngx_ssl_cache_cert_ref(char **err, void *data); @@ -70,6 +71,15 @@ static ngx_core_module_t ngx_openssl_ca }; +ngx_ssl_cache_type_t ngx_ssl_cache_ca = { + "certificate CA list", + + ngx_ssl_cache_ca_create, + ngx_ssl_cache_cert_free, + ngx_ssl_cache_cert_ref, +}; + + ngx_ssl_cache_type_t ngx_ssl_cache_cert = { "certificate chain", @@ -321,6 +331,58 @@ ngx_ssl_cache_lookup(ngx_ssl_cache_t *ca static void * +ngx_ssl_cache_ca_create(ngx_str_t *id, char **err, void *data) +{ + BIO *bio; + X509 *x; + u_long n; + STACK_OF(X509) *sk; + + /* start with an empty certificate chain */ + sk = sk_X509_new_null(); + if (sk == NULL) { + *err = "sk_X509_new_null() failed"; + return NULL; + } + + /* figure out where to load from */ + bio = ngx_ssl_cache_create_bio(id, err); + if (bio == NULL) { + sk_X509_pop_free(sk, X509_free); + return NULL; + } + + /* 
read all of the certificates */ + while ((x = PEM_read_bio_X509_AUX(bio, NULL, NULL, NULL)) != NULL) { + if (sk_X509_push(sk, x) <= 0) { + *err = "sk_X509_push() failed"; + BIO_free(bio); + sk_X509_pop_free(sk, X509_free); + return NULL; + } + } + + BIO_free(bio); + + n = ERR_peek_last_error(); + if (sk_X509_num(sk) == 0 + || ERR_GET_LIB(n) != ERR_LIB_PEM + || ERR_GET_REASON(n) != PEM_R_NO_START_LINE) + { + /* the failure wasn't "no more certificates to load" */ + *err = "PEM_read_bio_X509() failed"; + sk_X509_pop_free(sk, X509_free); + return NULL; + } + + /* success leaves errors on the error stack */ + ERR_clear_error(); + + return sk; +} + + +static void * ngx_ssl_cache_cert_create(ngx_str_t *id, char **err, void *data) { BIO *bio; From noreply at nginx.com Fri Jul 26 00:56:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 26 Jul 2024 00:56:02 +0000 (UTC) Subject: [njs] Tests: adapting unsafe redirect test for QuickJS. Message-ID: <20240726005602.2303748793@pubserv1.nginx> details: https://github.com/nginx/njs/commit/3ac496802862347c5cf8f0b6e3825163dc7bb1c9 branches: master commit: 3ac496802862347c5cf8f0b6e3825163dc7bb1c9 user: Dmitry Volyntsev date: Thu, 25 Jul 2024 17:28:37 -0700 description: Tests: adapting unsafe redirect test for QuickJS. At the moment QuickJS has no API for getting strings with NUL characters in the middle of the string. Instead of a NUL byte make another unsafe redirect URI. 
--- nginx/t/js_internal_redirect.t | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nginx/t/js_internal_redirect.t b/nginx/t/js_internal_redirect.t index ec6be4e1..abfe79f9 100644 --- a/nginx/t/js_internal_redirect.t +++ b/nginx/t/js_internal_redirect.t @@ -72,7 +72,7 @@ $t->write_file('test.js', < details: https://github.com/nginx/njs/commit/29c71bf771a30e8120bf332f240c14257000dd05 branches: master commit: 29c71bf771a30e8120bf332f240c14257000dd05 user: Dmitry Volyntsev date: Thu, 18 Jul 2024 22:37:25 -0700 description: Tests: making exception test more portable in js.t. --- nginx/t/js.t | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nginx/t/js.t b/nginx/t/js.t index 1e4a07b6..b7dfe3e6 100644 --- a/nginx/t/js.t +++ b/nginx/t/js.t @@ -260,13 +260,11 @@ $t->write_file('test.js', <stop(); ok(index($t->read_file('error.log'), 'SEE-LOG') > 0, 'log js'); -ok(index($t->read_file('error.log'), 'at fs.readFileSync') > 0, +ok(index($t->read_file('error.log'), 'at decodeURI') > 0, 'js_set backtrace'); -ok(index($t->read_file('error.log'), 'at JSON.parse') > 0, +ok(index($t->read_file('error.log'), 'at content_except') > 0, 'js_content backtrace'); ############################################################################### From noreply at nginx.com Fri Jul 26 00:56:02 2024 From: noreply at nginx.com (noreply at nginx.com) Date: Fri, 26 Jul 2024 00:56:02 +0000 (UTC) Subject: [njs] Modules: removed non needed argument from meta handlers. Message-ID: <20240726005602.1B8A148791@pubserv1.nginx> details: https://github.com/nginx/njs/commit/af7847fa6b5c8e6e5bdcae897dbb4f7ee9e8ef04 branches: master commit: af7847fa6b5c8e6e5bdcae897dbb4f7ee9e8ef04 user: Dmitry Volyntsev date: Wed, 24 Jul 2024 22:19:33 -0700 description: Modules: removed non needed argument from meta handlers. 
--- nginx/ngx_http_js_module.c | 40 ++++++++++++++++++---------------------- nginx/ngx_js.h | 37 ++++++++++++++++--------------------- nginx/ngx_stream_js_module.c | 41 ++++++++++++++++++----------------------- 3 files changed, 52 insertions(+), 66 deletions(-) diff --git a/nginx/ngx_http_js_module.c b/nginx/ngx_http_js_module.c index a38d32cf..86d907df 100644 --- a/nginx/ngx_http_js_module.c +++ b/nginx/ngx_http_js_module.c @@ -257,18 +257,14 @@ static njs_int_t ngx_http_js_server(njs_vm_t *vm, ngx_http_request_t *r, unsigned flags, njs_str_t *name, njs_value_t *setval, njs_value_t *retval); -static ngx_pool_t *ngx_http_js_pool(njs_vm_t *vm, ngx_http_request_t *r); -static ngx_resolver_t *ngx_http_js_resolver(njs_vm_t *vm, - ngx_http_request_t *r); -static ngx_msec_t ngx_http_js_resolver_timeout(njs_vm_t *vm, - ngx_http_request_t *r); -static ngx_msec_t ngx_http_js_fetch_timeout(njs_vm_t *vm, - ngx_http_request_t *r); -static size_t ngx_http_js_buffer_size(njs_vm_t *vm, ngx_http_request_t *r); -static size_t ngx_http_js_max_response_buffer_size(njs_vm_t *vm, - ngx_http_request_t *r); +static ngx_pool_t *ngx_http_js_pool(ngx_http_request_t *r); +static ngx_resolver_t *ngx_http_js_resolver(ngx_http_request_t *r); +static ngx_msec_t ngx_http_js_resolver_timeout(ngx_http_request_t *r); +static ngx_msec_t ngx_http_js_fetch_timeout(ngx_http_request_t *r); +static size_t ngx_http_js_buffer_size(ngx_http_request_t *r); +static size_t ngx_http_js_max_response_buffer_size(ngx_http_request_t *r); static void ngx_http_js_event_finalize(ngx_http_request_t *r, ngx_int_t rc); -static ngx_js_ctx_t *ngx_http_js_ctx(njs_vm_t *vm, ngx_http_request_t *r); +static ngx_js_ctx_t *ngx_http_js_ctx(ngx_http_request_t *r); static void ngx_http_js_periodic_handler(ngx_event_t *ev); static void ngx_http_js_periodic_shutdown_handler(ngx_event_t *ev); @@ -297,8 +293,8 @@ static void *ngx_http_js_create_loc_conf(ngx_conf_t *cf); static char *ngx_http_js_merge_loc_conf(ngx_conf_t *cf, void 
*parent, void *child); -static ngx_ssl_t *ngx_http_js_ssl(njs_vm_t *vm, ngx_http_request_t *r); -static ngx_flag_t ngx_http_js_ssl_verify(njs_vm_t *vm, ngx_http_request_t *r); +static ngx_ssl_t *ngx_http_js_ssl(ngx_http_request_t *r); +static ngx_flag_t ngx_http_js_ssl_verify(ngx_http_request_t *r); static ngx_int_t ngx_http_js_parse_unsafe_uri(ngx_http_request_t *r, njs_str_t *uri, njs_str_t *args); @@ -4291,14 +4287,14 @@ ngx_http_js_periodic_init(ngx_js_periodic_t *periodic) static ngx_pool_t * -ngx_http_js_pool(njs_vm_t *vm, ngx_http_request_t *r) +ngx_http_js_pool(ngx_http_request_t *r) { return r->pool; } static ngx_resolver_t * -ngx_http_js_resolver(njs_vm_t *vm, ngx_http_request_t *r) +ngx_http_js_resolver(ngx_http_request_t *r) { ngx_http_core_loc_conf_t *clcf; @@ -4309,7 +4305,7 @@ ngx_http_js_resolver(njs_vm_t *vm, ngx_http_request_t *r) static ngx_msec_t -ngx_http_js_resolver_timeout(njs_vm_t *vm, ngx_http_request_t *r) +ngx_http_js_resolver_timeout(ngx_http_request_t *r) { ngx_http_core_loc_conf_t *clcf; @@ -4320,7 +4316,7 @@ ngx_http_js_resolver_timeout(njs_vm_t *vm, ngx_http_request_t *r) static ngx_msec_t -ngx_http_js_fetch_timeout(njs_vm_t *vm, ngx_http_request_t *r) +ngx_http_js_fetch_timeout(ngx_http_request_t *r) { ngx_http_js_loc_conf_t *jlcf; @@ -4331,7 +4327,7 @@ ngx_http_js_fetch_timeout(njs_vm_t *vm, ngx_http_request_t *r) static size_t -ngx_http_js_buffer_size(njs_vm_t *vm, ngx_http_request_t *r) +ngx_http_js_buffer_size(ngx_http_request_t *r) { ngx_http_js_loc_conf_t *jlcf; @@ -4342,7 +4338,7 @@ ngx_http_js_buffer_size(njs_vm_t *vm, ngx_http_request_t *r) static size_t -ngx_http_js_max_response_buffer_size(njs_vm_t *vm, ngx_http_request_t *r) +ngx_http_js_max_response_buffer_size(ngx_http_request_t *r) { ngx_http_js_loc_conf_t *jlcf; @@ -4375,7 +4371,7 @@ ngx_http_js_event_finalize(ngx_http_request_t *r, ngx_int_t rc) static ngx_js_ctx_t * -ngx_http_js_ctx(njs_vm_t *vm, ngx_http_request_t *r) +ngx_http_js_ctx(ngx_http_request_t *r) { 
return ngx_http_get_module_ctx(r, ngx_http_js_module); } @@ -4844,7 +4840,7 @@ ngx_http_js_merge_loc_conf(ngx_conf_t *cf, void *parent, void *child) static ngx_ssl_t * -ngx_http_js_ssl(njs_vm_t *vm, ngx_http_request_t *r) +ngx_http_js_ssl(ngx_http_request_t *r) { #if (NGX_HTTP_SSL) ngx_http_js_loc_conf_t *jlcf; @@ -4859,7 +4855,7 @@ ngx_http_js_ssl(njs_vm_t *vm, ngx_http_request_t *r) static ngx_flag_t -ngx_http_js_ssl_verify(njs_vm_t *vm, ngx_http_request_t *r) +ngx_http_js_ssl_verify(ngx_http_request_t *r) { #if (NGX_HTTP_SSL) ngx_http_js_loc_conf_t *jlcf; diff --git a/nginx/ngx_js.h b/nginx/ngx_js.h index 189d4878..beccbfdf 100644 --- a/nginx/ngx_js.h +++ b/nginx/ngx_js.h @@ -39,19 +39,14 @@ typedef struct ngx_js_dict_s ngx_js_dict_t; typedef struct ngx_js_ctx_s ngx_js_ctx_t; -typedef ngx_pool_t *(*ngx_external_pool_pt)(njs_vm_t *vm, njs_external_ptr_t e); +typedef ngx_pool_t *(*ngx_external_pool_pt)(njs_external_ptr_t e); typedef void (*ngx_js_event_finalize_pt)(njs_external_ptr_t e, ngx_int_t rc); -typedef ngx_resolver_t *(*ngx_external_resolver_pt)(njs_vm_t *vm, - njs_external_ptr_t e); -typedef ngx_msec_t (*ngx_external_timeout_pt)(njs_vm_t *vm, - njs_external_ptr_t e); -typedef ngx_flag_t (*ngx_external_flag_pt)(njs_vm_t *vm, - njs_external_ptr_t e); -typedef ngx_flag_t (*ngx_external_size_pt)(njs_vm_t *vm, - njs_external_ptr_t e); -typedef ngx_ssl_t *(*ngx_external_ssl_pt)(njs_vm_t *vm, njs_external_ptr_t e); -typedef ngx_js_ctx_t *(*ngx_js_external_ctx_pt)(njs_vm_t *vm, - njs_external_ptr_t e); +typedef ngx_resolver_t *(*ngx_external_resolver_pt)(njs_external_ptr_t e); +typedef ngx_msec_t (*ngx_external_timeout_pt)(njs_external_ptr_t e); +typedef ngx_flag_t (*ngx_external_flag_pt)(njs_external_ptr_t e); +typedef ngx_flag_t (*ngx_external_size_pt)(njs_external_ptr_t e); +typedef ngx_ssl_t *(*ngx_external_ssl_pt)(njs_external_ptr_t e); +typedef ngx_js_ctx_t *(*ngx_js_external_ctx_pt)(njs_external_ptr_t e); typedef struct { @@ -150,28 +145,28 @@ struct 
ngx_js_ctx_s { #define ngx_external_connection(vm, e) \ (*((ngx_connection_t **) ((u_char *) (e) + njs_vm_meta(vm, 0)))) #define ngx_external_pool(vm, e) \ - ((ngx_external_pool_pt) njs_vm_meta(vm, 1))(vm, e) + ((ngx_external_pool_pt) njs_vm_meta(vm, 1))(e) #define ngx_external_resolver(vm, e) \ - ((ngx_external_resolver_pt) njs_vm_meta(vm, 2))(vm, e) + ((ngx_external_resolver_pt) njs_vm_meta(vm, 2))(e) #define ngx_external_resolver_timeout(vm, e) \ - ((ngx_external_timeout_pt) njs_vm_meta(vm, 3))(vm, e) + ((ngx_external_timeout_pt) njs_vm_meta(vm, 3))(e) #define ngx_external_event_finalize(vm) \ ((ngx_js_event_finalize_pt) njs_vm_meta(vm, 4)) #define ngx_external_ssl(vm, e) \ - ((ngx_external_ssl_pt) njs_vm_meta(vm, 5))(vm, e) + ((ngx_external_ssl_pt) njs_vm_meta(vm, 5))(e) #define ngx_external_ssl_verify(vm, e) \ - ((ngx_external_flag_pt) njs_vm_meta(vm, 6))(vm, e) + ((ngx_external_flag_pt) njs_vm_meta(vm, 6))(e) #define ngx_external_fetch_timeout(vm, e) \ - ((ngx_external_timeout_pt) njs_vm_meta(vm, 7))(vm, e) + ((ngx_external_timeout_pt) njs_vm_meta(vm, 7))(e) #define ngx_external_buffer_size(vm, e) \ - ((ngx_external_size_pt) njs_vm_meta(vm, 8))(vm, e) + ((ngx_external_size_pt) njs_vm_meta(vm, 8))(e) #define ngx_external_max_response_buffer_size(vm, e) \ - ((ngx_external_size_pt) njs_vm_meta(vm, 9))(vm, e) + ((ngx_external_size_pt) njs_vm_meta(vm, 9))(e) #define NGX_JS_MAIN_CONF_INDEX 10 #define ngx_main_conf(vm) \ ((ngx_js_main_conf_t *) njs_vm_meta(vm, NGX_JS_MAIN_CONF_INDEX)) #define ngx_external_ctx(vm, e) \ - ((ngx_js_external_ctx_pt) njs_vm_meta(vm, 11))(vm, e) + ((ngx_js_external_ctx_pt) njs_vm_meta(vm, 11))(e) #define ngx_js_prop(vm, type, value, start, len) \ diff --git a/nginx/ngx_stream_js_module.c b/nginx/ngx_stream_js_module.c index 23c288bc..4de0cb4f 100644 --- a/nginx/ngx_stream_js_module.c +++ b/nginx/ngx_stream_js_module.c @@ -119,18 +119,14 @@ static njs_int_t ngx_stream_js_periodic_variables(njs_vm_t *vm, njs_object_prop_t *prop, njs_value_t 
*value, njs_value_t *setval, njs_value_t *retval); -static ngx_pool_t *ngx_stream_js_pool(njs_vm_t *vm, ngx_stream_session_t *s); -static ngx_resolver_t *ngx_stream_js_resolver(njs_vm_t *vm, - ngx_stream_session_t *s); -static ngx_msec_t ngx_stream_js_resolver_timeout(njs_vm_t *vm, - ngx_stream_session_t *s); -static ngx_msec_t ngx_stream_js_fetch_timeout(njs_vm_t *vm, - ngx_stream_session_t *s); -static size_t ngx_stream_js_buffer_size(njs_vm_t *vm, ngx_stream_session_t *s); -static size_t ngx_stream_js_max_response_buffer_size(njs_vm_t *vm, - ngx_stream_session_t *s); +static ngx_pool_t *ngx_stream_js_pool(ngx_stream_session_t *s); +static ngx_resolver_t *ngx_stream_js_resolver(ngx_stream_session_t *s); +static ngx_msec_t ngx_stream_js_resolver_timeout(ngx_stream_session_t *s); +static ngx_msec_t ngx_stream_js_fetch_timeout(ngx_stream_session_t *s); +static size_t ngx_stream_js_buffer_size(ngx_stream_session_t *s); +static size_t ngx_stream_js_max_response_buffer_size(ngx_stream_session_t *s); static void ngx_stream_js_event_finalize(ngx_stream_session_t *s, ngx_int_t rc); -static ngx_js_ctx_t *ngx_stream_js_ctx(njs_vm_t *vm, ngx_stream_session_t *s); +static ngx_js_ctx_t *ngx_stream_js_ctx(ngx_stream_session_t *s); static void ngx_stream_js_periodic_handler(ngx_event_t *ev); static void ngx_stream_js_periodic_event_handler(ngx_event_t *ev); @@ -157,9 +153,8 @@ static char *ngx_stream_js_merge_srv_conf(ngx_conf_t *cf, void *parent, static char *ngx_stream_js_shared_dict_zone(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); -static ngx_ssl_t *ngx_stream_js_ssl(njs_vm_t *vm, ngx_stream_session_t *s); -static ngx_flag_t ngx_stream_js_ssl_verify(njs_vm_t *vm, - ngx_stream_session_t *s); +static ngx_ssl_t *ngx_stream_js_ssl(ngx_stream_session_t *s); +static ngx_flag_t ngx_stream_js_ssl_verify(ngx_stream_session_t *s); #if (NGX_STREAM_SSL) @@ -1648,14 +1643,14 @@ ngx_stream_js_periodic_variables(njs_vm_t *vm, njs_object_prop_t *prop, static ngx_pool_t * 
-ngx_stream_js_pool(njs_vm_t *vm, ngx_stream_session_t *s) +ngx_stream_js_pool(ngx_stream_session_t *s) { return s->connection->pool; } static ngx_resolver_t * -ngx_stream_js_resolver(njs_vm_t *vm, ngx_stream_session_t *s) +ngx_stream_js_resolver(ngx_stream_session_t *s) { ngx_stream_core_srv_conf_t *cscf; @@ -1666,7 +1661,7 @@ ngx_stream_js_resolver(njs_vm_t *vm, ngx_stream_session_t *s) static ngx_msec_t -ngx_stream_js_resolver_timeout(njs_vm_t *vm, ngx_stream_session_t *s) +ngx_stream_js_resolver_timeout(ngx_stream_session_t *s) { ngx_stream_core_srv_conf_t *cscf; @@ -1677,7 +1672,7 @@ ngx_stream_js_resolver_timeout(njs_vm_t *vm, ngx_stream_session_t *s) static ngx_msec_t -ngx_stream_js_fetch_timeout(njs_vm_t *vm, ngx_stream_session_t *s) +ngx_stream_js_fetch_timeout(ngx_stream_session_t *s) { ngx_stream_js_srv_conf_t *jscf; @@ -1688,7 +1683,7 @@ ngx_stream_js_fetch_timeout(njs_vm_t *vm, ngx_stream_session_t *s) static size_t -ngx_stream_js_buffer_size(njs_vm_t *vm, ngx_stream_session_t *s) +ngx_stream_js_buffer_size(ngx_stream_session_t *s) { ngx_stream_js_srv_conf_t *jscf; @@ -1699,7 +1694,7 @@ ngx_stream_js_buffer_size(njs_vm_t *vm, ngx_stream_session_t *s) static size_t -ngx_stream_js_max_response_buffer_size(njs_vm_t *vm, ngx_stream_session_t *s) +ngx_stream_js_max_response_buffer_size(ngx_stream_session_t *s) { ngx_stream_js_srv_conf_t *jscf; @@ -1732,7 +1727,7 @@ ngx_stream_js_event_finalize(ngx_stream_session_t *s, ngx_int_t rc) static ngx_js_ctx_t * -ngx_stream_js_ctx(njs_vm_t *vm, ngx_stream_session_t *s) +ngx_stream_js_ctx(ngx_stream_session_t *s) { return ngx_stream_get_module_ctx(s, ngx_stream_js_module); } @@ -2384,7 +2379,7 @@ ngx_stream_js_init(ngx_conf_t *cf) static ngx_ssl_t * -ngx_stream_js_ssl(njs_vm_t *vm, ngx_stream_session_t *s) +ngx_stream_js_ssl(ngx_stream_session_t *s) { #if (NGX_STREAM_SSL) ngx_stream_js_srv_conf_t *jscf; @@ -2399,7 +2394,7 @@ ngx_stream_js_ssl(njs_vm_t *vm, ngx_stream_session_t *s) static ngx_flag_t 
-ngx_stream_js_ssl_verify(njs_vm_t *vm, ngx_stream_session_t *s) +ngx_stream_js_ssl_verify(ngx_stream_session_t *s) { #if (NGX_STREAM_SSL) ngx_stream_js_srv_conf_t *jscf; From maksim.yevmenkin at gmail.com Fri Jul 26 20:15:06 2024 From: maksim.yevmenkin at gmail.com (Maksim Yevmenkin) Date: Fri, 26 Jul 2024 13:15:06 -0700 Subject: Updating r->cache in ngx_http_upstream_process_cache_control() Message-ID: Hello, I would like to understand the rationale for updating r->cache->valid_sec (and other fields in r->cache) in ngx_http_upstream_process_cache_control(). It appears that ngx_http_upstream_process_cache_control() can be called (via ngx_http_proxy_process_header()) in two scenarios: when sending a cached response to the client, and when receiving a response from the upstream server. Obviously, calling ngx_http_upstream_process_cache_control() when receiving a response from the upstream server is completely expected. However, calling ngx_http_upstream_process_cache_control(), and more specifically, modifying r->cache fields while sending a cached (NGX_HTTP_CACHE_HIT and possibly others) response is not completely clear to me. Thanks! Max