From arut at nginx.com Tue Aug 3 08:51:49 2021 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 3 Aug 2021 11:51:49 +0300 Subject: cache: move open to thread pool In-Reply-To: <67879f0c-03e9-8e39-72c2-7edfaaedb603@nginx.com> References: <20180808181653.GX56558@mdounin.ru> <20180810113946.GG56558@mdounin.ru> <20180903160903.GI56558@mdounin.ru> <20180913181023.GW56558@mdounin.ru> <20181004093226.GD62311@Romans-MacBook-Air.local> <20181101123047.GG86326@Romans-MacBook-Air.local> <67879f0c-03e9-8e39-72c2-7edfaaedb603@nginx.com> Message-ID: <20210803085149.u2xfnlgz2bnt4zwz@Romans-MacBook-Pro.local> Hi, On Thu, Nov 01, 2018 at 07:08:10PM +0300, Maxim Konovalov wrote: > On 01/11/2018 15:30, Roman Arutyunyan wrote: > > Hi, > > > > On Thu, Oct 04, 2018 at 12:32:26PM +0300, Roman Arutyunyan wrote: > >> Hi, > >> > >> On Thu, Sep 13, 2018 at 09:10:23PM +0300, Maxim Dounin wrote: > >>> Hello! > >>> > >>> On Tue, Sep 04, 2018 at 04:58:05PM -0700, Ka-Hing Cheung via nginx-devel wrote: > >>> > >>>> On Mon, Sep 3, 2018 at 9:09 AM, Maxim Dounin wrote: > >> [..] > >> > >> Here's another approach to thread open. This time it's 4 patches: > >> > >> - #1 a small open file cache refactoring > >> - #2 thread open in open file cache > >> - #3 thread open in http static module > >> - #4 thread open in http file cache > > The next iteration of the work. > > Only 3 patches this time. > > Testing and feedbacks are welcome. An updated patchset which applies to the current nginx source code. This work is still kept as a separate patchset since we have not got enough feedback from potential users. If you're interested in this feature, please try these patches and report back to us. -- Roman Arutyunyan -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1540900849 -10800 # Tue Oct 30 15:00:49 2018 +0300 # Node ID f386594b298d1442e1bfe1add9cdf3c6cf26917b # Parent 1ebd78df4ce7262967c5dadce7bac454c4086896 Threaded open support in open file cache. 
diff --git a/src/core/ngx_file.h b/src/core/ngx_file.h --- a/src/core/ngx_file.h +++ b/src/core/ngx_file.h @@ -17,6 +17,7 @@ struct ngx_file_s { ngx_fd_t fd; ngx_str_t name; ngx_file_info_t info; + ngx_err_t err; off_t offset; off_t sys_offset; diff --git a/src/core/ngx_open_file_cache.c b/src/core/ngx_open_file_cache.c --- a/src/core/ngx_open_file_cache.c +++ b/src/core/ngx_open_file_cache.c @@ -22,6 +22,8 @@ static void ngx_open_file_cache_cleanup(void *data); +static ngx_int_t ngx_open_uncached_file(ngx_str_t *name, + ngx_open_file_info_t *of, ngx_pool_t *pool); #if (NGX_HAVE_OPENAT) static ngx_fd_t ngx_openat_file_owner(ngx_fd_t at_fd, const u_char *name, ngx_int_t mode, ngx_int_t create, ngx_int_t access, ngx_log_t *log); @@ -147,54 +149,15 @@ ngx_open_cached_file(ngx_open_file_cache time_t now; uint32_t hash; ngx_int_t rc; - ngx_file_info_t fi; ngx_pool_cleanup_t *cln; ngx_cached_open_file_t *file; - ngx_pool_cleanup_file_t *clnf; ngx_open_file_cache_cleanup_t *ofcln; of->fd = NGX_INVALID_FILE; of->err = 0; if (cache == NULL) { - - if (of->test_only) { - - if (ngx_file_info_wrapper(name, of, &fi, pool->log) - == NGX_FILE_ERROR) - { - return NGX_ERROR; - } - - of->uniq = ngx_file_uniq(&fi); - of->mtime = ngx_file_mtime(&fi); - of->size = ngx_file_size(&fi); - of->fs_size = ngx_file_fs_size(&fi); - of->is_dir = ngx_is_dir(&fi); - of->is_file = ngx_is_file(&fi); - of->is_link = ngx_is_link(&fi); - of->is_exec = ngx_is_exec(&fi); - - return NGX_OK; - } - - cln = ngx_pool_cleanup_add(pool, sizeof(ngx_pool_cleanup_file_t)); - if (cln == NULL) { - return NGX_ERROR; - } - - rc = ngx_open_and_stat_file(name, of, pool->log); - - if (rc == NGX_OK && !of->is_dir) { - cln->handler = ngx_pool_cleanup_file; - clnf = cln->data; - - clnf->fd = of->fd; - clnf->name = name->data; - clnf->log = pool->log; - } - - return rc; + return ngx_open_uncached_file(name, of, pool); } cln = ngx_pool_cleanup_add(pool, sizeof(ngx_open_file_cache_cleanup_t)); @@ -486,6 +449,163 @@ failed: } 
+static ngx_int_t +ngx_open_uncached_file(ngx_str_t *name, ngx_open_file_info_t *of, + ngx_pool_t *pool) +{ + ngx_fd_t fd; + ngx_int_t mode, create, access; + ngx_file_info_t fi; + ngx_pool_cleanup_t *cln; + ngx_pool_cleanup_file_t *clnf; + + if (of->test_only || of->test_dir) { + + if (ngx_file_info_wrapper(name, of, &fi, pool->log) == NGX_FILE_ERROR) { + return NGX_ERROR; + } + + if (of->test_only || ngx_is_dir(&fi)) { + goto done; + } + } + + if (!of->log) { + + /* + * Use non-blocking open() not to hang on FIFO files, etc. + * This flag has no effect on a regular files. + */ + + mode = NGX_FILE_RDONLY|NGX_FILE_NONBLOCK; + create = NGX_FILE_OPEN; + access = 0; + + } else { + mode = NGX_FILE_APPEND; + create = NGX_FILE_CREATE_OR_OPEN; + access = NGX_FILE_DEFAULT_ACCESS; + } + +#if (NGX_THREADS) + + if (of->thread_handler + && of->disable_symlinks == NGX_DISABLE_SYMLINKS_OFF) + { + ngx_int_t rc; + ngx_file_t file; + + ngx_memzero(&file, sizeof(ngx_file_t)); + + file.log = pool->log; + file.fd = NGX_INVALID_FILE; + file.thread_handler = of->thread_handler; + file.thread_ctx = of->thread_ctx; + file.thread_task = of->thread_task; + + rc = ngx_thread_open(&file, name->data, mode, create, access, pool); + + if (rc == NGX_AGAIN) { + of->thread_task = file.thread_task; + return NGX_AGAIN; + } + + if (rc != NGX_OK) { + of->err = file.err; + of->failed = ngx_open_file_n; + return NGX_ERROR; + } + + fd = file.fd; + + } else { + fd = ngx_open_file_wrapper(name, of, mode, create, access, pool->log); + if (fd == NGX_INVALID_FILE) { + return NGX_ERROR; + } + } + +#else + fd = ngx_open_file_wrapper(name, of, mode, create, access, pool->log); + if (fd == NGX_INVALID_FILE) { + return NGX_ERROR; + } +#endif + + if (ngx_fd_info(fd, &fi) == NGX_FILE_ERROR) { + ngx_log_error(NGX_LOG_CRIT, pool->log, ngx_errno, + ngx_fd_info_n " \"%V\" failed", name); + + if (ngx_close_file(fd) == NGX_FILE_ERROR) { + ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno, + ngx_close_file_n " \"%V\" 
failed", name); + } + + return NGX_ERROR; + } + + if (ngx_is_dir(&fi)) { + if (ngx_close_file(fd) == NGX_FILE_ERROR) { + ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno, + ngx_close_file_n " \"%V\" failed", name); + } + + } else { + of->fd = fd; + + if (of->read_ahead && ngx_file_size(&fi) > NGX_MIN_READ_AHEAD) { + if (ngx_read_ahead(fd, of->read_ahead) == NGX_ERROR) { + ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno, + ngx_read_ahead_n " \"%V\" failed", name); + } + } + + if (of->directio <= ngx_file_size(&fi)) { + if (ngx_directio_on(fd) == NGX_FILE_ERROR) { + ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno, + ngx_directio_on_n " \"%V\" failed", name); + + } else { + of->is_directio = 1; + } + } + } + +done: + + of->uniq = ngx_file_uniq(&fi); + of->mtime = ngx_file_mtime(&fi); + of->size = ngx_file_size(&fi); + of->fs_size = ngx_file_fs_size(&fi); + of->is_dir = ngx_is_dir(&fi); + of->is_file = ngx_is_file(&fi); + of->is_link = ngx_is_link(&fi); + of->is_exec = ngx_is_exec(&fi); + + if (of->fd != NGX_INVALID_FILE) { + cln = ngx_pool_cleanup_add(pool, sizeof(ngx_pool_cleanup_file_t)); + + if (cln == NULL) { + if (ngx_close_file(of->fd) == NGX_FILE_ERROR) { + ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno, + ngx_close_file_n " \"%s\" failed", name->data); + } + + return NGX_ERROR; + } + + cln->handler = ngx_pool_cleanup_file; + clnf = cln->data; + + clnf->fd = of->fd; + clnf->name = name->data; + clnf->log = pool->log; + } + + return NGX_OK; +} + + #if (NGX_HAVE_OPENAT) static ngx_fd_t diff --git a/src/core/ngx_open_file_cache.h b/src/core/ngx_open_file_cache.h --- a/src/core/ngx_open_file_cache.h +++ b/src/core/ngx_open_file_cache.h @@ -32,6 +32,13 @@ typedef struct { ngx_uint_t min_uses; +#if (NGX_THREADS || NGX_COMPAT) + ngx_int_t (*thread_handler)(ngx_thread_task_t *task, + ngx_file_t *file); + void *thread_ctx; + ngx_thread_task_t *thread_task; +#endif + #if (NGX_HAVE_OPENAT) size_t disable_symlinks_from; unsigned disable_symlinks:2; diff --git 
a/src/os/unix/ngx_files.c b/src/os/unix/ngx_files.c --- a/src/os/unix/ngx_files.c +++ b/src/os/unix/ngx_files.c @@ -11,6 +11,7 @@ #if (NGX_THREADS) #include +static void ngx_thread_open_handler(void *data, ngx_log_t *log); static void ngx_thread_read_handler(void *data, ngx_log_t *log); static void ngx_thread_write_chain_to_file_handler(void *data, ngx_log_t *log); #endif @@ -77,20 +78,112 @@ ngx_read_file(ngx_file_t *file, u_char * #if (NGX_THREADS) +typedef enum { + NGX_THREAD_FILE_OPEN = 1, + NGX_THREAD_FILE_READ, + NGX_THREAD_FILE_WRITE +} ngx_thread_file_op_e; + + typedef struct { ngx_fd_t fd; - ngx_uint_t write; /* unsigned write:1; */ + u_char *name; + ngx_uint_t op; /* ngx_thread_file_op_e */ u_char *buf; size_t size; ngx_chain_t *chain; off_t offset; + ngx_int_t mode; + ngx_int_t create; + ngx_int_t access; + size_t nbytes; ngx_err_t err; } ngx_thread_file_ctx_t; +ngx_int_t +ngx_thread_open(ngx_file_t *file, u_char *name, ngx_int_t mode, + ngx_int_t create, ngx_int_t access, ngx_pool_t *pool) +{ + ngx_thread_task_t *task; + ngx_thread_file_ctx_t *ctx; + + ngx_log_debug1(NGX_LOG_DEBUG_CORE, file->log, 0, + "thread open: \"%s\"", name); + + task = file->thread_task; + + if (task == NULL) { + task = ngx_thread_task_alloc(pool, sizeof(ngx_thread_file_ctx_t)); + if (task == NULL) { + return NGX_ERROR; + } + + file->thread_task = task; + } + + ctx = task->ctx; + + if (task->event.complete) { + task->event.complete = 0; + + if (ctx->op != NGX_THREAD_FILE_OPEN) { + ngx_log_error(NGX_LOG_ALERT, file->log, 0, + "invalid thread operation, open expected"); + return NGX_ERROR; + } + + if (ctx->err) { + file->err = ctx->err; + return NGX_ERROR; + } + + file->fd = ctx->fd; + + return NGX_OK; + } + + task->handler = ngx_thread_open_handler; + + ctx->op = NGX_THREAD_FILE_OPEN; + + ctx->name = name; + ctx->mode = mode; + ctx->create = create; + ctx->access = access; + + if (file->thread_handler(task, file) != NGX_OK) { + return NGX_ERROR; + } + + return NGX_AGAIN; +} + + 
+static void +ngx_thread_open_handler(void *data, ngx_log_t *log) +{ + ngx_thread_file_ctx_t *ctx = data; + + ngx_fd_t fd; + + ngx_log_debug0(NGX_LOG_DEBUG_CORE, log, 0, "thread open handler"); + + fd = ngx_open_file(ctx->name, ctx->mode, ctx->create, ctx->access); + + if (fd == NGX_INVALID_FILE) { + ctx->err = ngx_errno; + + } else { + ctx->fd = fd; + ctx->err = 0; + } +} + + ssize_t ngx_thread_read(ngx_file_t *file, u_char *buf, size_t size, off_t offset, ngx_pool_t *pool) @@ -118,9 +211,9 @@ ngx_thread_read(ngx_file_t *file, u_char if (task->event.complete) { task->event.complete = 0; - if (ctx->write) { + if (ctx->op != NGX_THREAD_FILE_READ) { ngx_log_error(NGX_LOG_ALERT, file->log, 0, - "invalid thread call, read instead of write"); + "invalid thread operation, read expected"); return NGX_ERROR; } @@ -135,7 +228,7 @@ ngx_thread_read(ngx_file_t *file, u_char task->handler = ngx_thread_read_handler; - ctx->write = 0; + ctx->op = NGX_THREAD_FILE_READ; ctx->fd = file->fd; ctx->buf = buf; @@ -501,9 +594,9 @@ ngx_thread_write_chain_to_file(ngx_file_ if (task->event.complete) { task->event.complete = 0; - if (!ctx->write) { + if (ctx->op != NGX_THREAD_FILE_WRITE) { ngx_log_error(NGX_LOG_ALERT, file->log, 0, - "invalid thread call, write instead of read"); + "invalid thread operation, write expected"); return NGX_ERROR; } @@ -519,7 +612,7 @@ ngx_thread_write_chain_to_file(ngx_file_ task->handler = ngx_thread_write_chain_to_file_handler; - ctx->write = 1; + ctx->op = NGX_THREAD_FILE_WRITE; ctx->fd = file->fd; ctx->chain = cl; diff --git a/src/os/unix/ngx_files.h b/src/os/unix/ngx_files.h --- a/src/os/unix/ngx_files.h +++ b/src/os/unix/ngx_files.h @@ -386,6 +386,8 @@ extern ngx_uint_t ngx_file_aio; #endif #if (NGX_THREADS) +ngx_int_t ngx_thread_open(ngx_file_t *file, u_char *name, + ngx_int_t mode, ngx_int_t create, ngx_int_t access, ngx_pool_t *pool); ssize_t ngx_thread_read(ngx_file_t *file, u_char *buf, size_t size, off_t offset, ngx_pool_t *pool); ssize_t 
ngx_thread_write_chain_to_file(ngx_file_t *file, ngx_chain_t *cl, -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1627978611 -10800 # Tue Aug 03 11:16:51 2021 +0300 # Node ID 70f0a417176574d8f4739bf960fdd0b873ccf048 # Parent f386594b298d1442e1bfe1add9cdf3c6cf26917b Static: threaded open support. diff --git a/src/http/modules/ngx_http_static_module.c b/src/http/modules/ngx_http_static_module.c --- a/src/http/modules/ngx_http_static_module.c +++ b/src/http/modules/ngx_http_static_module.c @@ -11,7 +11,14 @@ static ngx_int_t ngx_http_static_handler(ngx_http_request_t *r); +static ngx_int_t ngx_http_static_send(ngx_http_request_t *r); static ngx_int_t ngx_http_static_init(ngx_conf_t *cf); +#if (NGX_THREADS) +static void ngx_http_static_write_event_handler(ngx_http_request_t *r); +static ngx_int_t ngx_http_static_thread_handler(ngx_thread_task_t *task, + ngx_file_t *file); +static void ngx_http_static_thread_event_handler(ngx_event_t *ev); +#endif static ngx_http_module_t ngx_http_static_module_ctx = { @@ -48,15 +55,9 @@ ngx_module_t ngx_http_static_module = { static ngx_int_t ngx_http_static_handler(ngx_http_request_t *r) { - u_char *last, *location; - size_t root, len; - ngx_str_t path; + size_t root; + u_char *last; ngx_int_t rc; - ngx_uint_t level; - ngx_log_t *log; - ngx_buf_t *b; - ngx_chain_t out; - ngx_open_file_info_t of; ngx_http_core_loc_conf_t *clcf; if (!(r->method & (NGX_HTTP_GET|NGX_HTTP_HEAD|NGX_HTTP_POST))) { @@ -67,42 +68,81 @@ ngx_http_static_handler(ngx_http_request return NGX_DECLINED; } - log = r->connection->log; - /* * ngx_http_map_uri_to_path() allocates memory for terminating '\0' * so we do not need to reserve memory for '/' for possible redirect */ - last = ngx_http_map_uri_to_path(r, &path, &root, 0); + last = ngx_http_map_uri_to_path(r, &r->open_file_name, &root, 0); if (last == NULL) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } - path.len = last - path.data; + r->open_file_name.len = last - 
r->open_file_name.data; - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, - "http filename: \"%s\"", path.data); + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http filename: \"%s\"", r->open_file_name.data); clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); - ngx_memzero(&of, sizeof(ngx_open_file_info_t)); + ngx_memzero(&r->open_file_info, sizeof(ngx_open_file_info_t)); - of.read_ahead = clcf->read_ahead; - of.directio = clcf->directio; - of.valid = clcf->open_file_cache_valid; - of.min_uses = clcf->open_file_cache_min_uses; - of.errors = clcf->open_file_cache_errors; - of.events = clcf->open_file_cache_events; +#if (NGX_THREADS) + if (clcf->aio == NGX_HTTP_AIO_THREADS && clcf->aio_open) { + r->open_file_info.thread_handler = ngx_http_static_thread_handler; + r->open_file_info.thread_ctx = r; + } +#endif - if (ngx_http_set_disable_symlinks(r, clcf, &path, &of) != NGX_OK) { + if (ngx_http_set_open_file(r, clcf, &r->open_file_name, &r->open_file_info) + != NGX_OK) + { return NGX_HTTP_INTERNAL_SERVER_ERROR; } - if (ngx_open_cached_file(clcf->open_file_cache, &path, &of, r->pool) - != NGX_OK) - { - switch (of.err) { + rc = ngx_http_static_send(r); + +#if (NGX_THREADS) + if (rc == NGX_DONE) { + r->main->count++; + r->write_event_handler = ngx_http_static_write_event_handler; + } +#endif + + return rc; +} + + +static ngx_int_t +ngx_http_static_send(ngx_http_request_t *r) +{ + u_char *location, *last; + size_t len; + ngx_log_t *log; + ngx_int_t rc; + ngx_uint_t level; + ngx_buf_t *b; + ngx_chain_t out; + ngx_http_core_loc_conf_t *clcf; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http static send: \"%s\"", r->open_file_name.data); + + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + + log = r->connection->log; + + rc = ngx_open_cached_file(clcf->open_file_cache, &r->open_file_name, + &r->open_file_info, r->pool); + +#if (NGX_THREADS) + if (rc == NGX_AGAIN) { + return NGX_DONE; + } +#endif + + if (rc != NGX_OK) { + 
switch (r->open_file_info.err) { case 0: return NGX_HTTP_INTERNAL_SERVER_ERROR; @@ -133,8 +173,9 @@ ngx_http_static_handler(ngx_http_request } if (rc != NGX_HTTP_NOT_FOUND || clcf->log_not_found) { - ngx_log_error(level, log, of.err, - "%s \"%s\" failed", of.failed, path.data); + ngx_log_error(level, log, r->open_file_info.err, + "%s \"%s\" failed", r->open_file_info.failed, + r->open_file_name.data); } return rc; @@ -142,9 +183,10 @@ ngx_http_static_handler(ngx_http_request r->root_tested = !r->error_page; - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "http static fd: %d", of.fd); + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "http static fd: %d", + r->open_file_info.fd); - if (of.is_dir) { + if (r->open_file_info.is_dir) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, log, 0, "http dir"); @@ -157,10 +199,10 @@ ngx_http_static_handler(ngx_http_request len = r->uri.len + 1; - if (!clcf->alias && r->args.len == 0) { - location = path.data + root; + if (!clcf->alias && clcf->root_lengths == NULL && r->args.len == 0) { + location = r->open_file_name.data + clcf->root.len; - *last = '/'; + r->open_file_name.data[r->open_file_name.len] = '/'; } else { if (r->args.len) { @@ -193,9 +235,9 @@ ngx_http_static_handler(ngx_http_request #if !(NGX_WIN32) /* the not regular files are probably Unix specific */ - if (!of.is_file) { + if (!r->open_file_info.is_file) { ngx_log_error(NGX_LOG_CRIT, log, 0, - "\"%s\" is not a regular file", path.data); + "\"%s\" is not a regular file", r->open_file_name.data); return NGX_HTTP_NOT_FOUND; } @@ -215,8 +257,8 @@ ngx_http_static_handler(ngx_http_request log->action = "sending response to client"; r->headers_out.status = NGX_HTTP_OK; - r->headers_out.content_length_n = of.size; - r->headers_out.last_modified_time = of.mtime; + r->headers_out.content_length_n = r->open_file_info.size; + r->headers_out.last_modified_time = r->open_file_info.mtime; if (ngx_http_set_etag(r) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; @@ -226,7 +268,7 @@ 
ngx_http_static_handler(ngx_http_request return NGX_HTTP_INTERNAL_SERVER_ERROR; } - if (r != r->main && of.size == 0) { + if (r != r->main && r->open_file_info.size == 0) { return ngx_http_send_header(r); } @@ -251,16 +293,16 @@ ngx_http_static_handler(ngx_http_request } b->file_pos = 0; - b->file_last = of.size; + b->file_last = r->open_file_info.size; b->in_file = b->file_last ? 1: 0; b->last_buf = (r == r->main) ? 1: 0; b->last_in_chain = 1; - b->file->fd = of.fd; - b->file->name = path; + b->file->fd = r->open_file_info.fd; + b->file->name = r->open_file_name; b->file->log = log; - b->file->directio = of.is_directio; + b->file->directio = r->open_file_info.is_directio; out.buf = b; out.next = NULL; @@ -269,6 +311,94 @@ ngx_http_static_handler(ngx_http_request } +#if (NGX_THREADS) + +static void +ngx_http_static_write_event_handler(ngx_http_request_t *r) +{ + ngx_int_t rc; + + if (r->aio) { + return; + } + + rc = ngx_http_static_send(r); + + if (rc != NGX_DONE) { + ngx_http_finalize_request(r, rc); + } +} + + +static ngx_int_t +ngx_http_static_thread_handler(ngx_thread_task_t *task, ngx_file_t *file) +{ + ngx_str_t name; + ngx_thread_pool_t *tp; + ngx_http_request_t *r; + ngx_http_core_loc_conf_t *clcf; + + r = file->thread_ctx; + + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + + tp = clcf->thread_pool; + + if (tp == NULL) { + if (ngx_http_complex_value(r, clcf->thread_pool_value, &name) + != NGX_OK) + { + return NGX_ERROR; + } + + tp = ngx_thread_pool_get((ngx_cycle_t *) ngx_cycle, &name); + + if (tp == NULL) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "thread pool \"%V\" not found", &name); + return NGX_ERROR; + } + } + + task->event.data = r; + task->event.handler = ngx_http_static_thread_event_handler; + + if (ngx_thread_task_post(tp, task) != NGX_OK) { + return NGX_ERROR; + } + + r->main->blocked++; + r->aio = 1; + + return NGX_OK; +} + + +static void +ngx_http_static_thread_event_handler(ngx_event_t *ev) +{ + ngx_connection_t 
*c; + ngx_http_request_t *r; + + r = ev->data; + c = r->connection; + + ngx_http_set_log_request(c->log, r); + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, + "http static thread: \"%V?%V\"", &r->uri, &r->args); + + r->main->blocked--; + r->aio = 0; + + r->write_event_handler(r); + + ngx_http_run_posted_requests(c); +} + +#endif + + static ngx_int_t ngx_http_static_init(ngx_conf_t *cf) { diff --git a/src/http/ngx_http_core_module.c b/src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c +++ b/src/http/ngx_http_core_module.c @@ -423,6 +423,13 @@ static ngx_command_t ngx_http_core_comm offsetof(ngx_http_core_loc_conf_t, aio_write), NULL }, + { ngx_string("aio_open"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_core_loc_conf_t, aio_open), + NULL }, + { ngx_string("read_ahead"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, ngx_conf_set_size_slot, @@ -2649,6 +2656,21 @@ ngx_http_cleanup_add(ngx_http_request_t ngx_int_t +ngx_http_set_open_file(ngx_http_request_t *r, + ngx_http_core_loc_conf_t *clcf, ngx_str_t *path, ngx_open_file_info_t *of) +{ + of->read_ahead = clcf->read_ahead; + of->directio = clcf->directio; + of->valid = clcf->open_file_cache_valid; + of->min_uses = clcf->open_file_cache_min_uses; + of->errors = clcf->open_file_cache_errors; + of->events = clcf->open_file_cache_events; + + return ngx_http_set_disable_symlinks(r, clcf, path, of); +} + + +ngx_int_t ngx_http_set_disable_symlinks(ngx_http_request_t *r, ngx_http_core_loc_conf_t *clcf, ngx_str_t *path, ngx_open_file_info_t *of) { @@ -3488,6 +3510,7 @@ ngx_http_core_create_loc_conf(ngx_conf_t clcf->subrequest_output_buffer_size = NGX_CONF_UNSET_SIZE; clcf->aio = NGX_CONF_UNSET; clcf->aio_write = NGX_CONF_UNSET; + clcf->aio_open = NGX_CONF_UNSET; #if (NGX_THREADS) clcf->thread_pool = NGX_CONF_UNSET_PTR; clcf->thread_pool_value = NGX_CONF_UNSET_PTR; @@ -3712,6 
+3735,7 @@ ngx_http_core_merge_loc_conf(ngx_conf_t (size_t) ngx_pagesize); ngx_conf_merge_value(conf->aio, prev->aio, NGX_HTTP_AIO_OFF); ngx_conf_merge_value(conf->aio_write, prev->aio_write, 0); + ngx_conf_merge_value(conf->aio_open, prev->aio_open, 0); #if (NGX_THREADS) ngx_conf_merge_ptr_value(conf->thread_pool, prev->thread_pool, NULL); ngx_conf_merge_ptr_value(conf->thread_pool_value, prev->thread_pool_value, diff --git a/src/http/ngx_http_core_module.h b/src/http/ngx_http_core_module.h --- a/src/http/ngx_http_core_module.h +++ b/src/http/ngx_http_core_module.h @@ -383,6 +383,7 @@ struct ngx_http_core_loc_conf_s { ngx_flag_t sendfile; /* sendfile */ ngx_flag_t aio; /* aio */ ngx_flag_t aio_write; /* aio_write */ + ngx_flag_t aio_open; /* aio_open */ ngx_flag_t tcp_nopush; /* tcp_nopush */ ngx_flag_t tcp_nodelay; /* tcp_nodelay */ ngx_flag_t reset_timedout_connection; /* reset_timedout_connection */ @@ -523,6 +524,8 @@ ngx_int_t ngx_http_request_body_save_fil ngx_chain_t *chain); +ngx_int_t ngx_http_set_open_file(ngx_http_request_t *r, + ngx_http_core_loc_conf_t *clcf, ngx_str_t *path, ngx_open_file_info_t *of); ngx_int_t ngx_http_set_disable_symlinks(ngx_http_request_t *r, ngx_http_core_loc_conf_t *clcf, ngx_str_t *path, ngx_open_file_info_t *of); diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h --- a/src/http/ngx_http_request.h +++ b/src/http/ngx_http_request.h @@ -427,6 +427,9 @@ struct ngx_http_request_s { ngx_http_variable_value_t *variables; + ngx_str_t open_file_name; + ngx_open_file_info_t open_file_info; + #if (NGX_PCRE) ngx_uint_t ncaptures; int *captures; -------------- next part -------------- # HG changeset patch # User Roman Arutyunyan # Date 1627978716 -10800 # Tue Aug 03 11:18:36 2021 +0300 # Node ID b506b57df4a716b253cd7e5e701fd34b67f6850c # Parent 70f0a417176574d8f4739bf960fdd0b873ccf048 Cache: threaded open support. 
diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c +++ b/src/http/ngx_http_file_cache.c @@ -18,6 +18,8 @@ static void ngx_http_file_cache_lock_wai ngx_http_cache_t *c); static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r, ngx_http_cache_t *c); +static ngx_int_t ngx_http_file_cache_aio_open(ngx_http_request_t *r, + ngx_http_cache_t *c); static ssize_t ngx_http_file_cache_aio_read(ngx_http_request_t *r, ngx_http_cache_t *c); #if (NGX_HAVE_FILE_AIO) @@ -264,13 +266,11 @@ ngx_http_file_cache_create_key(ngx_http_ ngx_int_t ngx_http_file_cache_open(ngx_http_request_t *r) { - ngx_int_t rc, rv; - ngx_uint_t test; - ngx_http_cache_t *c; - ngx_pool_cleanup_t *cln; - ngx_open_file_info_t of; - ngx_http_file_cache_t *cache; - ngx_http_core_loc_conf_t *clcf; + ngx_int_t rc; + ngx_uint_t test; + ngx_http_cache_t *c; + ngx_pool_cleanup_t *cln; + ngx_http_file_cache_t *cache; c = r->cache; @@ -317,7 +317,6 @@ ngx_http_file_cache_open(ngx_http_reques c->temp_file = 1; test = c->exists ? 
1 : 0; - rv = NGX_DECLINED; } else { /* rc == NGX_DECLINED */ @@ -329,11 +328,10 @@ ngx_http_file_cache_open(ngx_http_reques return NGX_HTTP_CACHE_SCARCE; } - rv = NGX_HTTP_CACHE_SCARCE; + c->temp_file = 0; } else { c->temp_file = 1; - rv = NGX_DECLINED; } } @@ -342,62 +340,10 @@ ngx_http_file_cache_open(ngx_http_reques } if (!test) { - goto done; - } - - clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); - - ngx_memzero(&of, sizeof(ngx_open_file_info_t)); - - of.uniq = c->uniq; - of.valid = clcf->open_file_cache_valid; - of.min_uses = clcf->open_file_cache_min_uses; - of.events = clcf->open_file_cache_events; - of.directio = NGX_OPEN_FILE_DIRECTIO_OFF; - of.read_ahead = clcf->read_ahead; - - if (ngx_open_cached_file(clcf->open_file_cache, &c->file.name, &of, r->pool) - != NGX_OK) - { - switch (of.err) { - - case 0: - return NGX_ERROR; - - case NGX_ENOENT: - case NGX_ENOTDIR: - goto done; - - default: - ngx_log_error(NGX_LOG_CRIT, r->connection->log, of.err, - ngx_open_file_n " \"%s\" failed", c->file.name.data); - return NGX_ERROR; - } - } - - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "http file cache fd: %d", of.fd); - - c->file.fd = of.fd; - c->file.log = r->connection->log; - c->uniq = of.uniq; - c->length = of.size; - c->fs_size = (of.fs_size + cache->bsize - 1) / cache->bsize; - - c->buf = ngx_create_temp_buf(r->pool, c->body_start); - if (c->buf == NULL) { - return NGX_ERROR; + return ngx_http_file_cache_lock(r, c); } return ngx_http_file_cache_read(r, c); - -done: - - if (rv == NGX_DECLINED) { - return ngx_http_file_cache_lock(r, c); - } - - return rv; } @@ -407,6 +353,10 @@ ngx_http_file_cache_lock(ngx_http_reques ngx_msec_t now, timer; ngx_http_file_cache_t *cache; + if (!c->temp_file) { + return NGX_HTTP_CACHE_SCARCE; + } + if (!c->lock) { return NGX_DECLINED; } @@ -536,6 +486,12 @@ ngx_http_file_cache_read(ngx_http_reques ngx_http_file_cache_t *cache; ngx_http_file_cache_header_t *h; + rc = ngx_http_file_cache_aio_open(r, 
c); + + if (rc != NGX_OK) { + return rc; + } + n = ngx_http_file_cache_aio_read(r, c); if (n < 0) { @@ -665,6 +621,89 @@ ngx_http_file_cache_read(ngx_http_reques } +static ngx_int_t +ngx_http_file_cache_aio_open(ngx_http_request_t *r, ngx_http_cache_t *c) +{ + ngx_int_t rc; + ngx_http_file_cache_t *cache; + ngx_http_core_loc_conf_t *clcf; + + if (c->file.fd != NGX_INVALID_FILE) { + return NGX_OK; + } + + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + + if (!c->reading) { + ngx_memzero(&r->open_file_info, sizeof(ngx_open_file_info_t)); + + r->open_file_info.uniq = c->uniq; + r->open_file_info.valid = clcf->open_file_cache_valid; + r->open_file_info.min_uses = clcf->open_file_cache_min_uses; + r->open_file_info.events = clcf->open_file_cache_events; + r->open_file_info.directio = NGX_OPEN_FILE_DIRECTIO_OFF; + r->open_file_info.read_ahead = clcf->read_ahead; + +#if (NGX_THREADS) + if (clcf->aio == NGX_HTTP_AIO_THREADS && clcf->aio_open) { + r->open_file_info.thread_task = c->thread_task; + r->open_file_info.thread_handler = ngx_http_cache_thread_handler; + r->open_file_info.thread_ctx = r; + } +#endif + } + + rc = ngx_open_cached_file(clcf->open_file_cache, &c->file.name, + &r->open_file_info, r->pool); + +#if (NGX_THREADS) + + if (rc == NGX_AGAIN) { + c->reading = 1; + return NGX_AGAIN; + } + + c->reading = 0; + +#endif + + if (rc != NGX_OK) { + switch (r->open_file_info.err) { + + case NGX_OK: + return NGX_ERROR; + + case NGX_ENOENT: + case NGX_ENOTDIR: + return ngx_http_file_cache_lock(r, c); + + default: + ngx_log_error(NGX_LOG_CRIT, r->connection->log, + r->open_file_info.err, + ngx_open_file_n " \"%s\" failed", c->file.name.data); + return NGX_ERROR; + } + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http file cache fd: %d", r->open_file_info.fd); + + cache = c->file_cache; + c->file.fd = r->open_file_info.fd; + c->file.log = r->connection->log; + c->uniq = r->open_file_info.uniq; + c->length = r->open_file_info.size; + 
c->fs_size = (r->open_file_info.fs_size + cache->bsize - 1) / cache->bsize; + + c->buf = ngx_create_temp_buf(r->pool, c->body_start); + if (c->buf == NULL) { + return NGX_ERROR; + } + + return NGX_OK; +} + + static ssize_t ngx_http_file_cache_aio_read(ngx_http_request_t *r, ngx_http_cache_t *c) { @@ -1231,6 +1270,7 @@ ngx_http_file_cache_reopen(ngx_http_requ ngx_shmtx_unlock(&cache->shpool->mutex); c->secondary = 1; + c->file.fd = NGX_INVALID_FILE; c->file.name.len = 0; c->body_start = c->buffer_size; From mdounin at mdounin.ru Tue Aug 3 17:51:45 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 03 Aug 2021 17:51:45 +0000 Subject: [nginx] Version bump. Message-ID: details: https://hg.nginx.org/nginx/rev/1563bbcdb90c branches: changeset: 7890:1563bbcdb90c user: Maxim Dounin date: Tue Aug 03 20:50:08 2021 +0300 description: Version bump. diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r 91f96416f459 -r 1563bbcdb90c src/core/nginx.h --- a/src/core/nginx.h Tue Jul 06 17:59:17 2021 +0300 +++ b/src/core/nginx.h Tue Aug 03 20:50:08 2021 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1021001 -#define NGINX_VERSION "1.21.1" +#define nginx_version 1021002 +#define NGINX_VERSION "1.21.2" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From mdounin at mdounin.ru Tue Aug 3 17:51:48 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 03 Aug 2021 17:51:48 +0000 Subject: [nginx] SSL: set events ready flags after handshake. Message-ID: details: https://hg.nginx.org/nginx/rev/573bd30e46b4 branches: changeset: 7891:573bd30e46b4 user: Maxim Dounin date: Tue Aug 03 20:50:30 2021 +0300 description: SSL: set events ready flags after handshake. The c->read->ready and c->write->ready flags might be reset during the handshake, and not set again if the handshake was finished on the other event. 
At the same time, some data might be read from the socket during the handshake, so missing c->read->ready flag might result in a connection hang, for example, when waiting for an SMTP greeting (which was already received during the handshake). Found by Sergey Kandaurov. diffstat: src/event/ngx_event_openssl.c | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diffs (23 lines): diff -r 1563bbcdb90c -r 573bd30e46b4 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 03 20:50:08 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 03 20:50:30 2021 +0300 @@ -1740,6 +1740,9 @@ ngx_ssl_handshake(ngx_connection_t *c) c->recv_chain = ngx_ssl_recv_chain; c->send_chain = ngx_ssl_send_chain; + c->read->ready = 1; + c->write->ready = 1; + #ifndef SSL_OP_NO_RENEGOTIATION #if OPENSSL_VERSION_NUMBER < 0x10100000L #ifdef SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS @@ -1885,6 +1888,9 @@ ngx_ssl_try_early_data(ngx_connection_t c->recv_chain = ngx_ssl_recv_chain; c->send_chain = ngx_ssl_send_chain; + c->read->ready = 1; + c->write->ready = 1; + rc = ngx_ssl_ocsp_validate(c); if (rc == NGX_ERROR) { From pluknet at nginx.com Wed Aug 4 18:54:23 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 04 Aug 2021 18:54:23 +0000 Subject: [nginx] SSL: SSL_CTX_set_tmp_dh() error handling. Message-ID: details: https://hg.nginx.org/nginx/rev/34a3a1a2d197 branches: changeset: 7892:34a3a1a2d197 user: Sergey Kandaurov date: Wed Aug 04 21:27:51 2021 +0300 description: SSL: SSL_CTX_set_tmp_dh() error handling. For example, it can fail due to weak DH parameters. 
diffstat: src/event/ngx_event_openssl.c | 8 +++++++- 1 files changed, 7 insertions(+), 1 deletions(-) diffs (18 lines): diff -r 573bd30e46b4 -r 34a3a1a2d197 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 03 20:50:30 2021 +0300 +++ b/src/event/ngx_event_openssl.c Wed Aug 04 21:27:51 2021 +0300 @@ -1376,7 +1376,13 @@ ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_ return NGX_ERROR; } - SSL_CTX_set_tmp_dh(ssl->ctx, dh); + if (SSL_CTX_set_tmp_dh(ssl->ctx, dh) != 1) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_set_tmp_dh(\"%s\") failed", file->data); + DH_free(dh); + BIO_free(bio); + return NGX_ERROR; + } DH_free(dh); BIO_free(bio); From pluknet at nginx.com Mon Aug 9 15:59:14 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 09 Aug 2021 15:59:14 +0000 Subject: [nginx] Disabled HTTP/1.0 requests with Transfer-Encoding. Message-ID: details: https://hg.nginx.org/nginx/rev/7a6afd584eb4 branches: changeset: 7893:7a6afd584eb4 user: Sergey Kandaurov date: Mon Aug 09 18:12:12 2021 +0300 description: Disabled HTTP/1.0 requests with Transfer-Encoding. The latest HTTP/1.1 draft describes Transfer-Encoding in HTTP/1.0 as having potentially faulty message framing as that could have been forwarded without handling of the chunked encoding, and forbids processing subsequest requests over that connection: https://github.com/httpwg/http-core/issues/879. While handling of such requests is permitted, the most secure approach seems to reject them. 
diffstat: src/http/ngx_http_request.c | 8 ++++++++ 1 files changed, 8 insertions(+), 0 deletions(-) diffs (18 lines): diff -r 34a3a1a2d197 -r 7a6afd584eb4 src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Wed Aug 04 21:27:51 2021 +0300 +++ b/src/http/ngx_http_request.c Mon Aug 09 18:12:12 2021 +0300 @@ -1983,6 +1983,14 @@ ngx_http_process_request_header(ngx_http } if (r->headers_in.transfer_encoding) { + if (r->http_version < NGX_HTTP_VERSION_11) { + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "client sent HTTP/1.0 request with " + "\"Transfer-Encoding\" header"); + ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); + return NGX_ERROR; + } + if (r->headers_in.transfer_encoding->value.len == 7 && ngx_strncasecmp(r->headers_in.transfer_encoding->value.data, (u_char *) "chunked", 7) == 0) From dnj0496 at gmail.com Mon Aug 9 18:48:35 2021 From: dnj0496 at gmail.com (Dk Jack) Date: Mon, 9 Aug 2021 11:48:35 -0700 Subject: request body filter Message-ID: Hi, In my module, I am inspecting the request body. My body filter init code is shown below: int module_body_filter_init(ngx_conf_t *cf) { ngx_http_next_body_filter = ngx_http_top_request_body_filter; ngx_http_top_request_body_filter = nginx_module_inspect_body_filter; return NGX_OK; } Even though I have the above initialization, I do not want to inspect the body for all requests. I figured the body filter would be invoked in the content phase. Hence, I registered a handler in the NGX_HTTP_ACCESS_PHASE to create my module context and set the flag to inspect the request body. This works in some cases but is inconsistent. It looks like my inspect_body_filter function is getting called before my access handler function in some cases. I've now moved my handler to POST_READ_PHASE to get consistent results. Is this the correct way to solve this problem? Are there any other ways to accomplish this or are there any pitfalls to this approach. Any help is appreciated. Thanks. Regards, Dk. 
-------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Tue Aug 10 13:19:29 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Aug 2021 16:19:29 +0300 Subject: request body filter In-Reply-To: References: Message-ID: Hello! On Mon, Aug 09, 2021 at 11:48:35AM -0700, Dk Jack wrote: > Hi, > In my module, I am inspecting the request body. My body filter init code is > shown below: > > int > module_body_filter_init(ngx_conf_t *cf) > { > ngx_http_next_body_filter = ngx_http_top_request_body_filter; > ngx_http_top_request_body_filter = nginx_module_inspect_body_filter; > > return NGX_OK; > } > > Even though I have the above initialization, I do not want to inspect the > body for all requests. I figured the body filter would be invoked in the > content phase. Hence, I registered a handler in the NGX_HTTP_ACCESS_PHASE > to create my module context and set the flag to inspect the request body. > This works in some cases but is inconsistent. It looks like my > inspect_body_filter function is getting called before my access handler > function in some cases. I've now moved my handler to POST_READ_PHASE to get > consistent results. Is this the correct way to solve this problem? Are > there any other ways to accomplish this or are there any pitfalls to this > approach. Any help is appreciated. Thanks. Request body filters are called when the request body reading happens. This can happen at any phase, especially when using other 3rd party modules, and it is generally incorrect to assume that some phase handler is called before reading the request body. Note well that module contexts are cleared on internal redirections, and this might also be a problem for your approach. A better approach would be to depend on the location configuration instead. And do appropriate checks in the request body filter itself if some run-time logic is needed. 
-- Maxim Dounin http://mdounin.ru/ From dnj0496 at gmail.com Tue Aug 10 17:04:20 2021 From: dnj0496 at gmail.com (Dk Jack) Date: Tue, 10 Aug 2021 10:04:20 -0700 Subject: request body filter In-Reply-To: References: Message-ID: Hello Maxim, Thanks for responding. My configuration to determine if I should inspect the body or not is not part of the location config since it's dynamic and I am receiving this config from an external app. The configuration is updated asynchronously. My module matches the requests against this external information and then enables the body filter. Not sure if I can use the location configuration as you suggested. Please suggest any modules using location config and doing something similar to my module. Thanks. Regards, Dk. On Tue, Aug 10, 2021 at 6:19 AM Maxim Dounin wrote: > Hello! > > On Mon, Aug 09, 2021 at 11:48:35AM -0700, Dk Jack wrote: > > > Hi, > > In my module, I am inspecting the request body. My body filter init code > is > > shown below: > > > > int > > module_body_filter_init(ngx_conf_t *cf) > > { > > ngx_http_next_body_filter = ngx_http_top_request_body_filter; > > ngx_http_top_request_body_filter = nginx_module_inspect_body_filter; > > > > return NGX_OK; > > } > > > > Even though I have the above initialization, I do not want to inspect the > > body for all requests. I figured the body filter would be invoked in the > > content phase. Hence, I registered a handler in the NGX_HTTP_ACCESS_PHASE > > to create my module context and set the flag to inspect the request body. > > This works in some cases but is inconsistent. It looks like my > > inspect_body_filter function is getting called before my access handler > > function in some cases. I've now moved my handler to POST_READ_PHASE to > get > > consistent results. Is this the correct way to solve this problem? Are > > there any other ways to accomplish this or are there any pitfalls to this > > approach. Any help is appreciated. Thanks. 
> > Request body filters are called when the request body reading > happens. This can happen at any phase, especially when using > other 3rd party modules, and it is generally incorrect to assume > that some phase handler is called before reading the request body. > > Note well that module contexts are cleared on internal > redirections, and this might also be a problem for your approach. > > A better approach would be to depend on the location configuration > instead. And do appropriate checks in the request body filter > itself if some run-time logic is needed. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Tue Aug 10 21:46:24 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Aug 2021 21:46:24 +0000 Subject: [nginx] SSL: RSA data type is deprecated in OpenSSL 3.0. Message-ID: details: https://hg.nginx.org/nginx/rev/37be19a3c0ee branches: changeset: 7894:37be19a3c0ee user: Sergey Kandaurov date: Tue Aug 10 23:42:59 2021 +0300 description: SSL: RSA data type is deprecated in OpenSSL 3.0. The only consumer is a callback function for SSL_CTX_set_tmp_rsa_callback() deprecated in OpenSSL 1.1.0. Now the function is conditionally compiled too. 
diffstat: src/event/ngx_event_openssl.c | 6 +++++- src/event/ngx_event_openssl.h | 2 ++ 2 files changed, 7 insertions(+), 1 deletions(-) diffs (44 lines): diff -r 7a6afd584eb4 -r 37be19a3c0ee src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Mon Aug 09 18:12:12 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:42:59 2021 +0300 @@ -1116,6 +1116,8 @@ ngx_ssl_info_callback(const ngx_ssl_conn } +#if (OPENSSL_VERSION_NUMBER < 0x10100001L && !defined LIBRESSL_VERSION_NUMBER) + RSA * ngx_ssl_rsa512_key_callback(ngx_ssl_conn_t *ssl_conn, int is_export, int key_length) @@ -1126,7 +1128,7 @@ ngx_ssl_rsa512_key_callback(ngx_ssl_conn return NULL; } -#if (OPENSSL_VERSION_NUMBER < 0x10100003L && !defined OPENSSL_NO_DEPRECATED) +#ifndef OPENSSL_NO_DEPRECATED if (key == NULL) { key = RSA_generate_key(512, RSA_F4, NULL, NULL); @@ -1137,6 +1139,8 @@ ngx_ssl_rsa512_key_callback(ngx_ssl_conn return key; } +#endif + ngx_array_t * ngx_ssl_read_password_file(ngx_conf_t *cf, ngx_str_t *file) diff -r 7a6afd584eb4 -r 37be19a3c0ee src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Mon Aug 09 18:12:12 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Aug 10 23:42:59 2021 +0300 @@ -196,8 +196,10 @@ ngx_int_t ngx_ssl_ocsp_validate(ngx_conn ngx_int_t ngx_ssl_ocsp_get_status(ngx_connection_t *c, const char **s); void ngx_ssl_ocsp_cleanup(ngx_connection_t *c); ngx_int_t ngx_ssl_ocsp_cache_init(ngx_shm_zone_t *shm_zone, void *data); +#if (OPENSSL_VERSION_NUMBER < 0x10100001L && !defined LIBRESSL_VERSION_NUMBER) RSA *ngx_ssl_rsa512_key_callback(ngx_ssl_conn_t *ssl_conn, int is_export, int key_length); +#endif ngx_array_t *ngx_ssl_read_password_file(ngx_conf_t *cf, ngx_str_t *file); ngx_array_t *ngx_ssl_preserve_passwords(ngx_conf_t *cf, ngx_array_t *passwords); From pluknet at nginx.com Tue Aug 10 21:46:27 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Aug 2021 21:46:27 +0000 Subject: [nginx] SSL: SSL_get_peer_certificate() is deprecated 
in OpenSSL 3.0. Message-ID: details: https://hg.nginx.org/nginx/rev/8ebda26e4f98 branches: changeset: 7895:8ebda26e4f98 user: Sergey Kandaurov date: Tue Aug 10 23:43:16 2021 +0300 description: SSL: SSL_get_peer_certificate() is deprecated in OpenSSL 3.0. Switch to SSL_get1_peer_certificate() when building with OpenSSL 3.0 and OPENSSL_NO_DEPRECATED defined. diffstat: src/event/ngx_event_openssl.h | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (15 lines): diff -r 37be19a3c0ee -r 8ebda26e4f98 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Aug 10 23:42:59 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 @@ -64,6 +64,11 @@ #endif +#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && !defined SSL_get_peer_certificate) +#define SSL_get_peer_certificate(s) SSL_get1_peer_certificate(s) +#endif + + typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t; From pluknet at nginx.com Tue Aug 10 21:46:30 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Aug 2021 21:46:30 +0000 Subject: [nginx] SSL: using SSL_CTX_set0_tmp_dh_pkey() with OpenSSL 3.0 in dhparam. Message-ID: details: https://hg.nginx.org/nginx/rev/1e0fabbe01c7 branches: changeset: 7896:1e0fabbe01c7 user: Sergey Kandaurov date: Tue Aug 10 23:43:16 2021 +0300 description: SSL: using SSL_CTX_set0_tmp_dh_pkey() with OpenSSL 3.0 in dhparam. 
Using PEM_read_bio_DHparams() and SSL_CTX_set_tmp_dh() is deprecated as part of deprecating the low level DH functions in favor of EVP_PKEY: https://git.openssl.org/?p=openssl.git;a=commitdiff;h=163f6dc diffstat: src/event/ngx_event_openssl.c | 32 +++++++++++++++++++++++++++++++- 1 files changed, 31 insertions(+), 1 deletions(-) diffs (56 lines): diff -r 8ebda26e4f98 -r 1e0fabbe01c7 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 @@ -1354,7 +1354,6 @@ ngx_ssl_passwords_cleanup(void *data) ngx_int_t ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file) { - DH *dh; BIO *bio; if (file->len == 0) { @@ -1372,6 +1371,10 @@ ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_ return NGX_ERROR; } +#ifdef SSL_CTX_set_tmp_dh + { + DH *dh; + dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL); if (dh == NULL) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, @@ -1389,6 +1392,33 @@ ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_ } DH_free(dh); + } +#else + { + EVP_PKEY *dh; + + /* + * PEM_read_bio_DHparams() and SSL_CTX_set_tmp_dh() + * are deprecated in OpenSSL 3.0 + */ + + dh = PEM_read_bio_Parameters(bio, NULL); + if (dh == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "PEM_read_bio_Parameters(\"%s\") failed", file->data); + BIO_free(bio); + return NGX_ERROR; + } + + if (SSL_CTX_set0_tmp_dh_pkey(ssl->ctx, dh) != 1) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_set0_tmp_dh_pkey(\%s\") failed", file->data); + BIO_free(bio); + return NGX_ERROR; + } + } +#endif + BIO_free(bio); return NGX_OK; From pluknet at nginx.com Tue Aug 10 21:46:33 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Aug 2021 21:46:33 +0000 Subject: [nginx] SSL: ERR_peek_error_line_data() compatibility with OpenSSL 3.0. 
Message-ID: details: https://hg.nginx.org/nginx/rev/4195a6f0c61c branches: changeset: 7897:4195a6f0c61c user: Sergey Kandaurov date: Tue Aug 10 23:43:16 2021 +0300 description: SSL: ERR_peek_error_line_data() compatibility with OpenSSL 3.0. ERR_peek_error_line_data() was deprecated in favour of ERR_peek_error_all(). Here we use the ERR_peek_error_data() helper to pass only used arguments. diffstat: src/event/ngx_event_openssl.c | 2 +- src/event/ngx_event_openssl.h | 5 +++++ 2 files changed, 6 insertions(+), 1 deletions(-) diffs (27 lines): diff -r 1e0fabbe01c7 -r 4195a6f0c61c src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 @@ -3280,7 +3280,7 @@ ngx_ssl_error(ngx_uint_t level, ngx_log_ for ( ;; ) { - n = ERR_peek_error_line_data(NULL, NULL, &data, &flags); + n = ERR_peek_error_data(&data, &flags); if (n == 0) { break; diff -r 1e0fabbe01c7 -r 4195a6f0c61c src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 @@ -69,6 +69,11 @@ #endif +#if (OPENSSL_VERSION_NUMBER < 0x30000000L && !defined ERR_peek_error_data) +#define ERR_peek_error_data(d, f) ERR_peek_error_line_data(NULL, NULL, d, f) +#endif + + typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t; From pluknet at nginx.com Tue Aug 10 21:46:36 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Aug 2021 21:46:36 +0000 Subject: [nginx] SSL: silenced warnings when building with OpenSSL 3.0. Message-ID: details: https://hg.nginx.org/nginx/rev/8f7107617550 branches: changeset: 7898:8f7107617550 user: Sergey Kandaurov date: Tue Aug 10 23:43:16 2021 +0300 description: SSL: silenced warnings when building with OpenSSL 3.0. The OPENSSL_SUPPRESS_DEPRECATED macro is used to suppress deprecation warnings. This covers Session Tickets keys, SSL Engine, DH low level API for DHE ciphers. 
Unlike OPENSSL_API_COMPAT, it works well with OpenSSL built with no-deprecated. In particular, it doesn't unhide various macros in OpenSSL includes, which are meant to be hidden under OPENSSL_NO_DEPRECATED. diffstat: src/event/ngx_event_openssl.h | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diffs (12 lines): diff -r 4195a6f0c61c -r 8f7107617550 src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Aug 10 23:43:16 2021 +0300 @@ -12,6 +12,8 @@ #include #include +#define OPENSSL_SUPPRESS_DEPRECATED + #include #include #include From pluknet at nginx.com Tue Aug 10 21:46:39 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Aug 2021 21:46:39 +0000 Subject: [nginx] SSL: use of the SSL_OP_IGNORE_UNEXPECTED_EOF option. Message-ID: details: https://hg.nginx.org/nginx/rev/1a03af395f44 branches: changeset: 7899:1a03af395f44 user: Sergey Kandaurov date: Tue Aug 10 23:43:17 2021 +0300 description: SSL: use of the SSL_OP_IGNORE_UNEXPECTED_EOF option. A new behaviour was introduced in OpenSSL 1.1.1e, when a peer does not send close_notify before closing the connection. Previously, it was to return SSL_ERROR_SYSCALL with errno 0, known since at least OpenSSL 0.9.7, and is handled gracefully in nginx. Now it returns SSL_ERROR_SSL with a distinct reason SSL_R_UNEXPECTED_EOF_WHILE_READING ("unexpected eof while reading"). This leads to critical errors seen in nginx within various routines such as SSL_do_handshake(), SSL_read(), SSL_shutdown(). The behaviour was restored in OpenSSL 1.1.1f, but presents in OpenSSL 3.0 by default. 
Use of the SSL_OP_IGNORE_UNEXPECTED_EOF option added in OpenSSL 3.0 allows to set a compatible behaviour to return SSL_ERROR_ZERO_RETURN: https://git.openssl.org/?p=openssl.git;a=commitdiff;h=09b90e0 See for additional details: https://github.com/openssl/openssl/issues/11381 diffstat: src/event/ngx_event_openssl.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diffs (14 lines): diff -r 8f7107617550 -r 1a03af395f44 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 10 23:43:16 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:43:17 2021 +0300 @@ -378,6 +378,10 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_ SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_CLIENT_RENEGOTIATION); #endif +#ifdef SSL_OP_IGNORE_UNEXPECTED_EOF + SSL_CTX_set_options(ssl->ctx, SSL_OP_IGNORE_UNEXPECTED_EOF); +#endif + #ifdef SSL_MODE_RELEASE_BUFFERS SSL_CTX_set_mode(ssl->ctx, SSL_MODE_RELEASE_BUFFERS); #endif From pluknet at nginx.com Tue Aug 10 21:46:42 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Aug 2021 21:46:42 +0000 Subject: [nginx] SSL: removed export ciphers support. Message-ID: details: https://hg.nginx.org/nginx/rev/509b663a789c branches: changeset: 7900:509b663a789c user: Sergey Kandaurov date: Tue Aug 10 23:43:17 2021 +0300 description: SSL: removed export ciphers support. Export ciphers are forbidden to negotiate in TLS 1.1 and later protocol modes. They are disabled since OpenSSL 1.0.2g by default unless explicitly configured with "enable-weak-ssl-ciphers", and completely removed in OpenSSL 1.1.0. 
diffstat: src/event/ngx_event_openssl.c | 31 ------------------------------- src/event/ngx_event_openssl.h | 5 ----- 2 files changed, 0 insertions(+), 36 deletions(-) diffs (70 lines): diff -r 1a03af395f44 -r 509b663a789c src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 10 23:43:17 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:43:17 2021 +0300 @@ -863,11 +863,6 @@ ngx_ssl_ciphers(ngx_conf_t *cf, ngx_ssl_ SSL_CTX_set_options(ssl->ctx, SSL_OP_CIPHER_SERVER_PREFERENCE); } -#if (OPENSSL_VERSION_NUMBER < 0x10100001L && !defined LIBRESSL_VERSION_NUMBER) - /* a temporary 512-bit RSA key is required for export versions of MSIE */ - SSL_CTX_set_tmp_rsa_callback(ssl->ctx, ngx_ssl_rsa512_key_callback); -#endif - return NGX_OK; } @@ -1120,32 +1115,6 @@ ngx_ssl_info_callback(const ngx_ssl_conn } -#if (OPENSSL_VERSION_NUMBER < 0x10100001L && !defined LIBRESSL_VERSION_NUMBER) - -RSA * -ngx_ssl_rsa512_key_callback(ngx_ssl_conn_t *ssl_conn, int is_export, - int key_length) -{ - static RSA *key; - - if (key_length != 512) { - return NULL; - } - -#ifndef OPENSSL_NO_DEPRECATED - - if (key == NULL) { - key = RSA_generate_key(512, RSA_F4, NULL, NULL); - } - -#endif - - return key; -} - -#endif - - ngx_array_t * ngx_ssl_read_password_file(ngx_conf_t *cf, ngx_str_t *file) { diff -r 1a03af395f44 -r 509b663a789c src/event/ngx_event_openssl.h --- a/src/event/ngx_event_openssl.h Tue Aug 10 23:43:17 2021 +0300 +++ b/src/event/ngx_event_openssl.h Tue Aug 10 23:43:17 2021 +0300 @@ -29,7 +29,6 @@ #include #endif #include -#include #include #include @@ -208,10 +207,6 @@ ngx_int_t ngx_ssl_ocsp_validate(ngx_conn ngx_int_t ngx_ssl_ocsp_get_status(ngx_connection_t *c, const char **s); void ngx_ssl_ocsp_cleanup(ngx_connection_t *c); ngx_int_t ngx_ssl_ocsp_cache_init(ngx_shm_zone_t *shm_zone, void *data); -#if (OPENSSL_VERSION_NUMBER < 0x10100001L && !defined LIBRESSL_VERSION_NUMBER) -RSA *ngx_ssl_rsa512_key_callback(ngx_ssl_conn_t *ssl_conn, int is_export, - int 
key_length); -#endif ngx_array_t *ngx_ssl_read_password_file(ngx_conf_t *cf, ngx_str_t *file); ngx_array_t *ngx_ssl_preserve_passwords(ngx_conf_t *cf, ngx_array_t *passwords); From pluknet at nginx.com Tue Aug 10 21:46:45 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Aug 2021 21:46:45 +0000 Subject: [nginx] SSL: removed use of the SSL_OP_MSIE_SSLV2_RSA_PADDING option. Message-ID: details: https://hg.nginx.org/nginx/rev/dda421871bc2 branches: changeset: 7901:dda421871bc2 user: Sergey Kandaurov date: Tue Aug 10 23:43:17 2021 +0300 description: SSL: removed use of the SSL_OP_MSIE_SSLV2_RSA_PADDING option. It has no effect since OpenSSL 0.9.7h and 0.9.8a. diffstat: src/event/ngx_event_openssl.c | 5 ----- 1 files changed, 0 insertions(+), 5 deletions(-) diffs (15 lines): diff -r 509b663a789c -r dda421871bc2 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Aug 10 23:43:17 2021 +0300 +++ b/src/event/ngx_event_openssl.c Tue Aug 10 23:43:17 2021 +0300 @@ -299,11 +299,6 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_ SSL_CTX_set_options(ssl->ctx, SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER); #endif -#ifdef SSL_OP_MSIE_SSLV2_RSA_PADDING - /* this option allow a potential SSL 2.0 rollback (CAN-2005-2969) */ - SSL_CTX_set_options(ssl->ctx, SSL_OP_MSIE_SSLV2_RSA_PADDING); -#endif - #ifdef SSL_OP_SSLEAY_080_CLIENT_DH_BUG SSL_CTX_set_options(ssl->ctx, SSL_OP_SSLEAY_080_CLIENT_DH_BUG); #endif From dnj0496 at gmail.com Tue Aug 10 22:35:24 2021 From: dnj0496 at gmail.com (Dk Jack) Date: Tue, 10 Aug 2021 15:35:24 -0700 Subject: http request count is zero Message-ID: Hi, I am seeing an alert in the nginx error.log with the message "http request count is zero". It seems to be from this code in src/http/ngx_http_request.c: if (r->count == 0) { ngx_log_error(NGX_LOG_ALERT, c->log, 0, "http request count is zero"); } These alerts are showing up for requests that receive a NGX_HTTP_FORBIDDEN from my module. What does it mean when "r->count" is zero? 
Is there some additional stuff I must do in my module to avoid seeing this message? Any help in understanding the issue is appreciated. I've included my code for sending the response. Thanks. Regards, Dk. ngx_buf_t *buf = ngx_create_temp_buf(r->pool, buf_size); ... ngx_int_t send_response(ngx_http_request_t *r, ngx_uint_t http_status, ngx_buf_t *buf) { ngx_int_t rc; ngx_log_t *log = r->connection->log; if (NULL == buf) { ngx_log_error(NGX_LOG_ERR, log, 0, "%s: Invalid input buffer", __FUNCTION__); return NGX_ERROR; } rc = ngx_http_discard_request_body(r); if (rc != NGX_OK) { ngx_log_error(NGX_LOG_ERR, log, 0, "%s: Discard req. body failed. rc=%i", __FUNCTION__, rc); return rc; } r->err_status = http_status; r->headers_out.status = http_status; r->headers_out.content_length_n = buf->last - buf->pos; ngx_str_set(&r->headers_out.content_type, "text/plain"); rc = ngx_http_send_header(r); if (rc == NGX_ERROR || rc > NGX_OK || r->header_only) { ngx_log_error(NGX_LOG_ERR, log, 0, "%s: Send header failed. rc=%i", __FUNCTION__, rc); return rc; } ngx_chain_t *out_chain = ngx_alloc_chain_link(r->pool); if (NULL == out_chain) { ngx_log_error(NGX_LOG_ERR, log, 0, "%s: Buffer chain alloc failed", __FUNCTION__); return NGX_ERROR; } out_chain->buf = buf; out_chain->next = NULL; buf->last_buf = 1; buf->last_in_chain = 1; rc = ngx_http_output_filter(r, out_chain); if ((rc != NGX_OK) && (rc != NGX_AGAIN)) { ngx_log_error(NGX_LOG_ERR, log, 0, "%s: Output filter call failed. rc=%i", __FUNCTION__, rc); return NGX_ERROR; } return NGX_OK; } -------------- next part -------------- An HTML attachment was scrubbed... URL: From alexander.borisov at nginx.com Wed Aug 11 18:49:48 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Wed, 11 Aug 2021 18:49:48 +0000 Subject: [njs] Moving generic iterator code to a proper location. 
Message-ID: details: https://hg.nginx.org/njs/rev/eb6c1c9823f1 branches: changeset: 1683:eb6c1c9823f1 user: Alexander Borisov date: Wed Aug 11 21:48:50 2021 +0300 description: Moving generic iterator code to a proper location. diffstat: src/njs_array.c | 441 ++-------------------------------------------------- src/njs_array.h | 3 + src/njs_iterator.c | 379 +++++++++++++++++++++++++++++++++++++++++++++ src/njs_iterator.h | 22 ++ 4 files changed, 428 insertions(+), 417 deletions(-) diffs (truncated from 1011 to 1000 lines): diff -r 424dd99ada9a -r eb6c1c9823f1 src/njs_array.c --- a/src/njs_array.c Thu Jul 15 20:32:44 2021 +0300 +++ b/src/njs_array.c Wed Aug 11 21:48:50 2021 +0300 @@ -8,9 +8,6 @@ #include -#define njs_fast_object(_sz) ((_sz) <= NJS_ARRAY_FAST_OBJECT_LENGTH) - - #define njs_array_func(type) \ ((type << 1) | NJS_ARRAY_FUNC) @@ -49,22 +46,6 @@ typedef enum { } njs_array_iterator_arg_t; -typedef struct { - njs_function_t *function; - njs_value_t *argument; - njs_value_t *value; - - njs_array_t *array; - - int64_t from; - int64_t to; -} njs_array_iterator_args_t; - - -typedef njs_int_t (*njs_array_iterator_handler_t)(njs_vm_t *vm, - njs_array_iterator_args_t *args, njs_value_t *entry, int64_t n); - - static njs_int_t njs_array_prototype_slice_copy(njs_vm_t *vm, njs_value_t *this, int64_t start, int64_t length); @@ -2065,7 +2046,7 @@ njs_array_prototype_fill(njs_vm_t *vm, n njs_inline njs_int_t -njs_array_iterator_call(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_iterator_call(njs_vm_t *vm, njs_iterator_args_t *args, const njs_value_t *entry, uint32_t n) { njs_value_t arguments[3]; @@ -2082,204 +2063,7 @@ njs_array_iterator_call(njs_vm_t *vm, nj static njs_int_t -njs_array_object_handler(njs_vm_t *vm, njs_array_iterator_handler_t handler, - njs_array_iterator_args_t *args, njs_value_t *key, int64_t i) -{ - njs_int_t ret; - njs_value_t prop, *entry; - - if (key != NULL) { - ret = njs_value_property(vm, args->value, key, &prop); - if 
(njs_slow_path(ret == NJS_ERROR)) { - return ret; - } - - } else { - ret = njs_value_property_i64(vm, args->value, i, &prop); - if (njs_slow_path(ret == NJS_ERROR)) { - return ret; - } - } - - entry = (ret == NJS_OK) ? &prop : njs_value_arg(&njs_value_invalid); - - ret = handler(vm, args, entry, i); - if (njs_slow_path(ret != NJS_OK)) { - if (ret == NJS_DONE) { - return NJS_DONE; - } - - return NJS_ERROR; - } - - return ret; -} - - -njs_inline njs_int_t -njs_array_iterator(njs_vm_t *vm, njs_array_iterator_args_t *args, - njs_array_iterator_handler_t handler) -{ - double idx; - int64_t length, i, from, to; - njs_int_t ret; - njs_array_t *array, *keys; - njs_value_t *value, *entry, prop, character, string_obj; - njs_object_t *object; - const u_char *p, *end, *pos; - njs_string_prop_t string_prop; - - value = args->value; - from = args->from; - to = args->to; - - if (njs_is_array(value)) { - array = njs_array(value); - - for (; from < to; from++) { - if (njs_slow_path(!array->object.fast_array)) { - goto process_object; - } - - if (njs_fast_path(from < array->length - && njs_is_valid(&array->start[from]))) - { - ret = handler(vm, args, &array->start[from], from); - - } else { - entry = njs_value_arg(&njs_value_invalid); - ret = njs_value_property_i64(vm, value, from, &prop); - if (njs_slow_path(ret != NJS_DECLINED)) { - if (ret == NJS_ERROR) { - return NJS_ERROR; - } - - entry = ∝ - } - - ret = handler(vm, args, entry, from); - } - - if (njs_slow_path(ret != NJS_OK)) { - if (ret == NJS_DONE) { - return NJS_DONE; - } - - return NJS_ERROR; - } - } - - return NJS_OK; - } - - if (njs_is_string(value) || njs_is_object_string(value)) { - - if (njs_is_string(value)) { - object = njs_object_value_alloc(vm, value, NJS_STRING); - if (njs_slow_path(object == NULL)) { - return NJS_ERROR; - } - - njs_set_type_object(&string_obj, object, NJS_OBJECT_STRING); - - args->value = &string_obj; - } - else { - value = njs_object_value(value); - } - - length = njs_string_prop(&string_prop, 
value); - - p = string_prop.start; - end = p + string_prop.size; - - if ((size_t) length == string_prop.size) { - /* Byte or ASCII string. */ - - for (i = from; i < to; i++) { - /* This cannot fail. */ - (void) njs_string_new(vm, &character, p + i, 1, 1); - - ret = handler(vm, args, &character, i); - if (njs_slow_path(ret != NJS_OK)) { - if (ret == NJS_DONE) { - return NJS_DONE; - } - - return NJS_ERROR; - } - } - - } else { - /* UTF-8 string. */ - - for (i = from; i < to; i++) { - pos = njs_utf8_next(p, end); - - /* This cannot fail. */ - (void) njs_string_new(vm, &character, p, pos - p, 1); - - ret = handler(vm, args, &character, i); - if (njs_slow_path(ret != NJS_OK)) { - if (ret == NJS_DONE) { - return NJS_DONE; - } - - return NJS_ERROR; - } - - p = pos; - } - } - - return NJS_OK; - } - - if (!njs_is_object(value)) { - return NJS_OK; - } - -process_object: - - if (!njs_fast_object(to - from)) { - keys = njs_array_indices(vm, value); - if (njs_slow_path(keys == NULL)) { - return NJS_ERROR; - } - - for (i = 0; i < keys->length; i++) { - idx = njs_string_to_index(&keys->start[i]); - - if (idx < from || idx >= to) { - continue; - } - - ret = njs_array_object_handler(vm, handler, args, &keys->start[i], - idx); - if (njs_slow_path(ret != NJS_OK)) { - njs_array_destroy(vm, keys); - return ret; - } - } - - njs_array_destroy(vm, keys); - - return NJS_OK; - } - - for (i = from; i < to; i++) { - ret = njs_array_object_handler(vm, handler, args, NULL, i); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - } - - return NJS_OK; -} - - -static njs_int_t -njs_array_handler_every(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_every(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, int64_t n) { njs_int_t ret; @@ -2301,7 +2085,7 @@ njs_array_handler_every(njs_vm_t *vm, nj static njs_int_t -njs_array_handler_some(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_some(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, 
int64_t n) { njs_int_t ret; @@ -2323,7 +2107,7 @@ njs_array_handler_some(njs_vm_t *vm, njs static njs_int_t -njs_array_handler_includes(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_includes(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, int64_t n) { if (!njs_is_valid(entry)) { @@ -2341,7 +2125,7 @@ njs_array_handler_includes(njs_vm_t *vm, static njs_int_t -njs_array_handler_index_of(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_index_of(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, int64_t n) { if (njs_values_strict_equal(args->argument, entry)) { @@ -2355,7 +2139,7 @@ njs_array_handler_index_of(njs_vm_t *vm, static njs_int_t -njs_array_handler_for_each(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_for_each(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, int64_t n) { if (njs_is_valid(entry)) { @@ -2367,7 +2151,7 @@ njs_array_handler_for_each(njs_vm_t *vm, static njs_int_t -njs_array_handler_find(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_find(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, int64_t n) { njs_int_t ret; @@ -2396,7 +2180,7 @@ njs_array_handler_find(njs_vm_t *vm, njs static njs_int_t -njs_array_handler_find_index(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_find_index(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, int64_t n) { njs_int_t ret; @@ -2425,7 +2209,7 @@ njs_array_handler_find_index(njs_vm_t *v static njs_int_t -njs_array_handler_reduce(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_reduce(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, int64_t n) { njs_int_t ret; @@ -2457,7 +2241,7 @@ njs_array_handler_reduce(njs_vm_t *vm, n static njs_int_t -njs_array_handler_filter(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_filter(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, int64_t n) { njs_int_t ret; @@ -2484,7 
+2268,7 @@ njs_array_handler_filter(njs_vm_t *vm, n static njs_int_t -njs_array_handler_map(njs_vm_t *vm, njs_array_iterator_args_t *args, +njs_array_handler_map(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *entry, int64_t n) { njs_int_t ret; @@ -2526,12 +2310,12 @@ static njs_int_t njs_array_prototype_iterator(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t magic) { - int64_t i, length; - njs_int_t ret; - njs_array_t *array; - njs_value_t accumulator; - njs_array_iterator_args_t iargs; - njs_array_iterator_handler_t handler; + int64_t i, length; + njs_int_t ret; + njs_array_t *array; + njs_value_t accumulator; + njs_iterator_args_t iargs; + njs_iterator_handler_t handler; iargs.value = njs_argument(args, 0); @@ -2652,7 +2436,7 @@ njs_array_prototype_iterator(njs_vm_t *v break; } - ret = njs_array_iterator(vm, &iargs, handler); + ret = njs_object_iterate(vm, &iargs, handler); if (njs_slow_path(ret == NJS_ERROR)) { return ret; } @@ -2704,192 +2488,15 @@ done: } -njs_inline njs_int_t -njs_array_reverse_iterator(njs_vm_t *vm, njs_array_iterator_args_t *args, - njs_array_iterator_handler_t handler) -{ - double idx; - int64_t i, from, to, length; - njs_int_t ret; - njs_array_t *array, *keys; - njs_value_t *entry, *value, prop, character, string_obj; - njs_object_t *object; - const u_char *p, *end, *pos; - njs_string_prop_t string_prop; - - value = args->value; - from = args->from; - to = args->to; - - if (njs_is_array(value)) { - array = njs_array(value); - - from += 1; - - while (from-- > to) { - if (njs_slow_path(!array->object.fast_array)) { - goto process_object; - } - - if (njs_fast_path(from < array->length - && njs_is_valid(&array->start[from]))) - { - ret = handler(vm, args, &array->start[from], from); - - } else { - entry = njs_value_arg(&njs_value_invalid); - ret = njs_value_property_i64(vm, value, from, &prop); - if (njs_slow_path(ret != NJS_DECLINED)) { - if (ret == NJS_ERROR) { - return NJS_ERROR; - } - - entry = ∝ - } - - ret = 
handler(vm, args, entry, from); - } - - if (njs_slow_path(ret != NJS_OK)) { - if (ret == NJS_DONE) { - return NJS_DONE; - } - - return NJS_ERROR; - } - } - - return NJS_OK; - } - - if (njs_is_string(value) || njs_is_object_string(value)) { - - if (njs_is_string(value)) { - object = njs_object_value_alloc(vm, value, NJS_STRING); - if (njs_slow_path(object == NULL)) { - return NJS_ERROR; - } - - njs_set_type_object(&string_obj, object, NJS_OBJECT_STRING); - - args->value = &string_obj; - } - else { - value = njs_object_value(value); - } - - length = njs_string_prop(&string_prop, value); - end = string_prop.start + string_prop.size; - - if ((size_t) length == string_prop.size) { - /* Byte or ASCII string. */ - - p = string_prop.start + from; - - i = from + 1; - - while (i-- > to) { - /* This cannot fail. */ - (void) njs_string_new(vm, &character, p, 1, 1); - - ret = handler(vm, args, &character, i); - if (njs_slow_path(ret != NJS_OK)) { - if (ret == NJS_DONE) { - return NJS_DONE; - } - - return NJS_ERROR; - } - - p--; - } - - } else { - /* UTF-8 string. */ - - p = njs_string_offset(string_prop.start, end, from); - p = njs_utf8_next(p, end); - - i = from + 1; - - while (i-- > to) { - pos = njs_utf8_prev(p); - - /* This cannot fail. 
*/ - (void) njs_string_new(vm, &character, pos, p - pos , 1); - - ret = handler(vm, args, &character, i); - if (njs_slow_path(ret != NJS_OK)) { - if (ret == NJS_DONE) { - return NJS_DONE; - } - - return NJS_ERROR; - } - - p = pos; - } - } - - return NJS_OK; - } - - if (!njs_is_object(value)) { - return NJS_OK; - } - -process_object: - - if (!njs_fast_object(from - to)) { - keys = njs_array_indices(vm, value); - if (njs_slow_path(keys == NULL)) { - return NJS_ERROR; - } - - i = keys->length; - - while (i > 0) { - idx = njs_string_to_index(&keys->start[--i]); - - if (idx < to || idx > from) { - continue; - } - - ret = njs_array_object_handler(vm, handler, args, &keys->start[i], - idx); - if (njs_slow_path(ret != NJS_OK)) { - njs_array_destroy(vm, keys); - return ret; - } - } - - njs_array_destroy(vm, keys); - - return NJS_OK; - } - - i = from + 1; - - while (i-- > to) { - ret = njs_array_object_handler(vm, handler, args, NULL, i); - if (njs_slow_path(ret != NJS_OK)) { - return ret; - } - } - - return NJS_OK; -} - - static njs_int_t njs_array_prototype_reverse_iterator(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t type) { - int64_t from, length; - njs_int_t ret; - njs_value_t accumulator; - njs_array_iterator_args_t iargs; - njs_array_iterator_handler_t handler; + int64_t from, length; + njs_int_t ret; + njs_value_t accumulator; + njs_iterator_args_t iargs; + njs_iterator_handler_t handler; iargs.value = njs_argument(args, 0); @@ -2960,7 +2567,7 @@ njs_array_prototype_reverse_iterator(njs iargs.from = from; iargs.to = 0; - ret = njs_array_reverse_iterator(vm, &iargs, handler); + ret = njs_object_iterate_reverse(vm, &iargs, handler); if (njs_fast_path(ret == NJS_ERROR)) { return NJS_ERROR; } diff -r 424dd99ada9a -r eb6c1c9823f1 src/njs_array.h --- a/src/njs_array.h Thu Jul 15 20:32:44 2021 +0300 +++ b/src/njs_array.h Wed Aug 11 21:48:50 2021 +0300 @@ -16,6 +16,9 @@ #define NJS_ARRAY_LARGE_OBJECT_LENGTH (32768) #define NJS_ARRAY_FLAT_MAX_LENGTH 
(1048576) +#define njs_fast_object(_sz) ((_sz) <= NJS_ARRAY_FAST_OBJECT_LENGTH) + + njs_array_t *njs_array_alloc(njs_vm_t *vm, njs_bool_t flat, uint64_t length, uint32_t spare); void njs_array_destroy(njs_vm_t *vm, njs_array_t *array); diff -r 424dd99ada9a -r eb6c1c9823f1 src/njs_iterator.c --- a/src/njs_iterator.c Thu Jul 15 20:32:44 2021 +0300 +++ b/src/njs_iterator.c Wed Aug 11 21:48:50 2021 +0300 @@ -22,6 +22,11 @@ static const njs_value_t string_done = static const njs_value_t string_value = njs_string("value"); +static njs_int_t njs_iterator_object_handler(njs_vm_t *vm, + njs_iterator_handler_t handler, njs_iterator_args_t *args, + njs_value_t *key, int64_t i); + + njs_int_t njs_array_iterator_create(njs_vm_t *vm, const njs_value_t *target, njs_value_t *retval, njs_object_enum_t kind) @@ -297,3 +302,377 @@ const njs_object_type_init_t njs_array_ .prototype_props = &njs_array_iterator_prototype_init, .prototype_value = { .object = { .type = NJS_OBJECT } }, }; + + +njs_int_t +njs_object_iterate(njs_vm_t *vm, njs_iterator_args_t *args, + njs_iterator_handler_t handler) +{ + double idx; + int64_t length, i, from, to; + njs_int_t ret; + njs_array_t *array, *keys; + njs_value_t *value, *entry, prop, character, string_obj; + njs_object_t *object; + const u_char *p, *end, *pos; + njs_string_prop_t string_prop; + + value = args->value; + from = args->from; + to = args->to; + + if (njs_is_array(value)) { + array = njs_array(value); + + for (; from < to; from++) { + if (njs_slow_path(!array->object.fast_array)) { + goto process_object; + } + + if (njs_fast_path(from < array->length + && njs_is_valid(&array->start[from]))) + { + ret = handler(vm, args, &array->start[from], from); + + } else { + entry = njs_value_arg(&njs_value_invalid); + ret = njs_value_property_i64(vm, value, from, &prop); + if (njs_slow_path(ret != NJS_DECLINED)) { + if (ret == NJS_ERROR) { + return NJS_ERROR; + } + + entry = ∝ + } + + ret = handler(vm, args, entry, from); + } + + if 
(njs_slow_path(ret != NJS_OK)) { + if (ret == NJS_DONE) { + return NJS_DONE; + } + + return NJS_ERROR; + } + } + + return NJS_OK; + } + + if (njs_is_string(value) || njs_is_object_string(value)) { + + if (njs_is_string(value)) { + object = njs_object_value_alloc(vm, value, NJS_STRING); + if (njs_slow_path(object == NULL)) { + return NJS_ERROR; + } + + njs_set_type_object(&string_obj, object, NJS_OBJECT_STRING); + + args->value = &string_obj; + } + else { + value = njs_object_value(value); + } + + length = njs_string_prop(&string_prop, value); + + p = string_prop.start; + end = p + string_prop.size; + + if ((size_t) length == string_prop.size) { + /* Byte or ASCII string. */ + + for (i = from; i < to; i++) { + /* This cannot fail. */ + (void) njs_string_new(vm, &character, p + i, 1, 1); + + ret = handler(vm, args, &character, i); + if (njs_slow_path(ret != NJS_OK)) { + if (ret == NJS_DONE) { + return NJS_DONE; + } + + return NJS_ERROR; + } + } + + } else { + /* UTF-8 string. */ + + for (i = from; i < to; i++) { + pos = njs_utf8_next(p, end); + + /* This cannot fail. 
*/ + (void) njs_string_new(vm, &character, p, pos - p, 1); + + ret = handler(vm, args, &character, i); + if (njs_slow_path(ret != NJS_OK)) { + if (ret == NJS_DONE) { + return NJS_DONE; + } + + return NJS_ERROR; + } + + p = pos; + } + } + + return NJS_OK; + } + + if (!njs_is_object(value)) { + return NJS_OK; + } + +process_object: + + if (!njs_fast_object(to - from)) { + keys = njs_array_indices(vm, value); + if (njs_slow_path(keys == NULL)) { + return NJS_ERROR; + } + + for (i = 0; i < keys->length; i++) { + idx = njs_string_to_index(&keys->start[i]); + + if (idx < from || idx >= to) { + continue; + } + + ret = njs_iterator_object_handler(vm, handler, args, &keys->start[i], + idx); + if (njs_slow_path(ret != NJS_OK)) { + njs_array_destroy(vm, keys); + return ret; + } + } + + njs_array_destroy(vm, keys); + + return NJS_OK; + } + + for (i = from; i < to; i++) { + ret = njs_iterator_object_handler(vm, handler, args, NULL, i); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + } + + return NJS_OK; +} + + +njs_int_t +njs_object_iterate_reverse(njs_vm_t *vm, njs_iterator_args_t *args, + njs_iterator_handler_t handler) +{ + double idx; + int64_t i, from, to, length; + njs_int_t ret; + njs_array_t *array, *keys; + njs_value_t *entry, *value, prop, character, string_obj; + njs_object_t *object; + const u_char *p, *end, *pos; + njs_string_prop_t string_prop; + + value = args->value; + from = args->from; + to = args->to; + + if (njs_is_array(value)) { + array = njs_array(value); + + from += 1; + + while (from-- > to) { + if (njs_slow_path(!array->object.fast_array)) { + goto process_object; + } + + if (njs_fast_path(from < array->length + && njs_is_valid(&array->start[from]))) + { + ret = handler(vm, args, &array->start[from], from); + + } else { + entry = njs_value_arg(&njs_value_invalid); + ret = njs_value_property_i64(vm, value, from, &prop); + if (njs_slow_path(ret != NJS_DECLINED)) { + if (ret == NJS_ERROR) { + return NJS_ERROR; + } + + entry = ∝ + } + + ret = 
handler(vm, args, entry, from); + } + + if (njs_slow_path(ret != NJS_OK)) { + if (ret == NJS_DONE) { + return NJS_DONE; + } + + return NJS_ERROR; + } + } + + return NJS_OK; + } + + if (njs_is_string(value) || njs_is_object_string(value)) { + + if (njs_is_string(value)) { + object = njs_object_value_alloc(vm, value, NJS_STRING); + if (njs_slow_path(object == NULL)) { + return NJS_ERROR; + } + + njs_set_type_object(&string_obj, object, NJS_OBJECT_STRING); + + args->value = &string_obj; + } + else { + value = njs_object_value(value); + } + + length = njs_string_prop(&string_prop, value); + end = string_prop.start + string_prop.size; + + if ((size_t) length == string_prop.size) { + /* Byte or ASCII string. */ + + p = string_prop.start + from; + + i = from + 1; + + while (i-- > to) { + /* This cannot fail. */ + (void) njs_string_new(vm, &character, p, 1, 1); + + ret = handler(vm, args, &character, i); + if (njs_slow_path(ret != NJS_OK)) { + if (ret == NJS_DONE) { + return NJS_DONE; + } + + return NJS_ERROR; + } + + p--; + } + + } else { + /* UTF-8 string. */ + + p = njs_string_offset(string_prop.start, end, from); + p = njs_utf8_next(p, end); + + i = from + 1; + + while (i-- > to) { + pos = njs_utf8_prev(p); + + /* This cannot fail. 
*/ + (void) njs_string_new(vm, &character, pos, p - pos , 1); + + ret = handler(vm, args, &character, i); + if (njs_slow_path(ret != NJS_OK)) { + if (ret == NJS_DONE) { + return NJS_DONE; + } + + return NJS_ERROR; + } + + p = pos; + } + } + + return NJS_OK; + } + + if (!njs_is_object(value)) { + return NJS_OK; + } + +process_object: + + if (!njs_fast_object(from - to)) { + keys = njs_array_indices(vm, value); + if (njs_slow_path(keys == NULL)) { + return NJS_ERROR; + } + + i = keys->length; + + while (i > 0) { + idx = njs_string_to_index(&keys->start[--i]); + + if (idx < to || idx > from) { + continue; + } + + ret = njs_iterator_object_handler(vm, handler, args, + &keys->start[i], idx); + if (njs_slow_path(ret != NJS_OK)) { + njs_array_destroy(vm, keys); + return ret; + } + } + + njs_array_destroy(vm, keys); + + return NJS_OK; + } + + i = from + 1; + + while (i-- > to) { + ret = njs_iterator_object_handler(vm, handler, args, NULL, i); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + } + + return NJS_OK; +} + + +static njs_int_t +njs_iterator_object_handler(njs_vm_t *vm, njs_iterator_handler_t handler, + njs_iterator_args_t *args, njs_value_t *key, int64_t i) +{ + njs_int_t ret; + njs_value_t prop, *entry; + + if (key != NULL) { + ret = njs_value_property(vm, args->value, key, &prop); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + } else { + ret = njs_value_property_i64(vm, args->value, i, &prop); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + } + + entry = (ret == NJS_OK) ? 
&prop : njs_value_arg(&njs_value_invalid); + + ret = handler(vm, args, entry, i); + if (njs_slow_path(ret != NJS_OK)) { + if (ret == NJS_DONE) { + return NJS_DONE; + } + + return NJS_ERROR; + } + + return ret; +} diff -r 424dd99ada9a -r eb6c1c9823f1 src/njs_iterator.h --- a/src/njs_iterator.h Thu Jul 15 20:32:44 2021 +0300 +++ b/src/njs_iterator.h Wed Aug 11 21:48:50 2021 +0300 @@ -8,12 +8,34 @@ #define _NJS_ITERATOR_H_INCLUDED_ +typedef struct { + njs_function_t *function; + njs_value_t *argument; + njs_value_t *value; + + njs_array_t *array; + + int64_t from; + int64_t to; +} njs_iterator_args_t; + + +typedef njs_int_t (*njs_iterator_handler_t)(njs_vm_t *vm, + njs_iterator_args_t *args, njs_value_t *entry, int64_t n); + + njs_int_t njs_array_iterator_create(njs_vm_t *vm, const njs_value_t *src, njs_value_t *dst, njs_object_enum_t kind); njs_int_t njs_array_iterator_next(njs_vm_t *vm, njs_value_t *iterator, From alexander.borisov at nginx.com Wed Aug 11 18:49:51 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Wed, 11 Aug 2021 18:49:51 +0000 Subject: [njs] Introduced AggregateError implementation. Message-ID: details: https://hg.nginx.org/njs/rev/ca2f051a4fc9 branches: changeset: 1684:ca2f051a4fc9 user: Alexander Borisov date: Wed Aug 11 21:48:51 2021 +0300 description: Introduced AggregateError implementation. 
diffstat: src/njs_builtin.c | 11 ++++ src/njs_error.c | 127 +++++++++++++++++++++++++++++++++++++++++++++- src/njs_error.h | 4 +- src/njs_fs.c | 2 +- src/njs_iterator.c | 45 ++++++++++++++++ src/njs_iterator.h | 2 + src/njs_object_hash.h | 29 ++++++++++ src/njs_vm.h | 1 + src/test/njs_unit_test.c | 20 +++++++ 9 files changed, 234 insertions(+), 7 deletions(-) diffs (407 lines): diff -r eb6c1c9823f1 -r ca2f051a4fc9 src/njs_builtin.c --- a/src/njs_builtin.c Wed Aug 11 21:48:50 2021 +0300 +++ b/src/njs_builtin.c Wed Aug 11 21:48:51 2021 +0300 @@ -114,6 +114,7 @@ static const njs_object_type_init_t *con &njs_type_error_type_init, &njs_uri_error_type_init, &njs_memory_error_type_init, + &njs_aggregate_error_type_init, }; @@ -1676,6 +1677,16 @@ static const njs_object_prop_t njs_glob .writable = 1, .configurable = 1, }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("AggregateError"), + .value = njs_prop_handler2(njs_top_level_constructor, + NJS_OBJ_TYPE_AGGREGATE_ERROR, + NJS_AGGREGATE_ERROR_HASH), + .writable = 1, + .configurable = 1, + }, }; diff -r eb6c1c9823f1 -r ca2f051a4fc9 src/njs_error.c --- a/src/njs_error.c Wed Aug 11 21:48:50 2021 +0300 +++ b/src/njs_error.c Wed Aug 11 21:48:51 2021 +0300 @@ -24,6 +24,7 @@ static njs_int_t njs_backtrace_to_string static const njs_value_t njs_error_message_string = njs_string("message"); static const njs_value_t njs_error_name_string = njs_string("name"); static const njs_value_t njs_error_stack_string = njs_string("stack"); +static const njs_value_t njs_error_errors_string = njs_string("errors"); void @@ -45,7 +46,7 @@ njs_error_new(njs_vm_t *vm, njs_value_t return; } - error = njs_error_alloc(vm, type, NULL, &string); + error = njs_error_alloc(vm, type, NULL, &string, NULL); if (njs_slow_path(error == NULL)) { return; } @@ -198,7 +199,7 @@ njs_error_stack(njs_vm_t *vm, njs_value_ njs_object_t * njs_error_alloc(njs_vm_t *vm, njs_object_type_t type, const njs_value_t *name, - const njs_value_t *message) + const 
njs_value_t *message, const njs_value_t *errors) { njs_int_t ret; njs_object_t *error; @@ -262,6 +263,26 @@ njs_error_alloc(njs_vm_t *vm, njs_object } } + if (errors != NULL) { + lhq.key = njs_str_value("errors"); + lhq.key_hash = NJS_ERRORS_HASH; + + prop = njs_object_prop_alloc(vm, &njs_error_errors_string, errors, 1); + if (njs_slow_path(prop == NULL)) { + goto memory_error; + } + + prop->enumerable = 0; + + lhq.value = prop; + + ret = njs_lvlhsh_insert(&error->hash, &lhq); + if (njs_slow_path(ret != NJS_OK)) { + njs_internal_error(vm, "lvlhsh insert failed"); + return NULL; + } + } + return error; memory_error: @@ -277,10 +298,32 @@ njs_error_constructor(njs_vm_t *vm, njs_ njs_index_t type) { njs_int_t ret; - njs_value_t *value; + njs_value_t *iterator, *value, list; + njs_array_t *array; njs_object_t *error; - value = njs_arg(args, nargs, 1); + if (type != NJS_OBJ_TYPE_AGGREGATE_ERROR) { + iterator = NULL; + value = njs_arg(args, nargs, 1); + + njs_set_undefined(&list); + + } else { + iterator = njs_arg(args, nargs, 1); + value = njs_arg(args, nargs, 2); + + if (njs_slow_path(iterator->type < NJS_STRING)) { + njs_type_error(vm, "first argument is not iterable"); + return NJS_ERROR; + } + + array = njs_iterator_to_array(vm, iterator); + if (njs_slow_path(array == NULL)) { + return NJS_ERROR; + } + + njs_set_array(&list, array); + } if (njs_slow_path(!njs_is_string(value))) { if (!njs_is_undefined(value)) { @@ -292,7 +335,8 @@ njs_error_constructor(njs_vm_t *vm, njs_ } error = njs_error_alloc(vm, type, NULL, - njs_is_defined(value) ? value : NULL); + njs_is_defined(value) ? value : NULL, + njs_is_defined(&list) ? 
&list : NULL); if (njs_slow_path(error == NULL)) { return NJS_ERROR; } @@ -543,6 +587,36 @@ const njs_object_init_t njs_uri_error_c }; +static const njs_object_prop_t njs_aggregate_error_constructor_properties[] = +{ + { + .type = NJS_PROPERTY, + .name = njs_string("name"), + .value = njs_string("AggregateError"), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("length"), + .value = njs_value(NJS_NUMBER, 1, 1.0), + .configurable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("prototype"), + .value = njs_prop_handler(njs_object_prototype_create), + }, +}; + + +const njs_object_init_t njs_aggregate_error_constructor_init = { + njs_aggregate_error_constructor_properties, + njs_nitems(njs_aggregate_error_constructor_properties), +}; + + void njs_memory_error_set(njs_vm_t *vm, njs_value_t *value) { @@ -1164,6 +1238,49 @@ const njs_object_type_init_t njs_uri_er }; +static const njs_object_prop_t njs_aggregate_error_prototype_properties[] = +{ + { + .type = NJS_PROPERTY, + .name = njs_string("name"), + .value = njs_string("AggregateError"), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("message"), + .value = njs_string(""), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY_HANDLER, + .name = njs_string("constructor"), + .value = njs_prop_handler(njs_object_prototype_create_constructor), + .writable = 1, + .configurable = 1, + }, +}; + + +const njs_object_init_t njs_aggregate_error_prototype_init = { + njs_aggregate_error_prototype_properties, + njs_nitems(njs_aggregate_error_prototype_properties), +}; + + +const njs_object_type_init_t njs_aggregate_error_type_init = { + .constructor = njs_native_ctor(njs_error_constructor, 1, + NJS_OBJ_TYPE_AGGREGATE_ERROR), + .constructor_props = &njs_aggregate_error_constructor_init, + .prototype_props = &njs_aggregate_error_prototype_init, + .prototype_value = { .object = { .type = NJS_OBJECT } }, +}; + + static 
njs_int_t njs_add_backtrace_entry(njs_vm_t *vm, njs_arr_t *stack, njs_native_frame_t *native_frame) diff -r eb6c1c9823f1 -r ca2f051a4fc9 src/njs_error.h --- a/src/njs_error.h Wed Aug 11 21:48:50 2021 +0300 +++ b/src/njs_error.h Wed Aug 11 21:48:51 2021 +0300 @@ -41,7 +41,8 @@ void njs_memory_error(njs_vm_t *vm); void njs_memory_error_set(njs_vm_t *vm, njs_value_t *value); njs_object_t *njs_error_alloc(njs_vm_t *vm, njs_object_type_t type, - const njs_value_t *name, const njs_value_t *message); + const njs_value_t *name, const njs_value_t *message, + const njs_value_t *errors); njs_int_t njs_error_to_string(njs_vm_t *vm, njs_value_t *retval, const njs_value_t *error); njs_int_t njs_error_stack(njs_vm_t *vm, njs_value_t *value, njs_value_t *stack); @@ -57,6 +58,7 @@ extern const njs_object_type_init_t njs extern const njs_object_type_init_t njs_type_error_type_init; extern const njs_object_type_init_t njs_uri_error_type_init; extern const njs_object_type_init_t njs_memory_error_type_init; +extern const njs_object_type_init_t njs_aggregate_error_type_init; njs_inline njs_int_t diff -r eb6c1c9823f1 -r ca2f051a4fc9 src/njs_fs.c --- a/src/njs_fs.c Wed Aug 11 21:48:50 2021 +0300 +++ b/src/njs_fs.c Wed Aug 11 21:48:51 2021 +0300 @@ -1491,7 +1491,7 @@ njs_fs_error(njs_vm_t *vm, const char *s return NJS_ERROR; } - error = njs_error_alloc(vm, NJS_OBJ_TYPE_ERROR, NULL, &value); + error = njs_error_alloc(vm, NJS_OBJ_TYPE_ERROR, NULL, &value, NULL); if (njs_slow_path(error == NULL)) { return NJS_ERROR; } diff -r eb6c1c9823f1 -r ca2f051a4fc9 src/njs_iterator.c --- a/src/njs_iterator.c Wed Aug 11 21:48:50 2021 +0300 +++ b/src/njs_iterator.c Wed Aug 11 21:48:51 2021 +0300 @@ -26,6 +26,9 @@ static njs_int_t njs_iterator_object_han njs_iterator_handler_t handler, njs_iterator_args_t *args, njs_value_t *key, int64_t i); +static njs_int_t njs_iterator_to_array_handler(njs_vm_t *vm, + njs_iterator_args_t *args, njs_value_t *value, int64_t index); + njs_int_t 
njs_array_iterator_create(njs_vm_t *vm, const njs_value_t *target, @@ -676,3 +679,45 @@ njs_iterator_object_handler(njs_vm_t *vm return ret; } + + +njs_array_t * +njs_iterator_to_array(njs_vm_t *vm, njs_value_t *iterator) +{ + int64_t length; + njs_int_t ret; + njs_iterator_args_t args; + + njs_memzero(&args, sizeof(njs_iterator_args_t)); + + ret = njs_object_length(vm, iterator, &length); + if (njs_slow_path(ret != NJS_OK)) { + return NULL; + } + + args.array = njs_array_alloc(vm, 1, length, 0); + if (njs_slow_path(args.array == NULL)) { + return NULL; + } + + args.value = iterator; + args.to = length; + + ret = njs_object_iterate(vm, &args, njs_iterator_to_array_handler); + if (njs_slow_path(ret == NJS_ERROR)) { + njs_mp_free(vm->mem_pool, args.array); + return NULL; + } + + return args.array; +} + + +static njs_int_t +njs_iterator_to_array_handler(njs_vm_t *vm, njs_iterator_args_t *args, + njs_value_t *value, int64_t index) +{ + args->array->start[index] = *value; + + return NJS_OK; +} diff -r eb6c1c9823f1 -r ca2f051a4fc9 src/njs_iterator.h --- a/src/njs_iterator.h Wed Aug 11 21:48:50 2021 +0300 +++ b/src/njs_iterator.h Wed Aug 11 21:48:51 2021 +0300 @@ -36,6 +36,8 @@ njs_int_t njs_object_iterate(njs_vm_t *v njs_int_t njs_object_iterate_reverse(njs_vm_t *vm, njs_iterator_args_t *args, njs_iterator_handler_t handler); +njs_array_t *njs_iterator_to_array(njs_vm_t *vm, njs_value_t *iterator); + extern const njs_object_type_init_t njs_iterator_type_init; extern const njs_object_type_init_t njs_array_iterator_type_init; diff -r eb6c1c9823f1 -r ca2f051a4fc9 src/njs_object_hash.h --- a/src/njs_object_hash.h Wed Aug 11 21:48:50 2021 +0300 +++ b/src/njs_object_hash.h Wed Aug 11 21:48:51 2021 +0300 @@ -343,6 +343,25 @@ 'E'), 'r'), 'r'), 'o'), 'r') +#define NJS_AGGREGATE_ERROR_HASH \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + 
njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add(NJS_DJB_HASH_INIT, \ + 'A'), 'g'), 'g'), 'r'), 'e'), 'g'), 'a'), 't'), 'e'), \ + 'E'), 'r'), 'r'), 'o'), 'r') + + #define NJS_MESSAGE_HASH \ njs_djb_hash_add( \ njs_djb_hash_add( \ @@ -354,6 +373,16 @@ 'm'), 'e'), 's'), 's'), 'a'), 'g'), 'e') +#define NJS_ERRORS_HASH \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add( \ + njs_djb_hash_add(NJS_DJB_HASH_INIT, \ + 'e'), 'r'), 'r'), 'o'), 'r'), 's') + + #define NJS_MODE_HASH \ njs_djb_hash_add( \ njs_djb_hash_add( \ diff -r eb6c1c9823f1 -r ca2f051a4fc9 src/njs_vm.h --- a/src/njs_vm.h Wed Aug 11 21:48:50 2021 +0300 +++ b/src/njs_vm.h Wed Aug 11 21:48:51 2021 +0300 @@ -86,6 +86,7 @@ typedef enum { NJS_OBJ_TYPE_TYPE_ERROR, NJS_OBJ_TYPE_URI_ERROR, NJS_OBJ_TYPE_MEMORY_ERROR, + NJS_OBJ_TYPE_AGGREGATE_ERROR, NJS_OBJ_TYPE_MAX, } njs_object_type_t; diff -r eb6c1c9823f1 -r ca2f051a4fc9 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Wed Aug 11 21:48:50 2021 +0300 +++ b/src/test/njs_unit_test.c Wed Aug 11 21:48:51 2021 +0300 @@ -11134,6 +11134,26 @@ static njs_unit_test_t njs_test[] = { njs_str("var e = RangeError('e'); Object.preventExtensions(e);e"), njs_str("RangeError: e") }, + /* AggregateError. */ + + { njs_str("AggregateError()"), + njs_str("TypeError: first argument is not iterable") }, + + { njs_str("AggregateError([1, 2, 3])"), + njs_str("AggregateError") }, + + { njs_str("let e = AggregateError([1, 2, 3], 'm'); e.message"), + njs_str("m") }, + + { njs_str("let e = AggregateError([1, 2, 3], 'm'); e.errors"), + njs_str("1,2,3") }, + + { njs_str("let e = AggregateError('abc'); e.errors"), + njs_str("a,b,c") }, + + { njs_str("let e = AggregateError([1, 2, 3], 'm'); e"), + njs_str("AggregateError: m") }, + /* Memory object is immutable. 
*/ { njs_str("var e = MemoryError('e'); e.name = 'E'"), From alexander.borisov at nginx.com Wed Aug 11 18:49:53 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Wed, 11 Aug 2021 18:49:53 +0000 Subject: [njs] Fixed resolve/reject callback for Promise.prototype.finally(). Message-ID: details: https://hg.nginx.org/njs/rev/3e00ce537115 branches: changeset: 1685:3e00ce537115 user: Alexander Borisov date: Wed Aug 11 21:48:51 2021 +0300 description: Fixed resolve/reject callback for Promise.prototype.finally(). diffstat: src/njs_promise.c | 75 ++++++++++++++++++++++++++++++++------------- test/js/promise_finally.js | 7 ++++ test/njs_expect_test.exp | 5 +++ 3 files changed, 65 insertions(+), 22 deletions(-) diffs (230 lines): diff -r ca2f051a4fc9 -r 3e00ce537115 src/njs_promise.c --- a/src/njs_promise.c Wed Aug 11 21:48:51 2021 +0300 +++ b/src/njs_promise.c Wed Aug 11 21:48:51 2021 +0300 @@ -45,6 +45,7 @@ typedef struct { njs_bool_t resolved; njs_bool_t *resolved_ref; njs_promise_capability_t *capability; + njs_function_native_t handler; } njs_promise_context_t; @@ -69,7 +70,9 @@ static njs_int_t njs_promise_perform_the njs_promise_capability_t *capability); static njs_int_t njs_promise_then_finally_function(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused); -static njs_int_t njs_promise_catch_finally_function(njs_vm_t *vm, +static njs_int_t njs_promise_then_finally_return(njs_vm_t *vm, + njs_value_t *args, njs_uint_t nargs, njs_index_t unused); +static njs_int_t njs_promise_catch_finally_return(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused); static njs_int_t njs_promise_reaction_job(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused); @@ -207,7 +210,7 @@ njs_promise_constructor_call(njs_vm_t *v static njs_function_t * -njs_promise_create_function(njs_vm_t *vm) +njs_promise_create_function(njs_vm_t *vm, size_t context_size) { njs_function_t *function; njs_promise_context_t *context; @@ 
-217,10 +220,15 @@ njs_promise_create_function(njs_vm_t *vm goto memory_error; } - context = njs_mp_zalloc(vm->mem_pool, sizeof(njs_promise_context_t)); - if (njs_slow_path(context == NULL)) { - njs_mp_free(vm->mem_pool, function); - goto memory_error; + if (context_size > 0) { + context = njs_mp_zalloc(vm->mem_pool, context_size); + if (njs_slow_path(context == NULL)) { + njs_mp_free(vm->mem_pool, function); + goto memory_error; + } + + } else { + context = NULL; } function->object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_FUNCTION].object; @@ -253,7 +261,8 @@ njs_promise_create_resolving_functions(n /* Some compilers give at error an uninitialized context if using for. */ do { - function = njs_promise_create_function(vm); + function = njs_promise_create_function(vm, + sizeof(njs_promise_context_t)); if (njs_slow_path(function == NULL)) { return NJS_ERROR; } @@ -302,7 +311,7 @@ njs_promise_new_capability(njs_vm_t *vm, return NULL; } - function = njs_promise_create_function(vm); + function = njs_promise_create_function(vm, sizeof(njs_promise_context_t)); if (njs_slow_path(function == NULL)) { return NULL; } @@ -456,7 +465,8 @@ njs_promise_trigger_reactions(njs_vm_t * { reaction = njs_queue_link_data(link, njs_promise_reaction_t, link); - function = njs_promise_create_function(vm); + function = njs_promise_create_function(vm, + sizeof(njs_promise_context_t)); function->u.native = njs_promise_reaction_job; njs_set_data(&arguments[0], reaction, 0); @@ -686,7 +696,7 @@ njs_promise_resolve_function(njs_vm_t *v arguments[1] = *resolution; arguments[2] = then; - function = njs_promise_create_function(vm); + function = njs_promise_create_function(vm, sizeof(njs_promise_context_t)); if (njs_slow_path(function == NULL)) { return NJS_ERROR; } @@ -861,7 +871,7 @@ njs_promise_prototype_then(njs_vm_t *vm, goto failed; } - function = njs_promise_create_function(vm); + function = njs_promise_create_function(vm, sizeof(njs_promise_context_t)); function->u.native = 
njs_promise_constructor; njs_set_function(&constructor, function); @@ -941,7 +951,8 @@ njs_promise_perform_then(njs_vm_t *vm, n njs_queue_insert_tail(&data->reject_queue, &rejected_reaction->link); } else { - function = njs_promise_create_function(vm); + function = njs_promise_create_function(vm, + sizeof(njs_promise_context_t)); function->u.native = njs_promise_reaction_job; if (data->state == NJS_PROMISE_REJECTED) { @@ -1009,7 +1020,7 @@ njs_promise_prototype_finally(njs_vm_t * finally = njs_arg(args, nargs, 1); - function = njs_promise_create_function(vm); + function = njs_promise_create_function(vm, sizeof(njs_promise_context_t)); function->u.native = njs_promise_constructor; njs_set_function(&constructor, function); @@ -1027,7 +1038,7 @@ njs_promise_prototype_finally(njs_vm_t * return njs_promise_invoke_then(vm, promise, arguments, 2); } - function = njs_promise_create_function(vm); + function = njs_promise_create_function(vm, sizeof(njs_promise_context_t)); if (njs_slow_path(function == NULL)) { return NJS_ERROR; } @@ -1038,21 +1049,23 @@ njs_promise_prototype_finally(njs_vm_t * context = function->context; context->constructor = constructor; context->finally = *finally; + context->handler = njs_promise_then_finally_return; njs_set_function(&arguments[0], function); - function = njs_promise_create_function(vm); + function = njs_promise_create_function(vm, sizeof(njs_promise_context_t)); if (njs_slow_path(function == NULL)) { njs_mp_free(vm->mem_pool, njs_function(&arguments[0])); return NJS_ERROR; } - function->u.native = njs_promise_catch_finally_function; + function->u.native = njs_promise_then_finally_function; function->args_count = 1; context = function->context; context->constructor = constructor; context->finally = *finally; + context->handler = njs_promise_catch_finally_return; njs_set_function(&arguments[1], function); @@ -1065,8 +1078,9 @@ njs_promise_then_finally_function(njs_vm njs_uint_t nargs, njs_index_t unused) { njs_int_t ret; - njs_value_t 
value, retval; + njs_value_t value, retval, argument; njs_promise_t *promise; + njs_function_t *function; njs_native_frame_t *frame; njs_promise_context_t *context; @@ -1086,18 +1100,35 @@ njs_promise_then_finally_function(njs_vm njs_set_promise(&value, promise); - return njs_promise_invoke_then(vm, &value, njs_arg(args, nargs, 1), 1); + function = njs_promise_create_function(vm, sizeof(njs_value_t)); + if (njs_slow_path(function == NULL)) { + return NJS_ERROR; + } + + function->u.native = context->handler; + + *((njs_value_t *) function->context) = *njs_arg(args, nargs, 1); + + njs_set_function(&argument, function); + + return njs_promise_invoke_then(vm, &value, &argument, 1); } static njs_int_t -njs_promise_catch_finally_function(njs_vm_t *vm, njs_value_t *args, +njs_promise_then_finally_return(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { - (void) njs_promise_then_finally_function(vm, args, nargs, unused); + njs_vm_retval_set(vm, vm->top_frame->function->context); + return NJS_OK; +} + - njs_vm_retval_set(vm, njs_arg(args, nargs, 1)); - +static njs_int_t +njs_promise_catch_finally_return(njs_vm_t *vm, njs_value_t *args, + njs_uint_t nargs, njs_index_t unused) +{ + njs_vm_retval_set(vm, vm->top_frame->function->context); return NJS_ERROR; } diff -r ca2f051a4fc9 -r 3e00ce537115 test/js/promise_finally.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_finally.js Wed Aug 11 21:48:51 2021 +0300 @@ -0,0 +1,7 @@ +Promise.resolve('here') +.finally(() => {'nope'}) +.then(v => {console.log(v)}); + +Promise.resolve('here') +.finally(() => {throw 'nope'}) +.then(v => {console.log(v)}); diff -r ca2f051a4fc9 -r 3e00ce537115 test/njs_expect_test.exp --- a/test/njs_expect_test.exp Wed Aug 11 21:48:51 2021 +0300 +++ b/test/njs_expect_test.exp Wed Aug 11 21:48:51 2021 +0300 @@ -1062,6 +1062,11 @@ njs_run {"./test/js/promise_then_throw_c njs_run {"./test/js/promise_catch_then_throw_catch.js"} \ "Done" +njs_run 
{"./test/js/promise_finally.js"} \ +"here +Thrown: +Error: unhandled promise rejection: nope" + njs_run {"./test/js/promise_finally_throw.js"} \ "Error: unhandled promise rejection: ReferenceError: \"nonExsistingInFinally\" is not defined" From alexander.borisov at nginx.com Wed Aug 11 18:49:55 2021 From: alexander.borisov at nginx.com (Alexander Borisov) Date: Wed, 11 Aug 2021 18:49:55 +0000 Subject: [njs] Added remaining Promise constructor methods. Message-ID: details: https://hg.nginx.org/njs/rev/80adcb502e40 branches: changeset: 1686:80adcb502e40 user: Alexander Borisov date: Wed Aug 11 21:48:52 2021 +0300 description: Added remaining Promise constructor methods. The following methods were added: Promise.all(), Promise.allSettled(), Promise.any(), Promise.race(). diffstat: src/njs_promise.c | 599 +++++++++++++++++++++++++++++++++++ test/js/promise_all.js | 9 + test/js/promise_allSettled.js | 20 + test/js/promise_allSettled_string.js | 10 + test/js/promise_all_throw.js | 9 + test/js/promise_any.js | 8 + test/js/promise_any_all_rejected.js | 7 + test/js/promise_race.js | 12 + test/js/promise_race_throw.js | 12 + test/njs_expect_test.exp | 24 + 10 files changed, 710 insertions(+), 0 deletions(-) diffs (793 lines): diff -r 3e00ce537115 -r 80adcb502e40 src/njs_promise.c --- a/src/njs_promise.c Wed Aug 11 21:48:51 2021 +0300 +++ b/src/njs_promise.c Wed Aug 11 21:48:52 2021 +0300 @@ -1,5 +1,6 @@ /* + * Copyright (C) Alexander Borisov * Copyright (C) Nginx, Inc. 
*/ @@ -17,6 +18,12 @@ typedef enum { NJS_PROMISE_REJECT } njs_promise_rejection_type_t; +typedef enum { + NJS_PROMISE_ALL = 0, + NJS_PROMISE_ALL_SETTLED, + NJS_PROMISE_ANY +} njs_promise_function_type_t; + typedef struct { njs_promise_type_t state; njs_value_t result; @@ -48,6 +55,22 @@ typedef struct { njs_function_native_t handler; } njs_promise_context_t; +typedef struct { + njs_bool_t already_called; + uint32_t index; + uint32_t *remaining_elements; + njs_array_t *values; + njs_promise_capability_t *capability; +} njs_promise_all_context_t; + +typedef struct { + njs_iterator_args_t args; + uint32_t *remaining; + njs_value_t *constructor; + njs_function_t *function; + njs_promise_capability_t *capability; +} njs_promise_iterator_args_t; + static njs_promise_t *njs_promise_constructor_call(njs_vm_t *vm, njs_function_t *function); @@ -78,6 +101,28 @@ static njs_int_t njs_promise_reaction_jo njs_uint_t nargs, njs_index_t unused); static njs_int_t njs_promise_resolve_thenable_job(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused); +static njs_int_t njs_promise_perform_all(njs_vm_t *vm, njs_value_t *iterator, + njs_promise_iterator_args_t *pargs, njs_iterator_handler_t handler, + njs_value_t *retval); +static njs_int_t njs_promise_perform_all_handler(njs_vm_t *vm, + njs_iterator_args_t *args, njs_value_t *value, int64_t index); +static njs_int_t njs_promise_all_resolve_element_functions(njs_vm_t *vm, + njs_value_t *args, njs_uint_t nargs, njs_index_t unused); +static njs_int_t njs_promise_perform_all_settled_handler(njs_vm_t *vm, + njs_iterator_args_t *args, njs_value_t *value, int64_t index); +static njs_int_t njs_promise_all_settled_element_functions(njs_vm_t *vm, + njs_value_t *args, njs_uint_t nargs, njs_index_t rejected); +static njs_int_t njs_promise_perform_any_handler(njs_vm_t *vm, + njs_iterator_args_t *args, njs_value_t *value, int64_t index); +static njs_int_t njs_promise_any_reject_element_functions(njs_vm_t *vm, + njs_value_t *args, 
njs_uint_t nargs, njs_index_t unused); +static njs_int_t njs_promise_perform_race_handler(njs_vm_t *vm, + njs_iterator_args_t *args, njs_value_t *value, int64_t index); + + +static const njs_value_t string_resolve = njs_string("resolve"); +static const njs_value_t string_any_rejected = + njs_long_string("All promises were rejected"); static njs_promise_t * @@ -1229,6 +1274,527 @@ njs_promise_resolve_thenable_job(njs_vm_ static njs_int_t +njs_promise_all(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t function_type) +{ + njs_int_t ret; + njs_value_t *promise, resolve; + njs_iterator_handler_t handler; + njs_promise_iterator_args_t pargs; + + promise = njs_argument(args, 0); + + pargs.capability = njs_promise_new_capability(vm, promise); + if (njs_slow_path(pargs.capability == NULL)) { + return NJS_ERROR; + } + + ret = njs_value_property(vm, promise, njs_value_arg(&string_resolve), + &resolve); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + if (njs_slow_path(!njs_is_function(&resolve))) { + njs_type_error(vm, "resolve is not callable"); + return NJS_ERROR; + } + + pargs.function = njs_function(&resolve); + pargs.constructor = promise; + + switch (function_type) { + case NJS_PROMISE_ALL_SETTLED: + handler = njs_promise_perform_all_settled_handler; + break; + + case NJS_PROMISE_ANY: + handler = njs_promise_perform_any_handler; + break; + + default: + handler = njs_promise_perform_all_handler; + break; + } + + return njs_promise_perform_all(vm, njs_arg(args, nargs, 1), &pargs, + handler, &vm->retval); +} + + +static njs_int_t +njs_promise_perform_all(njs_vm_t *vm, njs_value_t *iterator, + njs_promise_iterator_args_t *pargs, njs_iterator_handler_t handler, + njs_value_t *retval) +{ + int64_t length; + njs_int_t ret; + njs_value_t argument; + njs_object_t *error; + + if (njs_slow_path(!njs_is_object(pargs->constructor))) { + njs_type_error(vm, "constructor is not object"); + return NJS_ERROR; + } + + njs_memzero(&pargs->args, 
sizeof(njs_iterator_args_t)); + + ret = njs_object_length(vm, iterator, &length); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + pargs->args.array = njs_array_alloc(vm, 1, length, 0); + if (njs_slow_path(pargs->args.array == NULL)) { + return NJS_ERROR; + } + + pargs->remaining = njs_mp_alloc(vm->mem_pool, sizeof(uint32_t)); + if (njs_slow_path(pargs->remaining == NULL)) { + njs_memory_error(vm); + return NJS_ERROR; + } + + (*pargs->remaining) = 1; + + pargs->args.value = iterator; + pargs->args.to = length; + + ret = njs_object_iterate(vm, &pargs->args, handler); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + if (--(*pargs->remaining) == 0) { + njs_mp_free(vm->mem_pool, pargs->remaining); + + njs_set_array(&argument, pargs->args.array); + + if (handler == njs_promise_perform_any_handler) { + error = njs_error_alloc(vm, NJS_OBJ_TYPE_AGGREGATE_ERROR, + NULL, &string_any_rejected, &argument); + if (njs_slow_path(error == NULL)) { + return NJS_ERROR; + } + + njs_set_object(&argument, error); + } + + ret = njs_function_call(vm, njs_function(&pargs->capability->resolve), + &njs_value_undefined, &argument, 1, retval); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + } + + *retval = pargs->capability->promise; + + return NJS_OK; +} + + +static njs_int_t +njs_promise_perform_all_handler(njs_vm_t *vm, njs_iterator_args_t *args, + njs_value_t *value, int64_t index) +{ + njs_int_t ret; + njs_value_t arguments[2], next; + njs_function_t *on_fulfilled; + njs_promise_capability_t *capability; + njs_promise_all_context_t *context; + njs_promise_iterator_args_t *pargs; + + pargs = (njs_promise_iterator_args_t *) args; + + capability = pargs->capability; + + njs_set_undefined(&pargs->args.array->start[index]); + + ret = njs_function_call(vm, pargs->function, pargs->constructor, value, + 1, &next); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + on_fulfilled = njs_promise_create_function(vm, + 
sizeof(njs_promise_all_context_t)); + if (njs_slow_path(on_fulfilled == NULL)) { + return NJS_ERROR; + } + + on_fulfilled->u.native = njs_promise_all_resolve_element_functions; + on_fulfilled->args_count = 1; + + context = on_fulfilled->context; + + context->already_called = 0; + context->index = (uint32_t) index; + context->values = pargs->args.array; + context->capability = capability; + context->remaining_elements = pargs->remaining; + + (*pargs->remaining)++; + + njs_set_function(&arguments[0], on_fulfilled); + arguments[1] = capability->reject; + + ret = njs_promise_invoke_then(vm, &next, arguments, 2); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + return NJS_OK; +} + + +static njs_int_t +njs_promise_all_resolve_element_functions(njs_vm_t *vm, njs_value_t *args, + njs_uint_t nargs, njs_index_t unused) +{ + njs_value_t arguments; + njs_promise_all_context_t *context; + + context = vm->top_frame->function->context; + + if (context->already_called) { + njs_vm_retval_set(vm, &njs_value_undefined); + return NJS_OK; + } + + context->already_called = 1; + context->values->start[context->index] = *njs_arg(args, nargs, 1); + + if (--(*context->remaining_elements) == 0) { + njs_mp_free(vm->mem_pool, context->remaining_elements); + + njs_set_array(&arguments, context->values); + + return njs_function_call(vm, + njs_function(&context->capability->resolve), + &njs_value_undefined, &arguments, 1, + &vm->retval); + } + + njs_vm_retval_set(vm, &njs_value_undefined); + + return NJS_OK; +} + + +static njs_int_t +njs_promise_perform_all_settled_handler(njs_vm_t *vm, njs_iterator_args_t *args, + njs_value_t *value, int64_t index) +{ + njs_int_t ret; + njs_value_t arguments[2], next; + njs_function_t *on_fulfilled, *on_rejected; + njs_promise_capability_t *capability; + njs_promise_all_context_t *context; + njs_promise_iterator_args_t *pargs; + + pargs = (njs_promise_iterator_args_t *) args; + + capability = pargs->capability; + + 
njs_set_undefined(&pargs->args.array->start[index]); + + ret = njs_function_call(vm, pargs->function, pargs->constructor, value, + 1, &next); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + on_fulfilled = njs_promise_create_function(vm, + sizeof(njs_promise_all_context_t)); + if (njs_slow_path(on_fulfilled == NULL)) { + return NJS_ERROR; + } + + context = on_fulfilled->context; + + context->already_called = 0; + context->index = (uint32_t) index; + context->values = pargs->args.array; + context->capability = capability; + context->remaining_elements = pargs->remaining; + + on_rejected = njs_promise_create_function(vm, 0); + if (njs_slow_path(on_rejected == NULL)) { + return NJS_ERROR; + } + + on_fulfilled->u.native = njs_promise_all_settled_element_functions; + on_rejected->u.native = njs_promise_all_settled_element_functions; + on_rejected->magic8 = 1; /* rejected. */ + + on_fulfilled->args_count = 1; + on_rejected->args_count = 1; + + on_rejected->context = context; + + (*pargs->remaining)++; + + njs_set_function(&arguments[0], on_fulfilled); + njs_set_function(&arguments[1], on_rejected); + + ret = njs_promise_invoke_then(vm, &next, arguments, 2); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + return NJS_OK; +} + + +static njs_int_t +njs_promise_all_settled_element_functions(njs_vm_t *vm, + njs_value_t *args, njs_uint_t nargs, njs_index_t rejected) +{ + njs_int_t ret; + njs_value_t arguments, *value; + njs_object_t *obj; + const njs_value_t *status, *set; + njs_promise_all_context_t *context; + + static const njs_value_t string_status = njs_string("status"); + static const njs_value_t string_fulfilled = njs_string("fulfilled"); + static const njs_value_t string_value = njs_string("value"); + static const njs_value_t string_rejected = njs_string("rejected"); + static const njs_value_t string_reason = njs_string("reason"); + + context = vm->top_frame->function->context; + + if (context->already_called) { + njs_vm_retval_set(vm, 
&njs_value_undefined); + return NJS_OK; + } + + context->already_called = 1; + + obj = njs_object_alloc(vm); + if (njs_slow_path(obj == NULL)) { + return NJS_ERROR; + } + + value = &context->values->start[context->index]; + + njs_set_object(value, obj); + + if (rejected) { + status = &string_rejected; + set = &string_reason; + + } else { + status = &string_fulfilled; + set = &string_value; + } + + ret = njs_value_property_set(vm, value, njs_value_arg(&string_status), + njs_value_arg(status)); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + ret = njs_value_property_set(vm, value, njs_value_arg(set), + njs_arg(args, nargs, 1)); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + if (--(*context->remaining_elements) == 0) { + njs_mp_free(vm->mem_pool, context->remaining_elements); + + njs_set_array(&arguments, context->values); + + return njs_function_call(vm, + njs_function(&context->capability->resolve), + &njs_value_undefined, &arguments, 1, + &vm->retval); + } + + njs_vm_retval_set(vm, &njs_value_undefined); + + return NJS_OK; +} + + +static njs_int_t +njs_promise_perform_any_handler(njs_vm_t *vm, njs_iterator_args_t *args, + njs_value_t *value, int64_t index) +{ + njs_int_t ret; + njs_value_t arguments[2], next; + njs_function_t *on_rejected; + njs_promise_capability_t *capability; + njs_promise_all_context_t *context; + njs_promise_iterator_args_t *pargs; + + pargs = (njs_promise_iterator_args_t *) args; + + capability = pargs->capability; + + njs_set_undefined(&pargs->args.array->start[index]); + + ret = njs_function_call(vm, pargs->function, pargs->constructor, value, 1, + &next); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + on_rejected = njs_promise_create_function(vm, + sizeof(njs_promise_all_context_t)); + if (njs_slow_path(on_rejected == NULL)) { + return NJS_ERROR; + } + + on_rejected->u.native = njs_promise_any_reject_element_functions; + on_rejected->args_count = 1; + + context = on_rejected->context; + + 
context->already_called = 0; + context->index = (uint32_t) index; + context->values = pargs->args.array; + context->capability = capability; + context->remaining_elements = pargs->remaining; + + (*pargs->remaining)++; + + arguments[0] = capability->resolve; + njs_set_function(&arguments[1], on_rejected); + + ret = njs_promise_invoke_then(vm, &next, arguments, 2); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + return NJS_OK; +} + + +static njs_int_t +njs_promise_any_reject_element_functions(njs_vm_t *vm, njs_value_t *args, + njs_uint_t nargs, njs_index_t unused) +{ + njs_value_t argument; + njs_object_t *error; + njs_promise_all_context_t *context; + + context = vm->top_frame->function->context; + + if (context->already_called) { + njs_vm_retval_set(vm, &njs_value_undefined); + return NJS_OK; + } + + context->already_called = 1; + context->values->start[context->index] = *njs_arg(args, nargs, 1); + + if (--(*context->remaining_elements) == 0) { + njs_mp_free(vm->mem_pool, context->remaining_elements); + + njs_set_array(&argument, context->values); + + error = njs_error_alloc(vm, NJS_OBJ_TYPE_AGGREGATE_ERROR, + NULL, &string_any_rejected, &argument); + if (njs_slow_path(error == NULL)) { + return NJS_ERROR; + } + + njs_set_object(&argument, error); + + return njs_function_call(vm, njs_function(&context->capability->reject), + &njs_value_undefined, &argument, 1, + &vm->retval); + } + + njs_vm_retval_set(vm, &njs_value_undefined); + + return NJS_OK; +} + + +static njs_int_t +njs_promise_race(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, + njs_index_t unused) +{ + int64_t length; + njs_int_t ret; + njs_value_t *promise, *iterator, resolve; + njs_promise_iterator_args_t pargs; + + promise = njs_argument(args, 0); + iterator = njs_arg(args, nargs, 1); + + pargs.capability = njs_promise_new_capability(vm, promise); + if (njs_slow_path(pargs.capability == NULL)) { + return NJS_ERROR; + } + + ret = njs_value_property(vm, promise, 
njs_value_arg(&string_resolve), + &resolve); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + if (njs_slow_path(!njs_is_function(&resolve))) { + njs_type_error(vm, "resolve is not callable"); + return NJS_ERROR; + } + + ret = njs_object_length(vm, iterator, &length); + if (njs_slow_path(ret != NJS_OK)) { + return ret; + } + + njs_memzero(&pargs.args, sizeof(njs_iterator_args_t)); + + pargs.function = njs_function(&resolve); + pargs.constructor = promise; + + pargs.args.value = iterator; + pargs.args.to = length; + + ret = njs_object_iterate(vm, &pargs.args, njs_promise_perform_race_handler); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + vm->retval = pargs.capability->promise; + + return NJS_OK; +} + + +static njs_int_t +njs_promise_perform_race_handler(njs_vm_t *vm, njs_iterator_args_t *args, + njs_value_t *value, int64_t index) +{ + njs_int_t ret; + njs_value_t arguments[2], next; + njs_promise_capability_t *capability; + njs_promise_iterator_args_t *pargs; + + pargs = (njs_promise_iterator_args_t *) args; + + ret = njs_function_call(vm, pargs->function, pargs->constructor, value, + 1, &next); + if (njs_slow_path(ret == NJS_ERROR)) { + return ret; + } + + capability = pargs->capability; + + arguments[0] = capability->resolve; + arguments[1] = capability->reject; + + (void) njs_promise_invoke_then(vm, &next, arguments, 2); + + return NJS_OK; +} + + +static njs_int_t njs_promise_species(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { @@ -1278,6 +1844,39 @@ static const njs_object_prop_t njs_prom { .type = NJS_PROPERTY, + .name = njs_string("all"), + .value = njs_native_function2(njs_promise_all, 1, NJS_PROMISE_ALL), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("allSettled"), + .value = njs_native_function2(njs_promise_all, 1, + NJS_PROMISE_ALL_SETTLED), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("any"), + .value = 
njs_native_function2(njs_promise_all, 1, NJS_PROMISE_ANY), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, + .name = njs_string("race"), + .value = njs_native_function(njs_promise_race, 1), + .writable = 1, + .configurable = 1, + }, + + { + .type = NJS_PROPERTY, .name = njs_wellknown_symbol(NJS_SYMBOL_SPECIES), .value = njs_value(NJS_INVALID, 1, NAN), .getter = njs_native_function(njs_promise_species, 0), diff -r 3e00ce537115 -r 80adcb502e40 test/js/promise_all.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_all.js Wed Aug 11 21:48:52 2021 +0300 @@ -0,0 +1,9 @@ +function resolve(value) { + return new Promise(resolve => setTimeout(() => resolve(value), 0)); +} + +Promise.all([resolve(['one', 'two']), resolve(['three', 'four'])]) +.then( + (v) => {console.log(`resolved:${njs.dump(v)}`)}, + (v) => {console.log(`rejected:${njs.dump(v)}`)} +); diff -r 3e00ce537115 -r 80adcb502e40 test/js/promise_allSettled.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_allSettled.js Wed Aug 11 21:48:52 2021 +0300 @@ -0,0 +1,20 @@ +var p0 = Promise.resolve(2).then(v => v + 1); +var p1 = Promise.reject(21).catch(v => v * 2); +var p2 = Promise.resolve('nope').then(() => { throw 'foo' }); +var p3 = Promise.reject('yes').then(() => { throw 'nope'; }); +var p4 = Promise.resolve('here').finally(() => 'nope'); +var p5 = Promise.reject('here too').finally(() => 'nope'); +var p6 = Promise.resolve('nope').finally(() => { throw 'finally'; }); +var p7 = Promise.reject('nope').finally(() => { throw 'finally after rejected'; }); +var p8 = Promise.reject(1).then(() => 'nope', () => 0); + +function dump(v) { + var fulfilled = v.filter(v=>v.status == 'fulfilled').map(v=>v.value).sort(); + var rejected = v.filter(v=>v.status == 'rejected').map(v=>v.reason).sort(); + return `F:${fulfilled}|R:${rejected}` +} + +Promise.allSettled([p0, p1, p2, p3, p4, p5, p6, p7, p8]).then( + (v) => {console.log(`resolved:${dump(v)}`)}, + (v) => 
{console.log(`rejected:${dump(v)}`)} +); diff -r 3e00ce537115 -r 80adcb502e40 test/js/promise_allSettled_string.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_allSettled_string.js Wed Aug 11 21:48:52 2021 +0300 @@ -0,0 +1,10 @@ +function dump(v) { + var fulfilled = v.filter(v=>v.status == 'fulfilled').map(v=>v.value).sort(); + var rejected = v.filter(v=>v.status == 'rejected').map(v=>v.reason).sort(); + return `F:${fulfilled}|R:${rejected}`; +} + +Promise.allSettled("abc").then( + (v) => {console.log(`resolved:${dump(v)}`)}, + (v) => {console.log(`rejected:${dump(v)}`)} +); diff -r 3e00ce537115 -r 80adcb502e40 test/js/promise_all_throw.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_all_throw.js Wed Aug 11 21:48:52 2021 +0300 @@ -0,0 +1,9 @@ +var p0 = Promise.resolve(1).then(v => v + 1); +var p1 = Promise.reject(2).catch(v => v * 2); +var p2 = Promise.resolve().then(() => { throw 'foo' }); +var p3 = Promise.reject().then(() => { throw 'oof'; }); + +Promise.all([p0, p1, p2, p3]).then( + (v) => {console.log(`resolved:${v}`)}, + (v) => {console.log(`rejected:${v}`)} +); diff -r 3e00ce537115 -r 80adcb502e40 test/js/promise_any.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_any.js Wed Aug 11 21:48:52 2021 +0300 @@ -0,0 +1,8 @@ +var p0 = Promise.resolve().then(() => { throw 'foo' }); +var p1 = Promise.reject(2).catch(v => v * 2); +var p2 = Promise.resolve(1).then(v => v + 1); + +Promise.any([p0, p1, p2]).then( + (v) => {console.log(`resolved:${v}`)}, + (v) => {console.log(`rejected:${v}`)} +); diff -r 3e00ce537115 -r 80adcb502e40 test/js/promise_any_all_rejected.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_any_all_rejected.js Wed Aug 11 21:48:52 2021 +0300 @@ -0,0 +1,7 @@ +var p0 = Promise.reject(1); +var p1 = Promise.reject(2); + +Promise.any([p0, p1]).then( + (v) => {console.log(`resolve:${v}`)}, + (v) => {console.log(`reject:${v}`)} +); diff -r 3e00ce537115 -r 80adcb502e40 
test/js/promise_race.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_race.js Wed Aug 11 21:48:52 2021 +0300 @@ -0,0 +1,12 @@ +var p1 = new Promise((resolve, reject) => { + setTimeout(resolve, 0, 'one'); +}); + +var p2 = new Promise((resolve, reject) => { + setTimeout(resolve, 0, 'two'); +}); + +Promise.race([p1, p2]).then( + (v) => {console.log(`resolved:${v}`)}, + (v) => {console.log(`rejected:${v}`)} +); diff -r 3e00ce537115 -r 80adcb502e40 test/js/promise_race_throw.js --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/js/promise_race_throw.js Wed Aug 11 21:48:52 2021 +0300 @@ -0,0 +1,12 @@ +var p1 = new Promise((resolve, reject) => { + throw 'one'; +}); + +var p2 = new Promise((resolve, reject) => { + setTimeout(resolve, 0, 'two'); +}); + +Promise.race([p1, p2]).then( + (v) => {console.log(`resolved:${v}`)}, + (v) => {console.log(`rejected:${v}`)} +); diff -r 3e00ce537115 -r 80adcb502e40 test/njs_expect_test.exp --- a/test/njs_expect_test.exp Wed Aug 11 21:48:51 2021 +0300 +++ b/test/njs_expect_test.exp Wed Aug 11 21:48:52 2021 +0300 @@ -1084,3 +1084,27 @@ njs_run {"./test/js/promise_reject_catch njs_run {"./test/js/promise_reject_post_catch.js"} \ "Error: unhandled promise rejection: undefined" + +njs_run {"./test/js/promise_all.js"} \ +"resolved:\\\[\\\['one','two'],\\\['three','four']]" + +njs_run {"./test/js/promise_all_throw.js"} \ +"rejected:foo" + +njs_run {"./test/js/promise_allSettled.js"} \ +"resolved:resolved:F:0,3,42,here|R:finally,finally after rejected,foo,here too,yes" + +njs_run {"./test/js/promise_allSettled_string.js"} \ +"resolved:F:a,b,c|R:" + +njs_run {"./test/js/promise_any.js"} \ +"resolved:4" + +njs_run {"./test/js/promise_any_all_rejected.js"} \ +"reject:AggregateError: All promises were rejected" + +njs_run {"./test/js/promise_race.js"} \ +"resolved:one" + +njs_run {"./test/js/promise_race_throw.js"} \ +"rejected:one" From xeioex at nginx.com Thu Aug 12 16:40:00 2021 From: xeioex at nginx.com (Dmitry 
Volyntsev) Date: Thu, 12 Aug 2021 16:40:00 +0000 Subject: [njs] Stream: fixed CPU hog when js_filter is registered in both directions. Message-ID: details: https://hg.nginx.org/njs/rev/377743cd9059 branches: changeset: 1687:377743cd9059 user: Miao Wang date: Wed Aug 11 11:44:12 2021 +0800 description: Stream: fixed CPU hog when js_filter is registered in both directions. Previously, a single busy chain was used to track filtered data in both directions. This might lead to a situation when busy chunks are not freed properly and pile up. The fix is to separate busy chain for upstream and downstream directions. This closes #413 issue on Github. diffstat: nginx/ngx_stream_js_module.c | 9 ++++++--- 1 files changed, 6 insertions(+), 3 deletions(-) diffs (41 lines): diff -r 80adcb502e40 -r 377743cd9059 nginx/ngx_stream_js_module.c --- a/nginx/ngx_stream_js_module.c Wed Aug 11 21:48:52 2021 +0300 +++ b/nginx/ngx_stream_js_module.c Wed Aug 11 11:44:12 2021 +0800 @@ -49,7 +49,8 @@ typedef struct { ngx_buf_t *buf; ngx_chain_t **last_out; ngx_chain_t *free; - ngx_chain_t *busy; + ngx_chain_t *upstream_busy; + ngx_chain_t *downstream_busy; ngx_int_t status; #define NGX_JS_EVENT_UPLOAD 0 #define NGX_JS_EVENT_DOWNLOAD 1 @@ -528,7 +529,7 @@ ngx_stream_js_body_filter(ngx_stream_ses njs_str_t exception; njs_int_t ret; ngx_int_t rc; - ngx_chain_t *out, *cl; + ngx_chain_t *out, *cl, **busy; ngx_connection_t *c, *dst; ngx_stream_js_ev_t *event; ngx_stream_js_ctx_t *ctx; @@ -606,15 +607,17 @@ ngx_stream_js_body_filter(ngx_stream_ses if (from_upstream) { dst = c; + busy = &ctx->downstream_busy; } else { dst = s->upstream ? 
s->upstream->peer.connection : NULL; + busy = &ctx->upstream_busy; } if (out != NULL || dst == NULL || dst->buffered) { rc = ngx_stream_next_filter(s, out, from_upstream); - ngx_chain_update_chains(c->pool, &ctx->free, &ctx->busy, &out, + ngx_chain_update_chains(c->pool, &ctx->free, busy, &out, (ngx_buf_tag_t) &ngx_stream_js_module); } else { From robm at fastmail.fm Fri Aug 13 08:26:52 2021 From: robm at fastmail.fm (Robert Mueller) Date: Fri, 13 Aug 2021 18:26:52 +1000 Subject: [PATCH] Mail: Add Auth-SSL-Cipher header to each imap/pop/smtp auth request Message-ID: # HG changeset patch # User Rob Mueller # Date 1628841467 14400 # Fri Aug 13 03:57:47 2021 -0400 # Node ID 6ea8e179293dbd5d09218658220a64a9ce20cb8a # Parent dda421871bc213dd2eb3da0015d6228839323583 Mail: Add Auth-SSL-Cipher header to each imap/pop/smtp auth request This adds a new Auth-SSL-Cipher header to the mail proxy auth protocol when SSL is enabled the reports the SSL cipher that was negotiated. This can be useful for detecting users using older clients that negotiate old ciphers when you want to upgrade to newer TLS versions of remove suppport for old and insecure ciphers. You can use your auth backend to notify these users before the upgrade that they either need to upgrade their client software or contact your support team to work out an upgrade path. 
diff -r dda421871bc2 -r 6ea8e179293d src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c Tue Aug 10 23:43:17 2021 +0300 +++ b/src/mail/ngx_mail_auth_http_module.c Fri Aug 13 03:57:47 2021 -0400 @@ -1138,7 +1138,7 @@ ngx_connection_t *c; #if (NGX_MAIL_SSL) ngx_str_t verify, subject, issuer, serial, fingerprint, - raw_cert, cert; + raw_cert, cert, cipher; ngx_mail_ssl_conf_t *sslcf; #endif ngx_mail_core_srv_conf_t *cscf; @@ -1157,6 +1157,15 @@ sslcf = ngx_mail_get_module_srv_conf(s, ngx_mail_ssl_module); + if (c->ssl) { + if (ngx_ssl_get_cipher_name(c, pool, &cipher) != NGX_OK) { + return NULL; + } + cipher.len = ngx_strlen(cipher.data); + } else { + ngx_str_null(&cipher); + } + if (c->ssl && sslcf->verify) { /* certificate details */ @@ -1252,6 +1261,8 @@ if (c->ssl) { len += sizeof("Auth-SSL: on" CRLF) - 1 + + sizeof("Auth-SSL-Cipher: ") - 1 + cipher.len + + sizeof(CRLF) - 1 + sizeof("Auth-SSL-Verify: ") - 1 + verify.len + sizeof(CRLF) - 1 + sizeof("Auth-SSL-Subject: ") - 1 + subject.len @@ -1373,6 +1384,13 @@ b->last = ngx_cpymem(b->last, "Auth-SSL: on" CRLF, sizeof("Auth-SSL: on" CRLF) - 1); + if (cipher.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Cipher: ", + sizeof("Auth-SSL-Cipher: ") - 1); + b->last = ngx_copy(b->last, cipher.data, cipher.len); + *b->last++ = CR; *b->last++ = LF; + } + if (verify.len) { b->last = ngx_cpymem(b->last, "Auth-SSL-Verify: ", sizeof("Auth-SSL-Verify: ") - 1); From duncan.lock at gmail.com Sat Aug 14 15:10:07 2021 From: duncan.lock at gmail.com (Duncan Lock) Date: Sat, 14 Aug 2021 08:10:07 -0700 Subject: [PATCH] Add support for dark color scheme in default index.html page Message-ID: # HG changeset patch # User Duncan Lock # Date 1628952253 25200 # Sat Aug 14 07:44:13 2021 -0700 # Node ID 81294b370e774c792210904f710abc0a494c5c05 # Parent dda421871bc213dd2eb3da0015d6228839323583 Add support for dark color scheme in default index.html page Add a little CSS to index.html to support dark color schemes. 
This will display the index page in dark colors if the user has requested a dark color scheme in their system UI or browser, and display the same as the previous version if not. See: https://developer.mozilla.org/en-US/docs/Web/CSS/@media/prefers-color-scheme diff -r dda421871bc2 -r 81294b370e77 docs/html/index.html --- a/docs/html/index.html Tue Aug 10 23:43:17 2021 +0300 +++ b/docs/html/index.html Sat Aug 14 07:44:13 2021 -0700 @@ -8,6 +8,15 @@ margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } + @media (prefers-color-scheme: dark) { + body { + background-color: #363839; + color: #d1cec9; + } + a { + color: #c4c4ff; + } + } From mdounin at mdounin.ru Sun Aug 15 01:25:38 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 15 Aug 2021 04:25:38 +0300 Subject: [PATCH] Add support for dark color scheme in default index.html page In-Reply-To: References: Message-ID: Hello! On Sat, Aug 14, 2021 at 08:10:07AM -0700, Duncan Lock wrote: > # HG changeset patch > # User Duncan Lock > # Date 1628952253 25200 > # Sat Aug 14 07:44:13 2021 -0700 > # Node ID 81294b370e774c792210904f710abc0a494c5c05 > # Parent dda421871bc213dd2eb3da0015d6228839323583 > Add support for dark color scheme in default index.html page > > Add a little CSS to index.html to support dark color schemes. > This will display the index page in dark colors if the user has > requested a dark color scheme in their system UI or browser, and > display the same as the previous version if not. 
> > See: https://developer.mozilla.org/en-US/docs/Web/CSS/@media/prefers-color-scheme > > diff -r dda421871bc2 -r 81294b370e77 docs/html/index.html > --- a/docs/html/index.html Tue Aug 10 23:43:17 2021 +0300 > +++ b/docs/html/index.html Sat Aug 14 07:44:13 2021 -0700 > @@ -8,6 +8,15 @@ > margin: 0 auto; > font-family: Tahoma, Verdana, Arial, sans-serif; > } > + @media (prefers-color-scheme: dark) { > + body { > + background-color: #363839; > + color: #d1cec9; > + } > + a { > + color: #c4c4ff; > + } > + } > > > Thank you for the patch. I don't think this is something we want to customize in the example pages such as index.html, especially given that we don't set other colors. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Sun Aug 15 02:51:50 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 15 Aug 2021 05:51:50 +0300 Subject: [PATCH] Mail: Add Auth-SSL-Cipher header to each imap/pop/smtp auth request In-Reply-To: References: Message-ID: Hello! On Fri, Aug 13, 2021 at 06:26:52PM +1000, Robert Mueller wrote: > # HG changeset patch > # User Rob Mueller > # Date 1628841467 14400 > # Fri Aug 13 03:57:47 2021 -0400 > # Node ID 6ea8e179293dbd5d09218658220a64a9ce20cb8a > # Parent dda421871bc213dd2eb3da0015d6228839323583 > Mail: Add Auth-SSL-Cipher header to each imap/pop/smtp auth request Something like: Mail: added Auth-SSL-Cipher header. would be enough. > > This adds a new Auth-SSL-Cipher header to the mail proxy auth > protocol when SSL is enabled the reports the SSL cipher that > was negotiated. It look like there are grammar issues here, "the reports ..." part is not related to the text. Probably should be omitted as it is obvious anyway. > > This can be useful for detecting users using older clients that > negotiate old ciphers when you want to upgrade to newer > TLS versions of remove suppport for old and insecure ciphers. ... oR remove? ... suPPort? 
Shouldn't we also add Auth-SSL-Protocol if one of the declared use cases is to upgrade to newer TLS versions? In general this looks close to ticket #2134 (https://trac.nginx.org/nginx/ticket/2134), so it is good idea to mention it in the commit log. > You can use your auth backend to notify these users before the > upgrade that they either need to upgrade their client software > or contact your support team to work out an upgrade path. > > diff -r dda421871bc2 -r 6ea8e179293d src/mail/ngx_mail_auth_http_module.c > --- a/src/mail/ngx_mail_auth_http_module.c Tue Aug 10 23:43:17 2021 +0300 > +++ b/src/mail/ngx_mail_auth_http_module.c Fri Aug 13 03:57:47 2021 -0400 > @@ -1138,7 +1138,7 @@ > ngx_connection_t *c; > #if (NGX_MAIL_SSL) > ngx_str_t verify, subject, issuer, serial, fingerprint, > - raw_cert, cert; > + raw_cert, cert, cipher; > ngx_mail_ssl_conf_t *sslcf; > #endif > ngx_mail_core_srv_conf_t *cscf; > @@ -1157,6 +1157,15 @@ > > sslcf = ngx_mail_get_module_srv_conf(s, ngx_mail_ssl_module); > > + if (c->ssl) { > + if (ngx_ssl_get_cipher_name(c, pool, &cipher) != NGX_OK) { > + return NULL; > + } > + cipher.len = ngx_strlen(cipher.data); > + } else { > + ngx_str_null(&cipher); > + } > + This does not use sslcf, so probably should be before it is obtained. Also, it lacks empty lines expected per style. 
> if (c->ssl && sslcf->verify) { > > /* certificate details */ > @@ -1252,6 +1261,8 @@ > > if (c->ssl) { > len += sizeof("Auth-SSL: on" CRLF) - 1 > + + sizeof("Auth-SSL-Cipher: ") - 1 + cipher.len > + + sizeof(CRLF) - 1 > + sizeof("Auth-SSL-Verify: ") - 1 + verify.len > + sizeof(CRLF) - 1 > + sizeof("Auth-SSL-Subject: ") - 1 + subject.len > @@ -1373,6 +1384,13 @@ > b->last = ngx_cpymem(b->last, "Auth-SSL: on" CRLF, > sizeof("Auth-SSL: on" CRLF) - 1); > > + if (cipher.len) { > + b->last = ngx_cpymem(b->last, "Auth-SSL-Cipher: ", > + sizeof("Auth-SSL-Cipher: ") - 1); > + b->last = ngx_copy(b->last, cipher.data, cipher.len); > + *b->last++ = CR; *b->last++ = LF; > + } > + > if (verify.len) { > b->last = ngx_cpymem(b->last, "Auth-SSL-Verify: ", > sizeof("Auth-SSL-Verify: ") - 1); The patch which addresses above comments, please take a look if it works for you: # HG changeset patch # User Rob Mueller # Date 1628841467 14400 # Fri Aug 13 03:57:47 2021 -0400 # Node ID b5d159018a6ad7ecaf52dedd5ff19798cf360d45 # Parent dda421871bc213dd2eb3da0015d6228839323583 Mail: Auth-SSL-Protocol and Auth-SSL-Cipher headers (ticket #2134). This adds new Auth-SSL-Protocol and Auth-SSL-Cipher headers to the mail proxy auth protocol when SSL is enabled. This can be useful for detecting users using older clients that negotiate old ciphers when you want to upgrade to newer TLS versions of remove suppport for old and insecure ciphers. You can use your auth backend to notify these users before the upgrade that they either need to upgrade their client software or contact your support team to work out an upgrade path. 
diff --git a/src/mail/ngx_mail_auth_http_module.c b/src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c +++ b/src/mail/ngx_mail_auth_http_module.c @@ -1137,8 +1137,8 @@ ngx_mail_auth_http_create_request(ngx_ma ngx_str_t login, passwd; ngx_connection_t *c; #if (NGX_MAIL_SSL) - ngx_str_t verify, subject, issuer, serial, fingerprint, - raw_cert, cert; + ngx_str_t protocol, cipher, verify, subject, issuer, + serial, fingerprint, raw_cert, cert; ngx_mail_ssl_conf_t *sslcf; #endif ngx_mail_core_srv_conf_t *cscf; @@ -1155,6 +1155,25 @@ ngx_mail_auth_http_create_request(ngx_ma #if (NGX_MAIL_SSL) + if (c->ssl) { + + if (ngx_ssl_get_protocol(c, pool, &protocol) != NGX_OK) { + return NULL; + } + + protocol.len = ngx_strlen(protocol.data); + + if (ngx_ssl_get_cipher_name(c, pool, &cipher) != NGX_OK) { + return NULL; + } + + cipher.len = ngx_strlen(cipher.data); + + } else { + ngx_str_null(&protocol); + ngx_str_null(&cipher); + } + sslcf = ngx_mail_get_module_srv_conf(s, ngx_mail_ssl_module); if (c->ssl && sslcf->verify) { @@ -1252,6 +1271,10 @@ ngx_mail_auth_http_create_request(ngx_ma if (c->ssl) { len += sizeof("Auth-SSL: on" CRLF) - 1 + + sizeof("Auth-SSL-Protocol: ") - 1 + protocol.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Cipher: ") - 1 + cipher.len + + sizeof(CRLF) - 1 + sizeof("Auth-SSL-Verify: ") - 1 + verify.len + sizeof(CRLF) - 1 + sizeof("Auth-SSL-Subject: ") - 1 + subject.len @@ -1373,6 +1396,20 @@ ngx_mail_auth_http_create_request(ngx_ma b->last = ngx_cpymem(b->last, "Auth-SSL: on" CRLF, sizeof("Auth-SSL: on" CRLF) - 1); + if (protocol.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Protocol: ", + sizeof("Auth-SSL-Protocol: ") - 1); + b->last = ngx_copy(b->last, protocol.data, protocol.len); + *b->last++ = CR; *b->last++ = LF; + } + + if (cipher.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Cipher: ", + sizeof("Auth-SSL-Cipher: ") - 1); + b->last = ngx_copy(b->last, cipher.data, cipher.len); + *b->last++ = CR; *b->last++ = LF; + 
} + if (verify.len) { b->last = ngx_cpymem(b->last, "Auth-SSL-Verify: ", sizeof("Auth-SSL-Verify: ") - 1); -- Maxim Dounin http://mdounin.ru/ From steffen.weber at gmail.com Sun Aug 15 05:59:56 2021 From: steffen.weber at gmail.com (Steffen Weber) Date: Sun, 15 Aug 2021 07:59:56 +0200 Subject: [PATCH] Add support for dark color scheme in default index.html page In-Reply-To: References: Message-ID: You could add the following line which makes modern browsers use colors from their built-in dark color scheme: https://web.dev/color-scheme/ Am So., 15. Aug. 2021 um 03:25 Uhr schrieb Maxim Dounin : > Hello! > > On Sat, Aug 14, 2021 at 08:10:07AM -0700, Duncan Lock wrote: > > > # HG changeset patch > > # User Duncan Lock > > # Date 1628952253 25200 > > # Sat Aug 14 07:44:13 2021 -0700 > > # Node ID 81294b370e774c792210904f710abc0a494c5c05 > > # Parent dda421871bc213dd2eb3da0015d6228839323583 > > Add support for dark color scheme in default index.html page > > > > Add a little CSS to index.html to support dark color schemes. > > This will display the index page in dark colors if the user has > > requested a dark color scheme in their system UI or browser, and > > display the same as the previous version if not. > > > > See: > https://developer.mozilla.org/en-US/docs/Web/CSS/@media/prefers-color-scheme > > > > diff -r dda421871bc2 -r 81294b370e77 docs/html/index.html > > --- a/docs/html/index.html Tue Aug 10 23:43:17 2021 +0300 > > +++ b/docs/html/index.html Sat Aug 14 07:44:13 2021 -0700 > > @@ -8,6 +8,15 @@ > > margin: 0 auto; > > font-family: Tahoma, Verdana, Arial, sans-serif; > > } > > + @media (prefers-color-scheme: dark) { > > + body { > > + background-color: #363839; > > + color: #d1cec9; > > + } > > + a { > > + color: #c4c4ff; > > + } > > + } > > > > > > > > Thank you for the patch. I don't think this is something we want > to customize in the example pages such as index.html, especially > given that we don't set other colors. 
> > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From duncan.lock at gmail.com Sun Aug 15 07:05:45 2021 From: duncan.lock at gmail.com (Duncan Lock) Date: Sun, 15 Aug 2021 00:05:45 -0700 Subject: [PATCH] Add support for dark color scheme in default index.html page In-Reply-To: References: Message-ID: That seems like a much better approach - I'll submit a patch using this in the morning and see what people think. On Sat, 14 Aug 2021 at 23:00, Steffen Weber wrote: > > You could add the following line which makes modern browsers use colors from their built-in dark color scheme: > > > > https://web.dev/color-scheme/ > > Am So., 15. Aug. 2021 um 03:25 Uhr schrieb Maxim Dounin : >> >> Hello! >> >> On Sat, Aug 14, 2021 at 08:10:07AM -0700, Duncan Lock wrote: >> >> > # HG changeset patch >> > # User Duncan Lock >> > # Date 1628952253 25200 >> > # Sat Aug 14 07:44:13 2021 -0700 >> > # Node ID 81294b370e774c792210904f710abc0a494c5c05 >> > # Parent dda421871bc213dd2eb3da0015d6228839323583 >> > Add support for dark color scheme in default index.html page >> > >> > Add a little CSS to index.html to support dark color schemes. >> > This will display the index page in dark colors if the user has >> > requested a dark color scheme in their system UI or browser, and >> > display the same as the previous version if not. 
>> > >> > See: https://developer.mozilla.org/en-US/docs/Web/CSS/@media/prefers-color-scheme >> > >> > diff -r dda421871bc2 -r 81294b370e77 docs/html/index.html >> > --- a/docs/html/index.html Tue Aug 10 23:43:17 2021 +0300 >> > +++ b/docs/html/index.html Sat Aug 14 07:44:13 2021 -0700 >> > @@ -8,6 +8,15 @@ >> > margin: 0 auto; >> > font-family: Tahoma, Verdana, Arial, sans-serif; >> > } >> > + @media (prefers-color-scheme: dark) { >> > + body { >> > + background-color: #363839; >> > + color: #d1cec9; >> > + } >> > + a { >> > + color: #c4c4ff; >> > + } >> > + } >> > >> > >> > >> >> Thank you for the patch. I don't think this is something we want >> to customize in the example pages such as index.html, especially >> given that we don't set other colors. >> >> -- >> Maxim Dounin >> http://mdounin.ru/ >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From duncan.lock at gmail.com Sun Aug 15 17:41:05 2021 From: duncan.lock at gmail.com (Duncan Lock) Date: Sun, 15 Aug 2021 10:41:05 -0700 Subject: [PATCH] Add support for dark color scheme in default index.html page In-Reply-To: References: Message-ID: # HG changeset patch # User Duncan Lock # Date 1629049097 25200 # Sun Aug 15 10:38:17 2021 -0700 # Node ID 945d9836012ed84dea05577027a30a38e38a59f3 # Parent dda421871bc213dd2eb3da0015d6228839323583 Add support for dark color scheme in default index & 50x pages Add a meta tag to index.html & 50x.html to support dark color schemes. This will display the index page in dark colors if the user has requested a dark color scheme in their system UI or browser, and display the same as the previous version if not. This uses the browsers built-in styles and doesn't hard code any colors or styles. 
diff -r dda421871bc2 -r 945d9836012e docs/html/50x.html --- a/docs/html/50x.html Tue Aug 10 23:43:17 2021 +0300 +++ b/docs/html/50x.html Sun Aug 15 10:38:17 2021 -0700 @@ -2,6 +2,7 @@ Error + > >> > > >> > > >> > >> Thank you for the patch. I don't think this is something we want > >> to customize in the example pages such as index.html, especially > >> given that we don't set other colors. > >> > >> -- > >> Maxim Dounin > >> http://mdounin.ru/ > >> _______________________________________________ > >> nginx-devel mailing list > >> nginx-devel at nginx.org > >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Mon Aug 16 01:21:11 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 16 Aug 2021 04:21:11 +0300 Subject: [PATCH] Add support for dark color scheme in default index.html page In-Reply-To: References: Message-ID: Hello! On Sun, Aug 15, 2021 at 10:41:05AM -0700, Duncan Lock wrote: > # HG changeset patch > # User Duncan Lock > # Date 1629049097 25200 > # Sun Aug 15 10:38:17 2021 -0700 > # Node ID 945d9836012ed84dea05577027a30a38e38a59f3 > # Parent dda421871bc213dd2eb3da0015d6228839323583 > Add support for dark color scheme in default index & 50x pages > > Add a meta tag to index.html & 50x.html to support dark color schemes. > This will display the index page in dark colors if the user has > requested a dark color scheme in their system UI or browser, and > display the same as the previous version if not. > > This uses the browsers built-in styles and doesn't hard code any colors > or styles. 
> > diff -r dda421871bc2 -r 945d9836012e docs/html/50x.html > --- a/docs/html/50x.html Tue Aug 10 23:43:17 2021 +0300 > +++ b/docs/html/50x.html Sun Aug 15 10:38:17 2021 -0700 > @@ -2,6 +2,7 @@ > > > Error > + > diff --git a/docs/html/index.html b/docs/html/index.html --- a/docs/html/index.html +++ b/docs/html/index.html @@ -3,11 +3,8 @@ Welcome to nginx! # HG changeset patch # User Maxim Dounin # Date 1629076313 -10800 # Mon Aug 16 04:11:53 2021 +0300 # Node ID 698e3229fb87b0f2d523935988d0de830d906d9c # Parent 98d71364e591de91df011ba91028038748baf19e Dark mode support in welcome and 50x error pages. Prodded by Duncan Lock. diff --git a/docs/html/50x.html b/docs/html/50x.html --- a/docs/html/50x.html +++ b/docs/html/50x.html @@ -3,6 +3,7 @@ Error diff --git a/docs/html/index.html b/docs/html/index.html --- a/docs/html/index.html +++ b/docs/html/index.html @@ -3,6 +3,7 @@ Welcome to nginx! -- Maxim Dounin http://mdounin.ru/ From duncan.lock at gmail.com Mon Aug 16 02:47:33 2021 From: duncan.lock at gmail.com (Duncan Lock) Date: Sun, 15 Aug 2021 19:47:33 -0700 Subject: [PATCH] Add support for dark color scheme in default index.html page In-Reply-To: References: Message-ID: That works for me! In this case, because the CSS is inline, there's no difference between using the meta tag and the CSS color-scheme. If you want to stop hard-coding things, I would also take out those hard-coded Windows only fonts - and just let the browsers default cross-platform styles - and the users preferences, decide the font to use. So, I would suggest also changing this: font-family: Tahoma, Verdana, Arial, sans-serif; to this: font-family: sans-serif; Which is simpler and produces better results cross-platform? On Sun, 15 Aug 2021 at 18:21, Maxim Dounin wrote: > > Hello! 
> > On Sun, Aug 15, 2021 at 10:41:05AM -0700, Duncan Lock wrote: > > > # HG changeset patch > > # User Duncan Lock > > # Date 1629049097 25200 > > # Sun Aug 15 10:38:17 2021 -0700 > > # Node ID 945d9836012ed84dea05577027a30a38e38a59f3 > > # Parent dda421871bc213dd2eb3da0015d6228839323583 > > Add support for dark color scheme in default index & 50x pages > > > > Add a meta tag to index.html & 50x.html to support dark color schemes. > > This will display the index page in dark colors if the user has > > requested a dark color scheme in their system UI or browser, and > > display the same as the previous version if not. > > > > This uses the browsers built-in styles and doesn't hard code any colors > > or styles. > > > > diff -r dda421871bc2 -r 945d9836012e docs/html/50x.html > > --- a/docs/html/50x.html Tue Aug 10 23:43:17 2021 +0300 > > +++ b/docs/html/50x.html Sun Aug 15 10:38:17 2021 -0700 > > @@ -2,6 +2,7 @@ > > > > > > Error > > + > > > > > diff --git a/docs/html/index.html b/docs/html/index.html > --- a/docs/html/index.html > +++ b/docs/html/index.html > @@ -3,11 +3,8 @@ > > Welcome to nginx! > > > > # HG changeset patch > # User Maxim Dounin > # Date 1629076313 -10800 > # Mon Aug 16 04:11:53 2021 +0300 > # Node ID 698e3229fb87b0f2d523935988d0de830d906d9c > # Parent 98d71364e591de91df011ba91028038748baf19e > Dark mode support in welcome and 50x error pages. > > Prodded by Duncan Lock. > > diff --git a/docs/html/50x.html b/docs/html/50x.html > --- a/docs/html/50x.html > +++ b/docs/html/50x.html > @@ -3,6 +3,7 @@ > > Error > > diff --git a/docs/html/index.html b/docs/html/index.html > --- a/docs/html/index.html > +++ b/docs/html/index.html > @@ -3,6 +3,7 @@ > > Welcome to nginx! 
> > > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Mon Aug 16 13:40:40 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 16 Aug 2021 13:40:40 +0000 Subject: [nginx] Welcome and 50x error pages style. Message-ID: details: https://hg.nginx.org/nginx/rev/67c68cd973b8 branches: changeset: 7902:67c68cd973b8 user: Maxim Dounin date: Mon Aug 16 16:36:06 2021 +0300 description: Welcome and 50x error pages style. Indentation of the CSS code removed to match style of the HTML code. diffstat: docs/html/50x.html | 7 ++----- docs/html/index.html | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diffs (34 lines): diff -r dda421871bc2 -r 67c68cd973b8 docs/html/50x.html --- a/docs/html/50x.html Tue Aug 10 23:43:17 2021 +0300 +++ b/docs/html/50x.html Mon Aug 16 16:36:06 2021 +0300 @@ -3,11 +3,8 @@ Error diff -r dda421871bc2 -r 67c68cd973b8 docs/html/index.html --- a/docs/html/index.html Tue Aug 10 23:43:17 2021 +0300 +++ b/docs/html/index.html Mon Aug 16 16:36:06 2021 +0300 @@ -3,11 +3,8 @@ Welcome to nginx! From mdounin at mdounin.ru Mon Aug 16 13:40:43 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 16 Aug 2021 13:40:43 +0000 Subject: [nginx] Dark mode support in welcome and 50x error pages. Message-ID: details: https://hg.nginx.org/nginx/rev/f2ddd0c491bf branches: changeset: 7903:f2ddd0c491bf user: Maxim Dounin date: Mon Aug 16 16:36:08 2021 +0300 description: Dark mode support in welcome and 50x error pages. Prodded by Duncan Lock. 
diffstat: docs/html/50x.html | 1 + docs/html/index.html | 1 + 2 files changed, 2 insertions(+), 0 deletions(-) diffs (22 lines): diff -r 67c68cd973b8 -r f2ddd0c491bf docs/html/50x.html --- a/docs/html/50x.html Mon Aug 16 16:36:06 2021 +0300 +++ b/docs/html/50x.html Mon Aug 16 16:36:08 2021 +0300 @@ -3,6 +3,7 @@ Error diff -r 67c68cd973b8 -r f2ddd0c491bf docs/html/index.html --- a/docs/html/index.html Mon Aug 16 16:36:06 2021 +0300 +++ b/docs/html/index.html Mon Aug 16 16:36:08 2021 +0300 @@ -3,6 +3,7 @@ Welcome to nginx! From mdounin at mdounin.ru Mon Aug 16 13:41:24 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 16 Aug 2021 16:41:24 +0300 Subject: [PATCH] Add support for dark color scheme in default index.html page In-Reply-To: References: Message-ID: Hello! On Sun, Aug 15, 2021 at 07:47:33PM -0700, Duncan Lock wrote: > That works for me! > > In this case, because the CSS is inline, there's no difference between > using the meta tag and the CSS color-scheme. Committed, thanks for testing. > If you want to stop hard-coding things, I would also take out those > hard-coded Windows only fonts - and just let the browsers default > cross-platform styles - and the users preferences, decide the font to > use. > So, I would suggest also changing this: > > font-family: Tahoma, Verdana, Arial, sans-serif; > > to this: > > font-family: sans-serif; > > Which is simpler and produces better results cross-platform? Define "better". Tahoma certainly looks better than Arial to me, and AFAIR that's basically the reason why Valentin used this font-family instead of just sans-serif back in ed470a7bf7fd (http://hg.nginx.org/nginx/rev/ed470a7bf7fd). Since this falls back to sans-serif anyway, I don't see reasons to change things here. 
-- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Mon Aug 16 21:25:15 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 16 Aug 2021 21:25:15 +0000 Subject: [nginx] SSL: ciphers now set before loading certificates (ticket #2035). Message-ID: details: https://hg.nginx.org/nginx/rev/419c066cb710 branches: changeset: 7904:419c066cb710 user: Maxim Dounin date: Mon Aug 16 22:40:31 2021 +0300 description: SSL: ciphers now set before loading certificates (ticket #2035). To load old/weak server or client certificates it might be needed to adjust the security level, as introduced in OpenSSL 1.1.0. This change ensures that ciphers are set before loading the certificates, so security level changes via the cipher string apply to certificate loading. diffstat: src/http/modules/ngx_http_grpc_module.c | 12 ++++++------ src/http/modules/ngx_http_proxy_module.c | 12 ++++++------ src/http/modules/ngx_http_ssl_module.c | 14 +++++++------- src/http/modules/ngx_http_uwsgi_module.c | 12 ++++++------ src/mail/ngx_mail_ssl_module.c | 14 +++++++------- src/stream/ngx_stream_proxy_module.c | 8 ++++---- src/stream/ngx_stream_ssl_module.c | 14 +++++++------- 7 files changed, 43 insertions(+), 43 deletions(-) diffs (205 lines): diff -r f2ddd0c491bf -r 419c066cb710 src/http/modules/ngx_http_grpc_module.c --- a/src/http/modules/ngx_http_grpc_module.c Mon Aug 16 16:36:08 2021 +0300 +++ b/src/http/modules/ngx_http_grpc_module.c Mon Aug 16 22:40:31 2021 +0300 @@ -4896,6 +4896,12 @@ ngx_http_grpc_set_ssl(ngx_conf_t *cf, ng cln->handler = ngx_ssl_cleanup_ctx; cln->data = glcf->upstream.ssl; + if (ngx_ssl_ciphers(cf, glcf->upstream.ssl, &glcf->ssl_ciphers, 0) + != NGX_OK) + { + return NGX_ERROR; + } + if (glcf->upstream.ssl_certificate) { if (glcf->upstream.ssl_certificate_key == NULL) { @@ -4927,12 +4933,6 @@ ngx_http_grpc_set_ssl(ngx_conf_t *cf, ng } } - if (ngx_ssl_ciphers(cf, glcf->upstream.ssl, &glcf->ssl_ciphers, 0) - != NGX_OK) - { - return NGX_ERROR; - } - if 
(glcf->upstream.ssl_verify) { if (glcf->ssl_trusted_certificate.len == 0) { ngx_log_error(NGX_LOG_EMERG, cf->log, 0, diff -r f2ddd0c491bf -r 419c066cb710 src/http/modules/ngx_http_proxy_module.c --- a/src/http/modules/ngx_http_proxy_module.c Mon Aug 16 16:36:08 2021 +0300 +++ b/src/http/modules/ngx_http_proxy_module.c Mon Aug 16 22:40:31 2021 +0300 @@ -4944,6 +4944,12 @@ ngx_http_proxy_set_ssl(ngx_conf_t *cf, n cln->handler = ngx_ssl_cleanup_ctx; cln->data = plcf->upstream.ssl; + if (ngx_ssl_ciphers(cf, plcf->upstream.ssl, &plcf->ssl_ciphers, 0) + != NGX_OK) + { + return NGX_ERROR; + } + if (plcf->upstream.ssl_certificate) { if (plcf->upstream.ssl_certificate_key == NULL) { @@ -4975,12 +4981,6 @@ ngx_http_proxy_set_ssl(ngx_conf_t *cf, n } } - if (ngx_ssl_ciphers(cf, plcf->upstream.ssl, &plcf->ssl_ciphers, 0) - != NGX_OK) - { - return NGX_ERROR; - } - if (plcf->upstream.ssl_verify) { if (plcf->ssl_trusted_certificate.len == 0) { ngx_log_error(NGX_LOG_EMERG, cf->log, 0, diff -r f2ddd0c491bf -r 419c066cb710 src/http/modules/ngx_http_ssl_module.c --- a/src/http/modules/ngx_http_ssl_module.c Mon Aug 16 16:36:08 2021 +0300 +++ b/src/http/modules/ngx_http_ssl_module.c Mon Aug 16 22:40:31 2021 +0300 @@ -797,6 +797,13 @@ ngx_http_ssl_merge_srv_conf(ngx_conf_t * ngx_http_ssl_npn_advertised, NULL); #endif + if (ngx_ssl_ciphers(cf, &conf->ssl, &conf->ciphers, + conf->prefer_server_ciphers) + != NGX_OK) + { + return NGX_CONF_ERROR; + } + if (ngx_http_ssl_compile_certificates(cf, conf) != NGX_OK) { return NGX_CONF_ERROR; } @@ -829,13 +836,6 @@ ngx_http_ssl_merge_srv_conf(ngx_conf_t * } } - if (ngx_ssl_ciphers(cf, &conf->ssl, &conf->ciphers, - conf->prefer_server_ciphers) - != NGX_OK) - { - return NGX_CONF_ERROR; - } - conf->ssl.buffer_size = conf->buffer_size; if (conf->verify) { diff -r f2ddd0c491bf -r 419c066cb710 src/http/modules/ngx_http_uwsgi_module.c --- a/src/http/modules/ngx_http_uwsgi_module.c Mon Aug 16 16:36:08 2021 +0300 +++ b/src/http/modules/ngx_http_uwsgi_module.c 
Mon Aug 16 22:40:31 2021 +0300 @@ -2432,6 +2432,12 @@ ngx_http_uwsgi_set_ssl(ngx_conf_t *cf, n cln->handler = ngx_ssl_cleanup_ctx; cln->data = uwcf->upstream.ssl; + if (ngx_ssl_ciphers(cf, uwcf->upstream.ssl, &uwcf->ssl_ciphers, 0) + != NGX_OK) + { + return NGX_ERROR; + } + if (uwcf->upstream.ssl_certificate) { if (uwcf->upstream.ssl_certificate_key == NULL) { @@ -2463,12 +2469,6 @@ ngx_http_uwsgi_set_ssl(ngx_conf_t *cf, n } } - if (ngx_ssl_ciphers(cf, uwcf->upstream.ssl, &uwcf->ssl_ciphers, 0) - != NGX_OK) - { - return NGX_ERROR; - } - if (uwcf->upstream.ssl_verify) { if (uwcf->ssl_trusted_certificate.len == 0) { ngx_log_error(NGX_LOG_EMERG, cf->log, 0, diff -r f2ddd0c491bf -r 419c066cb710 src/mail/ngx_mail_ssl_module.c --- a/src/mail/ngx_mail_ssl_module.c Mon Aug 16 16:36:08 2021 +0300 +++ b/src/mail/ngx_mail_ssl_module.c Mon Aug 16 22:40:31 2021 +0300 @@ -394,6 +394,13 @@ ngx_mail_ssl_merge_conf(ngx_conf_t *cf, cln->handler = ngx_ssl_cleanup_ctx; cln->data = &conf->ssl; + if (ngx_ssl_ciphers(cf, &conf->ssl, &conf->ciphers, + conf->prefer_server_ciphers) + != NGX_OK) + { + return NGX_CONF_ERROR; + } + if (ngx_ssl_certificates(cf, &conf->ssl, conf->certificates, conf->certificate_keys, conf->passwords) != NGX_OK) @@ -430,13 +437,6 @@ ngx_mail_ssl_merge_conf(ngx_conf_t *cf, } } - if (ngx_ssl_ciphers(cf, &conf->ssl, &conf->ciphers, - conf->prefer_server_ciphers) - != NGX_OK) - { - return NGX_CONF_ERROR; - } - if (ngx_ssl_dhparam(cf, &conf->ssl, &conf->dhparam) != NGX_OK) { return NGX_CONF_ERROR; } diff -r f2ddd0c491bf -r 419c066cb710 src/stream/ngx_stream_proxy_module.c --- a/src/stream/ngx_stream_proxy_module.c Mon Aug 16 16:36:08 2021 +0300 +++ b/src/stream/ngx_stream_proxy_module.c Mon Aug 16 22:40:31 2021 +0300 @@ -2185,6 +2185,10 @@ ngx_stream_proxy_set_ssl(ngx_conf_t *cf, cln->handler = ngx_ssl_cleanup_ctx; cln->data = pscf->ssl; + if (ngx_ssl_ciphers(cf, pscf->ssl, &pscf->ssl_ciphers, 0) != NGX_OK) { + return NGX_ERROR; + } + if (pscf->ssl_certificate) { if 
(pscf->ssl_certificate_key == NULL) { @@ -2216,10 +2220,6 @@ ngx_stream_proxy_set_ssl(ngx_conf_t *cf, } } - if (ngx_ssl_ciphers(cf, pscf->ssl, &pscf->ssl_ciphers, 0) != NGX_OK) { - return NGX_ERROR; - } - if (pscf->ssl_verify) { if (pscf->ssl_trusted_certificate.len == 0) { ngx_log_error(NGX_LOG_EMERG, cf->log, 0, diff -r f2ddd0c491bf -r 419c066cb710 src/stream/ngx_stream_ssl_module.c --- a/src/stream/ngx_stream_ssl_module.c Mon Aug 16 16:36:08 2021 +0300 +++ b/src/stream/ngx_stream_ssl_module.c Mon Aug 16 22:40:31 2021 +0300 @@ -720,6 +720,13 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf ngx_stream_ssl_servername); #endif + if (ngx_ssl_ciphers(cf, &conf->ssl, &conf->ciphers, + conf->prefer_server_ciphers) + != NGX_OK) + { + return NGX_CONF_ERROR; + } + if (ngx_stream_ssl_compile_certificates(cf, conf) != NGX_OK) { return NGX_CONF_ERROR; } @@ -752,13 +759,6 @@ ngx_stream_ssl_merge_conf(ngx_conf_t *cf } } - if (ngx_ssl_ciphers(cf, &conf->ssl, &conf->ciphers, - conf->prefer_server_ciphers) - != NGX_OK) - { - return NGX_CONF_ERROR; - } - if (conf->verify) { if (conf->client_certificate.len == 0 && conf->verify != 3) { From robm at fastmail.fm Tue Aug 17 03:07:41 2021 From: robm at fastmail.fm (Robert Mueller) Date: Tue, 17 Aug 2021 13:07:41 +1000 Subject: [PATCH] Mail: Add Auth-SSL-Cipher header to each imap/pop/smtp auth request In-Reply-To: References: Message-ID: Hi > Shouldn't we also add Auth-SSL-Protocol if one of the declared use > cases is to upgrade to newer TLS versions? For what we were doing at the time, we found that the ciphers were enough. Having said that, I think on the next round of upgrades, knowing the protocol would be useful as well so I appreciate you adding this. > The patch which addresses above comments, please take a look if it > works for you: This looks good to me. Thanks for the feedback and cleanup. 
-- Rob Mueller robm at fastmail.fm From robm at fastmail.fm Tue Aug 17 03:35:19 2021 From: robm at fastmail.fm (Robert Mueller) Date: Tue, 17 Aug 2021 13:35:19 +1000 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive Message-ID: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> # HG changeset patch # User Rob Mueller # Date 1629171218 14400 # Mon Aug 16 23:33:38 2021 -0400 # Node ID 89ff95b268e9817b344447b7e6785354229a4bab # Parent dda421871bc213dd2eb3da0015d6228839323583 Mail: add the "reuseport" option of the "listen" directive The "reuseport" option was added to the "listen" directive of the http and stream modules in 1.9.1, but it wasn't added to the mail module. This adds the option to the mail module to make it consistent with the http and stream modules. In newer linux kernel versions (somewhere between 4.9 and 5.10) this option seems much more important. On production debian servers, not having or using this option caused processes to become very unbalanced. With 8 worker processes, we would see one worker process accepting 70%+ of all connections, a second process with about 10% or so, and the remaining 20% of connections spread over the other 6 processes. This obviously started causing problems as the worker process accepting the majority of connections would end up being 100% CPU bound well before the servers overall capacity. Adding and enabling this option fixed this entirely, and now all worker processes seem to accept and even spread of connections. 
diff -r dda421871bc2 -r 89ff95b268e9 src/mail/ngx_mail.c --- a/src/mail/ngx_mail.c Tue Aug 10 23:43:17 2021 +0300 +++ b/src/mail/ngx_mail.c Mon Aug 16 23:33:38 2021 -0400 @@ -347,6 +347,10 @@ ls->ipv6only = addr[i].opt.ipv6only; #endif +#if (NGX_HAVE_REUSEPORT) + ls->reuseport = addr[i].opt.reuseport; +#endif + mport = ngx_palloc(cf->pool, sizeof(ngx_mail_port_t)); if (mport == NULL) { return NGX_CONF_ERROR; diff -r dda421871bc2 -r 89ff95b268e9 src/mail/ngx_mail.h --- a/src/mail/ngx_mail.h Tue Aug 10 23:43:17 2021 +0300 +++ b/src/mail/ngx_mail.h Mon Aug 16 23:33:38 2021 -0400 @@ -40,6 +40,7 @@ #if (NGX_HAVE_INET6) unsigned ipv6only:1; #endif + unsigned reuseport:1; unsigned so_keepalive:2; unsigned proxy_protocol:1; #if (NGX_HAVE_KEEPALIVE_TUNABLE) diff -r dda421871bc2 -r 89ff95b268e9 src/mail/ngx_mail_core_module.c --- a/src/mail/ngx_mail_core_module.c Tue Aug 10 23:43:17 2021 +0300 +++ b/src/mail/ngx_mail_core_module.c Mon Aug 16 23:33:38 2021 -0400 @@ -447,6 +447,18 @@ #endif } + if (ngx_strcmp(value[i].data, "reuseport") == 0) { +#if (NGX_HAVE_REUSEPORT) + ls->reuseport = 1; + ls->bind = 1; +#else + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "reuseport is not supported " + "on this platform, ignored"); +#endif + continue; + } + if (ngx_strcmp(value[i].data, "ssl") == 0) { #if (NGX_MAIL_SSL) ngx_mail_ssl_conf_t *sslcf; Rob Mueller robm at fastmail.fm From maxim at nginx.com Tue Aug 17 13:48:25 2021 From: maxim at nginx.com (Maxim Konovalov) Date: Tue, 17 Aug 2021 16:48:25 +0300 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> Message-ID: <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> Hi Rob, On 17.08.2021 06:35, Robert Mueller wrote: > # HG changeset patch > # User Rob Mueller > # Date 1629171218 14400 > # Mon Aug 16 23:33:38 2021 -0400 > # Node ID 89ff95b268e9817b344447b7e6785354229a4bab > # Parent 
dda421871bc213dd2eb3da0015d6228839323583 > Mail: add the "reuseport" option of the "listen" directive > > The "reuseport" option was added to the "listen" directive of the http > and stream modules in 1.9.1, but it wasn't added to the mail module. This > adds the option to the mail module to make it consistent with the http > and stream modules. > > In newer linux kernel versions (somewhere between 4.9 and 5.10) this > option seems much more important. On production debian servers, not having > or using this option caused processes to become very unbalanced. With 8 > worker processes, we would see one worker process accepting 70%+ of all > connections, a second process with about 10% or so, and the remaining > 20% of connections spread over the other 6 processes. This obviously > started causing problems as the worker process accepting the majority > of connections would end up being 100% CPU bound well before the servers > overall capacity. > > Adding and enabling this option fixed this entirely, and now all > worker processes seem to accept and even spread of connections. > First, thanks for the patch. While the reuseport could cure (or hide if you will) the unbalancing you see it makes sense to get better understanding what exactly is going on. So far we haven't seen such weird behaviour ourself neither received reports about such uneven connections distribution among nginx workers. Any chances you have accept_mutex and/or multi_accept? Any other ideas? -- Maxim Konovalov From eran.kornblau at kaltura.com Tue Aug 17 15:28:10 2021 From: eran.kornblau at kaltura.com (Eran Kornblau) Date: Tue, 17 Aug 2021 15:28:10 +0000 Subject: Inconsistent len of complex value using zero flag Message-ID: Hi all, Something I bumped into today - When using the zero flag of ngx_http_compile_complex_value_t (for example, using ngx_http_set_complex_value_zero_slot) the length of the resulting string is inconsistent - 1.
If the complex value is a simple string, the length does not include the null terminator. 2. If the complex value includes variables, the length includes the null terminator. This happens because ngx_http_script_done adds code to copy the null explicitly, while simple strings are copied as-is from val->value at the beginning of ngx_http_complex_value. I assume that usually the length is not used when asking for a null terminated string. However, in my case, I'm parsing the resulting value, and I'm supporting several different formats. In some flows, I'm parsing the value using nginx functions (e.g. ngx_atoi) while in other flows, I'm using a glibc function (strptime). IMHO, the correct behavior is to change the impl so that it will never count the null terminator in the length. Maybe add a "zero" flag on ngx_http_complex_value_t, and do something like - value->len = len - val->zero; Thanks Eran -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Tue Aug 17 22:26:58 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 17 Aug 2021 22:26:58 +0000 Subject: [nginx] Mail: Auth-SSL-Protocol and Auth-SSL-Cipher headers (ticket #2134). Message-ID: details: https://hg.nginx.org/nginx/rev/13d0c1d26d47 branches: changeset: 7905:13d0c1d26d47 user: Rob Mueller date: Fri Aug 13 03:57:47 2021 -0400 description: Mail: Auth-SSL-Protocol and Auth-SSL-Cipher headers (ticket #2134). This adds new Auth-SSL-Protocol and Auth-SSL-Cipher headers to the mail proxy auth protocol when SSL is enabled. This can be useful for detecting users using older clients that negotiate old ciphers when you want to upgrade to newer TLS versions of remove suppport for old and insecure ciphers. You can use your auth backend to notify these users before the upgrade that they either need to upgrade their client software or contact your support team to work out an upgrade path.
diffstat: src/mail/ngx_mail_auth_http_module.c | 41 ++++++++++++++++++++++++++++++++++- 1 files changed, 39 insertions(+), 2 deletions(-) diffs (72 lines): diff -r 419c066cb710 -r 13d0c1d26d47 src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c Mon Aug 16 22:40:31 2021 +0300 +++ b/src/mail/ngx_mail_auth_http_module.c Fri Aug 13 03:57:47 2021 -0400 @@ -1137,8 +1137,8 @@ ngx_mail_auth_http_create_request(ngx_ma ngx_str_t login, passwd; ngx_connection_t *c; #if (NGX_MAIL_SSL) - ngx_str_t verify, subject, issuer, serial, fingerprint, - raw_cert, cert; + ngx_str_t protocol, cipher, verify, subject, issuer, + serial, fingerprint, raw_cert, cert; ngx_mail_ssl_conf_t *sslcf; #endif ngx_mail_core_srv_conf_t *cscf; @@ -1155,6 +1155,25 @@ ngx_mail_auth_http_create_request(ngx_ma #if (NGX_MAIL_SSL) + if (c->ssl) { + + if (ngx_ssl_get_protocol(c, pool, &protocol) != NGX_OK) { + return NULL; + } + + protocol.len = ngx_strlen(protocol.data); + + if (ngx_ssl_get_cipher_name(c, pool, &cipher) != NGX_OK) { + return NULL; + } + + cipher.len = ngx_strlen(cipher.data); + + } else { + ngx_str_null(&protocol); + ngx_str_null(&cipher); + } + sslcf = ngx_mail_get_module_srv_conf(s, ngx_mail_ssl_module); if (c->ssl && sslcf->verify) { @@ -1252,6 +1271,10 @@ ngx_mail_auth_http_create_request(ngx_ma if (c->ssl) { len += sizeof("Auth-SSL: on" CRLF) - 1 + + sizeof("Auth-SSL-Protocol: ") - 1 + protocol.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Cipher: ") - 1 + cipher.len + + sizeof(CRLF) - 1 + sizeof("Auth-SSL-Verify: ") - 1 + verify.len + sizeof(CRLF) - 1 + sizeof("Auth-SSL-Subject: ") - 1 + subject.len @@ -1373,6 +1396,20 @@ ngx_mail_auth_http_create_request(ngx_ma b->last = ngx_cpymem(b->last, "Auth-SSL: on" CRLF, sizeof("Auth-SSL: on" CRLF) - 1); + if (protocol.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Protocol: ", + sizeof("Auth-SSL-Protocol: ") - 1); + b->last = ngx_copy(b->last, protocol.data, protocol.len); + *b->last++ = CR; *b->last++ = LF; + } + 
+ if (cipher.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Cipher: ", + sizeof("Auth-SSL-Cipher: ") - 1); + b->last = ngx_copy(b->last, cipher.data, cipher.len); + *b->last++ = CR; *b->last++ = LF; + } + if (verify.len) { b->last = ngx_cpymem(b->last, "Auth-SSL-Verify: ", sizeof("Auth-SSL-Verify: ") - 1); From mdounin at mdounin.ru Tue Aug 17 22:28:13 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 18 Aug 2021 01:28:13 +0300 Subject: [PATCH] Mail: Add Auth-SSL-Cipher header to each imap/pop/smtp auth request In-Reply-To: References: Message-ID: Hello! On Tue, Aug 17, 2021 at 01:07:41PM +1000, Robert Mueller wrote: > > Shouldn't we also add Auth-SSL-Protocol if one of the declared > > use cases is to upgrade to newer TLS versions? > > For what we were doing at the time, we found that the ciphers > were enough. Having said that, I think on the next round of > upgrades, knowing the protocol would be useful as well so I > appreciate you adding this. > > > The patch which addresses above comments, please take a look > > if it works for you: > > This looks good to me. Thanks for the feedback and cleanup. Committed, thanks. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Aug 17 22:43:07 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 18 Aug 2021 01:43:07 +0300 Subject: Inconsistent len of complex value using zero flag In-Reply-To: References: Message-ID: Hello! On Tue, Aug 17, 2021 at 03:28:10PM +0000, Eran Kornblau wrote: > Something I bumped into today ? > > When using the zero flag of ngx_http_compile_complex_value_t (for example, using > ngx_http_set_complex_value_zero_slot) the length of the resulting string is inconsistent - > > 1. If the complex value is a simple string, the length does not include the null terminator. > 2. If the complex value includes variables, the length includes the null terminator. 
> > This happens because ngx_http_script_done adds code to copy the null explicitly, while simple strings > are copied as-is from val->value at the beginning of ngx_http_complex_value. > > I assume that usually the length is not used when asking for a null terminated string. > However, in my case, I?m parsing the resulting value, and I?m supporting several different formats. > In some flows, I?m parsing the value using nginx functions (e.g. ngx_atoi) while in other flows, > I?m using a glibc function (strptime). > > IMHO, the correct behavior is to change the impl so that it will never count the null terminator in the length. > Maybe add a ?zero? flag on ngx_http_complex_value_t, and do something like ? > value->len = len ? val->zero; Yes, that's known inconsistency. Unfortunately, there is no easy fix. Current approach is to use null-terminated string ignoring len if zero is used (see e48ac0136ee3, 9a970c905045) until someone will came up with a good fix. -- Maxim Dounin http://mdounin.ru/ From robm at fastmail.fm Wed Aug 18 01:14:25 2021 From: robm at fastmail.fm (Robert Mueller) Date: Wed, 18 Aug 2021 11:14:25 +1000 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> Message-ID: <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> > First, thanks for the patch. > > While the reuseport could cure (or hide if you will) the unbalancing you > see it makes sense to get better understanding what exactly is going on. > So far we haven't seen such weird behaviour ourself neither received > reports about such uneven connections distribution among nginx workers. > > Any chances you have accept_mutex and/or multi_accept? Any other ideas? Unfortunately I'm not 100% sure what's causing it, but it's pretty easy for us to reproduce even on our development machines. 
Just to show there's no accept_mutex or multi_accept in our config. ``` # grep accept /etc/nginx/mail.conf # ``` And here's what a cut down version of our config looks like. ``` worker_processes auto; worker_shutdown_timeout 5m; events { use epoll; worker_connections 65536; } ... mail { auth_http http://unix:/var/run/nginx/mail_auth.sock:/nginx/; imap_client_buffer 16k; imap_capabilities "IMAP4" "IMAP4rev1" "LITERAL+" "ENABLE" "UIDPLUS" "SASL-IR" "NAMESPACE" "CONDSTORE" "SORT" "LIST-EXTENDED" "QRESYNC" "MOVE" "SPECIAL-USE" "CREATE-SPECIAL-USE" "IDLE"; ssl_session_cache shared:sslcache:50m; ssl_session_timeout 30m; server { listen 10.a.b.c:993 ssl reuseport; auth_http_header "ServerHostname" "imap.foo"; ssl_prefer_server_ciphers on; ssl_protocols ... ssl_ciphers ...; ssl_certificate ...; ssl_certificate_key ...; protocol imap; proxy on; proxy_timeout 1h; } ``` With that on a development machine which has 4 vcpus we see: ``` # ps auxw | grep nginx | grep mail root 3839 0.0 0.0 68472 1372 ? Ss 08:16 0:00 nginx: master process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf nobody 3841 0.0 0.0 95732 3572 ? S 08:16 0:01 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf nobody 3842 0.0 0.0 95732 3284 ? S 08:16 0:01 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf nobody 3843 0.0 0.0 95796 4096 ? S 08:16 0:01 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf nobody 3846 0.0 0.0 95732 3092 ? S 08:16 0:01 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf ``` Now lets just create 1000 SSL connections and see how they get distributed between those procs. ``` # perl -e 'use IO::Socket::SSL; for (1..1000) { push @s, IO::Socket::SSL->new("imap.foo:993"); } print "done\n"; sleep 1000;' done ^Z [3]+ Stopped # for i in 3841 3842 3843 3846; do echo "$i - " `ls /proc/$i/fd | wc -l`; done 3841 - 335 3842 - 295 3843 - 293 3846 - 320 ``` Reasonably even. 
Now lets change `listen 10.a.b.c:993 ssl reuseport` to `listen 10.a.b.c:993 ssl` and restart. ``` # ps auxw | grep nginx | grep mail root 559885 0.0 0.0 68472 3104 ? Ss 21:01 0:00 nginx: master process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf nobody 559886 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf nobody 559887 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf nobody 559888 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf nobody 559889 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf # perl -e 'use IO::Socket::SSL; for (1..1000) { push @s, IO::Socket::SSL->new("imap.foo:993"); } print "done\n"; sleep 1000;' done ^Z [5]+ Stopped # for i in 559886 559887 559888 559889; do echo "$i - " `ls /proc/$i/fd | wc -l`; done 559886 - 1054 559887 - 57 559888 - 60 559889 - 57 ``` And as you can see, a completely uneven distribution of connections between processes! This doesn't just occur on our development machines either (e.g. it's not related to the source IP or anything), it occurs on production systems with connections arriving from real world customers and clients scattered around the world. This is a fairly standard debian buster distribution, though we use a back ported newer kernel, and a recent version of nginx. ``` # uname -a Linux xyz 5.10.0-0.bpo.4-amd64 #1 SMP Debian 5.10.19-1~bpo10+1 (2021-03-13) x86_64 GNU/Linux # /usr/local/nginx/sbin/nginx -v nginx version: nginx/1.20.1 ``` As you can see, without the reuseport option, this causes severe scalability problems for us. Even without that though, it would just be nice to have some more consistency of the `listen` options between http/stream/mail modules as well. 
-- Rob Mueller robm at fastmail.fm From jan.prachar at gmail.com Wed Aug 18 06:46:23 2021 From: jan.prachar at gmail.com (=?UTF-8?Q?Honza_Pracha=C5=99?=) Date: Wed, 18 Aug 2021 08:46:23 +0200 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> Message-ID: On Tue 17. 8. 2021 at 15:49, Maxim Konovalov wrote: > Hi Rob, > > On 17.08.2021 06:35, Robert Mueller wrote: > > # HG changeset patch > > # User Rob Mueller > > # Date 1629171218 14400 > > # Mon Aug 16 23:33:38 2021 -0400 > > # Node ID 89ff95b268e9817b344447b7e6785354229a4bab > > # Parent dda421871bc213dd2eb3da0015d6228839323583 > > Mail: add the "reuseport" option of the "listen" directive > > > > The "reuseport" option was added to the "listen" directive of the http > > and stream modules in 1.9.1, but it wasn't added to the mail module. This > > adds the option to the mail module to make it consistent with the http > > and stream modules. > > > > In newer linux kernel versions (somewhere between 4.9 and 5.10) this > > option seems much more important. On production debian servers, not > having > > or using this option caused processes to become very unbalanced. With 8 > > worker processes, we would see one worker process accepting 70%+ of all > > connections, a second process with about 10% or so, and the remaining > > 20% of connections spread over the other 6 processes. This obviously > > started causing problems as the worker process accepting the majority > > of connections would end up being 100% CPU bound well before the servers > > overall capacity. > > > > Adding and enabling this option fixed this entirely, and now all > > worker processes seem to accept and even spread of connections. > > > First, thanks for the patch. 
> > While the reuseport could cure (or hide if you will) the unbalancing you > see it makes sense to get better understanding what exactly is going on. > So far we haven't seen such weird behaviour ourself neither received > reports about such uneven connections distribution among nginx workers. Hello! It looks exactly like known linux epoll behavior, which is nicely explained here: https://blog.cloudflare.com/the-sad-state-of-linux-socket-balancing/ Best, Jan Prachař > > Any chances you have accept_mutex and/or multi_accept? Any other ideas? > > -- > Maxim Konovalov > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From info at phpgangsta.de Wed Aug 18 07:21:16 2021 From: info at phpgangsta.de (Michael Kliewe) Date: Wed, 18 Aug 2021 09:21:16 +0200 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> Message-ID: <7382ba9e-ac23-c49d-0cad-a4c1e634e7e8@phpgangsta.de> Am 18.08.2021 um 03:14 schrieb Robert Mueller: >> First, thanks for the patch. >> >> While the reuseport could cure (or hide if you will) the unbalancing you >> see it makes sense to get better understanding what exactly is going on. >> So far we haven't seen such weird behaviour ourself neither received >> reports about such uneven connections distribution among nginx workers. >> >> Any chances you have accept_mutex and/or multi_accept? Any other ideas? > xxxx > > As you can see, without the reuseport option, this causes severe scalability problems for us. Hi, I just checked, we see the same problem, the connection distribution is very uneven.
One process is using 30% CPU, one 15% CPU, and the rest is below 5%.

# for i in 1285 1286 1287 1288 1289 1290 1291 1292; do echo "$i - " `ls /proc/$i/fd | wc -l`; done
1285 - 106
1286 - 9447
1287 - 430
1288 - 2222
1289 - 76
1290 - 48
1291 - 24447
1292 - 42

We are using nginx version: nginx/1.20.1

Configuration snippet:

```
worker_processes auto;
worker_rlimit_nofile 120000;

events {
    worker_connections 60000;
}

http { ... }

mail {
  auth_http  127.0.0.1/mailauthXXXXX;
  proxy     on;
  starttls  on; ## enable STARTTLS for all mail servers

  ssl_prefer_server_ciphers  on;
  ssl_protocols              TLSv1.3 TLSv1.2 TLSv1.1 TLSv1;
  ssl_ciphers                xxxx;
  ssl_session_cache          shared:TLSSL:16m;
  ssl_session_timeout        10m;

  imap_auth login plain;
  ## STARTTLS is appended because of starttls directive above, AUTH=LOGIN and AUTH=PLAIN are also appended automatically
  imap_capabilities  "IMAP4rev1" "LITERAL+" "SASL-IR" "LOGIN-REFERRALS" "ID" "ENABLE" "IDLE" "NAMESPACE";
  pop3_capabilities  "TOP" "USER" "UIDL";

  server {
    listen                   a.b.c.d:993 ssl;
    listen                   [xxx:xxx:xxx:xxx::xxx]:993 ssl;
    protocol                 imap;
    server_name              imap.domain.com;
    auth_http_header         X-Auth-Port  993;
    auth_http_header         X-Domain     "imap.domain.com";
    auth_http_header         X-Auth       "xxxx";
    auth_http_header         User-Agent   "Nginx POP3/IMAP4 proxy";
    proxy_pass_error_message on;
    ssl_certificate          xxx.crt;
    ssl_certificate_key      xxx.key;
} } ``` Just wanted to show that you are not alone, Rob :-) Michael From maxim at nginx.com Wed Aug 18 09:16:10 2021 From: maxim at nginx.com (Maxim Konovalov) Date: Wed, 18 Aug 2021 12:16:10 +0300 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> Message-ID: <97ba6ce4-507b-1b9f-0ce5-1870a6361d71@nginx.com> On 18.08.2021 04:14, Robert Mueller wrote: > >> First, thanks for the patch. >> >> While the reuseport could cure (or hide if you will) the unbalancing you >> see it makes sense to get better understanding what exactly is going on. >> So far we haven't seen such weird behaviour ourself neither received >> reports about such uneven connections distribution among nginx workers. >> >> Any chances you have accept_mutex and/or multi_accept? Any other ideas? > > Unfortunately I'm not 100% sure what's causing it, but it's pretty easy for us to reproduce even on our development machines. Just to show there's no accept_mutex or multi_accept in our config. > > ``` > # grep accept /etc/nginx/mail.conf > # > ``` > > And here's what a cut down version of our config looks like. > > ``` > worker_processes auto; > worker_shutdown_timeout 5m; > > events { > use epoll; > worker_connections 65536; > } > ... > mail { > auth_http http://unix:/var/run/nginx/mail_auth.sock:/nginx/; > imap_client_buffer 16k; > imap_capabilities "IMAP4" "IMAP4rev1" "LITERAL+" "ENABLE" "UIDPLUS" "SASL-IR" "NAMESPACE" "CONDSTORE" "SORT" "LIST-EXTENDED" "QRESYNC" "MOVE" "SPECIAL-USE" "CREATE-SPECIAL-USE" "IDLE"; > ssl_session_cache shared:sslcache:50m; > ssl_session_timeout 30m; > > server { > listen 10.a.b.c:993 ssl reuseport; > auth_http_header "ServerHostname" "imap.foo"; > ssl_prefer_server_ciphers on; > ssl_protocols ... 
> ssl_ciphers ...; > ssl_certificate ...; > ssl_certificate_key ...; > protocol imap; > proxy on; > proxy_timeout 1h; > } > ``` > > With that on a development machine which has 4 vcpus we see: > > ``` > # ps auxw | grep nginx | grep mail > root 3839 0.0 0.0 68472 1372 ? Ss 08:16 0:00 nginx: master process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 3841 0.0 0.0 95732 3572 ? S 08:16 0:01 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 3842 0.0 0.0 95732 3284 ? S 08:16 0:01 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 3843 0.0 0.0 95796 4096 ? S 08:16 0:01 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 3846 0.0 0.0 95732 3092 ? S 08:16 0:01 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > ``` > > Now lets just create 1000 SSL connections and see how they get distributed between those procs. > > ``` > # perl -e 'use IO::Socket::SSL; for (1..1000) { push @s, IO::Socket::SSL->new("imap.foo:993"); } print "done\n"; sleep 1000;' > done > ^Z > [3]+ Stopped > # for i in 3841 3842 3843 3846; do echo "$i - " `ls /proc/$i/fd | wc -l`; done > 3841 - 335 > 3842 - 295 > 3843 - 293 > 3846 - 320 > ``` > > Reasonably even. > > Now lets change `listen 10.a.b.c:993 ssl reuseport` to `listen 10.a.b.c:993 ssl` and restart. > > ``` > # ps auxw | grep nginx | grep mail > root 559885 0.0 0.0 68472 3104 ? Ss 21:01 0:00 nginx: master process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 559886 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 559887 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 559888 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 559889 0.0 0.3 95620 30448 ? 
S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > # perl -e 'use IO::Socket::SSL; for (1..1000) { push @s, IO::Socket::SSL->new("imap.foo:993"); } print "done\n"; sleep 1000;' > done > ^Z > [5]+ Stopped > # for i in 559886 559887 559888 559889; do echo "$i - " `ls /proc/$i/fd | wc -l`; done > 559886 - 1054 > 559887 - 57 > 559888 - 60 > 559889 - 57 > ``` > > And as you can see, a completely uneven distribution of connections between processes! This doesn't just occur on our development machines either (e.g. it's not related to the source IP or anything), it occurs on production systems with connections arriving from real world customers and clients scattered around the world. > > This is a fairly standard debian buster distribution, though we use a back ported newer kernel, and a recent version of nginx. > > ``` > # uname -a > Linux xyz 5.10.0-0.bpo.4-amd64 #1 SMP Debian 5.10.19-1~bpo10+1 (2021-03-13) x86_64 GNU/Linux > # /usr/local/nginx/sbin/nginx -v > nginx version: nginx/1.20.1 > ``` > > As you can see, without the reuseport option, this causes severe scalability problems for us. > > Even without that though, it would just be nice to have some more consistency of the `listen` options between http/stream/mail modules as well. > This looks weird. We'll try to reproduce this in our lab. Thanks for the detailed script. Maxim -- Maxim Konovalov From mdounin at mdounin.ru Wed Aug 18 13:58:30 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 18 Aug 2021 16:58:30 +0300 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> Message-ID: Hello! On Wed, Aug 18, 2021 at 11:14:25AM +1000, Robert Mueller wrote: > > First, thanks for the patch. 
> > > > While the reuseport could cure (or hide if you will) the unbalancing you > > see it makes sense to get better understanding what exactly is going on. > > So far we haven't seen such weird behaviour ourself neither received > > reports about such uneven connections distribution among nginx workers. > > > > Any chances you have accept_mutex and/or multi_accept? Any other ideas? > > Unfortunately I'm not 100% sure what's causing it, but it's > pretty easy for us to reproduce even on our development > machines. Just to show there's no accept_mutex or multi_accept > in our config. [...] > Now lets change `listen 10.a.b.c:993 ssl reuseport` to `listen 10.a.b.c:993 ssl` and restart. > > ``` > # ps auxw | grep nginx | grep mail > root 559885 0.0 0.0 68472 3104 ? Ss 21:01 0:00 nginx: master process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 559886 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 559887 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 559888 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > nobody 559889 0.0 0.3 95620 30448 ? S 21:01 0:00 nginx: worker process /usr/local/nginx/sbin/nginx -c /etc/nginx/mail.conf > # perl -e 'use IO::Socket::SSL; for (1..1000) { push @s, IO::Socket::SSL->new("imap.foo:993"); } print "done\n"; sleep 1000;' > done > ^Z > [5]+ Stopped > # for i in 559886 559887 559888 559889; do echo "$i - " `ls /proc/$i/fd | wc -l`; done > 559886 - 1054 > 559887 - 57 > 559888 - 60 > 559889 - 57 > ``` > > And as you can see, a completely uneven distribution of > connections between processes! This doesn't just occur on our > development machines either (e.g. 
it's not related to the source > IP or anything), it occurs on production systems with > connections arriving from real world customers and clients > scattered around the world. Could you please test if compiling with --with-cc-opt="-DNGX_HAVE_EPOLLEXCLUSIVE=0" improves things, notably on production systems? In my limited testing it seems to be improve things, and if this is indeed the case, we can consider removing use of EPOLLEXCLUSIVE. Note well that the above test may, and probably will, result in uneven distribution in many cases. Things are, however, expected to improve over time, since new connections are more likely to be accepted by idle worker processes and not by worker processes which are already close to 100% CPU usage. That is, unless worker processes are restarted regularly in your setup for some reason, you probably should observe balancing improvements over time. The behaviour might depend on EPOLLEXCLUSIVE though, and it would be interesting to observe what happens in practice. [...] > As you can see, without the reuseport option, this causes severe > scalability problems for us. I tend to think that reuseport is a bad option for load balancing between worker processes, as it can be easily tricked by an outside actor to select a particular worker process, and this opens an obvious DoS attack vector. -- Maxim Dounin http://mdounin.ru/ From robm at fastmail.fm Wed Aug 18 14:28:59 2021 From: robm at fastmail.fm (Robert Mueller) Date: Thu, 19 Aug 2021 00:28:59 +1000 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> Message-ID: <741d7691-0329-4f6c-b8de-8371d1e98547@www.fastmail.com> > Could you please test if compiling with > --with-cc-opt="-DNGX_HAVE_EPOLLEXCLUSIVE=0" > improves things, notably on production systems? 
In my limited > testing it seems to be improve things, and if this is indeed the > case, we can consider removing use of EPOLLEXCLUSIVE. I can try this tomorrow, but did you see the link Jan posted to the cloudflare blog? https://blog.cloudflare.com/the-sad-state-of-linux-socket-balancing/ This explains the problem we're seeing exactly and why reuseport fixes it. > > As you can see, without the reuseport option, this causes severe > > scalability problems for us. > > I tend to think that reuseport is a bad option for load balancing > between worker processes, as it can be easily tricked by an outside > actor to select a particular worker process, and this opens an > obvious DoS attack vector. Really? Can you explain how this is possible? Also given that cloudflare use this option, and I expect cloudflare are literally the largest users of nginx in the world and also have to deal with extreme adversarial environments given they run a service to protect against DDoS, I would expect they would be aware of any potential DoS vector in this regard, or if not aware, extremely interested in hearing about it! -- Rob Mueller robm at fastmail.fm From mdounin at mdounin.ru Wed Aug 18 17:05:40 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 18 Aug 2021 20:05:40 +0300 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: <741d7691-0329-4f6c-b8de-8371d1e98547@www.fastmail.com> References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> <741d7691-0329-4f6c-b8de-8371d1e98547@www.fastmail.com> Message-ID: Hello! On Thu, Aug 19, 2021 at 12:28:59AM +1000, Robert Mueller wrote: > > Could you please test if compiling with > > --with-cc-opt="-DNGX_HAVE_EPOLLEXCLUSIVE=0" > > improves things, notably on production systems? 
In my limited > > testing it seems to be improve things, and if this is indeed the > > case, we can consider removing use of EPOLLEXCLUSIVE. > > I can try this tomorrow, but did you see the link Jan posted to the cloudflare blog? > > https://blog.cloudflare.com/the-sad-state-of-linux-socket-balancing/ > > This explains the problem we're seeing exactly and why reuseport fixes it. Yes, I've seen it. It also suggests that EPOLLEXCLUSIVE might be responsible for the balancing change you've observed with recent kernels, something I've also suspected. > > > As you can see, without the reuseport option, this causes severe > > > scalability problems for us. > > > > I tend to think that reuseport is a bad option for load balancing > > between worker processes, as it can be easily tricked by an outside > > actor to select a particular worker process, and this opens an > > obvious DoS attack vector. > > Really? Can you explain how this is possible? Since reuseport uses hash of the source address to balance incoming connections between sockets, the client can choose a source port to use so the hash will direct the connection to a particular socket, that is, to a particular worker process. This in turn makes it possible to overload this worker process (which is usually several times easier than overloading all worker processes), degrading or completely denying service to clients who happen to be balanced to the same worker process. > Also given that cloudflare use this option, and I expect > cloudflare are literally the largest users of nginx in the world > and also have to deal with extreme adversarial environments > given they run a service to protect against DDoS, I would expect > they would be aware of any potential DoS vector in this regard, > or if not aware, extremely interested in hearing about it! I believe Cloudflare has enough resources and/or enough mitigations in place to don't care. 
-- Maxim Dounin http://mdounin.ru/ From robm at fastmail.fm Thu Aug 19 01:23:05 2021 From: robm at fastmail.fm (Robert Mueller) Date: Thu, 19 Aug 2021 11:23:05 +1000 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> <741d7691-0329-4f6c-b8de-8371d1e98547@www.fastmail.com> Message-ID: > Since reuseport uses hash of the source address to balance > incoming connections between sockets, the client can choose a > source port to use so the hash will direct the connection to a > particular socket, that is, to a particular worker process. But if a client is choosing the same source-ip:source-port, it's not a real client or OS TCP stack, it's some system using raw packets designed for attacking another system, and in that case there's many other attack options available. I'm not convinced this is a large real-world concern for most users of nginx. IMHO I would still really like to see this patch applied because: 1. The patch is relatively small and matches the http and stream modules 2. It makes the mail module consistent with the http and stream modules which both support reuseport on their listen arguments 3. The current situation is clearly really bad, and other users have reported that they're seeing the same issue. Your suggestion is to recompile nginx with a particular option disabled, but this isn't required for stream or http handlers, just adding reuseport is an acceptable option to fix those handlers, and I think it should be something mail handlers can do as well. 
Regards -- Rob Mueller robm at fastmail.fm From mdounin at mdounin.ru Thu Aug 19 13:47:43 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 19 Aug 2021 16:47:43 +0300 Subject: [PATCH] Mail: add the "reuseport" option of the "listen" directive In-Reply-To: References: <469502cf-d50f-4f66-88be-0acdafb2ea6e@www.fastmail.com> <02d6d813-3b1d-f3c5-5c6a-8c5605ec2ec7@nginx.com> <31a0511d-7512-4b49-9feb-f88ef19dd11d@www.fastmail.com> <741d7691-0329-4f6c-b8de-8371d1e98547@www.fastmail.com> Message-ID: Hello! On Thu, Aug 19, 2021 at 11:23:05AM +1000, Robert Mueller wrote: > > Since reuseport uses hash of the source address to balance > > incoming connections between sockets, the client can choose a > > source port to use so the hash will direct the connection to a > > particular socket, that is, to a particular worker process. > > But if a client is choosing the same source-ip:source-port, it's > not a real client or OS TCP stack, it's some system using raw > packets designed for attacking another system, and in that case > there's many other attack options available. I'm not convinced > this is a large real-world concern for most users of nginx. It is trivial to select arbitrary source port using normal TCP stack. Even if it wasn't, nothing can stop attackers from using special tools. I'm not saying this is large concern, yet this is a concern one should be aware of. > IMHO I would still really like to see this patch applied > because: > > 1. The patch is relatively small and matches the http and stream > modules > 2. It makes the mail module consistent with the http and stream > modules which both support reuseport on their listen arguments > 3. The current situation is clearly really bad, and other users > have reported that they're seeing the same issue. 
Your > suggestion is to recompile nginx with a particular option > disabled, but this isn't required for stream or http handlers, > just adding reuseport is an acceptable option to fix those > handlers, and I think it should be something mail handlers can > do as well. The reuseport is implemented in nginx to mitigate lock contention issues on multiprocessor configurations in case of very high accept rates. It is not available in mail because high accept rates are not expected to be the case in mail protocols. While reuseport can be misused as a balancing solution, this is not something I can recommend, given various reuseport limitations and issues. Notably, reuseport requires a lot of additional sockets, which is known to be a problem in some configurations, does not prevent use of the same address in other processes, which requires great care when managing running server, and opens a DoS vector to overload a particular worker process, as outlined above. In the particular case you and others are reporting a better solution would be to address the issue which causes bad balancing. For now it looks like removing EPOLLEXCLUSIVE should fix things, making misuse of reuseport unnecessary in all modules. Testing on real servers affected by the issue is appreciated. -- Maxim Dounin http://mdounin.ru/ From xeioex at nginx.com Thu Aug 19 16:18:52 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 19 Aug 2021 16:18:52 +0000 Subject: [njs] Allowing to create external objects with NULL external pointer. Message-ID: details: https://hg.nginx.org/njs/rev/0fb3ced41fdc branches: changeset: 1688:0fb3ced41fdc user: Dmitry Volyntsev date: Fri Aug 13 12:20:46 2021 +0000 description: Allowing to create external objects with NULL external pointer. 
diffstat: src/njs_extern.c | 8 ++------ src/test/njs_unit_test.c | 12 ++++++++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diffs (75 lines): diff -r 377743cd9059 -r 0fb3ced41fdc src/njs_extern.c --- a/src/njs_extern.c Wed Aug 11 11:44:12 2021 +0800 +++ b/src/njs_extern.c Fri Aug 13 12:20:46 2021 +0000 @@ -179,12 +179,6 @@ njs_external_prop_handler(njs_vm_t *vm, *retval = *setval; } else { - external = njs_vm_external(vm, NJS_PROTO_ID_ANY, value); - if (njs_slow_path(external == NULL)) { - njs_value_undefined_set(retval); - return NJS_OK; - } - ov = njs_mp_alloc(vm->mem_pool, sizeof(njs_object_value_t)); if (njs_slow_path(ov == NULL)) { njs_memory_error(vm); @@ -203,6 +197,8 @@ njs_external_prop_handler(njs_vm_t *vm, ov->object.__proto__ = &vm->prototypes[NJS_OBJ_TYPE_OBJECT].object; ov->object.slots = slots; + external = njs_vm_external(vm, NJS_PROTO_ID_ANY, value); + njs_set_data(&ov->value, external, njs_value_external_tag(value)); njs_set_object_value(retval, ov); } diff -r 377743cd9059 -r 0fb3ced41fdc src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Wed Aug 11 11:44:12 2021 +0800 +++ b/src/test/njs_unit_test.c Fri Aug 13 12:20:46 2021 +0000 @@ -20862,7 +20862,8 @@ static njs_unit_test_t njs_shared_test[ njs_str("false") }, { njs_str("isFin()"), - njs_str("ReferenceError: \"isFin\" is not defined") }, + njs_str("ReferenceError: \"isFin\" is not defined\n" + " at main (:1)\n") }, { njs_str("isNaN(function(){})"), njs_str("true") }, @@ -20918,6 +20919,11 @@ static njs_unit_test_t njs_shared_test[ { njs_str("$r.bind('XXX', 37); XXX"), njs_str("37") }, + + { njs_str("var fs = require('fs'); fs.readFileSync()"), + njs_str("TypeError: \"path\" must be a string or Buffer\n" + " at fs.readFileSync (native)\n" + " at main (:1)\n") }, }; @@ -21403,6 +21409,7 @@ typedef struct { njs_bool_t module; njs_uint_t repeat; njs_bool_t unsafe; + njs_bool_t backtrace; } njs_opts_t; @@ -21456,6 +21463,7 @@ njs_unit_test(njs_unit_test_t tests[], s options.module = 
opts->module; options.unsafe = opts->unsafe; + options.backtrace = opts->backtrace; vm = njs_vm_create(&options); if (vm == NULL) { @@ -22829,7 +22837,7 @@ static njs_test_suite_t njs_suites[] = njs_unit_test }, { njs_str("shared"), - { .externals = 1, .repeat = 128, .unsafe = 1 }, + { .externals = 1, .repeat = 128, .unsafe = 1, .backtrace = 1 }, njs_shared_test, njs_nitems(njs_shared_test), njs_unit_test }, From xeioex at nginx.com Thu Aug 19 16:18:54 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 19 Aug 2021 16:18:54 +0000 Subject: [njs] Refactored njs_object_traverse(). Message-ID: details: https://hg.nginx.org/njs/rev/b0177571ce1d branches: changeset: 1689:b0177571ce1d user: Dmitry Volyntsev date: Thu Aug 19 16:17:19 2021 +0000 description: Refactored njs_object_traverse(). The previous approach was inconsistent in treating shared properties with other places (JSON.stringify()) where object is iterated. diffstat: src/njs_builtin.c | 1 + src/njs_object.c | 109 +++++++++++++++++++++++++++------------------- src/njs_object.h | 6 +- src/test/njs_unit_test.c | 3 +- test/njs_expect_test.exp | 6 +- 5 files changed, 72 insertions(+), 53 deletions(-) diffs (212 lines): diff -r 0fb3ced41fdc -r b0177571ce1d src/njs_builtin.c --- a/src/njs_builtin.c Fri Aug 13 12:20:46 2021 +0000 +++ b/src/njs_builtin.c Thu Aug 19 16:17:19 2021 +0000 @@ -470,6 +470,7 @@ njs_builtin_traverse(njs_vm_t *vm, njs_t } } + njs_assert(njs_is_string(&key)); njs_string_get(&key, &name); if (njs_slow_path((p + name.length + 3) > end)) { diff -r 0fb3ced41fdc -r b0177571ce1d src/njs_object.c --- a/src/njs_object.c Fri Aug 13 12:20:46 2021 +0000 +++ b/src/njs_object.c Thu Aug 19 16:17:19 2021 +0000 @@ -1128,87 +1128,104 @@ njs_int_t njs_object_traverse(njs_vm_t *vm, njs_object_t *object, void *ctx, njs_object_traverse_cb_t cb) { - njs_int_t depth, ret; - njs_str_t name; - njs_arr_t visited; - njs_object_t **start; - njs_value_t value, obj; - njs_object_prop_t *prop; - njs_traverse_t 
state[NJS_TRAVERSE_MAX_DEPTH]; - - depth = 0; - - state[depth].prop = NULL; - state[depth].parent = NULL; - state[depth].object = object; - state[depth].hash = &object->shared_hash; - njs_lvlhsh_each_init(&state[depth].lhe, &njs_object_hash_proto); + njs_int_t ret; + njs_arr_t visited; + njs_object_t **start; + njs_value_t value, *key; + njs_traverse_t *s; + njs_object_prop_t *prop; + njs_property_query_t pq; + njs_traverse_t state[NJS_TRAVERSE_MAX_DEPTH]; + + s = &state[0]; + s->prop = NULL; + s->parent = NULL; + s->index = 0; + njs_set_object(&s->value, object); + s->keys = njs_value_own_enumerate(vm, &s->value, NJS_ENUM_KEYS, + NJS_ENUM_STRING | NJS_ENUM_SYMBOL, 1); + if (njs_slow_path(s->keys == NULL)) { + return NJS_ERROR; + } start = njs_arr_init(vm->mem_pool, &visited, NULL, 8, sizeof(void *)); if (njs_slow_path(start == NULL)) { return NJS_ERROR; } - njs_set_object(&value, object); - (void) njs_traverse_visit(&visited, &value); + (void) njs_traverse_visit(&visited, &s->value); for ( ;; ) { - prop = njs_lvlhsh_each(state[depth].hash, &state[depth].lhe); - - if (prop == NULL) { - if (state[depth].hash == &state[depth].object->shared_hash) { - state[depth].hash = &state[depth].object->hash; - njs_lvlhsh_each_init(&state[depth].lhe, &njs_object_hash_proto); + + if (s->index >= s->keys->length) { + njs_array_destroy(vm, s->keys); + s->keys = NULL; + + if (s == &state[0]) { + goto done; + } + + s--; + continue; + } + + njs_property_query_init(&pq, NJS_PROPERTY_QUERY_GET, 0); + key = &s->keys->start[s->index++]; + + ret = njs_property_query(vm, &pq, &s->value, key); + if (njs_slow_path(ret != NJS_OK)) { + if (ret == NJS_DECLINED) { continue; } - if (depth == 0) { - goto done; - } - - depth--; - continue; + return NJS_ERROR; } - state[depth].prop = prop; - - ret = cb(vm, &state[depth], ctx); + prop = pq.lhq.value; + s->prop = prop; + + ret = cb(vm, s, ctx); if (njs_slow_path(ret != NJS_OK)) { return ret; } - value = prop->value; + njs_value_assign(&value, 
&prop->value); if (prop->type == NJS_PROPERTY_HANDLER) { - njs_set_object(&obj, state[depth].object); - ret = prop->value.data.u.prop_handler(vm, prop, &obj, NULL, &value); + ret = prop->value.data.u.prop_handler(vm, prop, &s->value, NULL, + &value); if (njs_slow_path(ret == NJS_ERROR)) { return ret; } } - njs_string_get(&prop->name, &name); - - if (njs_is_object(&value) && !njs_traverse_visited(&visited, &value)) { + if (njs_is_object(&value) + && !njs_is_array(&value) + && !njs_traverse_visited(&visited, &value)) + { ret = njs_traverse_visit(&visited, &value); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } - if (++depth > (NJS_TRAVERSE_MAX_DEPTH - 1)) { + if (s == &state[NJS_TRAVERSE_MAX_DEPTH - 1]) { njs_type_error(vm, "njs_object_traverse() recursion limit:%d", - depth); + NJS_TRAVERSE_MAX_DEPTH); return NJS_ERROR; } - state[depth].prop = NULL; - state[depth].parent = &state[depth - 1]; - state[depth].object = njs_object(&value); - state[depth].hash = &njs_object(&value)->shared_hash; - njs_lvlhsh_each_init(&state[depth].lhe, &njs_object_hash_proto); + s++; + s->prop = NULL; + s->parent = &s[-1]; + s->index = 0; + njs_value_assign(&s->value, &value); + s->keys = njs_value_own_enumerate(vm, &s->value, NJS_ENUM_KEYS, + NJS_ENUM_STRING | NJS_ENUM_SYMBOL, 1); + if (njs_slow_path(s->keys == NULL)) { + return NJS_ERROR; + } } - } done: diff -r 0fb3ced41fdc -r b0177571ce1d src/njs_object.h --- a/src/njs_object.h Fri Aug 13 12:20:46 2021 +0000 +++ b/src/njs_object.h Thu Aug 19 16:17:19 2021 +0000 @@ -27,9 +27,9 @@ struct njs_traverse_s { struct njs_traverse_s *parent; njs_object_prop_t *prop; - njs_object_t *object; - njs_lvlhsh_t *hash; - njs_lvlhsh_each_t lhe; + njs_value_t value; + njs_array_t *keys; + int64_t index; #define NJS_TRAVERSE_MAX_DEPTH 32 }; diff -r 0fb3ced41fdc -r b0177571ce1d src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Fri Aug 13 12:20:46 2021 +0000 +++ b/src/test/njs_unit_test.c Thu Aug 19 16:17:19 2021 +0000 @@ -21265,8 +21265,9 
@@ static njs_unit_test_t njs_shell_test[] " at main (:1)\n") }, { njs_str("$shared.method({}.a.a)" ENTER), + /* FIXME: at $shared.method (native) */ njs_str("TypeError: cannot get property \"a\" of undefined\n" - " at $shared.method (native)\n" + " at $r.method (native)\n" " at main (:1)\n") }, { njs_str("new Function(\n\n@)" ENTER), diff -r 0fb3ced41fdc -r b0177571ce1d test/njs_expect_test.exp --- a/test/njs_expect_test.exp Fri Aug 13 12:20:46 2021 +0000 +++ b/test/njs_expect_test.exp Thu Aug 19 16:17:19 2021 +0000 @@ -121,10 +121,10 @@ njs_test { } njs_test { - {"Ma\t" - "Ma\a*th"} + {"JS\t" + "JS\a*ON"} {"\t\t" - "Math.abs*Math.atan2"} + "JSON.parse*JSON.stringify"} } # Global completions, no matches From alexey.radkov at gmail.com Thu Aug 19 18:34:39 2021 From: alexey.radkov at gmail.com (=?iso-8859-1?q?Alexey_Radkov?=) Date: Thu, 19 Aug 2021 21:34:39 +0300 Subject: [PATCH] Avoid unnecessary restriction on nohash http variables Message-ID: # HG changeset patch # User Alexey Radkov # Date 1629395487 -10800 # Thu Aug 19 20:51:27 2021 +0300 # Node ID a1065b2252855730ed8e5368c88fe41a7ff5a698 # Parent 13d0c1d26d47c203b1874ca1ffdb7a9ba7fd2d77 Avoid unnecessary restriction on nohash http variables. When I use variables with long names albeit being tagged as NGX_HTTP_VARIABLE_NOHASH, Nginx says "could not build variables_hash, you should increase variables_hash_bucket_size: 64". It seems that this is an unnecessary restriction, as soon as the hash gets only built for variables with names[n].key.data == NULL (note that other pieces in ngx_hash_init() where the macro NGX_HASH_ELT_SIZE is used, are always guarded with this condition). This fix puts this same condition into the only unguarded piece: when testing against the hash_bucket_size. The issue arises after assignment of key[n].key.data = NULL without symmetric assignment of key[n].key.len in ngx_http_variables_init_vars(): after this, the key[n].key comes to an inconsistent state. 
Perhaps this was made intentionally, as hash initialization in other places seems to follow the same pattern (for instance, see how ngx_hash_init() gets called from ngx_http_upstream_hide_headers_hash()). Without this fix, I must put in the config "variables_hash_bucket_size 128;" even if the long-named variables are nohash. diff -r 13d0c1d26d47 -r a1065b225285 src/core/ngx_hash.c --- a/src/core/ngx_hash.c Fri Aug 13 03:57:47 2021 -0400 +++ b/src/core/ngx_hash.c Thu Aug 19 20:51:27 2021 +0300 @@ -274,6 +274,9 @@ } for (n = 0; n < nelts; n++) { + if (names[n].key.data == NULL) { + continue; + } if (hinit->bucket_size < NGX_HASH_ELT_SIZE(&names[n]) + sizeof(void *)) { ngx_log_error(NGX_LOG_EMERG, hinit->pool->log, 0, From mdounin at mdounin.ru Fri Aug 20 19:14:06 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 20 Aug 2021 19:14:06 +0000 Subject: [nginx] Upstream: fixed timeouts with gRPC, SSL and select (ticket #2229). Message-ID: details: https://hg.nginx.org/nginx/rev/058a67435e83 branches: changeset: 7906:058a67435e83 user: Maxim Dounin date: Fri Aug 20 03:53:56 2021 +0300 description: Upstream: fixed timeouts with gRPC, SSL and select (ticket #2229). With SSL it is possible that an established connection is ready for reading after the handshake. Further, events might be already disabled in case of level-triggered event methods. If this happens and ngx_http_upstream_send_request() blocks waiting for some data from the upstream, such as flow control in case of gRPC, the connection will time out due to no read events on the upstream connection. Fix is to explicitly check the c->read->ready flag if sending request blocks and post a read event if it is set. Note that while it is possible to modify ngx_ssl_handshake() to keep read events active, this won't completely resolve the issue, since there can be data already received during the SSL handshake (see 573bd30e46b4). 
diffstat: src/http/ngx_http_upstream.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diffs (14 lines): diff -r 13d0c1d26d47 -r 058a67435e83 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Fri Aug 13 03:57:47 2021 -0400 +++ b/src/http/ngx_http_upstream.c Fri Aug 20 03:53:56 2021 +0300 @@ -2113,6 +2113,10 @@ ngx_http_upstream_send_request(ngx_http_ c->tcp_nopush = NGX_TCP_NOPUSH_UNSET; } + if (c->read->ready) { + ngx_post_event(c->read, &ngx_posted_events); + } + return; } From ping.zhao at intel.com Thu Aug 26 01:48:23 2021 From: ping.zhao at intel.com (Zhao, Ping) Date: Thu, 26 Aug 2021 01:48:23 +0000 Subject: [PATCH] Add io_uring support in AIO(async io) module Message-ID: Hi Maxim, It's been long time and I lost the mail thread. Is it now the good time to return to io_uring? I saw kernel group made many progress on it. How does Nginx forum think about this new kernel feature? Best, Ping -----Original Message----- From: nginx-devel On Behalf Of Maxim Dounin Sent: March 22, 2021 12:18PM To: nginx-devel at nginx.org Subject: Re: [PATCH] Add io_uring support in AIO(async io) module Hello! On Sat, Feb 27, 2021 at 12:48:04PM +0000, Zhao, Ping wrote: > Yes, io_uring can help Nginx achieve same performance with lower > resource cost. This is the key improvement of io_uring I think. > It can't break through the HW limitation. Thanks for the patch and testing. Potentially this looks interesting, despite the fact that improvements even in terms of resource costs seem to be minor compared to properly configured nginx using other I/O variants. There are, however, some concerns regarding the interface itself and tunings need to be applied in order for it to work, as well as regressions in various kernels out there. There are also some questions about the patch, notably retries in case of short reads, though this probably needs better understanding of the interface. 
As of now, the consensus is that we'll get back to this some time later, though probably we want to make this not a default aio method at first, but rather an alternative one, available with something like "aio io_uring;", similarly to "aio threads;". Thanks again for your work. We'll contact you once we return to this topic. -- Maxim Dounin http://mdounin.ru/ _______________________________________________ nginx-devel mailing list nginx-devel at nginx.org http://mailman.nginx.org/mailman/listinfo/nginx-devel From mat999 at gmail.com Thu Aug 26 02:25:56 2021 From: mat999 at gmail.com (Mathew Heard) Date: Thu, 26 Aug 2021 12:25:56 +1000 Subject: [PATCH] Add io_uring support in AIO(async io) module In-Reply-To: References: Message-ID: If there are performance regressions perhaps these could be documented in the events documentation. Something along the lines of a recommended minimum kernel. On Thu, 26 Aug 2021 at 11:48, Zhao, Ping wrote: > > Hi Maxim, > > It's been long time and I lost the mail thread. Is it now the good time to return to io_uring? I saw kernel group made many progress on it. How does Nginx forum think about this new kernel feature? > > Best, > Ping > > -----Original Message----- > From: nginx-devel On Behalf Of Maxim Dounin > Sent: March 22, 2021 12:18PM > To: nginx-devel at nginx.org > Subject: Re: [PATCH] Add io_uring support in AIO(async io) module > > Hello! > > On Sat, Feb 27, 2021 at 12:48:04PM +0000, Zhao, Ping wrote: > > > Yes, io_uring can help Nginx achieve same performance with lower > > resource cost. This is the key improvement of io_uring I think. > > It can't break through the HW limitation. > > Thanks for the patch and testing. > > Potentially this looks interesting, despite the fact that > improvements even in terms of resource costs seem to be minor > compared to properly configured nginx using other I/O variants. 
> > There are, however, some concerns regarding the interface itself > and tunings need to be applied in order for it to work, as well > as regressions in various kernels out there. There are also some > questions about the patch, notably retries in case of short reads, > though this probably needs better understanding of the interface. > > As of now, the consensus is that we'll get back to this some time > later, though probably we want to make this not a default aio > method at first, but rather an alternative one, available with > something like "aio io_uring;", similarly to "aio threads;". > > Thanks again for your work. We'll contact you once we return to > this topic. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From ottavio at campana.vi.it Thu Aug 26 13:12:08 2021 From: ottavio at campana.vi.it (Ottavio Campana) Date: Thu, 26 Aug 2021 15:12:08 +0200 Subject: Adding a fd that is not obtained through accept to the list the active connections Message-ID: Hello, I want to write a module for an nginx that runs on a device with a private IP address and behind NAT, connects to a remote server and adds the newly created connection to the list of connections handled by nginx. At this point the remote server will invert the connection and start making requests. I tried studying the documentation on nginx.org, but I am not able to get an idea about how to achieve this. Can you please give me a suggestion about how to do it? Thank you, Ottavio -- Non c'? pi? forza nella normalit?, c'? solo monotonia -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From phillip.odam at nitorgroup.com Thu Aug 26 19:57:59 2021 From: phillip.odam at nitorgroup.com (Phillip Odam) Date: Thu, 26 Aug 2021 15:57:59 -0400 Subject: Adding a fd that is not obtained through accept to the list the active connections In-Reply-To: References: Message-ID: Hi Ottavio I?m probably overlooking something in the description of what you want but I think NGINX out of the box provides you with what you want. Here?s an example of one I?ve implemented. A software project I don?t develop retrieves dependencies from w3.org (it?s a SOAP web service that retrieves a remote XSD) The trouble with depending on a file sourced from w3.org is that if the file is requested too frequently w3.org will rate limit your requests. One solution would be to store the XSD in the web service but that?d require changing the application. I chose to run an NGINX where it?s upstream was configured to point at w3.org and to avoid needlessly retrieving the same content repeatedly a cache was used. So the NGINX was listening on local host in this case and for the backend it was connecting to whatever w3.org resolves to. And so no changes were needed in the app I changed the servers hosts file to point w3.org to localhost. This isn?t a problem for NGINX to connect to w3.org as the w3.org IP was hard coded in the upstream. And NGINX?s built in connection counting works with this. Barring the cache is this what you?re describing you want? Phillip On Thursday, August 26, 2021, Ottavio Campana wrote: > Hello, > > I want to write a module for an nginx that runs on a device with a private > IP address and behind NAT, connects to a remote server and adds the newly > created connection to the list of connections handled by nginx. At this > point the remote server will invert the connection and start making > requests. > > I tried studying the documentation on nginx.org, but I am not able to get > an idea about how to achieve this. 
> > Can you please give me a suggestion about how to do it? > > Thank you, > > Ottavio > > -- > Non c'? pi? forza nella normalit?, c'? solo monotonia > -------------- next part -------------- An HTML attachment was scrubbed... URL: From ottavio at campana.vi.it Fri Aug 27 09:47:05 2021 From: ottavio at campana.vi.it (Ottavio Campana) Date: Fri, 27 Aug 2021 11:47:05 +0200 Subject: Adding a fd that is not obtained through accept to the list the active connections In-Reply-To: References: Message-ID: Dear Phillip, I think it is not what I am trying to do (or I did not completely understand your solution). As far as I understand, when nginx dispatches a request it checks if it is a file or, among others, something that can be retrieved from an upstream. But upstreams act as clients, they do accept requests coming from the remote server. My goal is to have a connection to a remote server that is somehow started by the nginx, but then the connection is reversed, the "upstream" makes requests and nginx responds. Is there a way to achieve this with upstreams? Thank you, Ottavio Il giorno gio 26 ago 2021 alle ore 21:58 Phillip Odam < phillip.odam at nitorgroup.com> ha scritto: > Hi Ottavio > > I?m probably overlooking something in the description of what you want but > I think NGINX out of the box provides you with what you want. > > Here?s an example of one I?ve implemented. > > A software project I don?t develop retrieves dependencies from w3.org > (it?s a SOAP web service that retrieves a remote XSD) > > The trouble with depending on a file sourced from w3.org is that if the > file is requested too frequently w3.org will rate limit your requests. > > One solution would be to store the XSD in the web service but that?d > require changing the application. > > I chose to run an NGINX where it?s upstream was configured to point at > w3.org and to avoid needlessly retrieving the same content repeatedly a > cache was used. 
> > So the NGINX was listening on local host in this case and for the backend > it was connecting to whatever w3.org resolves to. And so no changes were > needed in the app I changed the servers hosts file to point w3.org to > localhost. This isn?t a problem for NGINX to connect to w3.org as the > w3.org IP was hard coded in the upstream. > > And NGINX?s built in connection counting works with this. > > Barring the cache is this what you?re describing you want? > > Phillip > > On Thursday, August 26, 2021, Ottavio Campana > wrote: > >> Hello, >> >> I want to write a module for an nginx that runs on a device with a >> private IP address and behind NAT, connects to a remote server and adds the >> newly created connection to the list of connections handled by nginx. At >> this point the remote server will invert the connection and start making >> requests. >> >> I tried studying the documentation on nginx.org, but I am not able to >> get an idea about how to achieve this. >> >> Can you please give me a suggestion about how to do it? >> >> Thank you, >> >> Ottavio >> >> -- >> Non c'? pi? forza nella normalit?, c'? solo monotonia >> > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Non c'? pi? forza nella normalit?, c'? solo monotonia -------------- next part -------------- An HTML attachment was scrubbed... URL: From phillip.odam at nitorgroup.com Fri Aug 27 11:10:20 2021 From: phillip.odam at nitorgroup.com (Phillip Odam) Date: Fri, 27 Aug 2021 07:10:20 -0400 Subject: Adding a fd that is not obtained through accept to the list the active connections In-Reply-To: References: Message-ID: Hi Ottavio I get you now, you?re trying to find a way for the remote server to get through your NAT router. Who controls the remote server? Because even if you initiate the TCP connection to it unless it ?knows? to make an HTTP request back nothings going to happen. 
The simplest approach for getting through your NAT would be to just setup a port forward, any particular qualms with that? It can be shied away from for security but managed properly it?s a perfectly acceptable approach. As an aside, these guys wrote up a brilliant piece on punching holes through two NATs (the public sides each facing each other over the internet) https://tailscale.com/blog/, at a quick glance I couldn?t spot the particular post. Cheers Phillip On Friday, August 27, 2021, Ottavio Campana wrote: > Dear Phillip, > > I think it is not what I am trying to do (or I did not completely > understand your solution). > > As far as I understand, when nginx dispatches a request it checks if it is > a file or, among others, something that can be retrieved from an upstream. > But upstreams act as clients, they do accept requests coming from the > remote server. > > My goal is to have a connection to a remote server that is somehow started > by the nginx, but then the connection is reversed, the "upstream" makes > requests and nginx responds. > > Is there a way to achieve this with upstreams? > > Thank you, > > Ottavio > > Il giorno gio 26 ago 2021 alle ore 21:58 Phillip Odam < > phillip.odam at nitorgroup.com> ha scritto: > >> Hi Ottavio >> >> I?m probably overlooking something in the description of what you want >> but I think NGINX out of the box provides you with what you want. >> >> Here?s an example of one I?ve implemented. >> >> A software project I don?t develop retrieves dependencies from w3.org >> (it?s a SOAP web service that retrieves a remote XSD) >> >> The trouble with depending on a file sourced from w3.org is that if the >> file is requested too frequently w3.org will rate limit your requests. >> >> One solution would be to store the XSD in the web service but that?d >> require changing the application. 
>> >> I chose to run an NGINX where it?s upstream was configured to point at >> w3.org and to avoid needlessly retrieving the same content repeatedly a >> cache was used. >> >> So the NGINX was listening on local host in this case and for the backend >> it was connecting to whatever w3.org resolves to. And so no changes were >> needed in the app I changed the servers hosts file to point w3.org to >> localhost. This isn?t a problem for NGINX to connect to w3.org as the >> w3.org IP was hard coded in the upstream. >> >> And NGINX?s built in connection counting works with this. >> >> Barring the cache is this what you?re describing you want? >> >> Phillip >> >> On Thursday, August 26, 2021, Ottavio Campana >> wrote: >> >>> Hello, >>> >>> I want to write a module for an nginx that runs on a device with a >>> private IP address and behind NAT, connects to a remote server and adds the >>> newly created connection to the list of connections handled by nginx. At >>> this point the remote server will invert the connection and start making >>> requests. >>> >>> I tried studying the documentation on nginx.org, but I am not able to >>> get an idea about how to achieve this. >>> >>> Can you please give me a suggestion about how to do it? >>> >>> Thank you, >>> >>> Ottavio >>> >>> -- >>> Non c'? pi? forza nella normalit?, c'? solo monotonia >>> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > -- > Non c'? pi? forza nella normalit?, c'? solo monotonia > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From ottavio at campana.vi.it Fri Aug 27 11:59:03 2021 From: ottavio at campana.vi.it (Ottavio Campana) Date: Fri, 27 Aug 2021 13:59:03 +0200 Subject: Adding a fd that is not obtained through accept to the list the active connections In-Reply-To: References: Message-ID: Dear Phillip, I know Tailscale very well, I use it and like it a lot. But my final goal is finding a way to implement the ONVIF Uplink service, https://www.onvif.org/specs/srv/uplink/ONVIF-Uplink-Spec.pdf , where I can have several devices on the LAN that need to connect to a remote server, which will then send commands. Therefore I need a way to start a connection from nginx (or an external program and then passing the fd through a unix socket domain) and make it act as if the fd were obtained from an accept. Nginx works with events and I find it very difficult to find a mechanism to pass this connection to it. Do you have other ideas? Thank you, Ottavio Il giorno ven 27 ago 2021 alle ore 13:10 Phillip Odam < phillip.odam at nitorgroup.com> ha scritto: > Hi Ottavio > > I get you now, you?re trying to find a way for the remote server to get > through your NAT router. Who controls the remote server? Because even if > you initiate the TCP connection to it unless it ?knows? to make an HTTP > request back nothings going to happen. > > The simplest approach for getting through your NAT would be to just setup > a port forward, any particular qualms with that? It can be shied away from > for security but managed properly it?s a perfectly acceptable approach. > > As an aside, these guys wrote up a brilliant piece on punching holes > through two NATs (the public sides each facing each other over the > internet) https://tailscale.com/blog/, at a quick glance I couldn?t spot > the particular post. > > Cheers > Phillip > > On Friday, August 27, 2021, Ottavio Campana wrote: > >> Dear Phillip, >> >> I think it is not what I am trying to do (or I did not completely >> understand your solution). 
>> >> As far as I understand, when nginx dispatches a request it checks if it >> is a file or, among others, something that can be retrieved from an >> upstream. But upstreams act as clients, they do accept requests coming from >> the remote server. >> >> My goal is to have a connection to a remote server that is somehow >> started by the nginx, but then the connection is reversed, the "upstream" >> makes requests and nginx responds. >> >> Is there a way to achieve this with upstreams? >> >> Thank you, >> >> Ottavio >> >> Il giorno gio 26 ago 2021 alle ore 21:58 Phillip Odam < >> phillip.odam at nitorgroup.com> ha scritto: >> >>> Hi Ottavio >>> >>> I?m probably overlooking something in the description of what you want >>> but I think NGINX out of the box provides you with what you want. >>> >>> Here?s an example of one I?ve implemented. >>> >>> A software project I don?t develop retrieves dependencies from w3.org >>> (it?s a SOAP web service that retrieves a remote XSD) >>> >>> The trouble with depending on a file sourced from w3.org is that if the >>> file is requested too frequently w3.org will rate limit your requests. >>> >>> One solution would be to store the XSD in the web service but that?d >>> require changing the application. >>> >>> I chose to run an NGINX where it?s upstream was configured to point at >>> w3.org and to avoid needlessly retrieving the same content repeatedly a >>> cache was used. >>> >>> So the NGINX was listening on local host in this case and for the >>> backend it was connecting to whatever w3.org resolves to. And so no >>> changes were needed in the app I changed the servers hosts file to point >>> w3.org to localhost. This isn?t a problem for NGINX to connect to w3.org >>> as the w3.org IP was hard coded in the upstream. >>> >>> And NGINX?s built in connection counting works with this. >>> >>> Barring the cache is this what you?re describing you want? 
>>> >>> Phillip >>> >>> On Thursday, August 26, 2021, Ottavio Campana >>> wrote: >>> >>>> Hello, >>>> >>>> I want to write a module for an nginx that runs on a device with a >>>> private IP address and behind NAT, connects to a remote server and adds the >>>> newly created connection to the list of connections handled by nginx. At >>>> this point the remote server will invert the connection and start making >>>> requests. >>>> >>>> I tried studying the documentation on nginx.org, but I am not able to >>>> get an idea about how to achieve this. >>>> >>>> Can you please give me a suggestion about how to do it? >>>> >>>> Thank you, >>>> >>>> Ottavio >>>> >>>> -- >>>> Non c'è più forza nella normalità, c'è solo monotonia >>>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> >> >> -- >> Non c'è più forza nella normalità, c'è solo monotonia >> > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Non c'è più forza nella normalità, c'è solo monotonia -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Aug 27 15:40:56 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 27 Aug 2021 18:40:56 +0300 Subject: Adding a fd that is not obtained through accept to the list the active connections In-Reply-To: References: Message-ID: Hello! On Fri, Aug 27, 2021 at 01:59:03PM +0200, Ottavio Campana wrote: > Dear Phillip, > > I know Tailscale very well, I use it and like it a lot. But my final goal > is finding a way to implement the ONVIF Uplink service, > https://www.onvif.org/specs/srv/uplink/ONVIF-Uplink-Spec.pdf , where I can > have several devices on the LAN that need to connect to a remote server, > which will then send commands. 
> > Therefore I need a way to start a connection from nginx (or an external > program and then passing the fd through a unix socket domain) and make it > act as if the fd were obtained from an accept. > > Nginx works with events and I find it very difficult to find a mechanism to > pass this connection to it. > > Do you have other ideas? The most simple solution I can think of is to open two connections: to your command endpoint and to nginx, and proxy everything once the connections are established. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Sun Aug 29 21:00:50 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 29 Aug 2021 21:00:50 +0000 Subject: [nginx] HTTP/2: improved body reading logging. Message-ID: details: https://hg.nginx.org/nginx/rev/51f463301f86 branches: changeset: 7907:51f463301f86 user: Maxim Dounin date: Sun Aug 29 22:20:34 2021 +0300 description: HTTP/2: improved body reading logging. diffstat: src/http/v2/ngx_http_v2.c | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diffs (23 lines): diff -r 058a67435e83 -r 51f463301f86 src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Fri Aug 20 03:53:56 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Sun Aug 29 22:20:34 2021 +0300 @@ -4154,6 +4154,9 @@ ngx_http_v2_process_request_body(ngx_htt rb = r->request_body; buf = rb->buf; + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, fc->log, 0, + "http2 process request body"); + if (size) { if (buf->sync) { buf->pos = buf->start = pos; @@ -4364,6 +4367,9 @@ ngx_http_v2_read_unbuffered_request_body stream = r->stream; fc = r->connection; + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, fc->log, 0, + "http2 read unbuffered request body"); + if (fc->read->timedout) { if (stream->recv_window) { stream->skip_data = 1; From mdounin at mdounin.ru Sun Aug 29 21:00:53 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 29 Aug 2021 21:00:53 +0000 Subject: [nginx] HTTP/2: reworked body reading to better match HTTP/1.x code. 
Message-ID: details: https://hg.nginx.org/nginx/rev/0dcec8e5d50a branches: changeset: 7908:0dcec8e5d50a user: Maxim Dounin date: Sun Aug 29 22:20:36 2021 +0300 description: HTTP/2: reworked body reading to better match HTTP/1.x code. In particular, now the code always uses a buffer limited by client_body_buffer_size. At the cost of an additional copy it ensures that small DATA frames are not directly mapped to small write() syscalls, but rather buffered in memory before writing. Further, requests without Content-Length are no longer forced to use temporary files. diffstat: src/http/v2/ngx_http_v2.c | 186 +++++++++++++++++++++++++++------------------ 1 files changed, 110 insertions(+), 76 deletions(-) diffs (236 lines): diff -r 51f463301f86 -r 0dcec8e5d50a src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Sun Aug 29 22:20:34 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Sun Aug 29 22:20:36 2021 +0300 @@ -4032,12 +4032,12 @@ ngx_http_v2_read_request_body(ngx_http_r len = r->headers_in.content_length_n; + if (len < 0 || len > (off_t) clcf->client_body_buffer_size) { + len = clcf->client_body_buffer_size; + } + if (r->request_body_no_buffering && !stream->in_closed) { - if (len < 0 || len > (off_t) clcf->client_body_buffer_size) { - len = clcf->client_body_buffer_size; - } - /* * We need a room to store data up to the stream's initial window size, * at least until this window will be exhausted. 
@@ -4050,21 +4050,9 @@ ngx_http_v2_read_request_body(ngx_http_r if (len > NGX_HTTP_V2_MAX_WINDOW) { len = NGX_HTTP_V2_MAX_WINDOW; } - - rb->buf = ngx_create_temp_buf(r->pool, (size_t) len); - - } else if (len >= 0 && len <= (off_t) clcf->client_body_buffer_size - && !r->request_body_in_file_only) - { - rb->buf = ngx_create_temp_buf(r->pool, (size_t) len); - - } else { - rb->buf = ngx_calloc_buf(r->pool); - - if (rb->buf != NULL) { - rb->buf->sync = 1; - } - } + } + + rb->buf = ngx_create_temp_buf(r->pool, (size_t) len); if (rb->buf == NULL) { stream->skip_data = 1; @@ -4144,7 +4132,7 @@ static ngx_int_t ngx_http_v2_process_request_body(ngx_http_request_t *r, u_char *pos, size_t size, ngx_uint_t last) { - ngx_buf_t *buf; + size_t n; ngx_int_t rc; ngx_connection_t *fc; ngx_http_request_body_t *rb; @@ -4152,79 +4140,125 @@ ngx_http_v2_process_request_body(ngx_htt fc = r->connection; rb = r->request_body; - buf = rb->buf; ngx_log_debug0(NGX_LOG_DEBUG_HTTP, fc->log, 0, "http2 process request body"); - if (size) { - if (buf->sync) { - buf->pos = buf->start = pos; - buf->last = buf->end = pos + size; - - r->request_body_in_file_only = 1; - - } else { - if (size > (size_t) (buf->end - buf->last)) { - ngx_log_error(NGX_LOG_INFO, fc->log, 0, - "client intended to send body data " - "larger than declared"); - - return NGX_HTTP_BAD_REQUEST; + if (size == 0 && !last) { + return NGX_OK; + } + + for ( ;; ) { + for ( ;; ) { + if (rb->buf->last == rb->buf->end && size) { + + if (r->request_body_no_buffering) { + + /* should never happen due to flow control */ + + ngx_log_error(NGX_LOG_ALERT, fc->log, 0, + "no space in http2 body buffer"); + + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + /* update chains */ + + ngx_log_error(NGX_LOG_DEBUG, fc->log, 0, + "http2 body update chains"); + + rc = ngx_http_v2_filter_request_body(r); + + if (rc != NGX_OK) { + return rc; + } + + if (rb->busy != NULL) { + ngx_log_error(NGX_LOG_ALERT, fc->log, 0, + "busy buffers after request body flush"); + 
return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + rb->buf->pos = rb->buf->start; + rb->buf->last = rb->buf->start; + } + + /* copy body data to the buffer */ + + n = rb->buf->end - rb->buf->last; + + if (n > size) { + n = size; } - buf->last = ngx_cpymem(buf->last, pos, size); + rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, + "http2 request body recv %uz", n); + + pos += n; + size -= n; + + if (size == 0 && last) { + rb->rest = 0; + } + + if (r->request_body_no_buffering) { + break; + } + + /* pass buffer to request body filter chain */ + + rc = ngx_http_v2_filter_request_body(r); + + if (rc != NGX_OK) { + return rc; + } + + if (rb->rest == 0) { + break; + } + + if (size == 0) { + break; + } } - } - - if (last) { - rb->rest = 0; - - if (fc->read->timer_set) { - ngx_del_timer(fc->read); + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, + "http2 request body rest %O", rb->rest); + + if (rb->rest == 0) { + break; } - if (r->request_body_no_buffering) { - ngx_post_event(fc->read, &ngx_posted_events); + if (size == 0) { + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + ngx_add_timer(fc->read, clcf->client_body_timeout); + + if (r->request_body_no_buffering) { + ngx_post_event(fc->read, &ngx_posted_events); + return NGX_OK; + } + return NGX_OK; } - - rc = ngx_http_v2_filter_request_body(r); - - if (rc != NGX_OK) { - return rc; - } - - if (buf->sync) { - /* prevent reusing this buffer in the upstream module */ - rb->buf = NULL; - } - - if (r->headers_in.chunked) { - r->headers_in.content_length_n = rb->received; - } - - r->read_event_handler = ngx_http_block_reading; - rb->post_handler(r); - - return NGX_OK; - } - - if (size == 0) { - return NGX_OK; - } - - clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); - ngx_add_timer(fc->read, clcf->client_body_timeout); + } + + if (fc->read->timer_set) { + ngx_del_timer(fc->read); + } if (r->request_body_no_buffering) { ngx_post_event(fc->read, 
&ngx_posted_events); return NGX_OK; } - if (buf->sync) { - return ngx_http_v2_filter_request_body(r); - } + if (r->headers_in.chunked) { + r->headers_in.content_length_n = rb->received; + } + + r->read_event_handler = ngx_http_block_reading; + rb->post_handler(r); return NGX_OK; } From mdounin at mdounin.ru Sun Aug 29 21:00:56 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 29 Aug 2021 21:00:56 +0000 Subject: [nginx] HTTP/2: improved handling of END_STREAM in a separate DATA frame. Message-ID: details: https://hg.nginx.org/nginx/rev/f302c1096f7b branches: changeset: 7909:f302c1096f7b user: Maxim Dounin date: Sun Aug 29 22:20:38 2021 +0300 description: HTTP/2: improved handling of END_STREAM in a separate DATA frame. The save body filter saves the request body to disk once the buffer is full. Yet in HTTP/2 this might happen even if there is no need to save anything to disk, notably when content length is known and the END_STREAM flag is sent in a separate empty DATA frame. Workaround is to provide additional byte in the buffer, so saving the request body won't be triggered. This fixes unexpected request body disk buffering in HTTP/2 observed after the previous change when content length is known and the END_STREAM flag is sent in a separate empty DATA frame. 
diffstat: src/http/v2/ngx_http_v2.c | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diffs (13 lines): diff -r 0dcec8e5d50a -r f302c1096f7b src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Sun Aug 29 22:20:36 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Sun Aug 29 22:20:38 2021 +0300 @@ -4034,6 +4034,9 @@ ngx_http_v2_read_request_body(ngx_http_r if (len < 0 || len > (off_t) clcf->client_body_buffer_size) { len = clcf->client_body_buffer_size; + + } else { + len++; } if (r->request_body_no_buffering && !stream->in_closed) { From mdounin at mdounin.ru Sun Aug 29 21:00:59 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 29 Aug 2021 21:00:59 +0000 Subject: [nginx] HTTP/2: improved handling of preread unbuffered requests. Message-ID: details: https://hg.nginx.org/nginx/rev/1d78437dbc3f branches: changeset: 7910:1d78437dbc3f user: Maxim Dounin date: Sun Aug 29 22:20:44 2021 +0300 description: HTTP/2: improved handling of preread unbuffered requests. Previously, fully preread unbuffered requests larger than client body buffer size were saved to disk, despite the fact that "unbuffered" is expected to imply no disk buffering. diffstat: src/http/v2/ngx_http_v2.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r f302c1096f7b -r 1d78437dbc3f src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Sun Aug 29 22:20:38 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Sun Aug 29 22:20:44 2021 +0300 @@ -4039,7 +4039,7 @@ ngx_http_v2_read_request_body(ngx_http_r len++; } - if (r->request_body_no_buffering && !stream->in_closed) { + if (r->request_body_no_buffering) { /* * We need a room to store data up to the stream's initial window size, From mdounin at mdounin.ru Sun Aug 29 21:01:03 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 29 Aug 2021 21:01:03 +0000 Subject: [nginx] Request body: missing comments about initialization. 
Message-ID: details: https://hg.nginx.org/nginx/rev/d869e43643ac branches: changeset: 7911:d869e43643ac user: Maxim Dounin date: Sun Aug 29 22:20:49 2021 +0300 description: Request body: missing comments about initialization. diffstat: src/http/ngx_http_request_body.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diffs (17 lines): diff -r 1d78437dbc3f -r d869e43643ac src/http/ngx_http_request_body.c --- a/src/http/ngx_http_request_body.c Sun Aug 29 22:20:44 2021 +0300 +++ b/src/http/ngx_http_request_body.c Sun Aug 29 22:20:49 2021 +0300 @@ -62,11 +62,13 @@ ngx_http_read_client_request_body(ngx_ht /* * set by ngx_pcalloc(): * + * rb->temp_file = NULL; * rb->bufs = NULL; * rb->buf = NULL; * rb->free = NULL; * rb->busy = NULL; * rb->chunked = NULL; + * rb->received = 0; */ rb->rest = -1; From mdounin at mdounin.ru Sun Aug 29 21:01:06 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 29 Aug 2021 21:01:06 +0000 Subject: [nginx] Request body: added alert to catch duplicate body saving. Message-ID: details: https://hg.nginx.org/nginx/rev/96e09beaa2cf branches: changeset: 7912:96e09beaa2cf user: Maxim Dounin date: Sun Aug 29 22:20:54 2021 +0300 description: Request body: added alert to catch duplicate body saving. If due to an error ngx_http_request_body_save_filter() is called more than once with rb->rest == 0, this used to result in a segmentation fault. Added an alert to catch such errors, just in case. 
diffstat: src/http/ngx_http_request_body.c | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diffs (16 lines): diff -r d869e43643ac -r 96e09beaa2cf src/http/ngx_http_request_body.c --- a/src/http/ngx_http_request_body.c Sun Aug 29 22:20:49 2021 +0300 +++ b/src/http/ngx_http_request_body.c Sun Aug 29 22:20:54 2021 +0300 @@ -1246,6 +1246,12 @@ ngx_http_request_body_save_filter(ngx_ht if (rb->temp_file || r->request_body_in_file_only) { + if (rb->bufs && rb->bufs->buf->in_file) { + ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, + "body already in file"); + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + if (ngx_http_write_request_body(r) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } From mdounin at mdounin.ru Sun Aug 29 21:01:09 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 29 Aug 2021 21:01:09 +0000 Subject: [nginx] Request body: introduced rb->last_saved flag. Message-ID: details: https://hg.nginx.org/nginx/rev/185c86b830ef branches: changeset: 7913:185c86b830ef user: Maxim Dounin date: Sun Aug 29 22:21:03 2021 +0300 description: Request body: introduced rb->last_saved flag. It indicates that the last buffer was received by the save filter, and can be used to check this at higher levels. To be used in the following changes. 
diffstat: src/http/ngx_http_request.h | 1 + src/http/ngx_http_request_body.c | 72 ++++++++++++++++++++++++++++++--------- 2 files changed, 56 insertions(+), 17 deletions(-) diffs (152 lines): diff -r 96e09beaa2cf -r 185c86b830ef src/http/ngx_http_request.h --- a/src/http/ngx_http_request.h Sun Aug 29 22:20:54 2021 +0300 +++ b/src/http/ngx_http_request.h Sun Aug 29 22:21:03 2021 +0300 @@ -302,6 +302,7 @@ typedef struct { ngx_chain_t *busy; ngx_http_chunked_t *chunked; ngx_http_client_body_handler_pt post_handler; + unsigned last_saved:1; } ngx_http_request_body_t; diff -r 96e09beaa2cf -r 185c86b830ef src/http/ngx_http_request_body.c --- a/src/http/ngx_http_request_body.c Sun Aug 29 22:20:54 2021 +0300 +++ b/src/http/ngx_http_request_body.c Sun Aug 29 22:21:03 2021 +0300 @@ -69,6 +69,7 @@ ngx_http_read_client_request_body(ngx_ht * rb->busy = NULL; * rb->chunked = NULL; * rb->received = 0; + * rb->last_saved = 0; */ rb->rest = -1; @@ -941,15 +942,32 @@ ngx_http_request_body_length_filter(ngx_ rb = r->request_body; + out = NULL; + ll = &out; + if (rb->rest == -1) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "http request body content length filter"); rb->rest = r->headers_in.content_length_n; - } + + if (rb->rest == 0) { + + tl = ngx_chain_get_free_buf(r->pool, &rb->free); + if (tl == NULL) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } - out = NULL; - ll = &out; + b = tl->buf; + + ngx_memzero(b, sizeof(ngx_buf_t)); + + b->last_buf = 1; + + *ll = tl; + ll = &tl->next; + } + } for (cl = in; cl; cl = cl->next) { @@ -1013,6 +1031,9 @@ ngx_http_request_body_chunked_filter(ngx rb = r->request_body; + out = NULL; + ll = &out; + if (rb->rest == -1) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, @@ -1029,9 +1050,6 @@ ngx_http_request_body_chunked_filter(ngx rb->rest = cscf->large_client_header_buffers.size; } - out = NULL; - ll = &out; - for (cl = in; cl; cl = cl->next) { b = NULL; @@ -1188,15 +1206,16 @@ ngx_int_t 
ngx_http_request_body_save_filter(ngx_http_request_t *r, ngx_chain_t *in) { ngx_buf_t *b; - ngx_chain_t *cl; + ngx_chain_t *cl, *tl, **ll; ngx_http_request_body_t *rb; rb = r->request_body; -#if (NGX_DEBUG) + ll = &rb->bufs; + + for (cl = rb->bufs; cl; cl = cl->next) { #if 0 - for (cl = rb->bufs; cl; cl = cl->next) { ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0, "http body old buf t:%d f:%d %p, pos %p, size: %z " "file: %O, size: %O", @@ -1205,10 +1224,13 @@ ngx_http_request_body_save_filter(ngx_ht cl->buf->last - cl->buf->pos, cl->buf->file_pos, cl->buf->file_last - cl->buf->file_pos); - } #endif + ll = &cl->next; + } + for (cl = in; cl; cl = cl->next) { + ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0, "http body new buf t:%d f:%d %p, pos %p, size: %z " "file: %O, size: %O", @@ -1217,15 +1239,31 @@ ngx_http_request_body_save_filter(ngx_ht cl->buf->last - cl->buf->pos, cl->buf->file_pos, cl->buf->file_last - cl->buf->file_pos); + + if (cl->buf->last_buf) { + + if (rb->last_saved) { + ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, + "duplicate last buf in save filter"); + *ll = NULL; + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + rb->last_saved = 1; + } + + tl = ngx_alloc_chain_link(r->pool); + if (tl == NULL) { + *ll = NULL; + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + tl->buf = cl->buf; + *ll = tl; + ll = &tl->next; } -#endif - - /* TODO: coalesce neighbouring buffers */ - - if (ngx_chain_add_copy(r->pool, &rb->bufs, in) != NGX_OK) { - return NGX_HTTP_INTERNAL_SERVER_ERROR; - } + *ll = NULL; if (r->request_body_no_buffering) { return NGX_OK; From mdounin at mdounin.ru Sun Aug 29 21:01:12 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 29 Aug 2021 21:01:12 +0000 Subject: [nginx] Request body: reading body buffering in filters. 
Message-ID: details: https://hg.nginx.org/nginx/rev/9cf043a5d9ca branches: changeset: 7914:9cf043a5d9ca user: Maxim Dounin date: Sun Aug 29 22:22:02 2021 +0300 description: Request body: reading body buffering in filters. If a filter wants to buffer the request body during reading (for example, to check an external scanner), it can now do so. To make it possible, the code now checks rb->last_saved (introduced in the previous change) along with rb->rest == 0. Since in HTTP/2 this requires flow control to avoid overflowing the request body buffer, so filters which need buffering have to set the rb->filter_need_buffering flag on the first filter call. (Note that each filter is expected to call the next filter, so all filters will be able set the flag if needed.) diffstat: src/http/ngx_http_request.h | 2 + src/http/ngx_http_request_body.c | 48 ++++++++++- src/http/v2/ngx_http_v2.c | 160 +++++++++++++++++++++++++++++++++----- 3 files changed, 183 insertions(+), 27 deletions(-) diffs (447 lines): diff -r 185c86b830ef -r 9cf043a5d9ca src/http/ngx_http_request.h --- a/src/http/ngx_http_request.h Sun Aug 29 22:21:03 2021 +0300 +++ b/src/http/ngx_http_request.h Sun Aug 29 22:22:02 2021 +0300 @@ -302,6 +302,8 @@ typedef struct { ngx_chain_t *busy; ngx_http_chunked_t *chunked; ngx_http_client_body_handler_pt post_handler; + unsigned filter_need_buffering:1; + unsigned last_sent:1; unsigned last_saved:1; } ngx_http_request_body_t; diff -r 185c86b830ef -r 9cf043a5d9ca src/http/ngx_http_request_body.c --- a/src/http/ngx_http_request_body.c Sun Aug 29 22:21:03 2021 +0300 +++ b/src/http/ngx_http_request_body.c Sun Aug 29 22:22:02 2021 +0300 @@ -69,6 +69,8 @@ ngx_http_read_client_request_body(ngx_ht * rb->busy = NULL; * rb->chunked = NULL; * rb->received = 0; + * rb->filter_need_buffering = 0; + * rb->last_sent = 0; * rb->last_saved = 0; */ @@ -147,7 +149,7 @@ ngx_http_read_client_request_body(ngx_ht } } - if (rb->rest == 0) { + if (rb->rest == 0 && rb->last_saved) { /* the whole 
request body was pre-read */ r->request_body_no_buffering = 0; post_handler(r); @@ -175,6 +177,10 @@ ngx_http_read_client_request_body(ngx_ht size += preread; } + if (size == 0) { + size++; + } + } else { size = clcf->client_body_buffer_size; } @@ -273,6 +279,7 @@ ngx_http_do_read_client_request_body(ngx size_t size; ssize_t n; ngx_int_t rc; + ngx_uint_t flush; ngx_chain_t out; ngx_connection_t *c; ngx_http_request_body_t *rb; @@ -280,12 +287,17 @@ ngx_http_do_read_client_request_body(ngx c = r->connection; rb = r->request_body; + flush = 1; ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http read client request body"); for ( ;; ) { for ( ;; ) { + if (rb->rest == 0) { + break; + } + if (rb->buf->last == rb->buf->end) { /* update chains */ @@ -309,12 +321,25 @@ ngx_http_do_read_client_request_body(ngx return NGX_AGAIN; } + if (rb->filter_need_buffering) { + clcf = ngx_http_get_module_loc_conf(r, + ngx_http_core_module); + ngx_add_timer(c->read, clcf->client_body_timeout); + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + return NGX_AGAIN; + } + ngx_log_error(NGX_LOG_ALERT, c->log, 0, "busy buffers after request body flush"); return NGX_HTTP_INTERNAL_SERVER_ERROR; } + flush = 0; rb->buf->pos = rb->buf->start; rb->buf->last = rb->buf->start; } @@ -326,6 +351,10 @@ ngx_http_do_read_client_request_body(ngx size = (size_t) rest; } + if (size == 0) { + break; + } + n = c->recv(c, rb->buf->last, size); ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, @@ -350,6 +379,7 @@ ngx_http_do_read_client_request_body(ngx /* pass buffer to request body filter chain */ + flush = 0; out.buf = rb->buf; out.next = NULL; @@ -371,11 +401,19 @@ ngx_http_do_read_client_request_body(ngx ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, "http client request body rest %O", rb->rest); - if (rb->rest == 0) { + if (flush) { + rc = ngx_http_request_body_filter(r, NULL); + + if (rc != NGX_OK) { + return rc; + } + } + + if (rb->rest == 0 && 
rb->last_saved) { break; } - if (!c->read->ready) { + if (!c->read->ready || rb->rest == 0) { clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); ngx_add_timer(c->read, clcf->client_body_timeout); @@ -1280,7 +1318,9 @@ ngx_http_request_body_save_filter(ngx_ht return NGX_OK; } - /* rb->rest == 0 */ + if (!rb->last_saved) { + return NGX_OK; + } if (rb->temp_file || r->request_body_in_file_only) { diff -r 185c86b830ef -r 9cf043a5d9ca src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Sun Aug 29 22:21:03 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Sun Aug 29 22:22:02 2021 +0300 @@ -173,7 +173,7 @@ static ngx_int_t ngx_http_v2_construct_c static void ngx_http_v2_run_request(ngx_http_request_t *r); static void ngx_http_v2_run_request_handler(ngx_event_t *ev); static ngx_int_t ngx_http_v2_process_request_body(ngx_http_request_t *r, - u_char *pos, size_t size, ngx_uint_t last); + u_char *pos, size_t size, ngx_uint_t last, ngx_uint_t flush); static ngx_int_t ngx_http_v2_filter_request_body(ngx_http_request_t *r); static void ngx_http_v2_read_client_request_body_handler(ngx_http_request_t *r); @@ -1092,7 +1092,7 @@ static u_char * ngx_http_v2_state_read_data(ngx_http_v2_connection_t *h2c, u_char *pos, u_char *end) { - size_t size; + size_t size, window; ngx_buf_t *buf; ngx_int_t rc; ngx_connection_t *fc; @@ -1140,13 +1140,40 @@ ngx_http_v2_state_read_data(ngx_http_v2_ h2c->payload_bytes += size; if (r->request_body) { - rc = ngx_http_v2_process_request_body(r, pos, size, stream->in_closed); - - if (rc != NGX_OK) { + rc = ngx_http_v2_process_request_body(r, pos, size, + stream->in_closed, 0); + + if (rc != NGX_OK && rc != NGX_AGAIN) { stream->skip_data = 1; ngx_http_finalize_request(r, rc); } + if (rc == NGX_AGAIN && !stream->no_flow_control) { + buf = r->request_body->buf; + window = buf->end - buf->last; + + window -= h2c->state.length - size; + + if (window < stream->recv_window) { + ngx_log_error(NGX_LOG_ALERT, h2c->connection->log, 0, + "http2 negative 
window update"); + return ngx_http_v2_connection_error(h2c, + NGX_HTTP_V2_INTERNAL_ERROR); + } + + if (window > stream->recv_window) { + if (ngx_http_v2_send_window_update(h2c, stream->node->id, + window - stream->recv_window) + == NGX_ERROR) + { + return ngx_http_v2_connection_error(h2c, + NGX_HTTP_V2_INTERNAL_ERROR); + } + + stream->recv_window = window; + } + } + ngx_http_run_posted_requests(fc); } else if (size) { @@ -4027,6 +4054,17 @@ ngx_http_v2_read_request_body(ngx_http_r return NGX_OK; } + rb->rest = 1; + + /* set rb->filter_need_buffering */ + + rc = ngx_http_top_request_body_filter(r, NULL); + + if (rc != NGX_OK) { + stream->skip_data = 1; + return rc; + } + h2scf = ngx_http_get_module_srv_conf(r, ngx_http_v2_module); clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); @@ -4039,7 +4077,7 @@ ngx_http_v2_read_request_body(ngx_http_r len++; } - if (r->request_body_no_buffering) { + if (r->request_body_no_buffering || rb->filter_need_buffering) { /* * We need a room to store data up to the stream's initial window size, @@ -4062,36 +4100,45 @@ ngx_http_v2_read_request_body(ngx_http_r return NGX_HTTP_INTERNAL_SERVER_ERROR; } - rb->rest = 1; - buf = stream->preread; if (stream->in_closed) { - r->request_body_no_buffering = 0; + if (!rb->filter_need_buffering) { + r->request_body_no_buffering = 0; + } if (buf) { rc = ngx_http_v2_process_request_body(r, buf->pos, - buf->last - buf->pos, 1); + buf->last - buf->pos, 1, 0); ngx_pfree(r->pool, buf->start); + + } else { + rc = ngx_http_v2_process_request_body(r, NULL, 0, 1, 0); + } + + if (rc != NGX_AGAIN) { return rc; } - return ngx_http_v2_process_request_body(r, NULL, 0, 1); + r->read_event_handler = ngx_http_v2_read_client_request_body_handler; + r->write_event_handler = ngx_http_request_empty_handler; + + return NGX_AGAIN; } if (buf) { rc = ngx_http_v2_process_request_body(r, buf->pos, - buf->last - buf->pos, 0); + buf->last - buf->pos, 0, 0); ngx_pfree(r->pool, buf->start); - if (rc != NGX_OK) { + if 
(rc != NGX_OK && rc != NGX_AGAIN) { stream->skip_data = 1; return rc; } } - if (r->request_body_no_buffering) { + if (r->request_body_no_buffering || rb->filter_need_buffering) { size = (size_t) len - h2scf->preread_size; } else { @@ -4133,7 +4180,7 @@ ngx_http_v2_read_request_body(ngx_http_r static ngx_int_t ngx_http_v2_process_request_body(ngx_http_request_t *r, u_char *pos, - size_t size, ngx_uint_t last) + size_t size, ngx_uint_t last, ngx_uint_t flush) { size_t n; ngx_int_t rc; @@ -4147,8 +4194,8 @@ ngx_http_v2_process_request_body(ngx_htt ngx_log_debug0(NGX_LOG_DEBUG_HTTP, fc->log, 0, "http2 process request body"); - if (size == 0 && !last) { - return NGX_OK; + if (size == 0 && !last && !flush) { + return NGX_AGAIN; } for ( ;; ) { @@ -4230,7 +4277,7 @@ ngx_http_v2_process_request_body(ngx_htt ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, "http2 request body rest %O", rb->rest); - if (rb->rest == 0) { + if (rb->rest == 0 && rb->last_saved) { break; } @@ -4240,10 +4287,10 @@ ngx_http_v2_process_request_body(ngx_htt if (r->request_body_no_buffering) { ngx_post_event(fc->read, &ngx_posted_events); - return NGX_OK; + return NGX_AGAIN; } - return NGX_OK; + return NGX_AGAIN; } } @@ -4279,7 +4326,7 @@ ngx_http_v2_filter_request_body(ngx_http rb = r->request_body; buf = rb->buf; - if (buf->pos == buf->last && rb->rest) { + if (buf->pos == buf->last && (rb->rest || rb->last_sent)) { cl = NULL; goto update; } @@ -4342,6 +4389,7 @@ ngx_http_v2_filter_request_body(ngx_http } b->last_buf = 1; + rb->last_sent = 1; } b->tag = (ngx_buf_tag_t) &ngx_http_v2_filter_request_body; @@ -4361,7 +4409,12 @@ update: static void ngx_http_v2_read_client_request_body_handler(ngx_http_request_t *r) { - ngx_connection_t *fc; + size_t window; + ngx_buf_t *buf; + ngx_int_t rc; + ngx_connection_t *fc; + ngx_http_v2_stream_t *stream; + ngx_http_v2_connection_t *h2c; fc = r->connection; @@ -4387,6 +4440,63 @@ ngx_http_v2_read_client_request_body_han ngx_http_finalize_request(r, 
NGX_HTTP_CLIENT_CLOSED_REQUEST); return; } + + rc = ngx_http_v2_process_request_body(r, NULL, 0, r->stream->in_closed, 1); + + if (rc != NGX_OK && rc != NGX_AGAIN) { + r->stream->skip_data = 1; + ngx_http_finalize_request(r, rc); + return; + } + + if (rc == NGX_OK) { + return; + } + + if (r->request_body->rest == 0) { + return; + } + + stream = r->stream; + h2c = stream->connection; + + buf = r->request_body->buf; + window = buf->end - buf->start; + + if (h2c->state.stream == stream) { + window -= h2c->state.length; + } + + if (window <= stream->recv_window) { + if (window < stream->recv_window) { + ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, + "http2 negative window update"); + + stream->skip_data = 1; + + ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + + return; + } + + if (ngx_http_v2_send_window_update(h2c, stream->node->id, + window - stream->recv_window) + == NGX_ERROR) + { + stream->skip_data = 1; + ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + + stream->recv_window = window; + + if (ngx_http_v2_send_output_queue(h2c) == NGX_ERROR) { + stream->skip_data = 1; + ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } } @@ -4430,10 +4540,14 @@ ngx_http_v2_read_unbuffered_request_body return rc; } - if (!r->request_body->rest) { + if (r->request_body->rest == 0 && r->request_body->last_saved) { return NGX_OK; } + if (r->request_body->rest == 0) { + return NGX_AGAIN; + } + if (r->request_body->busy != NULL) { return NGX_AGAIN; } From phillip.odam at nitorgroup.com Mon Aug 30 11:24:22 2021 From: phillip.odam at nitorgroup.com (Phillip Odam) Date: Mon, 30 Aug 2021 07:24:22 -0400 Subject: Adding a fd that is not obtained through accept to the list the active connections In-Reply-To: References: Message-ID: Hi Ottavio There?s no solution just with nginx as it currently that I know of, to avoid the need for a port forward in the NAT router a simple solution would be to use a ssh 
tunnel - this does separate initial connection from subsequent requests as you're unlikely to want to establish a new tunnel for each and every request and "knowledge" the connection is established is no longer inherently part of the application making the HTTP request. So to simplify things you could just expect the ssh tunnel to be established as a precondition (once off initial setup) Phillip On Friday, August 27, 2021, Maxim Dounin wrote: > Hello! > > On Fri, Aug 27, 2021 at 01:59:03PM +0200, Ottavio Campana wrote: > > > Dear Phillip, > > > > I know Tailscale very well, I use it and like it a lot. But my final goal > > is finding a way to implement the ONVIF Uplink service, > > https://www.onvif.org/specs/srv/uplink/ONVIF-Uplink-Spec.pdf , where I > can > > have several devices on the LAN that need to connect to a remote server, > > which will then send commands. > > > > Therefore I need a way to start a connection from nginx (or an external > > program and then passing the fd through a unix socket domain) and make it > > act as if the fd were obtained from an accept. > > > > Nginx works with events and I find it very difficult to find a mechanism > to > > pass this connection to it. > > > > Do you have other ideas? > > The most simple solution I can think of is to open two > connections: to your command endpoint and to nginx, and proxy > everything once the connections are established. > > -- > Maxim Dounin > http://mdounin.ru/ > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From pluknet at nginx.com Mon Aug 30 12:48:58 2021 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 30 Aug 2021 12:48:58 +0000 Subject: [nginx] Give GCC atomics precedence over deprecated Darwin atomic(3). 
Message-ID: details: https://hg.nginx.org/nginx/rev/09d15a2dbc6b branches: changeset: 7915:09d15a2dbc6b user: Sergey Kandaurov date: Mon Aug 30 14:45:21 2021 +0300 description: Give GCC atomics precedence over deprecated Darwin atomic(3). This allows to build nginx on macOS with -Wdeprecated-declarations. diffstat: src/os/unix/ngx_atomic.h | 66 ++++++++++++++++++++++++------------------------ 1 files changed, 33 insertions(+), 33 deletions(-) diffs (83 lines): diff -r 9cf043a5d9ca -r 09d15a2dbc6b src/os/unix/ngx_atomic.h --- a/src/os/unix/ngx_atomic.h Sun Aug 29 22:22:02 2021 +0300 +++ b/src/os/unix/ngx_atomic.h Mon Aug 30 14:45:21 2021 +0300 @@ -38,6 +38,39 @@ typedef volatile ngx_atomic_uint_t ngx_ #define ngx_cpu_pause() +#elif (NGX_HAVE_GCC_ATOMIC) + +/* GCC 4.1 builtin atomic operations */ + +#define NGX_HAVE_ATOMIC_OPS 1 + +typedef long ngx_atomic_int_t; +typedef unsigned long ngx_atomic_uint_t; + +#if (NGX_PTR_SIZE == 8) +#define NGX_ATOMIC_T_LEN (sizeof("-9223372036854775808") - 1) +#else +#define NGX_ATOMIC_T_LEN (sizeof("-2147483648") - 1) +#endif + +typedef volatile ngx_atomic_uint_t ngx_atomic_t; + + +#define ngx_atomic_cmp_set(lock, old, set) \ + __sync_bool_compare_and_swap(lock, old, set) + +#define ngx_atomic_fetch_add(value, add) \ + __sync_fetch_and_add(value, add) + +#define ngx_memory_barrier() __sync_synchronize() + +#if ( __i386__ || __i386 || __amd64__ || __amd64 ) +#define ngx_cpu_pause() __asm__ ("pause") +#else +#define ngx_cpu_pause() +#endif + + #elif (NGX_DARWIN_ATOMIC) /* @@ -88,39 +121,6 @@ typedef uint32_t ngx_ typedef volatile ngx_atomic_uint_t ngx_atomic_t; -#elif (NGX_HAVE_GCC_ATOMIC) - -/* GCC 4.1 builtin atomic operations */ - -#define NGX_HAVE_ATOMIC_OPS 1 - -typedef long ngx_atomic_int_t; -typedef unsigned long ngx_atomic_uint_t; - -#if (NGX_PTR_SIZE == 8) -#define NGX_ATOMIC_T_LEN (sizeof("-9223372036854775808") - 1) -#else -#define NGX_ATOMIC_T_LEN (sizeof("-2147483648") - 1) -#endif - -typedef volatile ngx_atomic_uint_t 
ngx_atomic_t; - - -#define ngx_atomic_cmp_set(lock, old, set) \ - __sync_bool_compare_and_swap(lock, old, set) - -#define ngx_atomic_fetch_add(value, add) \ - __sync_fetch_and_add(value, add) - -#define ngx_memory_barrier() __sync_synchronize() - -#if ( __i386__ || __i386 || __amd64__ || __amd64 ) -#define ngx_cpu_pause() __asm__ ("pause") -#else -#define ngx_cpu_pause() -#endif - - #elif ( __i386__ || __i386 ) typedef int32_t ngx_atomic_int_t; From xeioex at nginx.com Tue Aug 31 13:17:37 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 31 Aug 2021 13:17:37 +0000 Subject: [njs] Fixed backtrace output for arrays broken in b0177571ce1d. Message-ID: details: https://hg.nginx.org/njs/rev/c9d24865a9e0 branches: changeset: 1690:c9d24865a9e0 user: Dmitry Volyntsev date: Tue Aug 31 13:16:32 2021 +0000 description: Fixed backtrace output for arrays broken in b0177571ce1d. After the previous commit the array prototype methods were not found during njs_object_traverse() invocation. As the result the exceptions in Array.prototype methods were reported with a backtrace containing "native (native)" instead of a proper function name. diffstat: src/njs_builtin.c | 6 +++++- src/njs_object.c | 1 - src/njs_value.c | 2 ++ src/test/njs_unit_test.c | 5 +++++ 4 files changed, 12 insertions(+), 2 deletions(-) diffs (54 lines): diff -r b0177571ce1d -r c9d24865a9e0 src/njs_builtin.c --- a/src/njs_builtin.c Thu Aug 19 16:17:19 2021 +0000 +++ b/src/njs_builtin.c Tue Aug 31 13:16:32 2021 +0000 @@ -470,7 +470,11 @@ njs_builtin_traverse(njs_vm_t *vm, njs_t } } - njs_assert(njs_is_string(&key)); + if (njs_slow_path(!njs_is_string(&key))) { + /* Skipping special properties (e.g. array index properties). 
*/ + return NJS_OK; + } + njs_string_get(&key, &name); if (njs_slow_path((p + name.length + 3) > end)) { diff -r b0177571ce1d -r c9d24865a9e0 src/njs_object.c --- a/src/njs_object.c Thu Aug 19 16:17:19 2021 +0000 +++ b/src/njs_object.c Tue Aug 31 13:16:32 2021 +0000 @@ -1201,7 +1201,6 @@ njs_object_traverse(njs_vm_t *vm, njs_ob } if (njs_is_object(&value) - && !njs_is_array(&value) && !njs_traverse_visited(&visited, &value)) { ret = njs_traverse_visit(&visited, &value); diff -r b0177571ce1d -r c9d24865a9e0 src/njs_value.c --- a/src/njs_value.c Thu Aug 19 16:17:19 2021 +0000 +++ b/src/njs_value.c Tue Aug 31 13:16:32 2021 +0000 @@ -834,6 +834,8 @@ prop: prop->type = NJS_PROPERTY_REF; } + njs_set_number(&prop->name, index); + prop->writable = 1; prop->enumerable = 1; prop->configurable = 1; diff -r b0177571ce1d -r c9d24865a9e0 src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Thu Aug 19 16:17:19 2021 +0000 +++ b/src/test/njs_unit_test.c Tue Aug 31 13:16:32 2021 +0000 @@ -21240,6 +21240,11 @@ static njs_unit_test_t njs_shell_test[] " at f (:1)\n" " at main (:1)\n") }, + { njs_str("[].concat({}.a.a)" ENTER), + njs_str("TypeError: cannot get property \"a\" of undefined\n" + " at Array.prototype.concat (native)\n" + " at main (:1)\n") }, + { njs_str("''.repeat(-1)" ENTER), njs_str("RangeError\n" " at String.prototype.repeat (native)\n" From xeioex at nginx.com Tue Aug 31 13:17:39 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 31 Aug 2021 13:17:39 +0000 Subject: [njs] Shell: fixed memory leak in script options. Message-ID: details: https://hg.nginx.org/njs/rev/bf1464d84fd1 branches: changeset: 1691:bf1464d84fd1 user: Dmitry Volyntsev date: Tue Aug 31 13:16:42 2021 +0000 description: Shell: fixed memory leak in script options. 
diffstat: src/njs_shell.c | 24 ++++++++++++++++++------ 1 files changed, 18 insertions(+), 6 deletions(-) diffs (61 lines): diff -r c9d24865a9e0 -r bf1464d84fd1 src/njs_shell.c --- a/src/njs_shell.c Tue Aug 31 13:16:32 2021 +0000 +++ b/src/njs_shell.c Tue Aug 31 13:16:42 2021 +0000 @@ -93,7 +93,8 @@ static njs_int_t njs_process_script(njs_ #ifndef NJS_FUZZER_TARGET -static njs_int_t njs_get_options(njs_opts_t *opts, int argc, char **argv); +static njs_int_t njs_options_parse(njs_opts_t *opts, int argc, char **argv); +static void njs_options_free(njs_opts_t *opts); static njs_int_t njs_process_file(njs_opts_t *opts, njs_vm_opt_t *vm_options); static njs_int_t njs_interactive_shell(njs_opts_t *opts, njs_vm_opt_t *vm_options); @@ -223,7 +224,7 @@ main(int argc, char **argv) njs_memzero(&opts, sizeof(njs_opts_t)); opts.interactive = 1; - ret = njs_get_options(&opts, argc, argv); + ret = njs_options_parse(&opts, argc, argv); if (ret != NJS_OK) { ret = (ret == NJS_DONE) ? NJS_OK : NJS_ERROR; goto done; @@ -294,16 +295,14 @@ main(int argc, char **argv) done: - if (opts.paths != NULL) { - free(opts.paths); - } + njs_options_free(&opts); return (ret == NJS_OK) ? EXIT_SUCCESS : EXIT_FAILURE; } static njs_int_t -njs_get_options(njs_opts_t *opts, int argc, char **argv) +njs_options_parse(njs_opts_t *opts, int argc, char **argv) { char *p, **paths; njs_int_t i, ret; @@ -461,6 +460,19 @@ done: } +static void +njs_options_free(njs_opts_t *opts) +{ + if (opts->paths != NULL) { + free(opts->paths); + } + + if (opts->argv != NULL) { + free(opts->argv); + } +} + + static njs_int_t njs_process_file(njs_opts_t *opts, njs_vm_opt_t *vm_options) { From xeioex at nginx.com Tue Aug 31 13:17:41 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 31 Aug 2021 13:17:41 +0000 Subject: [njs] Allowing to pass arbitrary pointer to object iterator handler. 
Message-ID: details: https://hg.nginx.org/njs/rev/4c0b2392a5ef branches: changeset: 1692:4c0b2392a5ef user: Dmitry Volyntsev date: Tue Aug 31 13:16:42 2021 +0000 description: Allowing to pass arbitrary pointer to object iterator handler. diffstat: src/njs_array.c | 8 ++++---- src/njs_iterator.c | 13 ++++++++----- src/njs_iterator.h | 2 +- src/njs_promise.c | 24 +++++++++++++++--------- 4 files changed, 28 insertions(+), 19 deletions(-) diffs (195 lines): diff -r bf1464d84fd1 -r 4c0b2392a5ef src/njs_array.c --- a/src/njs_array.c Tue Aug 31 13:16:42 2021 +0000 +++ b/src/njs_array.c Tue Aug 31 13:16:42 2021 +0000 @@ -2256,7 +2256,7 @@ njs_array_handler_filter(njs_vm_t *vm, n } if (njs_is_true(&vm->retval)) { - ret = njs_array_add(vm, args->array, ©); + ret = njs_array_add(vm, args->data, ©); if (njs_slow_path(ret != NJS_OK)) { return ret; } @@ -2275,7 +2275,7 @@ njs_array_handler_map(njs_vm_t *vm, njs_ njs_array_t *retval; njs_value_t this; - retval = args->array; + retval = args->data; if (retval->object.fast_array) { njs_set_invalid(&retval->start[n]); @@ -2431,7 +2431,7 @@ njs_array_prototype_iterator(njs_vm_t *v } } - iargs.array = array; + iargs.data = array; break; } @@ -2481,7 +2481,7 @@ done: case NJS_ARRAY_FILTER: case NJS_ARRAY_MAP: default: - njs_set_array(&vm->retval, iargs.array); + njs_set_array(&vm->retval, iargs.data); } return NJS_OK; diff -r bf1464d84fd1 -r 4c0b2392a5ef src/njs_iterator.c --- a/src/njs_iterator.c Tue Aug 31 13:16:42 2021 +0000 +++ b/src/njs_iterator.c Tue Aug 31 13:16:42 2021 +0000 @@ -695,8 +695,8 @@ njs_iterator_to_array(njs_vm_t *vm, njs_ return NULL; } - args.array = njs_array_alloc(vm, 1, length, 0); - if (njs_slow_path(args.array == NULL)) { + args.data = njs_array_alloc(vm, 1, length, 0); + if (njs_slow_path(args.data == NULL)) { return NULL; } @@ -705,11 +705,11 @@ njs_iterator_to_array(njs_vm_t *vm, njs_ ret = njs_object_iterate(vm, &args, njs_iterator_to_array_handler); if (njs_slow_path(ret == NJS_ERROR)) { - 
njs_mp_free(vm->mem_pool, args.array); + njs_mp_free(vm->mem_pool, args.data); return NULL; } - return args.array; + return args.data; } @@ -717,7 +717,10 @@ static njs_int_t njs_iterator_to_array_handler(njs_vm_t *vm, njs_iterator_args_t *args, njs_value_t *value, int64_t index) { - args->array->start[index] = *value; + njs_array_t *array; + + array = args->data; + array->start[index] = *value; return NJS_OK; } diff -r bf1464d84fd1 -r 4c0b2392a5ef src/njs_iterator.h --- a/src/njs_iterator.h Tue Aug 31 13:16:42 2021 +0000 +++ b/src/njs_iterator.h Tue Aug 31 13:16:42 2021 +0000 @@ -13,7 +13,7 @@ typedef struct { njs_value_t *argument; njs_value_t *value; - njs_array_t *array; + void *data; int64_t from; int64_t to; diff -r bf1464d84fd1 -r 4c0b2392a5ef src/njs_promise.c --- a/src/njs_promise.c Tue Aug 31 13:16:42 2021 +0000 +++ b/src/njs_promise.c Tue Aug 31 13:16:42 2021 +0000 @@ -1344,8 +1344,8 @@ njs_promise_perform_all(njs_vm_t *vm, nj return ret; } - pargs->args.array = njs_array_alloc(vm, 1, length, 0); - if (njs_slow_path(pargs->args.array == NULL)) { + pargs->args.data = njs_array_alloc(vm, 1, length, 0); + if (njs_slow_path(pargs->args.data == NULL)) { return NJS_ERROR; } @@ -1368,7 +1368,7 @@ njs_promise_perform_all(njs_vm_t *vm, nj if (--(*pargs->remaining) == 0) { njs_mp_free(vm->mem_pool, pargs->remaining); - njs_set_array(&argument, pargs->args.array); + njs_set_array(&argument, pargs->args.data); if (handler == njs_promise_perform_any_handler) { error = njs_error_alloc(vm, NJS_OBJ_TYPE_AGGREGATE_ERROR, @@ -1398,6 +1398,7 @@ njs_promise_perform_all_handler(njs_vm_t njs_value_t *value, int64_t index) { njs_int_t ret; + njs_array_t *array; njs_value_t arguments[2], next; njs_function_t *on_fulfilled; njs_promise_capability_t *capability; @@ -1408,7 +1409,8 @@ njs_promise_perform_all_handler(njs_vm_t capability = pargs->capability; - njs_set_undefined(&pargs->args.array->start[index]); + array = args->data; + njs_set_undefined(&array->start[index]); ret = 
njs_function_call(vm, pargs->function, pargs->constructor, value, 1, &next); @@ -1429,7 +1431,7 @@ njs_promise_perform_all_handler(njs_vm_t context->already_called = 0; context->index = (uint32_t) index; - context->values = pargs->args.array; + context->values = pargs->args.data; context->capability = capability; context->remaining_elements = pargs->remaining; @@ -1486,6 +1488,7 @@ njs_promise_perform_all_settled_handler( njs_value_t *value, int64_t index) { njs_int_t ret; + njs_array_t *array; njs_value_t arguments[2], next; njs_function_t *on_fulfilled, *on_rejected; njs_promise_capability_t *capability; @@ -1496,7 +1499,8 @@ njs_promise_perform_all_settled_handler( capability = pargs->capability; - njs_set_undefined(&pargs->args.array->start[index]); + array = args->data; + njs_set_undefined(&array->start[index]); ret = njs_function_call(vm, pargs->function, pargs->constructor, value, 1, &next); @@ -1514,7 +1518,7 @@ njs_promise_perform_all_settled_handler( context->already_called = 0; context->index = (uint32_t) index; - context->values = pargs->args.array; + context->values = pargs->args.data; context->capability = capability; context->remaining_elements = pargs->remaining; @@ -1623,6 +1627,7 @@ njs_promise_perform_any_handler(njs_vm_t njs_value_t *value, int64_t index) { njs_int_t ret; + njs_array_t *array; njs_value_t arguments[2], next; njs_function_t *on_rejected; njs_promise_capability_t *capability; @@ -1633,7 +1638,8 @@ njs_promise_perform_any_handler(njs_vm_t capability = pargs->capability; - njs_set_undefined(&pargs->args.array->start[index]); + array = pargs->args.data; + njs_set_undefined(&array->start[index]); ret = njs_function_call(vm, pargs->function, pargs->constructor, value, 1, &next); @@ -1654,7 +1660,7 @@ njs_promise_perform_any_handler(njs_vm_t context->already_called = 0; context->index = (uint32_t) index; - context->values = pargs->args.array; + context->values = pargs->args.data; context->capability = capability; 
context->remaining_elements = pargs->remaining; From xeioex at nginx.com Tue Aug 31 13:17:44 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 31 Aug 2021 13:17:44 +0000 Subject: [njs] Memory: added cleanup handlers support. Message-ID: details: https://hg.nginx.org/njs/rev/99afe1a7f71d branches: changeset: 1693:99afe1a7f71d user: Dmitry Volyntsev date: Tue Aug 31 13:16:43 2021 +0000 description: Memory: added cleanup handlers support. diffstat: src/njs_mp.c | 41 +++++++++++++++++++++++++++++++++++++++++ src/njs_mp.h | 10 ++++++++++ 2 files changed, 51 insertions(+), 0 deletions(-) diffs (96 lines): diff -r 4c0b2392a5ef -r 99afe1a7f71d src/njs_mp.c --- a/src/njs_mp.c Tue Aug 31 13:16:42 2021 +0000 +++ b/src/njs_mp.c Tue Aug 31 13:16:43 2021 +0000 @@ -106,6 +106,8 @@ struct njs_mp_s { uint32_t page_alignment; uint32_t cluster_size; + njs_mp_cleanup_t *cleanup; + njs_mp_slot_t slots[]; }; @@ -251,10 +253,18 @@ njs_mp_destroy(njs_mp_t *mp) { void *p; njs_mp_block_t *block; + njs_mp_cleanup_t *c; njs_rbtree_node_t *node, *next; njs_debug_alloc("mp destroy\n"); + for (c = mp->cleanup; c != NULL; c = c->next) { + if (c->handler != NULL) { + njs_debug_alloc("mp run cleanup: @%p\n", c); + c->handler(c->data); + } + } + next = njs_rbtree_root(&mp->blocks); while (next != njs_rbtree_sentinel(&mp->blocks)) { @@ -606,6 +616,37 @@ njs_mp_rbtree_compare(njs_rbtree_node_t } +njs_mp_cleanup_t * +njs_mp_cleanup_add(njs_mp_t *mp, size_t size) +{ + njs_mp_cleanup_t *c; + + c = njs_mp_alloc(mp, sizeof(njs_mp_cleanup_t)); + if (njs_slow_path(c == NULL)) { + return NULL; + } + + if (size) { + c->data = njs_mp_alloc(mp, size); + if (njs_slow_path(c->data == NULL)) { + return NULL; + } + + } else { + c->data = NULL; + } + + c->handler = NULL; + c->next = mp->cleanup; + + mp->cleanup = c; + + njs_debug_alloc("mp add cleanup: @%p\n", c); + + return c; +} + + void njs_mp_free(njs_mp_t *mp, void *p) { diff -r 4c0b2392a5ef -r 99afe1a7f71d src/njs_mp.h --- a/src/njs_mp.h Tue Aug 31 
13:16:42 2021 +0000 +++ b/src/njs_mp.h Tue Aug 31 13:16:43 2021 +0000 @@ -9,6 +9,15 @@ typedef struct njs_mp_s njs_mp_t; +typedef struct njs_mp_cleanup_s njs_mp_cleanup_t; + +typedef void (*njs_mp_cleanup_pt)(void *data); + +struct njs_mp_cleanup_s { + njs_mp_cleanup_pt handler; + void *data; + njs_mp_cleanup_t *next; +}; NJS_EXPORT njs_mp_t *njs_mp_create(size_t cluster_size, size_t page_alignment, @@ -28,6 +37,7 @@ NJS_EXPORT void *njs_mp_align(njs_mp_t * NJS_EXPORT void *njs_mp_zalign(njs_mp_t *mp, size_t alignment, size_t size) NJS_MALLOC_LIKE; +NJS_EXPORT njs_mp_cleanup_t *njs_mp_cleanup_add(njs_mp_t *mp, size_t size); NJS_EXPORT void njs_mp_free(njs_mp_t *mp, void *p); From xeioex at nginx.com Tue Aug 31 13:17:47 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 31 Aug 2021 13:17:47 +0000 Subject: [njs] Fixed %TypedArray%.prototype.join() with detached buffer. Message-ID: details: https://hg.nginx.org/njs/rev/8799bbb1cb5d branches: changeset: 1694:8799bbb1cb5d user: Dmitry Volyntsev date: Tue Aug 31 13:16:44 2021 +0000 description: Fixed %TypedArray%.prototype.join() with detached buffer. The TypedArray buffer may be detached while evaluating custom "separator" argument. The fix is to move the buffer check below this point. Found by Official ECMAScript Conformance Test Suite. 
diffstat: src/njs_typed_array.c | 5 +++++ src/test/njs_unit_test.c | 7 +++++++ 2 files changed, 12 insertions(+), 0 deletions(-) diffs (32 lines): diff -r 99afe1a7f71d -r 8799bbb1cb5d src/njs_typed_array.c --- a/src/njs_typed_array.c Tue Aug 31 13:16:43 2021 +0000 +++ b/src/njs_typed_array.c Tue Aug 31 13:16:44 2021 +0000 @@ -2166,6 +2166,11 @@ njs_typed_array_prototype_join(njs_vm_t return NJS_OK; } + if (njs_slow_path(njs_is_detached_buffer(array->buffer))) { + njs_type_error(vm, "detached buffer"); + return NJS_ERROR; + } + njs_chb_init(&chain, vm->mem_pool); length = njs_typed_array_to_chain(vm, &chain, array, separator); diff -r 99afe1a7f71d -r 8799bbb1cb5d src/test/njs_unit_test.c --- a/src/test/njs_unit_test.c Tue Aug 31 13:16:43 2021 +0000 +++ b/src/test/njs_unit_test.c Tue Aug 31 13:16:44 2021 +0000 @@ -6234,6 +6234,13 @@ static njs_unit_test_t njs_test[] = " return a.map(q=>q/2).join('|') === '3|2|1'})"), njs_str("true") }, +#ifdef NJS_TEST262 + { njs_str("const arr = new Uint8Array([1,2,3]);" + "const sep = {toString(){$262.detachArrayBuffer(arr.buffer); return ','}};" + "arr.join(sep)"), + njs_str("TypeError: detached buffer") }, +#endif + { njs_str("Uint8Array.prototype.reduce.call(1)"), njs_str("TypeError: this is not a typed array") }, From mdounin at mdounin.ru Tue Aug 31 13:52:22 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 31 Aug 2021 13:52:22 +0000 Subject: [nginx] HTTP/2: avoid memcpy() with NULL source and zero length. Message-ID: details: https://hg.nginx.org/nginx/rev/29795b697e14 branches: changeset: 7916:29795b697e14 user: Maxim Dounin date: Tue Aug 31 16:44:13 2021 +0300 description: HTTP/2: avoid memcpy() with NULL source and zero length. Prodded by Clang Static Analyzer. 
diffstat: src/http/v2/ngx_http_v2.c | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diffs (14 lines): diff -r 09d15a2dbc6b -r 29795b697e14 src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Mon Aug 30 14:45:21 2021 +0300 +++ b/src/http/v2/ngx_http_v2.c Tue Aug 31 16:44:13 2021 +0300 @@ -4241,7 +4241,9 @@ ngx_http_v2_process_request_body(ngx_htt n = size; } - rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); + if (n > 0) { + rb->buf->last = ngx_cpymem(rb->buf->last, pos, n); + } ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, "http2 request body recv %uz", n); From xeioex at nginx.com Tue Aug 31 14:04:55 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 31 Aug 2021 14:04:55 +0000 Subject: [njs] Version 0.6.2. Message-ID: details: https://hg.nginx.org/njs/rev/dfba7f61745c branches: changeset: 1695:dfba7f61745c user: Dmitry Volyntsev date: Tue Aug 31 13:38:39 2021 +0000 description: Version 0.6.2. diffstat: CHANGES | 28 ++++++++++++++++++++++++++++ 1 files changed, 28 insertions(+), 0 deletions(-) diffs (35 lines): diff -r 8799bbb1cb5d -r dfba7f61745c CHANGES --- a/CHANGES Tue Aug 31 13:16:44 2021 +0000 +++ b/CHANGES Tue Aug 31 13:38:39 2021 +0000 @@ -1,3 +1,31 @@ +Changes with njs 0.6.2 31 Aug 2021 + + nginx modules: + + *) Bugfix: fixed CPU hog when js_filter is registered + in both directions. + + Core: + + *) Feature: introduced AggregateError implementation. + + *) Feature: added remaining Promise constructor methods. + The following methods were added: Promise.all(), + Promise.allSettled(), Promise.any(), Promise.race(). + + *) Improvement: removed recursion from code generator. + + *) Bugfix: fixed rest parameter parsing without binding + identifier. + + *) Bugfix: fixed resolve/reject callback for + Promise.prototype.finally(). + + *) Bugfix: fixed %TypedArray%.prototype.join() with + detached buffer. + + *) Bugfix: fixed memory leak in interactive shell. 
+ Changes with njs 0.6.1 29 Jun 2021 *) Bugfix: fixed RegExpBuiltinExec() with UTF-8 only regexps. From xeioex at nginx.com Tue Aug 31 14:04:57 2021 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 31 Aug 2021 14:04:57 +0000 Subject: [njs] Added tag 0.6.2 for changeset dfba7f61745c Message-ID: details: https://hg.nginx.org/njs/rev/65cd982d134f branches: changeset: 1696:65cd982d134f user: Dmitry Volyntsev date: Tue Aug 31 14:04:23 2021 +0000 description: Added tag 0.6.2 for changeset dfba7f61745c diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r dfba7f61745c -r 65cd982d134f .hgtags --- a/.hgtags Tue Aug 31 13:38:39 2021 +0000 +++ b/.hgtags Tue Aug 31 14:04:23 2021 +0000 @@ -44,3 +44,4 @@ e5de01378b1a8ab0a94dd3a8c4c6bb7a235f4b9c 282b9412976ceee31eb12876f1499fe975e6f08c 0.5.3 742ebceef2b5d15febc093172fe6174e427b26c8 0.6.0 4adbe67b292af2adc0a6fde4ec6cb95dbba9470a 0.6.1 +dfba7f61745c7454ffdd55303a793206d0a9a84a 0.6.2 From mdounin at mdounin.ru Tue Aug 31 15:31:44 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 31 Aug 2021 15:31:44 +0000 Subject: [nginx] Updated OpenSSL used for win32 builds. Message-ID: details: https://hg.nginx.org/nginx/rev/f0ab1db646d5 branches: changeset: 7917:f0ab1db646d5 user: Maxim Dounin date: Tue Aug 31 17:54:54 2021 +0300 description: Updated OpenSSL used for win32 builds. 
diffstat: misc/GNUmakefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 29795b697e14 -r f0ab1db646d5 misc/GNUmakefile --- a/misc/GNUmakefile Tue Aug 31 16:44:13 2021 +0300 +++ b/misc/GNUmakefile Tue Aug 31 17:54:54 2021 +0300 @@ -6,7 +6,7 @@ TEMP = tmp CC = cl OBJS = objs.msvc8 -OPENSSL = openssl-1.1.1k +OPENSSL = openssl-1.1.1l ZLIB = zlib-1.2.11 PCRE = pcre-8.44 From mdounin at mdounin.ru Tue Aug 31 15:31:47 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 31 Aug 2021 15:31:47 +0000 Subject: [nginx] nginx-1.21.2-RELEASE Message-ID: details: https://hg.nginx.org/nginx/rev/bfbc52374adc branches: changeset: 7918:bfbc52374adc user: Maxim Dounin date: Tue Aug 31 18:13:46 2021 +0300 description: nginx-1.21.2-RELEASE diffstat: docs/xml/nginx/changes.xml | 107 +++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 107 insertions(+), 0 deletions(-) diffs (117 lines): diff -r f0ab1db646d5 -r bfbc52374adc docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml Tue Aug 31 17:54:54 2021 +0300 +++ b/docs/xml/nginx/changes.xml Tue Aug 31 18:13:46 2021 +0300 @@ -5,6 +5,113 @@ + + + + +?????? nginx ?????????? ??????, +???? ? ??????? ?? ????????? HTTP/1.0 ???????????? +?????? ????????? "Transfer-Encoding". + + +now nginx rejects HTTP/1.0 requests +with the "Transfer-Encoding" header line. + + + + + +?????????? ????? ?????? ?? ??????????????. + + +export ciphers are no longer supported. + + + + + +????????????? ? OpenSSL 3.0. + + +OpenSSL 3.0 compatibility. + + + + + +?????? ??????? ?????????????? ????????? ??????-??????? +?????????? ?????? ????????? "Auth-SSL-Protocol" ? "Auth-SSL-Cipher".
+??????? Rob Mueller. +
+ +the "Auth-SSL-Protocol" and "Auth-SSL-Cipher" header lines +are now passed to the mail proxy authentication server.
+Thanks to Rob Mueller. +
+
+ + + +API ??? ????????? ???? ??????? +?????? ????????? ?????????????? ?????????????? ??????. + + +request body filters API +now permits buffering of the data being processed. + + + + + +SSL-?????????? ? ???????? ? ?????? stream +????? ???????? ????? SSL handshake. + + +backend SSL connections in the stream module +might hang after an SSL handshake. + + + + + +??????? ????????????, ????????? ? OpenSSL 1.1.0 ? ?????, +?? ?????????? ??? ???????? ???????????? ???????, +???? ??? ????? ????? "@SECLEVEL=N" ? ????????? ssl_ciphers. + + +the security level, which is available in OpenSSL 1.1.0 or newer, +did not affect loading of the server certificates +when set with "@SECLEVEL=N" in the "ssl_ciphers" directive. + + + + + +SSL-?????????? ? gRPC-????????? ????? ????????, +???? ?????????????? ?????? select, poll ??? /dev/poll. + + +SSL connections with gRPC backends might hang +if select, poll, or /dev/poll methods were used. + + + + + +??? ????????????? HTTP/2 +???? ??????? ?????? ???????????? ?? ????, +???? ? ??????? ?? ???? ?????? ????????? "Content-Length". + + +when using HTTP/2 +client request body was always written to disk +if the "Content-Length" header line was not present in the request. + + + +
+ + From mdounin at mdounin.ru Tue Aug 31 15:31:50 2021 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 31 Aug 2021 15:31:50 +0000 Subject: [nginx] release-1.21.2 tag Message-ID: details: https://hg.nginx.org/nginx/rev/b2d1a602b241 branches: changeset: 7919:b2d1a602b241 user: Maxim Dounin date: Tue Aug 31 18:13:47 2021 +0300 description: release-1.21.2 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff -r bfbc52374adc -r b2d1a602b241 .hgtags --- a/.hgtags Tue Aug 31 18:13:46 2021 +0300 +++ b/.hgtags Tue Aug 31 18:13:47 2021 +0300 @@ -462,3 +462,4 @@ da571b8eaf8f30f36c43b3c9b25e01e31f47149c ffcbb9980ee2bad27b4d7b1cd680b14ff47b29aa release-1.19.10 df34dcc9ac072ffd0945e5a1f3eb7987e8275375 release-1.21.0 a68ac0677f8553b1f84d357bc9da114731ab5f47 release-1.21.1 +bfbc52374adcbf2f9060afd62de940f6fab3bba5 release-1.21.2 From ottavio at campana.vi.it Tue Aug 31 19:18:33 2021 From: ottavio at campana.vi.it (Ottavio Campana) Date: Tue, 31 Aug 2021 21:18:33 +0200 Subject: Adding a fd that is not obtained through accept to the list the active connections In-Reply-To: References: Message-ID: Dear Phillip, an ssh tunnel is not compatible with the specs of the ONVIF uplink service. I understand that there is no "free lunch" to achieve this, thus I expect to have to write code. What is the suggested way to add a connection, by passing the fd of the connection to nginx before establishing the SSL context. Where could I hook up? How can I implement a fake listener that does not listen but connects to a remote server? 
Thank you, Ottavio Il giorno lun 30 ago 2021 alle ore 13:24 Phillip Odam < phillip.odam at nitorgroup.com> ha scritto: > Hi Ottavio > > There?s no solution just with nginx as it currently that I know of, to > avoid the need for a port forward in the NAT router a simple solution would > be to use a ssh tunnel - this does separate initial connection from > subsequent requests as you?re unlikely to want to establish a new tunnel > for each and every request and ?knowledge? the connection is established is > no longer inherently part of the application making the HHTP request. So to > simplify things you could just expect the ssh tunnel to be established as a > precondition (once off initial setup) > > Phillip > > On Friday, August 27, 2021, Maxim Dounin wrote: > >> Hello! >> >> On Fri, Aug 27, 2021 at 01:59:03PM +0200, Ottavio Campana wrote: >> >> > Dear Phillip, >> > >> > I know Tailscale very well, I use it and like it a lot. But my final >> goal >> > is finding a way to implement the ONVIF Uplink service, >> > https://www.onvif.org/specs/srv/uplink/ONVIF-Uplink-Spec.pdf , where I >> can >> > have several devices on the LAN that need to connect to a remote server, >> > which will then send commands. >> > >> > Therefore I need a way to start a connection from nginx (or an external >> > program and then passing the fd through a unix socket domain) and make >> it >> > act as if the fd were obtained from an accept. >> > >> > Nginx works with events and I find it very difficult to find a >> mechanism to >> > pass this connection to it. >> > >> > Do you have other ideas? >> >> The most simple solution I can think of is to open two >> connections: to your command endpoint and to nginx, and proxy >> everything once the connections are established. 
>> >> -- >> Maxim Dounin >> http://mdounin.ru/ >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Non c'? pi? forza nella normalit?, c'? solo monotonia -------------- next part -------------- An HTML attachment was scrubbed... URL: