From vbart at nginx.com Mon Feb 2 14:23:38 2015 From: vbart at nginx.com (Valentin Bartenev) Date: Mon, 02 Feb 2015 14:23:38 +0000 Subject: [nginx] Fixed AIO handling in the output chain. Message-ID: details: http://hg.nginx.org/nginx/rev/bcdfc39bf44d branches: changeset: 5966:bcdfc39bf44d user: Valentin Bartenev date: Wed Jan 28 21:33:06 2015 +0300 description: Fixed AIO handling in the output chain. The ctx->aio flag must be taken into account in the short path too. diffstat: src/core/ngx_output_chain.c | 8 ++++++-- 1 files changed, 6 insertions(+), 2 deletions(-) diffs (18 lines): diff -r 78271500b8de -r bcdfc39bf44d src/core/ngx_output_chain.c --- a/src/core/ngx_output_chain.c Tue Jan 27 15:38:15 2015 +0300 +++ b/src/core/ngx_output_chain.c Wed Jan 28 21:33:06 2015 +0300 @@ -45,8 +45,12 @@ ngx_output_chain(ngx_output_chain_ctx_t ngx_int_t rc, last; ngx_chain_t *cl, *out, **last_out; - if (ctx->in == NULL && ctx->busy == NULL) { - + if (ctx->in == NULL && ctx->busy == NULL +#if (NGX_HAVE_FILE_AIO) + && !ctx->aio +#endif + ) + { /* * the short path for the case when the ctx->in and ctx->busy chains * are empty, the incoming chain is empty too or has the single buf From arut at nginx.com Mon Feb 2 16:40:37 2015 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 02 Feb 2015 16:40:37 +0000 Subject: [nginx] Core: supported directory skipping in ngx_walk_tree(). Message-ID: details: http://hg.nginx.org/nginx/rev/863d9de1e62b branches: changeset: 5967:863d9de1e62b user: Roman Arutyunyan date: Mon Feb 02 19:38:32 2015 +0300 description: Core: supported directory skipping in ngx_walk_tree(). If pre_tree_handler() returns NGX_DECLINED, the directory is ignored. diffstat: src/core/ngx_file.c | 10 +++++++++- 1 files changed, 9 insertions(+), 1 deletions(-) diffs (23 lines): diff -r bcdfc39bf44d -r 863d9de1e62b src/core/ngx_file.c --- a/src/core/ngx_file.c Wed Jan 28 21:33:06 2015 +0300 +++ b/src/core/ngx_file.c Mon Feb 02 19:38:32 2015 +0300 @@ -1047,10 +1047,18 @@ ngx_walk_tree(ngx_tree_ctx_t *ctx, ngx_s ctx->access = ngx_de_access(&dir); ctx->mtime = ngx_de_mtime(&dir); - if (ctx->pre_tree_handler(ctx, &file) == NGX_ABORT) { + rc = ctx->pre_tree_handler(ctx, &file); + + if (rc == NGX_ABORT) { goto failed; } + if (rc == NGX_DECLINED) { + ngx_log_debug1(NGX_LOG_DEBUG_CORE, ctx->log, 0, + "tree skip dir \"%s\"", file.data); + continue; + } + if (ngx_walk_tree(ctx, &file) == NGX_ABORT) { goto failed; } From arut at nginx.com Mon Feb 2 16:40:40 2015 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 02 Feb 2015 16:40:40 +0000 Subject: [nginx] Cache: added temp_path to file cache. Message-ID: details: http://hg.nginx.org/nginx/rev/99639bfdfa2a branches: changeset: 5968:99639bfdfa2a user: Roman Arutyunyan date: Mon Feb 02 19:38:35 2015 +0300 description: Cache: added temp_path to file cache. If use_temp_path is set to off, a subdirectory "temp" is created in the cache directory. It's used instead of proxy_temp_path and friends for caching upstream response. 
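As a usage illustration of the new behaviour: a minimal configuration sketch, assuming the "use_temp_path" parameter of "proxy_cache_path" listed in the 1.7.10 changelog later in this digest; the cache path, zone name and upstream name below are placeholders, not taken from the patch.

    # hypothetical example: with use_temp_path=off, temporary files for
    # cached upstream responses go to the "temp" subdirectory created in
    # the cache directory instead of proxy_temp_path
    proxy_cache_path  /var/cache/nginx  levels=1:2  keys_zone=cache_one:10m
                      use_temp_path=off;

    server {
        location / {
            proxy_pass   http://backend;
            proxy_cache  cache_one;
        }
    }

Here "backend" stands for an upstream{} block defined elsewhere; the only point of the sketch is the use_temp_path parameter whose behaviour this changeset implements.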
diffstat: src/http/ngx_http_cache.h | 4 +-- src/http/ngx_http_file_cache.c | 62 ++++++++++++++++++++++++++++++++--------- src/http/ngx_http_upstream.c | 6 +-- 3 files changed, 51 insertions(+), 21 deletions(-) diffs (151 lines): diff -r 863d9de1e62b -r 99639bfdfa2a src/http/ngx_http_cache.h --- a/src/http/ngx_http_cache.h Mon Feb 02 19:38:32 2015 +0300 +++ b/src/http/ngx_http_cache.h Mon Feb 02 19:38:35 2015 +0300 @@ -142,6 +142,7 @@ struct ngx_http_file_cache_s { ngx_slab_pool_t *shpool; ngx_path_t *path; + ngx_path_t *temp_path; off_t max_size; size_t bsize; @@ -155,9 +156,6 @@ struct ngx_http_file_cache_s { ngx_msec_t loader_threshold; ngx_shm_zone_t *shm_zone; - - ngx_uint_t use_temp_path; - /* unsigned use_temp_path:1 */ }; diff -r 863d9de1e62b -r 99639bfdfa2a src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c Mon Feb 02 19:38:32 2015 +0300 +++ b/src/http/ngx_http_file_cache.c Mon Feb 02 19:38:35 2015 +0300 @@ -49,6 +49,8 @@ static ngx_int_t ngx_http_file_cache_noo ngx_str_t *path); static ngx_int_t ngx_http_file_cache_manage_file(ngx_tree_ctx_t *ctx, ngx_str_t *path); +static ngx_int_t ngx_http_file_cache_manage_directory(ngx_tree_ctx_t *ctx, + ngx_str_t *path); static ngx_int_t ngx_http_file_cache_add_file(ngx_tree_ctx_t *ctx, ngx_str_t *path); static ngx_int_t ngx_http_file_cache_add(ngx_http_file_cache_t *cache, @@ -1845,7 +1847,7 @@ ngx_http_file_cache_loader(void *data) tree.init_handler = NULL; tree.file_handler = ngx_http_file_cache_manage_file; - tree.pre_tree_handler = ngx_http_file_cache_noop; + tree.pre_tree_handler = ngx_http_file_cache_manage_directory; tree.post_tree_handler = ngx_http_file_cache_noop; tree.spec_handler = ngx_http_file_cache_delete_file; tree.data = cache; @@ -1910,6 +1912,19 @@ ngx_http_file_cache_manage_file(ngx_tree } +static ngx_int_t +ngx_http_file_cache_manage_directory(ngx_tree_ctx_t *ctx, ngx_str_t *path) +{ + if (path->len >= 5 + && ngx_strncmp(path->data + path->len - 5, "/temp", 5) == 0) + { + return NGX_DECLINED; + } + + return NGX_OK; +} + + static void ngx_http_file_cache_loader_sleep(ngx_http_file_cache_t *cache) { @@ -1935,17 +1950,6 @@ ngx_http_file_cache_add_file(ngx_tree_ct return NGX_ERROR; } - /* - * Temporary files in cache have a suffix consisting of a dot - * followed by 10 digits. 
- */ - - if (name->len >= 2 * NGX_HTTP_CACHE_KEY_LEN + 1 + 10 - && name->data[name->len - 10 - 1] == '.') - { - return NGX_OK; - } - if (ctx->size < (off_t) sizeof(ngx_http_file_cache_header_t)) { ngx_log_error(NGX_LOG_CRIT, ctx->log, 0, "cache file \"%s\" is too small", name->data); @@ -2070,6 +2074,7 @@ ngx_http_file_cache_set_slot(ngx_conf_t off_t max_size; u_char *last, *p; time_t inactive; + size_t len; ssize_t size; ngx_str_t s, name, *value; ngx_int_t loader_files; @@ -2291,6 +2296,37 @@ ngx_http_file_cache_set_slot(ngx_conf_t return NGX_CONF_ERROR; } + if (!use_temp_path) { + cache->temp_path = ngx_pcalloc(cf->pool, sizeof(ngx_path_t)); + if (cache->temp_path == NULL) { + return NGX_CONF_ERROR; + } + + len = cache->path->name.len + sizeof("/temp") - 1; + + p = ngx_pnalloc(cf->pool, len + 1); + if (p == NULL) { + return NGX_CONF_ERROR; + } + + cache->temp_path->name.len = len; + cache->temp_path->name.data = p; + + p = ngx_cpymem(p, cache->path->name.data, cache->path->name.len); + ngx_memcpy(p, "/temp", sizeof("/temp")); + + ngx_memcpy(&cache->temp_path->level, &cache->path->level, + 3 * sizeof(size_t)); + + cache->temp_path->len = cache->path->len; + cache->temp_path->conf_file = cf->conf_file->file.name.data; + cache->temp_path->line = cf->conf_file->line; + + if (ngx_add_path(cf, &cache->temp_path) != NGX_OK) { + return NGX_CONF_ERROR; + } + } + cache->shm_zone = ngx_shared_memory_add(cf, &name, size, cmd->post); if (cache->shm_zone == NULL) { return NGX_CONF_ERROR; @@ -2306,8 +2342,6 @@ ngx_http_file_cache_set_slot(ngx_conf_t cache->shm_zone->init = ngx_http_file_cache_init; cache->shm_zone->data = cache; - cache->use_temp_path = use_temp_path; - cache->inactive = inactive; cache->max_size = max_size; diff -r 863d9de1e62b -r 99639bfdfa2a src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Mon Feb 02 19:38:32 2015 +0300 +++ b/src/http/ngx_http_upstream.c Mon Feb 02 19:38:35 2015 +0300 @@ -2688,10 +2688,8 @@ ngx_http_upstream_send_response(ngx_http p->temp_file->persistent = 1; #if (NGX_HTTP_CACHE) - if (r->cache && !r->cache->file_cache->use_temp_path) { - p->temp_file->file.name = r->cache->file.name; - p->temp_file->path = r->cache->file_cache->path; - p->temp_file->prefix = 1; + if (r->cache && r->cache->file_cache->temp_path) { + p->temp_file->path = r->cache->file_cache->temp_path; } #endif From teward at dark-net.net Mon Feb 2 18:13:24 2015 From: teward at dark-net.net (Thomas Ward) Date: Mon, 02 Feb 2015 13:13:24 -0500 Subject: CVE-2011-4968 Fix Included in Which Release? Message-ID: <54CFBE44.9070005@dark-net.net> Hello. I am well aware that CVE-2011-4968 had a fix included for it (based on http://trac.nginx.org/nginx/ticket/13 and http://trac.nginx.org/nginx/changeset/060c2e692b96a150b584b8e30d596be1f2defa9c/nginx) however I do not see an entry for it in the changelog. With what release/version did this get included in NGINX? Thomas -------------- next part -------------- An HTML attachment was scrubbed... URL: From luky-37 at hotmail.com Mon Feb 2 18:23:24 2015 From: luky-37 at hotmail.com (Lukas Tribus) Date: Mon, 2 Feb 2015 19:23:24 +0100 Subject: CVE-2011-4968 Fix Included in Which Release? In-Reply-To: <54CFBE44.9070005@dark-net.net> References: <54CFBE44.9070005@dark-net.net> Message-ID: > Hello. 
> > I am well aware that CVE-2011-4968 had a fix included for it (based on > http://trac.nginx.org/nginx/ticket/13 and > http://trac.nginx.org/nginx/changeset/060c2e692b96a150b584b8e30d596be1f2defa9c/nginx) > however I do not see an entry for it in the changelog. > > With what release/version did this get included in NGINX? proxy_ssl_verify and proxy_ssl_verify_depth keywords are supported since nginx 1.7.0 and appear in the changlog as: "Feature: backend SSL certificate verification" By adivsed that you need to configure this, it doesn't just work out of the box. Lukas [1] http://nginx.org/en/docs/http/ngx_http_proxy_module.html&proxy_ssl_verify [2] http://nginx.org/en/docs/http/ngx_http_proxy_module.html&proxy_ssl_verify_depth [3] http://nginx.org/en/CHANGES From arut at nginx.com Mon Feb 2 18:29:31 2015 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 02 Feb 2015 18:29:31 +0000 Subject: [nginx] Core: reverted prefix-based temp files (a9138c35120d). Message-ID: details: http://hg.nginx.org/nginx/rev/3281de8142f5 branches: changeset: 5969:3281de8142f5 user: Roman Arutyunyan date: Mon Feb 02 21:28:09 2015 +0300 description: Core: reverted prefix-based temp files (a9138c35120d). The use_temp_path http cache feature is now implemented using a separate temp hierarchy in cache directory. Prefix-based temp files are no longer needed. diffstat: src/core/ngx_file.c | 28 ++++++++-------------------- src/core/ngx_file.h | 1 - 2 files changed, 8 insertions(+), 21 deletions(-) diffs (83 lines): diff -r 99639bfdfa2a -r 3281de8142f5 src/core/ngx_file.c --- a/src/core/ngx_file.c Mon Feb 02 19:38:35 2015 +0300 +++ b/src/core/ngx_file.c Mon Feb 02 21:28:09 2015 +0300 @@ -111,9 +111,8 @@ ngx_write_chain_to_temp_file(ngx_temp_fi ngx_int_t rc; if (tf->file.fd == NGX_INVALID_FILE) { - rc = ngx_create_temp_file(&tf->file, tf->prefix ? NULL : tf->path, - tf->pool, tf->persistent, tf->clean, - tf->access); + rc = ngx_create_temp_file(&tf->file, tf->path, tf->pool, + tf->persistent, tf->clean, tf->access); if (rc != NGX_OK) { return rc; @@ -133,15 +132,12 @@ ngx_int_t ngx_create_temp_file(ngx_file_t *file, ngx_path_t *path, ngx_pool_t *pool, ngx_uint_t persistent, ngx_uint_t clean, ngx_uint_t access) { - u_char *p; uint32_t n; ngx_err_t err; - ngx_str_t prefix; ngx_pool_cleanup_t *cln; ngx_pool_cleanup_file_t *clnf; - prefix = path ? path->name : file->name; - file->name.len = prefix.len + 1 + (path ? 
path->len : 0) + 10; + file->name.len = path->name.len + 1 + path->len + 10; file->name.data = ngx_pnalloc(pool, file->name.len + 1); if (file->name.data == NULL) { @@ -154,14 +150,7 @@ ngx_create_temp_file(ngx_file_t *file, n } #endif - p = ngx_cpymem(file->name.data, prefix.data, prefix.len); - - if (path) { - p += 1 + path->len; - - } else { - *p++ = '.'; - } + ngx_memcpy(file->name.data, path->name.data, path->name.len); n = (uint32_t) ngx_next_temp_number(0); @@ -171,11 +160,10 @@ ngx_create_temp_file(ngx_file_t *file, n } for ( ;; ) { - (void) ngx_sprintf(p, "%010uD%Z", n); + (void) ngx_sprintf(file->name.data + path->name.len + 1 + path->len, + "%010uD%Z", n); - if (path) { - ngx_create_hashed_filename(path, file->name.data, file->name.len); - } + ngx_create_hashed_filename(path, file->name.data, file->name.len); ngx_log_debug1(NGX_LOG_DEBUG_CORE, file->log, 0, "hashed path: %s", file->name.data); @@ -204,7 +192,7 @@ ngx_create_temp_file(ngx_file_t *file, n continue; } - if ((path == NULL) || (path->level[0] == 0) || (err != NGX_ENOPATH)) { + if ((path->level[0] == 0) || (err != NGX_ENOPATH)) { ngx_log_error(NGX_LOG_CRIT, file->log, err, ngx_open_tempfile_n " \"%s\" failed", file->name.data); diff -r 99639bfdfa2a -r 3281de8142f5 src/core/ngx_file.h --- a/src/core/ngx_file.h Mon Feb 02 19:38:35 2015 +0300 +++ b/src/core/ngx_file.h Mon Feb 02 21:28:09 2015 +0300 @@ -71,7 +71,6 @@ typedef struct { unsigned log_level:8; unsigned persistent:1; unsigned clean:1; - unsigned prefix:1; } ngx_temp_file_t; From arut at nginx.com Mon Feb 2 19:37:23 2015 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 2 Feb 2015 22:37:23 +0300 Subject: [PATCH] Upstream: add use_temp_path=tmp to proxy_cache_path and friends In-Reply-To: References: <6F10FE84-DBBC-4B92-85A3-E7B907093174@nginx.com> <20150112174848.GN47350@mdounin.ru> Message-ID: <20150202193723.GA634@Romans-MacBook-Air.local> Hello Piotr, We finally came back to the idea you suggested in this thread. Now use_temp_path option creates a separte temp hierarchy in the cache directory. This solution looks simpler than prefix-based temporary files. Thanks for cooperation! On Mon, Jan 12, 2015 at 03:47:27PM -0800, Piotr Sikora wrote: > Hey Maxim, > > > The downside of this approach is that with a separate temp > > directory cache can't be effectively spread over multiple file > > systems. > > I disagree, we've been using exactly this structure to spread cache > over multiple file systems (via multiple cache paths, each with its > own "tmp") for the last 2 years. > > Best regards, > Piotr Sikora > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- Roman Arutyunyan From tigran.bayburtsyan at gmail.com Tue Feb 3 09:26:33 2015 From: tigran.bayburtsyan at gmail.com (Tigran Bayburtsyan) Date: Tue, 3 Feb 2015 13:26:33 +0400 Subject: Fwd: Nginx module "data sent" handler In-Reply-To: References: Message-ID: Hi. I'm developing Nginx module where I need to handle some function when all data have been sent to client or when client closed the connection. For example I have ngx_http_finalize_request(r, ngx_http_output_filter ( r , out_chain )); Where out_chain contains over 700KB of data. I can't find where to add function to handle event that all 700kb have been sent to client, or client closed the connection. As I understand all that 700kb data Nginx not sending at once it will take some Nginx loops to be sent. 
So is there any function or event to handle "data sent" event ? Thanks. -------------- next part -------------- An HTML attachment was scrubbed... URL: From agentzh at gmail.com Tue Feb 3 22:18:30 2015 From: agentzh at gmail.com (Yichun Zhang (agentzh)) Date: Tue, 3 Feb 2015 14:18:30 -0800 Subject: Nginx module "data sent" handler In-Reply-To: References: Message-ID: Hello! On Tue, Feb 3, 2015 at 1:26 AM, Tigran Bayburtsyan wrote: > > As I understand all that 700kb data Nginx not sending at once it will take > some Nginx loops to be sent. > Right. Both of the ngx_http_finalize_request and ngx_http_output_filter are asynchronous calls. So data might be later flushed out upon new write events via the ngx_http_writer handler. > So is there any function or event to handle "data sent" event ? > One good approximation for this is to register your own *pool cleanup* handler in r->pool. The request pool will not be destroyed when there's still pending data (don't get confused it with the request cleanup thing created by ngx_http_cleanup_add, which gets called too early for this). Regards, -agentzh From agentzh at gmail.com Tue Feb 3 22:21:11 2015 From: agentzh at gmail.com (Yichun Zhang (agentzh)) Date: Tue, 3 Feb 2015 14:21:11 -0800 Subject: Nginx module "data sent" handler In-Reply-To: References: Message-ID: Hello! On Tue, Feb 3, 2015 at 2:18 PM, Yichun Zhang (agentzh) wrote: > One good approximation for this is to register your own *pool cleanup* > handler in r->pool. The request pool will not be destroyed when > there's still pending data (don't get confused it with the request > cleanup thing created by ngx_http_cleanup_add, which gets called too > early for this). > Oh, sorry to mention that your log-phase handler can also be a good place for this. Maybe even better if you're trying to log something upon the "data sent" event :) Regards, -agentzh From arut at nginx.com Wed Feb 4 13:24:05 2015 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 04 Feb 2015 13:24:05 +0000 Subject: [nginx] Core: fixed a race resulting in extra sem_post()'s. Message-ID: details: http://hg.nginx.org/nginx/rev/74edc0ccf27a branches: changeset: 5970:74edc0ccf27a user: Roman Arutyunyan date: Wed Feb 04 16:22:43 2015 +0300 description: Core: fixed a race resulting in extra sem_post()'s. The mtx->wait counter was not decremented if we were able to obtain the lock right after incrementing it. This resulted in unneeded sem_post() calls, eventually leading to EOVERFLOW errors being logged, "sem_post() failed while wake shmtx (75: Value too large for defined data type)". To close the race, mtx->wait is now decremented if we obtain the lock right after incrementing it in ngx_shmtx_lock(). The result can become -1 if a concurrent ngx_shmtx_unlock() decrements mtx->wait before the added code does. However, that only leads to one extra iteration in the next call of ngx_shmtx_lock(). 
diffstat: src/core/ngx_shmtx.c | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diffs (20 lines): diff -r 3281de8142f5 -r 74edc0ccf27a src/core/ngx_shmtx.c --- a/src/core/ngx_shmtx.c Mon Feb 02 21:28:09 2015 +0300 +++ b/src/core/ngx_shmtx.c Wed Feb 04 16:22:43 2015 +0300 @@ -101,6 +101,7 @@ ngx_shmtx_lock(ngx_shmtx_t *mtx) (void) ngx_atomic_fetch_add(mtx->wait, 1); if (*mtx->lock == 0 && ngx_atomic_cmp_set(mtx->lock, 0, ngx_pid)) { + (void) ngx_atomic_fetch_add(mtx->wait, -1); return; } @@ -174,7 +175,7 @@ ngx_shmtx_wakeup(ngx_shmtx_t *mtx) wait = *mtx->wait; - if (wait == 0) { + if ((ngx_atomic_int_t) wait <= 0) { return; } From dani at telecom.pt Wed Feb 4 15:20:03 2015 From: dani at telecom.pt (Dani Bento) Date: Wed, 4 Feb 2015 15:20:03 +0000 Subject: nginx + DSCP (Differentied services) In-Reply-To: <20141217112729.55ca175c@alma> References: <20141216142159.3201aa59@alma> <20141216154636.GL45960@mdounin.ru> <20141216172442.3ba2d9d8@alma> <20141216184828.GO45960@mdounin.ru> <20141217112729.55ca175c@alma> Message-ID: <20150204152003.5c619f8e@alma> Hello, Any advance on this idea? Or it will be easier to adapt Maxim's module to do what we want? (mainly IPv6 compatibility and the possibility to change DSCP mark via backend header interaction). Thanks. Dani On Wed, 17 Dec 2014 11:27:29 +0000 Dani Bento wrote: > Hello, > > In attachment is our patch to the nginx core. You can take a look. > > We found that your module isn't IPv6 compatible too, but it may be > simple to implement that. > > Thanks. > > On Tue, 16 Dec 2014 18:48:28 +0000 > Maxim Dounin wrote: > > > Hello! > > > > On Tue, Dec 16, 2014 at 05:24:42PM +0000, Dani Bento wrote: > > > > > Thanks for the reply. > > > > > > We looked at your module and found it interesting that you > > > avoided to change the core. > > > > > > Our code marks each buffer flush individually. We can control the > > > ToS for each request even when using keep-alive (using the > > > X-Accel-ClassID header). After the response has been sent the ToS > > > is reset back to the default (location configuration). > > > > > > In your module you mark only at the beginning of the response (at > > > the header filter stage). Don't you have problems with different > > > requests using the same connection? > > > > If you want ToS to be reset, you can use "ip_tos 0x00;" by > > default, see example here: > > > > http://mdounin.ru/hg/ngx_http_ip_tos_filter_module/file/tip/README > > > > > You can only have one ToS mark for > > > each location if you relying on a static configuration. Can it be > > > set by variable "tos $class_id" ? > > > > Not now, but this can be added if needed - like it was recently > > added to the "expires" directive, see > > http://hg.nginx.org/nginx/rev/4983f7d18fe3. > > > > > -- Dani Bento Dire??o de Internet e Tecnologia DTS/DVS tlm: +351 91 429 72 81 dani at telecom.pt From damien at commerceguys.com Wed Feb 4 15:35:13 2015 From: damien at commerceguys.com (Damien Tournoud) Date: Wed, 4 Feb 2015 16:35:13 +0100 Subject: [PATCH] http_core: Do not match a file for a directory in try_files In-Reply-To: <1B0532BF-5827-41FF-A1DE-96455E1DFD69@nginx.com> References: <1B0532BF-5827-41FF-A1DE-96455E1DFD69@nginx.com> Message-ID: On Wed, Jan 21, 2015 at 5:21 PM, Sergey Kandaurov wrote: > The patch looks good to me. > Bump? Any concern in getting this in? Damien Tournoud -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From info at phpgangsta.de Wed Feb 4 21:07:50 2015 From: info at phpgangsta.de (Michael Kliewe) Date: Wed, 04 Feb 2015 22:07:50 +0100 Subject: [PATCH] Mail: send starttls flag value to auth script In-Reply-To: <53DBF531.2010308@phpgangsta.de> References: <51fd90f96449c23af007.1394099969@HPC> <20140306162718.GL34696@mdounin.ru> <877FD2F6-57CD-4C14-9F2B-4C9E909C3488@phpgangsta.de> <53D9AAB0.5060501@phpgangsta.de> <20140801185919.GU1849@mdounin.ru> <53DBF531.2010308@phpgangsta.de> Message-ID: <54D28A26.60903@phpgangsta.de> Hi Maxim, I would like to remind again this feature patch. It would help a lot to get this information about transport encryption into the auth script. It does not hurt the performance, and is a very tiny patch. You can rename the header name and values as you like. It would be very nice if you could please merge it into nginx. Thanks! Michael Am 01.08.2014 um 22:14 schrieb Michael Kliewe: > Hi, > > you can rename it as you wish, as long as the functionality gets into > nginx ;-) > > Michael > > Am 01.08.2014 um 20:59 schrieb Maxim Dounin: >> Hello! >> >> On Thu, Jul 31, 2014 at 04:32:16AM +0200, Michael Kliewe wrote: >> >>> Hi Maxim, >>> >>> this is very interesting to have in nginx, and it's a very easy >>> patch. Any >>> chance this gets into nginx? It helps a lot to migrate users to >>> encrypted >>> mail connections, allowing some users to still use unencrypted >>> connections, >>> and log in the auth script which user already uses encrypted >>> connections. >>> >>> It would be great if you could add it to nginx. >> I have this flagged in my inbox, and I hope I'll be able to into >> this. The functionality itself should be usable, but I'm not >> happy with the header name and value used. >> > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From hungnv at opensource.com.vn Thu Feb 5 07:21:48 2015 From: hungnv at opensource.com.vn (hungnv at opensource.com.vn) Date: Thu, 05 Feb 2015 14:21:48 +0700 Subject: [PATCH] Enable faststart for mp4 module Message-ID: <031db7af488c045fc4f0.1423120908@Hungs-MacBook-Air.local> # HG changeset patch # User Hung Nguyen +#include +#include +#include +#ifdef WIN32 +#include +#include +#define DIR_SEPARATOR '\\' +#define strdup _strdup +#define open _open +#define close _close +#define write _write +#define lseek _lseeki64 +#define stat _stat64 +#else +#include +#include +#include +#include +#include +#include +#include +#endif + + + +#ifdef __MINGW32__ +#define fseeko(x,y,z) fseeko64(x,y,z) +#define ftello(x) ftello64(x) +#endif + +#define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1]) + +#define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \ + (((uint8_t*)(x))[1] << 16) | \ + (((uint8_t*)(x))[2] << 8) | \ + ((uint8_t*)(x))[3]) + +#define BE_64(x) (((uint64_t)(((uint8_t*)(x))[0]) << 56) | \ + ((uint64_t)(((uint8_t*)(x))[1]) << 48) | \ + ((uint64_t)(((uint8_t*)(x))[2]) << 40) | \ + ((uint64_t)(((uint8_t*)(x))[3]) << 32) | \ + ((uint64_t)(((uint8_t*)(x))[4]) << 24) | \ + ((uint64_t)(((uint8_t*)(x))[5]) << 16) | \ + ((uint64_t)(((uint8_t*)(x))[6]) << 8) | \ + ((uint64_t)((uint8_t*)(x))[7])) + +#define BE_FOURCC( ch0, ch1, ch2, ch3 ) \ + ( (uint32_t)(unsigned char)(ch3) | \ + ( (uint32_t)(unsigned char)(ch2) << 8 ) | \ + ( (uint32_t)(unsigned char)(ch1) << 16 ) | \ + ( (uint32_t)(unsigned char)(ch0) << 24 ) ) + +#define QT_ATOM BE_FOURCC + +/* top level atoms */ +#define FREE_ATOM QT_ATOM('f', 'r', 'e', 'e') + +#define 
JUNK_ATOM QT_ATOM('j', 'u', 'n', 'k') + +#define MDAT_ATOM QT_ATOM('m', 'd', 'a', 't') + +#define MOOV_ATOM QT_ATOM('m', 'o', 'o', 'v') + +#define PNOT_ATOM QT_ATOM('p', 'n', 'o', 't') + +#define SKIP_ATOM QT_ATOM('s', 'k', 'i', 'p') + +#define WIDE_ATOM QT_ATOM('w', 'i', 'd', 'e') + +#define PICT_ATOM QT_ATOM('P', 'I', 'C', 'T') + +#define FTYP_ATOM QT_ATOM('f', 't', 'y', 'p') + +#define UUID_ATOM QT_ATOM('u', 'u', 'i', 'd') + +#define CMOV_ATOM QT_ATOM('c', 'm', 'o', 'v') + +#define STCO_ATOM QT_ATOM('s', 't', 'c', 'o') + +#define CO64_ATOM QT_ATOM('c', 'o', '6', '4') + +#define ATOM_PREAMBLE_SIZE 8 + +#define COPY_BUFFER_SIZE 1024 + + +/* we take 2 arguments from ngx_http_mp4_module + * path: to open it in write mode once source file need to be modified. + * file descriptor: nginx already opened the file, we dont have to open again + * ngx_fd_t is actually an integer (see ngx_files.h) + */ + + +int ngx_http_enable_fast_start(ngx_str_t *path, ngx_fd_t +ngx_open_file_cached_fd, ngx_http_request_t *r) { + unsigned char atom_bytes[ATOM_PREAMBLE_SIZE]; + uint32_t atom_type = 0; + uint64_t atom_size = 0; + uint64_t atom_offset = 0; + uint64_t last_offset; + unsigned char *moov_atom = NULL; + unsigned char *ftyp_atom = NULL; + uint64_t moov_atom_size; + uint64_t ftyp_atom_size = 0; + uint64_t i, j; + uint32_t offset_count; + uint64_t current_offset; + uint64_t start_offset = 0; + int outfile_fd = -1; + unsigned char *temp_buf = NULL; + ngx_log_t *log = r->connection->log; + + + /* traverse through the atoms in the file to make sure that 'moov' is + * at the end */ + while (1) { + + if (read(ngx_open_file_cached_fd, atom_bytes, ATOM_PREAMBLE_SIZE) == 0) + break; + + atom_size = (uint32_t) BE_32(&atom_bytes[0]); + atom_type = BE_32(&atom_bytes[4]); + /* keep ftyp atom */ + + if (atom_type == FTYP_ATOM) { + ftyp_atom_size = atom_size; + free(ftyp_atom); + ftyp_atom = ngx_palloc(r->connection->pool, ftyp_atom_size); + // ftyp_atom = malloc(ftyp_atom_size); + + if (!ftyp_atom) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "could not allocate " + "%"PRIu64" byte for ftyp atom\n", atom_size); + goto error_out; + } + + lseek(ngx_open_file_cached_fd, -ATOM_PREAMBLE_SIZE, SEEK_CUR); + ngx_log_debug(NGX_LOG_DEBUG, log, 0, "atom_size: " + "%"PRIu64" \n", atom_size); + + if (read(ngx_open_file_cached_fd, ftyp_atom, atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + start_offset = atom_size; + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, + "start_offset to verify: %"PRIu64" \n", start_offset); + + } else { + /* 64-bit special case */ + if (atom_size == 1) { + if (read(ngx_open_file_cached_fd, atom_bytes, + ATOM_PREAMBLE_SIZE) == 0) { + break; + } + + atom_size = BE_64(&atom_bytes[0]); + lseek(ngx_open_file_cached_fd, atom_size - + ATOM_PREAMBLE_SIZE * 2, SEEK_CUR); + + } else { + lseek(ngx_open_file_cached_fd, atom_size - ATOM_PREAMBLE_SIZE, + SEEK_CUR); + } + + } + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, log, 0, "%c%c%c%c %10"PRIu64"" + " %"PRIu64"\n", + (atom_type >> 24) & 255, + (atom_type >> 16) & 255, + (atom_type >> 8) & 255, + (atom_type >> 0) & 255, + atom_offset, + atom_size); + + if ((atom_type != FREE_ATOM) && + (atom_type != JUNK_ATOM) && + (atom_type != MDAT_ATOM) && + (atom_type != MOOV_ATOM) && + (atom_type != PNOT_ATOM) && + (atom_type != SKIP_ATOM) && + (atom_type != WIDE_ATOM) && + (atom_type != PICT_ATOM) && + (atom_type != UUID_ATOM) && + (atom_type != FTYP_ATOM)) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "encountered non-QT " + "top-level atom (is this a 
Quicktime file?)\n"); + break; + } + + atom_offset += atom_size; + + /* The atom header is 8 (or 16 bytes), if the atom size (which + * includes these 8 or 16 bytes) is less than that, we won't be + * able to continue scanning sensibly after this atom, so break. */ + if (atom_size < 8) + break; + } + + if (atom_type != MOOV_ATOM) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "last atom in file: " + "%s was not a moov atom\n", path->data); + if (ftyp_atom) ngx_pfree(r->connection->pool, ftyp_atom); + // dont close file, not our job + return NGX_OK; + } + + /* moov atom was, in fact, the last atom in the chunk; load the whole + * moov atom */ + last_offset = lseek(ngx_open_file_cached_fd, -atom_size, SEEK_END); + moov_atom_size = atom_size; + moov_atom = ngx_palloc(r->connection->pool, moov_atom_size); + + if (!moov_atom) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "could not allocate " + "%"PRIu64" byte for moov atom\n", atom_size); + goto error_out; + } + + if (read(ngx_open_file_cached_fd, moov_atom, atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + /* this utility does not support compressed atoms yet, so disqualify + * files with compressed QT atoms */ + if (BE_32(&moov_atom[12]) == CMOV_ATOM) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "this module does " + "not support compressed moov atoms yet\n"); + free(ftyp_atom); + if (moov_atom) free(moov_atom); + /* should not return error, if we cannot fix it, + * let player download the whole file then play it*/ + return NGX_OK; + } + + /* read next move_atom_size bytes + * since we read/write file in same time, we must read before write into + * the buffer + */ + temp_buf = ngx_palloc(r->connection->pool, moov_atom_size); + + if (!temp_buf) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "Cannot allocate %"PRIu64" " + "byte for temp buf \n", moov_atom_size); + goto error_out; + } + + /* seek to after ftyp_atom */ + lseek(ngx_open_file_cached_fd, ftyp_atom_size, SEEK_SET); + + if (read(ngx_open_file_cached_fd, temp_buf, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + start_offset += moov_atom_size; + + /* end read temp buffer bytes */ + + /* crawl through the moov chunk in search of stco or co64 atoms */ + for (i = 4; i < moov_atom_size - 4; i++) { + atom_type = BE_32(&moov_atom[i]); + + if (atom_type == STCO_ATOM) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "%s patching stco " + "atom...\n", path->data); + atom_size = BE_32(&moov_atom[i - 4]); + + if (i + atom_size - 4 > moov_atom_size) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, " bad atom size\n"); + goto error_out; + } + + offset_count = BE_32(&moov_atom[i + 8]); + + for (j = 0; j < offset_count; j++) { + current_offset = BE_32(&moov_atom[i + 12 + j * 4]); + current_offset += moov_atom_size; + moov_atom[i + 12 + j * 4 + 0] = (current_offset >> 24) & 0xFF; + moov_atom[i + 12 + j * 4 + 1] = (current_offset >> 16) & 0xFF; + moov_atom[i + 12 + j * 4 + 2] = (current_offset >> 8) & 0xFF; + moov_atom[i + 12 + j * 4 + 3] = (current_offset >> 0) & 0xFF; + } + + i += atom_size - 4; + + } else if (atom_type == CO64_ATOM) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "%s patching co64 " + "atom...\n", path->data); + atom_size = BE_32(&moov_atom[i - 4]); + + if (i + atom_size - 4 > moov_atom_size) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, " bad atom size\n"); + goto error_out; + } + + offset_count = BE_32(&moov_atom[i + 8]); + + for (j = 0; j < offset_count; j++) { + current_offset = BE_64(&moov_atom[i + 12 + j * 
8]); + current_offset += moov_atom_size; + moov_atom[i + 12 + j * 8 + 0] = (current_offset >> 56) & 0xFF; + moov_atom[i + 12 + j * 8 + 1] = (current_offset >> 48) & 0xFF; + moov_atom[i + 12 + j * 8 + 2] = (current_offset >> 40) & 0xFF; + moov_atom[i + 12 + j * 8 + 3] = (current_offset >> 32) & 0xFF; + moov_atom[i + 12 + j * 8 + 4] = (current_offset >> 24) & 0xFF; + moov_atom[i + 12 + j * 8 + 5] = (current_offset >> 16) & 0xFF; + moov_atom[i + 12 + j * 8 + 6] = (current_offset >> 8) & 0xFF; + moov_atom[i + 12 + j * 8 + 7] = (current_offset >> 0) & 0xFF; + } + + i += atom_size - 4; + } + } + + + if (start_offset > 0) { /* seek after ftyp atom */ + lseek(ngx_open_file_cached_fd, start_offset, SEEK_SET); + last_offset -= start_offset; + } + + outfile_fd = open((const char *) path->data, O_WRONLY); + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "outfile fd: %d\n", outfile_fd); + + if (outfile_fd < 0) { + perror((const char *) path->data); + goto error_out; + } + + /* dump the same ftyp atom */ + if (ftyp_atom_size > 0) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "%s: writing ftyp atom...\n" + , path->data); + + if (write(outfile_fd, ftyp_atom, ftyp_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + } + + i = 0; + /* + we must use 2 buffer to read/write + */ + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, " moov_atom_size: %"PRIu64" \n" + , moov_atom_size); + + while (last_offset) { + // printf("last offset: %"PRIu64" \n", last_offset); + if (i == 0) { + ngx_log_debug(NGX_LOG_DEBUG, log, 0, " writing moov atom...\n"); + i = 1; + } + + if (write(outfile_fd, moov_atom, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + if (last_offset < moov_atom_size) + moov_atom_size = last_offset; + + if (read(ngx_open_file_cached_fd, moov_atom, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + last_offset -= moov_atom_size; + + if (write(outfile_fd, temp_buf, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + if (last_offset < moov_atom_size) + moov_atom_size = last_offset; + + if (read(ngx_open_file_cached_fd, temp_buf, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + last_offset -= moov_atom_size; + } + + /* seek to beginning of source file*/ + lseek(ngx_open_file_cached_fd, 0, SEEK_SET); + + close(outfile_fd); + ngx_pfree(r->connection->pool, moov_atom); + ngx_pfree(r->connection->pool, ftyp_atom); + ngx_pfree(r->connection->pool, temp_buf); + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, " finish fixing file: %s\n" + , path->data); + return NGX_OK; + +error_out: + if (outfile_fd > 0) + close(outfile_fd); + + if (moov_atom) + ngx_pfree(r->connection->pool, moov_atom); + + if (ftyp_atom) + ngx_pfree(r->connection->pool, ftyp_atom); + + if (temp_buf) + ngx_pfree(r->connection->pool, temp_buf); + + return NGX_ERROR; +} diff -r 78271500b8de -r 031db7af488c src/http/modules/ngx_http_mp4_module.c --- a/src/http/modules/ngx_http_mp4_module.c Tue Jan 27 15:38:15 2015 +0300 +++ b/src/http/modules/ngx_http_mp4_module.c Fri Jan 30 11:11:00 2015 +0700 @@ -7,6 +7,7 @@ #include #include #include +#include "ngx_http_mp4_faststart.h" #define NGX_HTTP_MP4_TRAK_ATOM 0 @@ -43,6 +44,7 @@ typedef struct { size_t buffer_size; size_t max_buffer_size; + ngx_flag_t mp4_enhance; } ngx_http_mp4_conf_t; @@ -332,7 +334,14 @@ offsetof(ngx_http_mp4_conf_t, max_buffer_size), NULL }, - ngx_null_command + { ngx_string("fix_mp4"), + 
NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_mp4_conf_t, mp4_enhance), + NULL }, + + ngx_null_command }; @@ -429,6 +438,7 @@ ngx_http_mp4_file_t *mp4; ngx_open_file_info_t of; ngx_http_core_loc_conf_t *clcf; + ngx_http_mp4_conf_t *mlcf; if (!(r->method & (NGX_HTTP_GET|NGX_HTTP_HEAD))) { return NGX_HTTP_NOT_ALLOWED; @@ -522,6 +532,18 @@ return NGX_DECLINED; } + /* move atom to beginning of file if it's in the last*/ + mlcf = ngx_http_get_module_loc_conf(r, ngx_http_mp4_module); + if (mlcf->mp4_enhance == 1) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, + "examine mp4 filename: \"%V\"", &path); + + if (ngx_http_enable_fast_start(&path, of.fd, r) != NGX_OK) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + } + r->root_tested = !r->error_page; r->allow_ranges = 1; @@ -3495,6 +3517,7 @@ ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, 512 * 1024); ngx_conf_merge_size_value(conf->max_buffer_size, prev->max_buffer_size, 10 * 1024 * 1024); + ngx_conf_merge_off_value(conf->mp4_enhance, prev->mp4_enhance, 0); return NGX_CONF_OK; } From pluknet at nginx.com Thu Feb 5 10:17:54 2015 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 5 Feb 2015 13:17:54 +0300 Subject: [PATCH] http_core: Do not match a file for a directory in try_files In-Reply-To: References: <1B0532BF-5827-41FF-A1DE-96455E1DFD69@nginx.com> Message-ID: <596CEE66-D534-4EAD-9875-2AFB5163DFA3@nginx.com> On Feb 4, 2015, at 6:35 PM, Damien Tournoud wrote: > > Any concern in getting this in? > Hi Damien. It is delayed for internal review. -- Sergey Kandaurov From mdounin at mdounin.ru Thu Feb 5 13:00:28 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 5 Feb 2015 16:00:28 +0300 Subject: [PATCH] Mail: send starttls flag value to auth script In-Reply-To: <54D28A26.60903@phpgangsta.de> References: <51fd90f96449c23af007.1394099969@HPC> <20140306162718.GL34696@mdounin.ru> <877FD2F6-57CD-4C14-9F2B-4C9E909C3488@phpgangsta.de> <53D9AAB0.5060501@phpgangsta.de> <20140801185919.GU1849@mdounin.ru> <53DBF531.2010308@phpgangsta.de> <54D28A26.60903@phpgangsta.de> Message-ID: <20150205130027.GE99511@mdounin.ru> Hello! On Wed, Feb 04, 2015 at 10:07:50PM +0100, Michael Kliewe wrote: > Hi Maxim, > > I would like to remind again this feature patch. It would help a lot to get > this information about transport encryption into the auth script. It does > not hurt the performance, and is a very tiny patch. > > You can rename the header name and values as you like. It would be very nice > if you could please merge it into nginx. I'm planning to look into this patch and other mail SSL improvements once I've done with unbuffered upload feature I'm currently working on. -- Maxim Dounin http://nginx.org/ From martin at lucina.net Thu Feb 5 15:31:22 2015 From: martin at lucina.net (Martin Lucina) Date: Thu, 5 Feb 2015 16:31:22 +0100 Subject: Linux.com article on Unikernels, Rump/Mirage collaboration Message-ID: <20150205153122.GG19901@nodbug.moloch.sk> Hi Anil, I'm working on a Rump Kernels contribution to the "rise of Unikernels" piece Sarah Conway is preparing for Linux.com. As part of that I'd like to highlight potential collaboration between Rump Kernels and Mirage, specifically the Mirage+Synjitsu frontend -> Rump Kernel PHP (or other application) backend scenario. 
I want to make sure I'm not making any false claims - here is my draft answer to Sarah's question on this: ---- > Please expand on the collaboration you'd like to engage in with MirageOS > and Anil (combination of a Mirage Unikernel using Jitsu to handle > just-in-time launching of Rump Kernel powered Unikernels to run existing > "legacy" applications is a unique solution which I think is quite valuable. > Why do you think this will be valuable? What type of applications will this > benefit the most? Jitsu is unique in enabling deployment of Unikernels on a just-in-time basis, eg. in response to a HTTP request. However, you still need to write your applications from the ground up in Ocaml. Combined with Rump Kernel powered Unikernels that requirement goes away and applications can be developed using any language stack, or existing applications can be retargeted to run on Unikernels. I think this technology could be a game-changer in two areas. Firstly, enabling new cloud services to emerge where the customer would be billed on the basis of how much CPU time they actually consumed to serve requests, rather than how much time a virtual machine spent running while not necessarily doing anything. Secondly, once support for virtualisation on ARM trickles down to consumer devices such as routers and smartphones this would enable safe deployment and power-efficient operation of microservices on these devices. Think of AWS Lambda, but running any application stack, anywhere. ---- Both Justin and Antti have pointed out to me that (as far as they know) you do have Ocaml <-> C interop working, which would mean that my claim of Mirage requiring applications to be written in Ocaml from the ground up is false. Is this the case? Can you use existing C libraries as part of your Ocaml stacks? Martin From martin at lucina.net Thu Feb 5 15:48:45 2015 From: martin at lucina.net (Martin Lucina) Date: Thu, 5 Feb 2015 16:48:45 +0100 Subject: Nginx on Unikernels; implications of running with "master_process off" Message-ID: <20150205154845.GI19901@nodbug.moloch.sk> [Apologies for my previous completely unrelated message to this list, this message originally got rejected by the list despite my being subscribed and I must have pressed the 'b' key on the wrong message in my attempt to re-send.] Hi, With the help of Samuel Martin's patches for cross-compiling [1], I have managed to get Nginx running on Rump Kernel powered Unikernels on the Xen hypervisor [2] [3]. Given that we don't have fork() or exec() I have to run Nginx with "master_process off". The documentation states it should NEVER be be run in this mode in production, however basic load tests (ab, siege) appear to run fine. Are there any non-obvious implications to running with "master_process off"? Reading through the different ngx_..._process_cycle() functions nothing obvious jumps out at me. Thanks, Martin [1] http://git.buildroot.net/buildroot/tree/package/nginx [2] http://repo.rumpkernel.org/rumprun-xen [3] https://github.com/mato/rump-php From piotr at cloudflare.com Fri Feb 6 00:42:26 2015 From: piotr at cloudflare.com (Piotr Sikora) Date: Thu, 5 Feb 2015 16:42:26 -0800 Subject: [PATCH] Upstream: add use_temp_path=tmp to proxy_cache_path and friends In-Reply-To: <20150202193723.GA634@Romans-MacBook-Air.local> References: <6F10FE84-DBBC-4B92-85A3-E7B907093174@nginx.com> <20150112174848.GN47350@mdounin.ru> <20150202193723.GA634@Romans-MacBook-Air.local> Message-ID: Hey Roman, > We finally came back to the idea you suggested in this thread. 
> Now use_temp_path option creates a separte temp hierarchy in the > cache directory. This solution looks simpler than prefix-based > temporary files. Great! The symlinking idea sounded very error-prone. Best regards, Piotr Sikora From pluknet at nginx.com Fri Feb 6 10:01:24 2015 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 06 Feb 2015 10:01:24 +0000 Subject: [nginx] Fixed try_files directory test to match only a directory. Message-ID: details: http://hg.nginx.org/nginx/rev/ebdb2023e84a branches: changeset: 5971:ebdb2023e84a user: Damien Tournoud date: Wed Jan 21 00:26:32 2015 +0100 description: Fixed try_files directory test to match only a directory. Historically, it was possible to match either a file or directory in the following configuration: location / { try_files $uri/ =404; } diffstat: src/http/ngx_http_core_module.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 74edc0ccf27a -r ebdb2023e84a src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c Wed Feb 04 16:22:43 2015 +0300 +++ b/src/http/ngx_http_core_module.c Wed Jan 21 00:26:32 2015 +0100 @@ -1353,7 +1353,7 @@ ngx_http_core_try_files_phase(ngx_http_r continue; } - if (of.is_dir && !test_dir) { + if (of.is_dir != test_dir) { continue; } From pluknet at nginx.com Fri Feb 6 13:51:33 2015 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 6 Feb 2015 16:51:33 +0300 Subject: [PATCH] http_core: Do not match a file for a directory in try_files In-Reply-To: References: <1B0532BF-5827-41FF-A1DE-96455E1DFD69@nginx.com> Message-ID: On Feb 4, 2015, at 6:35 PM, Damien Tournoud wrote: > On Wed, Jan 21, 2015 at 5:21 PM, Sergey Kandaurov wrote: > The patch looks good to me. > > Bump? > > Any concern in getting this in? > Committed with cosmetic changes, thank you. http://hg.nginx.org/nginx/rev/ebdb2023e84a http://hg.nginx.org/nginx-tests/rev/64eabe6aa1f2 -- Sergey Kandaurov From dakota at brokenpipe.ru Sun Feb 8 04:17:45 2015 From: dakota at brokenpipe.ru (Marat Dakota) Date: Sun, 8 Feb 2015 08:17:45 +0400 Subject: Custom file descriptor handler Message-ID: Hi, I have a file descriptor (it is created by other software, not by nginx). How to add it to nginx event loop watcher properly? All I need is to have my callback called when my file descriptor has a new data to read (I'll read the data by myself). Browsing the code shows functions like ngx_add_conn and ngx_add_event, but it seems like a non-trivial thing to prepare the proper input arguments. Thanks! -- Marat -------------- next part -------------- An HTML attachment was scrubbed... URL: From tigran.bayburtsyan at gmail.com Sun Feb 8 19:59:29 2015 From: tigran.bayburtsyan at gmail.com (Tigran Bayburtsyan) Date: Sun, 8 Feb 2015 23:59:29 +0400 Subject: Nginx Based Socket application Message-ID: Hi. I'm trying to find out, how to write module for Nginx which would be able to access directly to connection socket and transfer non HTTP data. I know that Nginx is written for specially HTTP(S) requests, but I'm thinking of use it as a proxy server for my custom binary protocol. Where can I find out some documentation or examples about it ? Thanks. -------------- next part -------------- An HTML attachment was scrubbed... URL: From dakota at brokenpipe.ru Mon Feb 9 01:18:58 2015 From: dakota at brokenpipe.ru (Marat Dakota) Date: Mon, 9 Feb 2015 05:18:58 +0400 Subject: Custom file descriptor handler In-Reply-To: References: Message-ID: Here's what I've come up with: // In upper scope. 
ngx_connection_t myConn; ngx_event_t myConnRev; static ngx_int_t my_module_init(ngx_cycle_t *cycle) { int myFd; ... // Initialize myFd. memset(&myConn, 0, sizeof(myConn)); memset(&myConnRev, 0, sizeof(myConnRev)); myConn.number = ngx_atomic_fetch_add(ngx_connection_counter, 1); myConn.fd = myFd; myConn.pool = cycle->pool; myConn.log = cycle->log; myConn.read = &myConnRev; myConnRev.data = &myConn; myConnRev.ready = 1; myConnRev.active = 1; myConnRev.instance = 1; myConnRev.handler = myReadCallback; myConnRev.log = cycle->log; ngx_add_event(&myConnRev, NGX_READ_EVENT, 0); } It looks like working on OSX, I had no chance to test it on other platforms, but there are a couple of questions. Is it enough, should it be stable? And I also have no actual idea what `ready`, `active` and `instance` flags actually mean. Do I need to `ngx_atomic_fetch_add(ngx_connection_counter, 1);` my connection number? Is it ok to leave write event field blank (I will only read from the descriptor)? Do I need to fill some other fields? The idea is that the file descriptor is initialized once the module is initialized and lives for as long as worker process does. Thanks! -- Marat On Sun, Feb 8, 2015 at 7:17 AM, Marat Dakota wrote: > Hi, > > I have a file descriptor (it is created by other software, not by nginx). > How to add it to nginx event loop watcher properly? All I need is to have > my callback called when my file descriptor has a new data to read (I'll > read the data by myself). > > Browsing the code shows functions like ngx_add_conn and ngx_add_event, but > it seems like a non-trivial thing to prepare the proper input arguments. > > Thanks! > > -- > Marat > -------------- next part -------------- An HTML attachment was scrubbed... URL: From jefftk at google.com Mon Feb 9 14:33:42 2015 From: jefftk at google.com (Jeff Kaufman) Date: Mon, 9 Feb 2015 09:33:42 -0500 Subject: Nginx Based Socket application In-Reply-To: References: Message-ID: Looking at the code of ngx_http_spdy_module might be helpful. On Sun, Feb 8, 2015 at 2:59 PM, Tigran Bayburtsyan wrote: > Hi. > I'm trying to find out, how to write module for Nginx which would be able to > access directly to connection socket and transfer non HTTP data. > I know that Nginx is written for specially HTTP(S) requests, but I'm > thinking of use it as a proxy server for my custom binary protocol. > > Where can I find out some documentation or examples about it ? > > Thanks. > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From vbart at nginx.com Mon Feb 9 14:43:10 2015 From: vbart at nginx.com (Valentin V. Bartenev) Date: Mon, 09 Feb 2015 17:43:10 +0300 Subject: Nginx Based Socket application In-Reply-To: References: Message-ID: <6980727.ObyPfdZa49@vbart-workstation> On Sunday 08 February 2015 23:59:29 Tigran Bayburtsyan wrote: > Hi. > I'm trying to find out, how to write module for Nginx which would be able > to access directly to connection socket and transfer non HTTP data. > I know that Nginx is written for specially HTTP(S) requests, No. See: http://nginx.org/en/docs/mail/ngx_mail_core_module.html http://nginx.org/en/docs/stream/ngx_stream_core_module.html https://github.com/arut/nginx-rtmp-module Also note that "http" in nginx is just a module too. > but I'm thinking of use it as a proxy server for my custom binary protocol. > > Where can I find out some documentation or examples about it ? 
The source code of mail, http and rtmp modules can be a good example. wbr, Valentin V. Bartenev From g.fischer at ah-consulting.net Mon Feb 9 21:30:55 2015 From: g.fischer at ah-consulting.net (Goetz T. Fischer) Date: Mon, 09 Feb 2015 22:30:55 +0100 Subject: NGX_EACCESS missing Message-ID: <54D9270F.BB34041@ah-consulting.net> hi, seems like nobody has built it on tru64 for a while :-) in ngx_shmtx.c line 261 (v1.7.9) is a tru64 specific part. however, NGX_EACCESS isn't defined anywhere. i assume it has existed in the past but has been forgotten now. happens with both 1.6.2 and 1.7.9. greets From pluknet at nginx.com Mon Feb 9 22:33:35 2015 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Feb 2015 01:33:35 +0300 Subject: NGX_EACCESS missing In-Reply-To: <54D9270F.BB34041@ah-consulting.net> References: <54D9270F.BB34041@ah-consulting.net> Message-ID: <54D935BF.8090700@nginx.com> On 10.02.2015 00:30, Goetz T. Fischer wrote: > hi, > > seems like nobody has built it on tru64 for a while :-) > in ngx_shmtx.c line 261 (v1.7.9) is a tru64 specific part. however, > NGX_EACCESS isn't defined anywhere. i assume it has existed in the past > but has been forgotten now. happens with both 1.6.2 and 1.7.9. Hello, it is likely a typo and should be NGX_EACCES (note one S). Thanks for reporting! From g.fischer at ah-consulting.net Mon Feb 9 23:40:11 2015 From: g.fischer at ah-consulting.net (Goetz T. Fischer) Date: Tue, 10 Feb 2015 00:40:11 +0100 Subject: NGX_EACCESS missing References: <54D9270F.BB34041@ah-consulting.net> <54D935BF.8090700@nginx.com> Message-ID: <54D9455B.BB46DE77@ah-consulting.net> thanks for the quick clarification! btw the configure script finds nothing. adding some output to the scripts in auto/ uncovered that neither CPPFLAGS nor LDFLAGS are used and there's no option to specify the location of existing libs such as pcre or ssl. the only option left is to specify the location of the source(!) of external libs which is not practical at all. i'd suggest including the mentioned flags or alternatively add options for providing the location of existing libs. Sergey Kandaurov wrote: > > On 10.02.2015 00:30, Goetz T. Fischer wrote: > > hi, > > > > seems like nobody has built it on tru64 for a while :-) > > in ngx_shmtx.c line 261 (v1.7.9) is a tru64 specific part. however, > > NGX_EACCESS isn't defined anywhere. i assume it has existed in the past > > but has been forgotten now. happens with both 1.6.2 and 1.7.9. > > Hello, > > it is likely a typo and should be NGX_EACCES (note one S). > Thanks for reporting! From dani at telecom.pt Tue Feb 10 11:54:57 2015 From: dani at telecom.pt (Dani Bento) Date: Tue, 10 Feb 2015 11:54:57 +0000 Subject: nginx report a timestamp on upstream_response_time Message-ID: <20150210115457.07a711ca@alma> Hello, We are using nginx 1.6.2 and we found in our logs a strange behavior when connecting to an upstream. We found that in ngx_http_upstream.c:1213 we have: u->state->response_sec = tp->sec; u->state->response_msec = tp->msec; which gave to the last state the current ngx time (ngx_timeofday()). For some reason, the ngx_http_upstream_finalize_request(), where those values are updated to the correct value, doesn't run (we observe that most of the times, but not all the times, it happens after a 302 given by then upstream). This is a normal behavior? If yes, have we any way to avoid those pikes in the logs? 
Dani -- Dani Bento Dire??o de Internet e Tecnologia DTS/DVS tlm: +351 91 429 72 81 dani at telecom.pt From mdounin at mdounin.ru Tue Feb 10 13:02:48 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Feb 2015 16:02:48 +0300 Subject: NGX_EACCESS missing In-Reply-To: <54D9455B.BB46DE77@ah-consulting.net> References: <54D9270F.BB34041@ah-consulting.net> <54D935BF.8090700@nginx.com> <54D9455B.BB46DE77@ah-consulting.net> Message-ID: <20150210130248.GA19012@mdounin.ru> Hello! On Tue, Feb 10, 2015 at 12:40:11AM +0100, Goetz T. Fischer wrote: > thanks for the quick clarification! > > btw the configure script finds nothing. adding some output to the > scripts > in auto/ uncovered that neither CPPFLAGS nor LDFLAGS are used and > there's > no option to specify the location of existing libs such as pcre or ssl. > the only option left is to specify the location of the source(!) of > external libs which is not practical at all. > i'd suggest including the mentioned flags or alternatively add options > for providing the location of existing libs. There are --with-cc-opt and --with-ld-opt configure parameters to set arbitrary cc and ld options, see http://nginx.org/en/docs/configure.html. -- Maxim Dounin http://nginx.org/ From pluknet at nginx.com Tue Feb 10 13:24:06 2015 From: pluknet at nginx.com (Sergey Kandaurov) Date: Tue, 10 Feb 2015 13:24:06 +0000 Subject: [nginx] Core: fixed build on Tru64 UNIX. Message-ID: details: http://hg.nginx.org/nginx/rev/860a1c37f3b2 branches: changeset: 5972:860a1c37f3b2 user: Sergey Kandaurov date: Tue Feb 10 01:51:08 2015 +0300 description: Core: fixed build on Tru64 UNIX. There was a typo in NGX_EACCES. Reported by Goetz T. Fischer. diffstat: src/core/ngx_shmtx.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r ebdb2023e84a -r 860a1c37f3b2 src/core/ngx_shmtx.c --- a/src/core/ngx_shmtx.c Wed Jan 21 00:26:32 2015 +0100 +++ b/src/core/ngx_shmtx.c Tue Feb 10 01:51:08 2015 +0300 @@ -259,7 +259,7 @@ ngx_shmtx_trylock(ngx_shmtx_t *mtx) #if __osf__ /* Tru64 UNIX */ - if (err == NGX_EACCESS) { + if (err == NGX_EACCES) { return 0; } From mdounin at mdounin.ru Tue Feb 10 13:58:30 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Feb 2015 13:58:30 +0000 Subject: [nginx] Updated OpenSSL used for win32 builds. Message-ID: details: http://hg.nginx.org/nginx/rev/8c8a1fa351be branches: changeset: 5973:8c8a1fa351be user: Maxim Dounin date: Tue Feb 10 16:54:12 2015 +0300 description: Updated OpenSSL used for win32 builds. 
diffstat: misc/GNUmakefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff --git a/misc/GNUmakefile b/misc/GNUmakefile --- a/misc/GNUmakefile +++ b/misc/GNUmakefile @@ -5,7 +5,7 @@ NGINX = nginx-$(VER) TEMP = tmp OBJS = objs.msvc8 -OPENSSL = openssl-1.0.1j +OPENSSL = openssl-1.0.1l ZLIB = zlib-1.2.8 PCRE = pcre-8.35 From mdounin at mdounin.ru Tue Feb 10 14:46:39 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Feb 2015 14:46:39 +0000 Subject: [nginx] nginx-1.7.10-RELEASE Message-ID: details: http://hg.nginx.org/nginx/rev/860cfbcc4606 branches: changeset: 5974:860cfbcc4606 user: Maxim Dounin date: Tue Feb 10 17:33:32 2015 +0300 description: nginx-1.7.10-RELEASE diffstat: docs/xml/nginx/changes.xml | 92 ++++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 92 insertions(+), 0 deletions(-) diffs (102 lines): diff --git a/docs/xml/nginx/changes.xml b/docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml +++ b/docs/xml/nginx/changes.xml @@ -5,6 +5,98 @@ + + + + +???????? use_temp_path ???????? proxy_cache_path, fastcgi_cache_path, +scgi_cache_path ? uwsgi_cache_path. + + +the "use_temp_path" parameter of the "proxy_cache_path", "fastcgi_cache_path", +"scgi_cache_path", and "uwsgi_cache_path" directives. + + + + + +?????????? $upstream_header_time. + + +the $upstream_header_time variable. + + + + + +?????? ??? ???????????? ????? nginx ???????? ?????? error_log'? ?????? +??? ? ???????. + + +now on disk overflow nginx tries to write error logs once a second only. + + + + + +????????? try_files ??? ???????????? ????????? +?? ???????????? ??????? ?????.
+
+the "try_files" directive did not ignore normal files
+while testing directories.
+Thanks to Damien Tournoud.
+
+
+alerts "sendfile() failed"
+if the "sendfile" directive was used on OS X;
+the bug had appeared in 1.7.8.
+
+
+alerts "sem_post() failed" might appear in logs.
+
+
+nginx could not be built with musl libc.
+Thanks to James Taylor.
+
+
+nginx could not be built on Tru64 UNIX.
+Thanks to Goetz T. Fischer.
+
+ + From mdounin at mdounin.ru Tue Feb 10 14:46:43 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Feb 2015 14:46:43 +0000 Subject: [nginx] release-1.7.10 tag Message-ID: details: http://hg.nginx.org/nginx/rev/05bc5d5d1c5d branches: changeset: 5975:05bc5d5d1c5d user: Maxim Dounin date: Tue Feb 10 17:33:32 2015 +0300 description: release-1.7.10 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -379,3 +379,4 @@ a8d111bb68847f61d682a3c8792fecb2e52efa2c 6d2fbc30f8a7f70136cf08f32d5ff3179d524873 release-1.7.7 d5ea659b8bab2d6402a2266efa691f705e84001e release-1.7.8 34b201c1abd1e2d4faeae4650a21574771a03c0e release-1.7.9 +860cfbcc4606ee36d898a9cd0c5ae8858db984d6 release-1.7.10 From mdounin at mdounin.ru Tue Feb 10 15:25:39 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Feb 2015 18:25:39 +0300 Subject: nginx report a timestamp on upstream_response_time In-Reply-To: <20150210115457.07a711ca@alma> References: <20150210115457.07a711ca@alma> Message-ID: <20150210152539.GJ19012@mdounin.ru> Hello! On Tue, Feb 10, 2015 at 11:54:57AM +0000, Dani Bento wrote: > Hello, > > We are using nginx 1.6.2 and we found in our logs a strange behavior > when connecting to an upstream. > > We found that in ngx_http_upstream.c:1213 we have: > > u->state->response_sec = tp->sec; > u->state->response_msec = tp->msec; > > which gave to the last state the current ngx time (ngx_timeofday()). > > For some reason, the ngx_http_upstream_finalize_request(), where those > values are updated to the correct value, doesn't run (we observe that > most of the times, but not all the times, it happens after a 302 given > by then upstream). > > This is a normal behavior? If yes, have we any way to avoid those pikes > in the logs? No, this is not normal, ngx_http_upstream_finalize_request() is expected to be called before request logging. If this doesn't happen in your case, it would be good to trace the problem. Just in case, some debugging hints can be found at http://wiki.nginx.org/Debugging. -- Maxim Dounin http://nginx.org/ From dani at telecom.pt Tue Feb 10 16:08:06 2015 From: dani at telecom.pt (Dani Bento) Date: Tue, 10 Feb 2015 16:08:06 +0000 Subject: nginx report a timestamp on upstream_response_time In-Reply-To: <20150210152539.GJ19012@mdounin.ru> References: <20150210115457.07a711ca@alma> <20150210152539.GJ19012@mdounin.ru> Message-ID: <20150210160806.7bc2069d@alma> Hello, We are using LUA header filter to process some headers and decide if the request should continue or stop (or redirect), using ngx.exit(). Can it be a failure on ngx.exit LUA call? We will try to trace the process to be sure where the problem resides. Dani On Tue, 10 Feb 2015 15:25:39 +0000 Maxim Dounin wrote: > Hello! > > On Tue, Feb 10, 2015 at 11:54:57AM +0000, Dani Bento wrote: > > > Hello, > > > > We are using nginx 1.6.2 and we found in our logs a strange behavior > > when connecting to an upstream. > > > > We found that in ngx_http_upstream.c:1213 we have: > > > > u->state->response_sec = tp->sec; > > u->state->response_msec = tp->msec; > > > > which gave to the last state the current ngx time (ngx_timeofday()). > > > > For some reason, the ngx_http_upstream_finalize_request(), where > > those values are updated to the correct value, doesn't run (we > > observe that most of the times, but not all the times, it happens > > after a 302 given by then upstream). > > > > This is a normal behavior? 
If yes, have we any way to avoid those > > pikes in the logs? > > No, this is not normal, ngx_http_upstream_finalize_request() is > expected to be called before request logging. If this doesn't > happen in your case, it would be good to trace the problem. > > Just in case, some debugging hints can be found at > http://wiki.nginx.org/Debugging. > -- Dani Bento Dire??o de Internet e Tecnologia DTS/DVS tlm: +351 91 429 72 81 dani at telecom.pt From dani at telecom.pt Tue Feb 10 17:51:46 2015 From: dani at telecom.pt (Dani Bento) Date: Tue, 10 Feb 2015 17:51:46 +0000 Subject: nginx report a timestamp on upstream_response_time In-Reply-To: <20150210160806.7bc2069d@alma> References: <20150210115457.07a711ca@alma> <20150210152539.GJ19012@mdounin.ru> <20150210160806.7bc2069d@alma> Message-ID: <20150210175146.67167f50@alma> I was searching in the nginx tree for response_sec/msec fields and the only place where I found them was in src/http/ngx_http_upstream.c This fields are updated at ngx_http_upstream.c 1189 ngx_http_upstream_connect(...) 1197 if (u->state && u->state->response_sec) { 1198 tp = ngx_timeofday(); 1199 u->state->response_sec = tp->sec - u->state->response_sec; 1200 u->state->response_msec = tp->msec -u->state->response_msec; 1201 } (...) 3408 ngx_http_upstream_finalize_request(...) 3427 if (u->state && u->state->response_sec) { 3428 tp = ngx_timeofday(); 3429 u->state->response_sec = tp->sec - u->state->response_sec; 3430 u->state->response_msec = tp->msec -u->state->response_msec; 3431 3432 if (u->pipe && u->pipe->read_length) { 3433 u->state->response_length = u->pipe->read_length; 3434 } 3435 } (...) I assume that in both cases, we have to have u->state not NULL and u->state->response_sec not NULL or 0. Searching a little more I found that for ngx_http_upstream_connect we have this: 1203 u->state = ngx_array_push(r->upstream_states); 1204 if (u->state == NULL) { 1205 ngx_http_upstream_finalize_request(r, u, 1206 NGX_HTTP_INTERNAL_SERVER_ERROR); 1207 return; 1208 } 1209 1210 ngx_memzero(u->state, sizeof(ngx_http_upstream_state_t)); 1211 1212 tp = ngx_timeofday(); 1213 u->state->response_sec = tp->sec; 1214 u->state->response_msec = tp->msec; But in ngx_http_upstream_init_request we have: 559 } else { 560 561 u->state = ngx_array_push(r->upstream_states); 562 if (u->state == NULL) { 563 ngx_http_upstream_finalize_request(r, u, 564 NGX_HTTP_INTERNAL_SERVER_ERROR); 565 return; 566 } 567 568 ngx_memzero(u->state, sizeof(ngx_http_upstream_state_t)); 569 } The u->state->response_sec and u->state->response_mset are initialized with ngx_timeofday() in the first case, but are only zeroed in the second case. If the fields response_sec/msec are only affected if response_sec is not NULL or 0, it's possible that, in this particular condition, the time will be never updated with the correct value in the ngx_http_upstream_finalize_request? Dani On Tue, 10 Feb 2015 16:08:06 +0000 Dani Bento wrote: > Hello, > > We are using LUA header filter to process some headers and decide if > the request should continue or stop (or redirect), using > ngx.exit(). > > Can it be a failure on ngx.exit LUA call? We will try to trace the > process to be sure where the problem resides. > > Dani > > On Tue, 10 Feb 2015 15:25:39 +0000 > Maxim Dounin wrote: > > > Hello! > > > > On Tue, Feb 10, 2015 at 11:54:57AM +0000, Dani Bento wrote: > > > > > Hello, > > > > > > We are using nginx 1.6.2 and we found in our logs a strange > > > behavior when connecting to an upstream. 
> > > > > > We found that in ngx_http_upstream.c:1213 we have: > > > > > > u->state->response_sec = tp->sec; > > > u->state->response_msec = tp->msec; > > > > > > which gave to the last state the current ngx time > > > (ngx_timeofday()). > > > > > > For some reason, the ngx_http_upstream_finalize_request(), where > > > those values are updated to the correct value, doesn't run (we > > > observe that most of the times, but not all the times, it happens > > > after a 302 given by then upstream). > > > > > > This is a normal behavior? If yes, have we any way to avoid those > > > pikes in the logs? > > > > No, this is not normal, ngx_http_upstream_finalize_request() is > > expected to be called before request logging. If this doesn't > > happen in your case, it would be good to trace the problem. > > > > Just in case, some debugging hints can be found at > > http://wiki.nginx.org/Debugging. > > > > > -- Dani Bento Dire??o de Internet e Tecnologia DTS/DVS tlm: +351 91 429 72 81 dani at telecom.pt From mdounin at mdounin.ru Tue Feb 10 18:10:44 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 10 Feb 2015 21:10:44 +0300 Subject: nginx report a timestamp on upstream_response_time In-Reply-To: <20150210175146.67167f50@alma> References: <20150210115457.07a711ca@alma> <20150210152539.GJ19012@mdounin.ru> <20150210160806.7bc2069d@alma> <20150210175146.67167f50@alma> Message-ID: <20150210181044.GL19012@mdounin.ru> Hello! On Tue, Feb 10, 2015 at 05:51:46PM +0000, Dani Bento wrote: [...] > But in ngx_http_upstream_init_request we have: > > 559 } else { > 560 > 561 u->state = ngx_array_push(r->upstream_states); > 562 if (u->state == NULL) { > 563 ngx_http_upstream_finalize_request(r, u, > 564 NGX_HTTP_INTERNAL_SERVER_ERROR); > 565 return; > 566 } > 567 > 568 ngx_memzero(u->state, sizeof(ngx_http_upstream_state_t)); > 569 } > > The u->state->response_sec and u->state->response_mset are > initialized with ngx_timeofday() in the first case, but are only > zeroed in the second case. An empty state added in the ngx_http_upstream_init_request() marks switching to another upstream{} group, to be shown as ":" in the corresponding variables. See ngx_http_upstream_response_time_variable() for details. -- Maxim Dounin http://nginx.org/ From g.fischer at ah-consulting.net Tue Feb 10 20:08:13 2015 From: g.fischer at ah-consulting.net (Goetz T. Fischer) Date: Tue, 10 Feb 2015 21:08:13 +0100 Subject: NGX_EACCESS missing References: <54D9270F.BB34041@ah-consulting.net> <54D935BF.8090700@nginx.com> <54D9455B.BB46DE77@ah-consulting.net> <20150210130248.GA19012@mdounin.ru> Message-ID: <54DA652D.AA7F4700@ah-consulting.net> fair enough. as for me it'd make sense to support CPPFLAGS and LDFLAGS but as long as there's one way to get it done it's fine. and thanks for the quick fix. cheers Maxim Dounin wrote: > > Hello! > > On Tue, Feb 10, 2015 at 12:40:11AM +0100, Goetz T. Fischer wrote: > > > thanks for the quick clarification! > > > > btw the configure script finds nothing. adding some output to the > > scripts > > in auto/ uncovered that neither CPPFLAGS nor LDFLAGS are used and > > there's > > no option to specify the location of existing libs such as pcre or ssl. > > the only option left is to specify the location of the source(!) of > > external libs which is not practical at all. > > i'd suggest including the mentioned flags or alternatively add options > > for providing the location of existing libs. 
> > There are --with-cc-opt and --with-ld-opt configure parameters to > set arbitrary cc and ld options, see > http://nginx.org/en/docs/configure.html. > > -- > Maxim Dounin > http://nginx.org/ > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From ru at nginx.com Wed Feb 11 12:51:39 2015 From: ru at nginx.com (Ruslan Ermilov) Date: Wed, 11 Feb 2015 12:51:39 +0000 Subject: [nginx] Version bump. Message-ID: details: http://hg.nginx.org/nginx/rev/814583aef808 branches: changeset: 5976:814583aef808 user: Ruslan Ermilov date: Wed Feb 11 15:51:03 2015 +0300 description: Version bump. diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff -r 05bc5d5d1c5d -r 814583aef808 src/core/nginx.h --- a/src/core/nginx.h Tue Feb 10 17:33:32 2015 +0300 +++ b/src/core/nginx.h Wed Feb 11 15:51:03 2015 +0300 @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1007010 -#define NGINX_VERSION "1.7.10" +#define nginx_version 1007011 +#define NGINX_VERSION "1.7.11" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From ru at nginx.com Wed Feb 11 12:51:42 2015 From: ru at nginx.com (Ruslan Ermilov) Date: Wed, 11 Feb 2015 12:51:42 +0000 Subject: [nginx] Upstream: detect port absence in fastcgi_pass with IP li... Message-ID: details: http://hg.nginx.org/nginx/rev/26c127bab5ef branches: changeset: 5977:26c127bab5ef user: Ruslan Ermilov date: Thu Jan 22 16:23:32 2015 +0300 description: Upstream: detect port absence in fastcgi_pass with IP literal. If fastcgi_pass (or any look-alike that doesn't imply a default port) is specified as an IP literal (as opposed to a hostname), port absence was not detected at configuration time and could result in EADDRNOTAVAIL at run time. Fixed this in such a way that configs like http { server { location / { fastcgi_pass 127.0.0.1; } } upstream 127.0.0.1 { server 10.0.0.1:12345; } } still work. That is, port absence check is delayed until after we make sure there's no explicit upstream with such a name. diffstat: src/http/ngx_http_upstream.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 814583aef808 -r 26c127bab5ef src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Wed Feb 11 15:51:03 2015 +0300 +++ b/src/http/ngx_http_upstream.c Thu Jan 22 16:23:32 2015 +0300 @@ -5408,7 +5408,7 @@ ngx_http_upstream_add(ngx_conf_t *cf, ng uscf->default_port = u->default_port; uscf->no_port = u->no_port; - if (u->naddrs == 1) { + if (u->naddrs == 1 && (u->port || u->family == AF_UNIX)) { uscf->servers = ngx_array_create(cf->pool, 1, sizeof(ngx_http_upstream_server_t)); if (uscf->servers == NULL) { From ru at nginx.com Wed Feb 11 12:51:45 2015 From: ru at nginx.com (Ruslan Ermilov) Date: Wed, 11 Feb 2015 12:51:45 +0000 Subject: [nginx] Mail: fixed a comment. Message-ID: details: http://hg.nginx.org/nginx/rev/eb4ba3800c31 branches: changeset: 5978:eb4ba3800c31 user: Ruslan Ermilov date: Fri Jan 23 15:23:27 2015 +0300 description: Mail: fixed a comment. 
diffstat: src/mail/ngx_mail.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 26c127bab5ef -r eb4ba3800c31 src/mail/ngx_mail.c --- a/src/mail/ngx_mail.c Thu Jan 22 16:23:32 2015 +0300 +++ b/src/mail/ngx_mail.c Fri Jan 23 15:23:27 2015 +0300 @@ -98,7 +98,7 @@ ngx_mail_block(ngx_conf_t *cf, ngx_comma *(ngx_mail_conf_ctx_t **) conf = ctx; - /* count the number of the http modules and set up their indices */ + /* count the number of the mail modules and set up their indices */ ngx_mail_max_module = 0; for (m = 0; ngx_modules[m]; m++) { From ru at nginx.com Wed Feb 11 12:51:52 2015 From: ru at nginx.com (Ruslan Ermilov) Date: Wed, 11 Feb 2015 12:51:52 +0000 Subject: [nginx] Mail: fixed the duplicate listen address detection. Message-ID: details: http://hg.nginx.org/nginx/rev/b2920b517490 branches: changeset: 5979:b2920b517490 user: Ruslan Ermilov date: Fri Jan 23 15:23:29 2015 +0300 description: Mail: fixed the duplicate listen address detection. diffstat: src/mail/ngx_mail_core_module.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (21 lines): diff -r eb4ba3800c31 -r b2920b517490 src/mail/ngx_mail_core_module.c --- a/src/mail/ngx_mail_core_module.c Fri Jan 23 15:23:27 2015 +0300 +++ b/src/mail/ngx_mail_core_module.c Fri Jan 23 15:23:29 2015 +0300 @@ -336,7 +336,7 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx off = offsetof(struct sockaddr_in6, sin6_addr); len = 16; sin6 = (struct sockaddr_in6 *) sa; - port = sin6->sin6_port; + port = ntohs(sin6->sin6_port); break; #endif @@ -352,7 +352,7 @@ ngx_mail_core_listen(ngx_conf_t *cf, ngx off = offsetof(struct sockaddr_in, sin_addr); len = 4; sin = (struct sockaddr_in *) sa; - port = sin->sin_port; + port = ntohs(sin->sin_port); break; } From dani at telecom.pt Wed Feb 11 15:20:59 2015 From: dani at telecom.pt (Dani Bento) Date: Wed, 11 Feb 2015 15:20:59 +0000 Subject: nginx report a timestamp on upstream_response_time In-Reply-To: <20150210181044.GL19012@mdounin.ru> References: <20150210115457.07a711ca@alma> <20150210152539.GJ19012@mdounin.ru> <20150210160806.7bc2069d@alma> <20150210175146.67167f50@alma> <20150210181044.GL19012@mdounin.ru> Message-ID: <20150211152059.39b7d0ee@alma> Hello, I was looking in ngx_http_upstream_response_time_variable. I understand that if the state[i].status is not 0, the state[i].response_sec is used. It is assumed that if a state doesn't have a peer it prints a ":" before the next state (like the upstream_status_variable) Our problem is that in the log we have: [200 : 302] (for the status codes) [0.02 : 1423667767.600] (for the upstream_response_time) Dani On Tue, 10 Feb 2015 18:10:44 +0000 Maxim Dounin wrote: > Hello! > > On Tue, Feb 10, 2015 at 05:51:46PM +0000, Dani Bento wrote: > > [...] > > > But in ngx_http_upstream_init_request we have: > > > > 559 } else { > > 560 > > 561 u->state = ngx_array_push(r->upstream_states); > > 562 if (u->state == NULL) { > > 563 ngx_http_upstream_finalize_request(r, u, > > 564 > > NGX_HTTP_INTERNAL_SERVER_ERROR); 565 return; > > 566 } > > 567 > > 568 ngx_memzero(u->state, > > sizeof(ngx_http_upstream_state_t)); 569 } > > > > The u->state->response_sec and u->state->response_mset are > > initialized with ngx_timeofday() in the first case, but are only > > zeroed in the second case. > > An empty state added in the ngx_http_upstream_init_request() marks > switching to another upstream{} group, to be shown as ":" in the > corresponding variables. > > See ngx_http_upstream_response_time_variable() for details. 
> -- Dani Bento Dire??o de Internet e Tecnologia DTS/DVS tlm: +351 91 429 72 81 dani at telecom.pt From mdounin at mdounin.ru Wed Feb 11 15:26:59 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 11 Feb 2015 18:26:59 +0300 Subject: nginx report a timestamp on upstream_response_time In-Reply-To: <20150211152059.39b7d0ee@alma> References: <20150210115457.07a711ca@alma> <20150210152539.GJ19012@mdounin.ru> <20150210160806.7bc2069d@alma> <20150210175146.67167f50@alma> <20150210181044.GL19012@mdounin.ru> <20150211152059.39b7d0ee@alma> Message-ID: <20150211152659.GO19012@mdounin.ru> Hello! On Wed, Feb 11, 2015 at 03:20:59PM +0000, Dani Bento wrote: > Hello, > > I was looking in ngx_http_upstream_response_time_variable. > > I understand that if the state[i].status is not 0, the > state[i].response_sec is used. > > It is assumed that if a state doesn't have a peer it prints a ":" before the next state (like the upstream_status_variable) > > Our problem is that in the log we have: > > [200 : 302] (for the status codes) > > [0.02 : 1423667767.600] (for the upstream_response_time) So the question remains: how this happens. (Note though, that this can easily happen if you'll try to use the $upstream_response_time variable before the request is finalized, e.g., in add_header or in 3rd party modules. But it shouldn't happen during logging, as the request is expected to be finalized at this time.) -- Maxim Dounin http://nginx.org/ From vbart at nginx.com Wed Feb 11 15:42:57 2015 From: vbart at nginx.com (Valentin Bartenev) Date: Wed, 11 Feb 2015 15:42:57 +0000 Subject: [nginx] Refactored sendfile() AIO preload. Message-ID: details: http://hg.nginx.org/nginx/rev/ccad84a174e0 branches: changeset: 5980:ccad84a174e0 user: Valentin Bartenev date: Wed Feb 11 17:52:15 2015 +0300 description: Refactored sendfile() AIO preload. This reduces layering violation and simplifies the logic of AIO preread, since it's now triggered by the send chain function itself without falling back to the copy filter. The context of AIO operation is now stored per file buffer, which makes it possible to properly handle cases when multiple buffers come from different locations, each with its own configuration. 
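For orientation, a minimal sketch of the filter side of the new interface, modelled on the copy filter changes in the diff below; the names example_sendfile_preload() and example_sendfile_resume() are illustrative, everything else comes from the changeset:

#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>

#if (NGX_HAVE_AIO_SENDFILE)

static void example_sendfile_resume(ngx_event_t *ev);

/* registered by the filter as ctx->aio_preload; attached to the buffer's
 * file by ngx_output_chain_aio_setup() and invoked by the send chain
 * before a file buffer is passed to sendfile() with SF_NODISKIO */
static ssize_t
example_sendfile_preload(ngx_buf_t *file)
{
    ssize_t              n;
    static u_char        buf[1];
    ngx_event_aio_t     *aio;
    ngx_http_request_t  *r;

    /* post a one-byte AIO read at the buffer's offset to bring the
     * data into the page cache */
    n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL);

    if (n == NGX_AGAIN) {
        aio = file->file->aio;
        aio->handler = example_sendfile_resume;

        /* aio->data is ctx->filter_ctx, i.e. the request here */
        r = aio->data;
        r->main->blocked++;
        r->aio = 1;
    }

    return n;
}

static void
example_sendfile_resume(ngx_event_t *ev)
{
    ngx_event_aio_t     *aio;
    ngx_http_request_t  *r;

    aio = ev->data;
    r = aio->data;

    r->main->blocked--;
    r->aio = 0;

    /* re-run the request's write handler to continue sending */
    r->connection->write->handler(r->connection->write);
}

#endif

Registration is a single assignment in the filter's init path, e.g. ctx->aio_preload = example_sendfile_preload; when clcf->aio == NGX_HTTP_AIO_SENDFILE, as the copy filter hunk below shows.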
diffstat: src/core/ngx_buf.h | 3 + src/core/ngx_connection.h | 2 - src/core/ngx_output_chain.c | 32 ++++++++++ src/event/ngx_event.h | 8 +- src/http/ngx_http_copy_filter_module.c | 99 +++++++++++-------------------- src/http/ngx_http_request.h | 3 - src/os/unix/ngx_file_aio_read.c | 42 ++++++++----- src/os/unix/ngx_files.h | 1 + src/os/unix/ngx_freebsd_sendfile_chain.c | 77 +++++++++++++++++++----- src/os/unix/ngx_linux_aio_read.c | 39 ++++++++---- 10 files changed, 187 insertions(+), 119 deletions(-) diffs (truncated from 508 to 300 lines): diff -r b2920b517490 -r ccad84a174e0 src/core/ngx_buf.h --- a/src/core/ngx_buf.h Fri Jan 23 15:23:29 2015 +0300 +++ b/src/core/ngx_buf.h Wed Feb 11 17:52:15 2015 +0300 @@ -94,6 +94,9 @@ struct ngx_output_chain_ctx_s { unsigned aio:1; ngx_output_chain_aio_pt aio_handler; +#if (NGX_HAVE_FILE_AIO) + ssize_t (*aio_preload)(ngx_buf_t *file); +#endif #endif off_t alignment; diff -r b2920b517490 -r ccad84a174e0 src/core/ngx_connection.h --- a/src/core/ngx_connection.h Fri Jan 23 15:23:29 2015 +0300 +++ b/src/core/ngx_connection.h Wed Feb 11 17:52:15 2015 +0300 @@ -181,9 +181,7 @@ struct ngx_connection_s { #endif #if (NGX_HAVE_AIO_SENDFILE) - unsigned aio_sendfile:1; unsigned busy_count:2; - ngx_buf_t *busy_sendfile; #endif #if (NGX_THREADS) diff -r b2920b517490 -r ccad84a174e0 src/core/ngx_output_chain.c --- a/src/core/ngx_output_chain.c Fri Jan 23 15:23:29 2015 +0300 +++ b/src/core/ngx_output_chain.c Wed Feb 11 17:52:15 2015 +0300 @@ -29,6 +29,10 @@ static ngx_inline ngx_int_t ngx_output_chain_as_is(ngx_output_chain_ctx_t *ctx, ngx_buf_t *buf); +#if (NGX_HAVE_AIO_SENDFILE) +static ngx_int_t ngx_output_chain_aio_setup(ngx_output_chain_ctx_t *ctx, + ngx_file_t *file); +#endif static ngx_int_t ngx_output_chain_add_copy(ngx_pool_t *pool, ngx_chain_t **chain, ngx_chain_t *in); static ngx_int_t ngx_output_chain_align_file_buf(ngx_output_chain_ctx_t *ctx, @@ -252,6 +256,12 @@ ngx_output_chain_as_is(ngx_output_chain_ buf->in_file = 0; } +#if (NGX_HAVE_AIO_SENDFILE) + if (ctx->aio_preload && buf->in_file) { + (void) ngx_output_chain_aio_setup(ctx, buf->file); + } +#endif + if (ctx->need_in_memory && !ngx_buf_in_memory(buf)) { return 0; } @@ -264,6 +274,28 @@ ngx_output_chain_as_is(ngx_output_chain_ } +#if (NGX_HAVE_AIO_SENDFILE) + +static ngx_int_t +ngx_output_chain_aio_setup(ngx_output_chain_ctx_t *ctx, ngx_file_t *file) +{ + ngx_event_aio_t *aio; + + if (file->aio == NULL && ngx_file_aio_init(file, ctx->pool) != NGX_OK) { + return NGX_ERROR; + } + + aio = file->aio; + + aio->data = ctx->filter_ctx; + aio->preload_handler = ctx->aio_preload; + + return NGX_OK; +} + +#endif + + static ngx_int_t ngx_output_chain_add_copy(ngx_pool_t *pool, ngx_chain_t **chain, ngx_chain_t *in) diff -r b2920b517490 -r ccad84a174e0 src/event/ngx_event.h --- a/src/event/ngx_event.h Fri Jan 23 15:23:29 2015 +0300 +++ b/src/event/ngx_event.h Wed Feb 11 17:52:15 2015 +0300 @@ -168,6 +168,10 @@ struct ngx_event_aio_s { ngx_event_handler_pt handler; ngx_file_t *file; +#if (NGX_HAVE_AIO_SENDFILE) + ssize_t (*preload_handler)(ngx_buf_t *file); +#endif + ngx_fd_t fd; #if (NGX_HAVE_EVENTFD) @@ -181,10 +185,6 @@ struct ngx_event_aio_s { size_t nbytes; #endif -#if (NGX_HAVE_AIO_SENDFILE) - off_t last_offset; -#endif - ngx_aiocb_t aiocb; ngx_event_t event; }; diff -r b2920b517490 -r ccad84a174e0 src/http/ngx_http_copy_filter_module.c --- a/src/http/ngx_http_copy_filter_module.c Fri Jan 23 15:23:29 2015 +0300 +++ b/src/http/ngx_http_copy_filter_module.c Wed Feb 11 17:52:15 2015 +0300 @@ -20,6 +20,7 @@ 
static void ngx_http_copy_aio_handler(ng ngx_file_t *file); static void ngx_http_copy_aio_event_handler(ngx_event_t *ev); #if (NGX_HAVE_AIO_SENDFILE) +static ssize_t ngx_http_copy_aio_sendfile_preload(ngx_buf_t *file); static void ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev); #endif #endif @@ -125,7 +126,9 @@ ngx_http_copy_filter(ngx_http_request_t ctx->aio_handler = ngx_http_copy_aio_handler; } #if (NGX_HAVE_AIO_SENDFILE) - c->aio_sendfile = (clcf->aio == NGX_HTTP_AIO_SENDFILE); + if (clcf->aio == NGX_HTTP_AIO_SENDFILE) { + ctx->aio_preload = ngx_http_copy_aio_sendfile_preload; + } #endif } #endif @@ -139,72 +142,19 @@ ngx_http_copy_filter(ngx_http_request_t ctx->aio = r->aio; #endif - for ( ;; ) { - rc = ngx_output_chain(ctx, in); + rc = ngx_output_chain(ctx, in); - if (ctx->in == NULL) { - r->buffered &= ~NGX_HTTP_COPY_BUFFERED; + if (ctx->in == NULL) { + r->buffered &= ~NGX_HTTP_COPY_BUFFERED; - } else { - r->buffered |= NGX_HTTP_COPY_BUFFERED; - } + } else { + r->buffered |= NGX_HTTP_COPY_BUFFERED; + } - ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0, - "http copy filter: %i \"%V?%V\"", rc, &r->uri, &r->args); + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0, + "http copy filter: %i \"%V?%V\"", rc, &r->uri, &r->args); -#if (NGX_HAVE_FILE_AIO && NGX_HAVE_AIO_SENDFILE) - - if (c->busy_sendfile) { - ssize_t n; - off_t offset; - ngx_file_t *file; - ngx_http_ephemeral_t *e; - - if (r->aio) { - c->busy_sendfile = NULL; - return rc; - } - - file = c->busy_sendfile->file; - offset = c->busy_sendfile->file_pos; - - if (file->aio) { - c->busy_count = (offset == file->aio->last_offset) ? - c->busy_count + 1 : 0; - file->aio->last_offset = offset; - - if (c->busy_count > 2) { - ngx_log_error(NGX_LOG_ALERT, c->log, 0, - "sendfile(%V) returned busy again", - &file->name); - c->aio_sendfile = 0; - } - } - - c->busy_sendfile = NULL; - e = (ngx_http_ephemeral_t *) &r->uri_start; - - n = ngx_file_aio_read(file, &e->aio_preload, 1, offset, r->pool); - - if (n > 0) { - in = NULL; - continue; - } - - rc = n; - - if (rc == NGX_AGAIN) { - file->aio->data = r; - file->aio->handler = ngx_http_copy_aio_sendfile_event_handler; - - r->main->blocked++; - r->aio = 1; - } - } -#endif - - return rc; - } + return rc; } @@ -244,6 +194,29 @@ ngx_http_copy_aio_event_handler(ngx_even #if (NGX_HAVE_AIO_SENDFILE) +static ssize_t +ngx_http_copy_aio_sendfile_preload(ngx_buf_t *file) +{ + ssize_t n; + static u_char buf[1]; + ngx_event_aio_t *aio; + ngx_http_request_t *r; + + n = ngx_file_aio_read(file->file, buf, 1, file->file_pos, NULL); + + if (n == NGX_AGAIN) { + aio = file->file->aio; + aio->handler = ngx_http_copy_aio_sendfile_event_handler; + + r = aio->data; + r->main->blocked++; + r->aio = 1; + } + + return n; +} + + static void ngx_http_copy_aio_sendfile_event_handler(ngx_event_t *ev) { diff -r b2920b517490 -r ccad84a174e0 src/http/ngx_http_request.h --- a/src/http/ngx_http_request.h Fri Jan 23 15:23:29 2015 +0300 +++ b/src/http/ngx_http_request.h Wed Feb 11 17:52:15 2015 +0300 @@ -574,9 +574,6 @@ struct ngx_http_request_s { typedef struct { ngx_http_posted_request_t terminal_posted_request; -#if (NGX_HAVE_AIO_SENDFILE) - u_char aio_preload; -#endif } ngx_http_ephemeral_t; diff -r b2920b517490 -r ccad84a174e0 src/os/unix/ngx_file_aio_read.c --- a/src/os/unix/ngx_file_aio_read.c Fri Jan 23 15:23:29 2015 +0300 +++ b/src/os/unix/ngx_file_aio_read.c Wed Feb 11 17:52:15 2015 +0300 @@ -36,6 +36,28 @@ static ssize_t ngx_file_aio_result(ngx_f static void ngx_file_aio_event_handler(ngx_event_t *ev); +ngx_int_t 
+ngx_file_aio_init(ngx_file_t *file, ngx_pool_t *pool) +{ + ngx_event_aio_t *aio; + + aio = ngx_pcalloc(pool, sizeof(ngx_event_aio_t)); + if (aio == NULL) { + return NGX_ERROR; + } + + aio->file = file; + aio->fd = file->fd; + aio->event.data = aio; + aio->event.ready = 1; + aio->event.log = file->log; + + file->aio = aio; + + return NGX_OK; +} + + ssize_t ngx_file_aio_read(ngx_file_t *file, u_char *buf, size_t size, off_t offset, ngx_pool_t *pool) @@ -48,25 +70,11 @@ ngx_file_aio_read(ngx_file_t *file, u_ch return ngx_read_file(file, buf, size, offset); } - aio = file->aio; - - if (aio == NULL) { - aio = ngx_pcalloc(pool, sizeof(ngx_event_aio_t)); - if (aio == NULL) { - return NGX_ERROR; - } - - aio->file = file; - aio->fd = file->fd; From vbart at nginx.com Wed Feb 11 17:13:10 2015 From: vbart at nginx.com (Valentin Bartenev) Date: Wed, 11 Feb 2015 17:13:10 +0000 Subject: [nginx] Unbreak building on FreeBSD without file AIO. Message-ID: details: http://hg.nginx.org/nginx/rev/0f234ee664f7 branches: changeset: 5981:0f234ee664f7 user: Valentin Bartenev date: Wed Feb 11 20:00:07 2015 +0300 description: Unbreak building on FreeBSD without file AIO. It appeared that the NGX_HAVE_AIO_SENDFILE macro was defined regardless of the "--with-file-aio" configure option and the NGX_HAVE_FILE_AIO macro. Now they are related. Additionally, fixed one macro. diffstat: auto/os/freebsd | 8 +++++--- src/core/ngx_buf.h | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diffs (31 lines): diff -r ccad84a174e0 -r 0f234ee664f7 auto/os/freebsd --- a/auto/os/freebsd Wed Feb 11 17:52:15 2015 +0300 +++ b/auto/os/freebsd Wed Feb 11 20:00:07 2015 +0300 @@ -44,10 +44,12 @@ if [ $osreldate -gt 300007 ]; then CORE_SRCS="$CORE_SRCS $FREEBSD_SENDFILE_SRCS" fi -if [ $osreldate -gt 502103 ]; then - echo " + sendfile()'s SF_NODISKIO found" +if [ $NGX_FILE_AIO = YES ]; then + if [ $osreldate -gt 502103 ]; then + echo " + sendfile()'s SF_NODISKIO found" - have=NGX_HAVE_AIO_SENDFILE . auto/have + have=NGX_HAVE_AIO_SENDFILE . auto/have + fi fi # POSIX semaphores diff -r ccad84a174e0 -r 0f234ee664f7 src/core/ngx_buf.h --- a/src/core/ngx_buf.h Wed Feb 11 17:52:15 2015 +0300 +++ b/src/core/ngx_buf.h Wed Feb 11 20:00:07 2015 +0300 @@ -94,7 +94,7 @@ struct ngx_output_chain_ctx_s { unsigned aio:1; ngx_output_chain_aio_pt aio_handler; -#if (NGX_HAVE_FILE_AIO) +#if (NGX_HAVE_AIO_SENDFILE) ssize_t (*aio_preload)(ngx_buf_t *file); #endif #endif From pluknet at nginx.com Wed Feb 11 17:21:25 2015 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 11 Feb 2015 17:21:25 +0000 Subject: [nginx] Configure: typo fixed. Message-ID: details: http://hg.nginx.org/nginx/rev/f3f25ad09dee branches: changeset: 5982:f3f25ad09dee user: Sergey Kandaurov date: Wed Feb 11 20:18:55 2015 +0300 description: Configure: typo fixed. 
diffstat: auto/os/freebsd | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 0f234ee664f7 -r f3f25ad09dee auto/os/freebsd --- a/auto/os/freebsd Wed Feb 11 20:00:07 2015 +0300 +++ b/auto/os/freebsd Wed Feb 11 20:18:55 2015 +0300 @@ -80,7 +80,7 @@ fi NGX_KQUEUE_CHECKED=YES -# kqueue's NOTE_LAWAT +# kqueue's NOTE_LOWAT if [ \( $version -lt 500000 -a $version -ge 430000 \) \ -o $version -ge 500018 ] From dani at telecom.pt Wed Feb 11 18:21:48 2015 From: dani at telecom.pt (Dani Bento) Date: Wed, 11 Feb 2015 18:21:48 +0000 Subject: nginx report a timestamp on upstream_response_time In-Reply-To: <20150211152659.GO19012@mdounin.ru> References: <20150210115457.07a711ca@alma> <20150210152539.GJ19012@mdounin.ru> <20150210160806.7bc2069d@alma> <20150210175146.67167f50@alma> <20150210181044.GL19012@mdounin.ru> <20150211152059.39b7d0ee@alma> <20150211152659.GO19012@mdounin.ru> Message-ID: <20150211182148.5457555d@alma> Hello, After some research, we simplify our configuration to only have an header_filter_by_lua_file 'script.lua' with the following code: ngx.log(ngx.WARN, string.format("enter script")) ngx.exit(502) Making a siege -i -c 100 http://localhost/location we get some of the reported upstream_response_time. To verify if this could be some type of concurrency problem we put a ngx.sleep(0.001) before the ngx.exit() call. Apparently, this change removed all the wrong times in the upstream_response_time, giving always the expected value. Can be this a problem of the lua-nginx-module? Or some nginx race condition? Dani On Wed, 11 Feb 2015 15:26:59 +0000 Maxim Dounin wrote: > Hello! > > On Wed, Feb 11, 2015 at 03:20:59PM +0000, Dani Bento wrote: > > > Hello, > > > > I was looking in ngx_http_upstream_response_time_variable. > > > > I understand that if the state[i].status is not 0, the > > state[i].response_sec is used. > > > > It is assumed that if a state doesn't have a peer it prints a ":" > > before the next state (like the upstream_status_variable) > > > > Our problem is that in the log we have: > > > > [200 : 302] (for the status codes) > > > > [0.02 : 1423667767.600] (for the upstream_response_time) > > So the question remains: how this happens. > > (Note though, that this can easily happen if you'll try to use the > $upstream_response_time variable before the request is finalized, > e.g., in add_header or in 3rd party modules. But it shouldn't > happen during logging, as the request is expected to be finalized > at this time.) > -- Dani Bento Dire??o de Internet e Tecnologia DTS/DVS tlm: +351 91 429 72 81 dani at telecom.pt From skaurus at gmail.com Wed Feb 11 18:40:12 2015 From: skaurus at gmail.com (=?UTF-8?B?0JTQvNC40YLRgNC40Lkg0KjQsNC70LDRiNC+0LI=?=) Date: Wed, 11 Feb 2015 21:40:12 +0300 Subject: Making new server parameter inside upstream block Message-ID: Hi! As far as I see Nginx have limited number of possible server parameters inside upstream block. It is enforced by function ngx_http_upstream_server for example. Now, having my own upstream module how can I approach the task of adding a couple more parameters to a servers inside it? I would like to find a solution without resorting to patch Nginx source code, resilient to Nginx updates (self-contained, in other words) and having that -- as simple as possible. 
I have to confess that this module is the only piece of C code I've ever wrote, and I was "taking inspiration" from other modules :-) My best guess so far is overriding ngx_http_upstream_server (googling shows it's likely possible) with my own function which will find my new parameters, remove them from cf->conf and then call original function. Best regards, Dmitriy Shalashov -------------- next part -------------- An HTML attachment was scrubbed... URL: From hungnv at opensource.com.vn Thu Feb 12 04:05:34 2015 From: hungnv at opensource.com.vn (hungnv at opensource.com.vn) Date: Thu, 12 Feb 2015 11:05:34 +0700 Subject: Couple questions about module behaviour Message-ID: Hello, I am doing some research and writing small modules using nginx. One of the modules is serving file which is generated dynamically based on user?s request. I found it?s hard to handle 2 states of user?s connection: 1. When user request for object A and object A is being generated, server is processing user?s input to produce output, user closes browser, but server still produce output and just fire an error when it finished all the work and give output buffer to next filter. How to prevent this happens? 2. When user request large static file, lets say we are using mp4 module, a user requests a 1GB file, he just downloaded 100MB then close the browser, nginx log module produce number of bytes sent in log file is the file size (which is the content length). How to know exactly how many bytes server sent to client (number of bytes that client actually received)? Thanks. -- H?ng Email: hungnv at opensource.com.vn From mdounin at mdounin.ru Thu Feb 12 13:27:44 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 12 Feb 2015 16:27:44 +0300 Subject: nginx report a timestamp on upstream_response_time In-Reply-To: <20150211182148.5457555d@alma> References: <20150210115457.07a711ca@alma> <20150210152539.GJ19012@mdounin.ru> <20150210160806.7bc2069d@alma> <20150210175146.67167f50@alma> <20150210181044.GL19012@mdounin.ru> <20150211152059.39b7d0ee@alma> <20150211152659.GO19012@mdounin.ru> <20150211182148.5457555d@alma> Message-ID: <20150212132744.GT19012@mdounin.ru> Hello! On Wed, Feb 11, 2015 at 06:21:48PM +0000, Dani Bento wrote: > Hello, > > After some research, we simplify our configuration to only have an > header_filter_by_lua_file 'script.lua' with the following > code: > > ngx.log(ngx.WARN, string.format("enter script")) > ngx.exit(502) > > Making a siege -i -c 100 http://localhost/location we get some of the > reported upstream_response_time. > > To verify if this could be some type of concurrency problem we put a > ngx.sleep(0.001) before the ngx.exit() call. Apparently, this change > removed all the wrong times in the upstream_response_time, giving > always the expected value. > > Can be this a problem of the lua-nginx-module? Or some nginx race > condition? Yes, it can. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Thu Feb 12 13:36:42 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 12 Feb 2015 16:36:42 +0300 Subject: Making new server parameter inside upstream block In-Reply-To: References: Message-ID: <20150212133642.GU19012@mdounin.ru> Hello! On Wed, Feb 11, 2015 at 09:40:12PM +0300, ??????? ??????? wrote: > Hi! > > As far as I see Nginx have limited number of possible server parameters > inside upstream block. It is enforced by function ngx_http_upstream_server for > example. 
> Now, having my own upstream module how can I approach the task of adding a > couple more parameters to a servers inside it? You can't. Parameters of the "server" directive are not currently extendable. > I would like to find a solution without resorting to patch Nginx source > code, resilient to Nginx updates (self-contained, in other words) and > having that -- as simple as possible. > > I have to confess that this module is the only piece of C code I've ever > wrote, and I was "taking inspiration" from other modules :-) > My best guess so far is overriding ngx_http_upstream_server (googling shows > it's likely possible) with my own function which will find my new > parameters, remove them from cf->conf and then call original function. While this approach may work, it will likely result in big problems in the future. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Thu Feb 12 13:52:52 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 12 Feb 2015 16:52:52 +0300 Subject: Couple questions about module behaviour In-Reply-To: References: Message-ID: <20150212135252.GV19012@mdounin.ru> Hello! On Thu, Feb 12, 2015 at 11:05:34AM +0700, hungnv at opensource.com.vn wrote: > Hello, > > I am doing some research and writing small modules using nginx. > One of the modules is serving file which is generated > dynamically based on user?s request. I found it?s hard to handle > 2 states of user?s connection: > > 1. When user request for object A and object A is being > generated, server is processing user?s input to produce output, > user closes browser, but server still produce output and just > fire an error when it finished all the work and give output > buffer to next filter. How to prevent this happens? If you return control to the nginx event loop while generating the response, you may set appropriate events to detect if the client closed the connection. See ngx_http_upstream_check_broken_connection() and related things in the ngx_http_upstream.c for an example. > 2. When user request large static file, lets say we are using > mp4 module, a user requests a 1GB file, he just downloaded 100MB > then close the browser, nginx log module produce number of bytes > sent in log file is the file size (which is the content length). > How to know exactly how many bytes server sent to client (number > of bytes that client actually received)? Log module, once you use $bytes_sent (or $body_bytes_sent, as logged by default) variable, logs the number of bytes sent to the client. It is not the same as the number of bytes actually received by the client though, because there are bytes which are sent (i.e., passed by nginx to the socket buffer) but not yet received. -- Maxim Dounin http://nginx.org/ From skaurus at gmail.com Thu Feb 12 14:19:46 2015 From: skaurus at gmail.com (=?UTF-8?B?0JTQvNC40YLRgNC40Lkg0KjQsNC70LDRiNC+0LI=?=) Date: Thu, 12 Feb 2015 17:19:46 +0300 Subject: Making new server parameter inside upstream block In-Reply-To: <20150212133642.GU19012@mdounin.ru> References: <20150212133642.GU19012@mdounin.ru> Message-ID: Ok, thanks. Can you imagine any other viable way to pass some information to each server? It doesn't need to change between server restarts. I may use a distinct variable for each of them but this seems ugly and error-prone... In case you are curious why would I need this, let me explain. Upstream directive support "hash consistent" method and in that case it uses `ketama` algorithm. 
That means that each server assigned a key and depending on its value (actually hash of that value) each server mapped to a multiple points on the ketama ring. So far so good. Documentation says that keys distribution is compatible with Perl Cache::Memcached::Fast module. And that means that key for each server is "$ip\0$port" (or something else for unix sockets, doesn't matter). This means that if server ip changes, position of server points on ketama ring will change too. Now, I'm balancing via this upstream not memcacheds or other rather ephemeral storages but files. Each server in the upstream have a hundreds of gigabytes of files. And I would like to avoid rebalancing all these files in case of public ip changes. So my idea was to pass a key for each server via parameter. Actually, to preserve compatibility with current keys, I would pass a base64 of "$ip\0$port" value, decode it during module init and happily use it for ketama purposes. And be safe against servers redeployments. Best regards, Dmitriy Shalashov 2015-02-12 16:36 GMT+03:00 Maxim Dounin : > Hello! > > On Wed, Feb 11, 2015 at 09:40:12PM +0300, ??????? ??????? wrote: > > > Hi! > > > > As far as I see Nginx have limited number of possible server parameters > > inside upstream block. It is enforced by function > ngx_http_upstream_server for > > example. > > Now, having my own upstream module how can I approach the task of adding > a > > couple more parameters to a servers inside it? > > You can't. Parameters of the "server" directive are not currently > extendable. > > > I would like to find a solution without resorting to patch Nginx source > > code, resilient to Nginx updates (self-contained, in other words) and > > having that -- as simple as possible. > > > > I have to confess that this module is the only piece of C code I've ever > > wrote, and I was "taking inspiration" from other modules :-) > > My best guess so far is overriding ngx_http_upstream_server (googling > shows > > it's likely possible) with my own function which will find my new > > parameters, remove them from cf->conf and then call original function. > > While this approach may work, it will likely result in big > problems in the future. > > -- > Maxim Dounin > http://nginx.org/ > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Feb 12 14:53:03 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 12 Feb 2015 17:53:03 +0300 Subject: Making new server parameter inside upstream block In-Reply-To: References: <20150212133642.GU19012@mdounin.ru> Message-ID: <20150212145303.GW19012@mdounin.ru> Hello! On Thu, Feb 12, 2015 at 05:19:46PM +0300, ??????? ??????? wrote: > Ok, thanks. > > Can you imagine any other viable way to pass some information to each > server? It doesn't need to change between server restarts. > I may use a distinct variable for each of them but this seems ugly and > error-prone... You may want to rethink what you are trying to do. In this particular case the whole task seems to be unneeded, see below. > In case you are curious why would I need this, let me explain. > Upstream directive support "hash consistent" method and in that case it > uses `ketama` algorithm. 
That means that each server assigned a key and > depending on its value (actually hash of that value) each server mapped to > a multiple points on the ketama ring. So far so good. > Documentation says that keys distribution is compatible with Perl > Cache::Memcached::Fast module. And that means that key for each server is > "$ip\0$port" (or something else for unix sockets, doesn't matter). > This means that if server ip changes, position of server points on ketama > ring will change too. Now, I'm balancing via this upstream not memcacheds > or other rather ephemeral storages but files. Each server in the upstream > have a hundreds of gigabytes of files. And I would like to avoid > rebalancing all these files in case of public ip changes. > So my idea was to pass a key for each server via parameter. Actually, to > preserve compatibility with current keys, I would pass a base64 of > "$ip\0$port" value, decode it during module init and happily use it for > ketama purposes. And be safe against servers redeployments. Just use names in the configuration. Both Cache::Memcached::Fast and nginx will happily use names of servers and will derive Ketama points from names, not IP-addresses. That is, key distribution will stay the same as long as you don't change names configured, regardless of IP-addresses. -- Maxim Dounin http://nginx.org/ From skaurus at gmail.com Thu Feb 12 20:11:00 2015 From: skaurus at gmail.com (=?UTF-8?B?0JTQvNC40YLRgNC40Lkg0KjQsNC70LDRiNC+0LI=?=) Date: Thu, 12 Feb 2015 23:11:00 +0300 Subject: Making new server parameter inside upstream block In-Reply-To: <20150212145303.GW19012@mdounin.ru> References: <20150212133642.GU19012@mdounin.ru> <20150212145303.GW19012@mdounin.ru> Message-ID: > Just use names in the configuration. You mean local DNS? Best regards, Dmitriy Shalashov 2015-02-12 17:53 GMT+03:00 Maxim Dounin : > Hello! > > On Thu, Feb 12, 2015 at 05:19:46PM +0300, ??????? ??????? wrote: > > > Ok, thanks. > > > > Can you imagine any other viable way to pass some information to each > > server? It doesn't need to change between server restarts. > > I may use a distinct variable for each of them but this seems ugly and > > error-prone... > > You may want to rethink what you are trying to do. In this > particular case the whole task seems to be unneeded, see below. > > > In case you are curious why would I need this, let me explain. > > Upstream directive support "hash consistent" method and in that case it > > uses `ketama` algorithm. That means that each server assigned a key and > > depending on its value (actually hash of that value) each server mapped > to > > a multiple points on the ketama ring. So far so good. > > Documentation says that keys distribution is compatible with Perl > > Cache::Memcached::Fast module. And that means that key for each server is > > "$ip\0$port" (or something else for unix sockets, doesn't matter). > > This means that if server ip changes, position of server points on ketama > > ring will change too. Now, I'm balancing via this upstream not memcacheds > > or other rather ephemeral storages but files. Each server in the upstream > > have a hundreds of gigabytes of files. And I would like to avoid > > rebalancing all these files in case of public ip changes. > > So my idea was to pass a key for each server via parameter. Actually, to > > preserve compatibility with current keys, I would pass a base64 of > > "$ip\0$port" value, decode it during module init and happily use it for > > ketama purposes. And be safe against servers redeployments. 
> > Just use names in the configuration. Both Cache::Memcached::Fast > and nginx will happily use names of servers and will derive > Ketama points from names, not IP-addresses. That is, key > distribution will stay the same as long as you don't change names > configured, regardless of IP-addresses. > > -- > Maxim Dounin > http://nginx.org/ > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From agentzh at gmail.com Fri Feb 13 01:30:27 2015 From: agentzh at gmail.com (Yichun Zhang (agentzh)) Date: Thu, 12 Feb 2015 17:30:27 -0800 Subject: [PATCH] Upstream: fixed $upstream_response_time for filter_finalize + error_page. Message-ID: Hello! Please review the following patch. Thanks! -agentzh # HG changeset patch # User Yichun Zhang # Date 1423789183 28800 # Thu Feb 12 16:59:43 2015 -0800 # Node ID 8b3d7171f35e74c8bea3234e88d8977b4f11f815 # Parent f3f25ad09deee27485050a75732e5f46ab1b18b3 Upstream: fixed $upstream_response_time for filter_finalize + error_page. ngx_http_upstream_finalize_request() is always called twice when an output filter module calls ngx_http_filter_finalize_request() *and* a custom error page is configured by the error_page directive. This is because 1. ngx_http_filter_finalize_request() triggers calling ngx_http_terminate_request => calling ngx_http_upstream_cleanup => calling ngx_http_upstream_finalize_request 2. ngx_http_internal_redirect() returns NGX_DONE ==> ngx_http_special_response_handler() returns NGX_DONE ==> ngx_http_filter_finalize_request() returns NGX_ERROR ==> ngx_http_send_header() returns NGX_ERROR ==> ngx_http_upstream_send_response() calls ngx_http_upstream_finalize_request() again in the same ngx_http_upstream_send_response() call as 1). This might result in corrupted $upstream_response_time values (close to the absolute timestamp value) when u->state->response_sec happens to be non-zero. This patch ensures that the $upstream_response_time value is only calculated upon the first ngx_http_upstream_finalize_request() invocation. diff -r f3f25ad09dee -r 8b3d7171f35e src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Wed Feb 11 20:18:55 2015 +0300 +++ b/src/http/ngx_http_upstream.c Thu Feb 12 16:59:43 2015 -0800 @@ -3738,7 +3738,7 @@ static void ngx_http_upstream_finalize_request(ngx_http_request_t *r, ngx_http_upstream_t *u, ngx_int_t rc) { - ngx_uint_t flush; + ngx_uint_t flush, cleaned; ngx_time_t *tp; ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, @@ -3747,6 +3747,10 @@ ngx_http_upstream_finalize_request(ngx_h if (u->cleanup) { *u->cleanup = NULL; u->cleanup = NULL; + cleaned = 0; + + } else { + cleaned = 1; } if (u->resolved && u->resolved->ctx) { @@ -3754,7 +3758,7 @@ ngx_http_upstream_finalize_request(ngx_h u->resolved->ctx = NULL; } - if (u->state && u->state->response_sec) { + if (!cleaned && u->state && u->state->response_sec) { tp = ngx_timeofday(); u->state->response_sec = tp->sec - u->state->response_sec; u->state->response_msec = tp->msec - u->state->response_msec; -------------- next part -------------- A non-text attachment was scrubbed... 
Name: upstream_filter_finalize.patch Type: text/x-patch Size: 2535 bytes Desc: not available URL: From hungnv at opensource.com.vn Fri Feb 13 02:49:08 2015 From: hungnv at opensource.com.vn (hungnv at opensource.com.vn) Date: Fri, 13 Feb 2015 09:49:08 +0700 Subject: Couple questions about module behaviour In-Reply-To: <20150212135252.GV19012@mdounin.ru> References: <20150212135252.GV19012@mdounin.ru> Message-ID: Hello, if you return control to the nginx event loop while generating the response, you may set appropriate events to detect if the client closed the connection. See ngx_http_upstream_check_broken_connection() and related things in the ngx_http_upstream.c for an example. Yes, it?s fine, I will take a deeper look at upstream module to find out the answer. Thanks. Log module, once you use $bytes_sent (or $body_bytes_sent, as logged by default) variable, logs the number of bytes sent to the client. It is not the same as the number of bytes actually received by the client though, because there are bytes which are sent (i.e., passed by nginx to the socket buffer) but not yet received. Well, this means there?s another parameter in log module which actually log number of bytes client received (other than $body_bytes_sent or $bytes_sent). ? -- H?ng Email: hungnv at opensource.com.vn > On Feb 12, 2015, at 8:52 PM, Maxim Dounin wrote: > > f you return control to the nginx event loop while generating the > response, you may set appropriate events to detect if the client > closed the connection. See ngx_http_upstream_check_broken_connection() > and related things in the ngx_http_upstream.c for an example. -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Feb 13 13:12:16 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 13 Feb 2015 16:12:16 +0300 Subject: Making new server parameter inside upstream block In-Reply-To: References: <20150212133642.GU19012@mdounin.ru> <20150212145303.GW19012@mdounin.ru> Message-ID: <20150213131216.GD19012@mdounin.ru> Hello! On Thu, Feb 12, 2015 at 11:11:00PM +0300, ??????? ??????? wrote: > > Just use names in the configuration. > > You mean local DNS? I mean names, as resolvable by gethostbyname()/getaddrinfo() functions on your OS. It's up to you and your OS how these names will be resolved. In most simple cases even /etc/hosts will be enough. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Fri Feb 13 13:15:36 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 13 Feb 2015 16:15:36 +0300 Subject: Couple questions about module behaviour In-Reply-To: References: <20150212135252.GV19012@mdounin.ru> Message-ID: <20150213131536.GE19012@mdounin.ru> Hello! On Fri, Feb 13, 2015 at 09:49:08AM +0700, hungnv at opensource.com.vn wrote: > Well, this means there?s another parameter in log module which > actually log number of bytes client received (other than > $body_bytes_sent or $bytes_sent). ? No, it means that a server don't know how many bytes a client actually recieved. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Fri Feb 13 15:05:12 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 13 Feb 2015 18:05:12 +0300 Subject: [PATCH] Upstream: fixed $upstream_response_time for filter_finalize + error_page. In-Reply-To: References: Message-ID: <20150213150512.GG19012@mdounin.ru> Hello! On Thu, Feb 12, 2015 at 05:30:27PM -0800, Yichun Zhang (agentzh) wrote: > Hello! > > Please review the following patch. > > Thanks! 
> -agentzh > > # HG changeset patch > # User Yichun Zhang > # Date 1423789183 28800 > # Thu Feb 12 16:59:43 2015 -0800 > # Node ID 8b3d7171f35e74c8bea3234e88d8977b4f11f815 > # Parent f3f25ad09deee27485050a75732e5f46ab1b18b3 > Upstream: fixed $upstream_response_time for filter_finalize + error_page. > > ngx_http_upstream_finalize_request() is always called twice when an > output filter module calls ngx_http_filter_finalize_request() *and* > a custom error page is configured by the error_page directive. This > is because > > 1. ngx_http_filter_finalize_request() triggers > calling ngx_http_terminate_request > => calling ngx_http_upstream_cleanup > => calling ngx_http_upstream_finalize_request > > 2. ngx_http_internal_redirect() returns NGX_DONE > ==> ngx_http_special_response_handler() returns NGX_DONE > ==> ngx_http_filter_finalize_request() returns NGX_ERROR > ==> ngx_http_send_header() returns NGX_ERROR > ==> ngx_http_upstream_send_response() calls > ngx_http_upstream_finalize_request() again in the same > ngx_http_upstream_send_response() call as 1). > > This might result in corrupted $upstream_response_time values (close > to the absolute timestamp value) when u->state->response_sec happens > to be non-zero. > > This patch ensures that the $upstream_response_time value is only > calculated upon the first ngx_http_upstream_finalize_request() > invocation. Yes, filter finalization functionality is known to be very fragile and can easily cause problems if one will try to redirect it's processing with error_page. Especially if one'll try to redirect the processing from one upstream to another upstream server. Current solution to the problem is "don't do this, it hurts". This mostly works as filter finalization is only used in a few very specific cases. > diff -r f3f25ad09dee -r 8b3d7171f35e src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c Wed Feb 11 20:18:55 2015 +0300 > +++ b/src/http/ngx_http_upstream.c Thu Feb 12 16:59:43 2015 -0800 > @@ -3738,7 +3738,7 @@ static void > ngx_http_upstream_finalize_request(ngx_http_request_t *r, > ngx_http_upstream_t *u, ngx_int_t rc) > { > - ngx_uint_t flush; > + ngx_uint_t flush, cleaned; > ngx_time_t *tp; > > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > @@ -3747,6 +3747,10 @@ ngx_http_upstream_finalize_request(ngx_h > if (u->cleanup) { > *u->cleanup = NULL; > u->cleanup = NULL; > + cleaned = 0; > + > + } else { > + cleaned = 1; > } This approach looks wrong for me. It tries to ensure that the u->state will not be corrupted, but the problem here is that we've already finalized the request, and doing _anything_ would be wrong. Rather, I would suggest something like this: --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -3744,10 +3744,13 @@ ngx_http_upstream_finalize_request(ngx_h ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "finalize http upstream request: %i", rc); - if (u->cleanup) { - *u->cleanup = NULL; - u->cleanup = NULL; - } + if (u->cleanup == NULL) { + /* the request was already finalized */ + ngx_http_finalize_request(r, NGX_DONE); + } + + *u->cleanup = NULL; + u->cleanup = NULL; if (u->resolved && u->resolved->ctx) { ngx_resolve_name_done(u->resolved->ctx); (Untested though.) -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Fri Feb 13 17:34:10 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 13 Feb 2015 20:34:10 +0300 Subject: [PATCH] Upstream: fixed $upstream_response_time for filter_finalize + error_page. 
In-Reply-To: <20150213150512.GG19012@mdounin.ru> References: <20150213150512.GG19012@mdounin.ru> Message-ID: <20150213173410.GJ19012@mdounin.ru> Hello! On Fri, Feb 13, 2015 at 06:05:12PM +0300, Maxim Dounin wrote: [...] > Rather, I would suggest something like this: > > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -3744,10 +3744,13 @@ ngx_http_upstream_finalize_request(ngx_h > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > "finalize http upstream request: %i", rc); > > - if (u->cleanup) { > - *u->cleanup = NULL; > - u->cleanup = NULL; > - } > + if (u->cleanup == NULL) { > + /* the request was already finalized */ > + ngx_http_finalize_request(r, NGX_DONE); Err, return should be here. > + } > + > + *u->cleanup = NULL; > + u->cleanup = NULL; > > if (u->resolved && u->resolved->ctx) { > ngx_resolve_name_done(u->resolved->ctx); > > (Untested though.) -- Maxim Dounin http://nginx.org/ From tommywatson+nginx-devel at gmail.com Sat Feb 14 01:44:37 2015 From: tommywatson+nginx-devel at gmail.com (tommy watson) Date: Fri, 13 Feb 2015 19:44:37 -0600 Subject: Terminating requests Message-ID: Hello, I'm trying to continue or cancel an ngx_http_request_t after a slight delay but am failing miserably, I keep getting crashes and am not sure what I'm doing wrong. The code is here https://github.com/tommywatson/nginx-hello-world-module (borrowed from https://www.ruby-forum.com/topic/5564332) basically it pauses and fires and event to continue or finalize the request. Firing nikto at it brings the dump below. Any help/insight appreciated. Cheers. Program terminated with signal SIGSEGV, Segmentation fault. #0 0x0000000000406af2 in ngx_pnalloc (pool=0x0, size=181) at src/core/ngx_palloc.c:155 155 if (size <= pool->max) { (gdb) where #0 0x0000000000406af2 in ngx_pnalloc (pool=0x0, size=181) at src/core/ngx_palloc.c:155 #1 0x0000000000452692 in ngx_http_log_handler (r=0x6676b50) at src/http/modules/ngx_http_log_module.c:349 #2 0x000000000044c385 in ngx_http_log_request (r=0x6676b50) at src/http/ngx_http_request.c:3510 #3 0x000000000044c1f2 in ngx_http_free_request (r=0x6676b50, rc=0) at src/http/ngx_http_request.c:3457 #4 0x000000000044b297 in ngx_http_set_keepalive (r=0x6676b50) at src/http/ngx_http_request.c:2895 #5 0x000000000044a994 in ngx_http_finalize_connection (r=0x6676b50) at src/http/ngx_http_request.c:2532 #6 0x000000000044a10b in ngx_http_finalize_request (r=0x6676b50, rc=-4) at src/http/ngx_http_request.c:2262 #7 0x000000000043cb18 in ngx_http_core_content_phase (r=0x6676b50, ph=0x60b7798) at src/http/ngx_http_core_module.c:1407 #8 0x000000000043b911 in ngx_http_core_run_phases (r=0x6676b50) at src/http/ngx_http_core_module.c:888 #9 0x00000000004af101 in hack_event (e=0x6677bc8) at ../nginx-hello-world-module/ngx_http_hello_world_module.c:85 #10 0x000000000042afac in ngx_event_expire_timers () at src/event/ngx_event_timer.c:94 #11 0x00000000004290a7 in ngx_process_events_and_timers (cycle=0x608f310) at src/event/ngx_event.c:262 #12 0x000000000043493f in ngx_worker_process_cycle (cycle=0x608f310, data=0x0) at src/os/unix/ngx_process_cycle.c:824 #13 0x000000000043176d in ngx_spawn_process (cycle=0x608f310, proc=0x43476b , data=0x0, name=0x4b3180 "worker process", respawn=-3) at src/os/unix/ngx_process.c:198 #14 0x0000000000433a71 in ngx_start_worker_processes (cycle=0x608f310, n=1, type=-3) at src/os/unix/ngx_process_cycle.c:368 #15 0x00000000004331cd in ngx_master_process_cycle (cycle=0x608f310) at src/os/unix/ngx_process_cycle.c:140 #16 
0x00000000004037c6 in main (argc=1, argv=0xffefffbe8) at src/core/nginx.c:407 (gdb) quit -------------- next part -------------- An HTML attachment was scrubbed... URL: From agentzh at gmail.com Sun Feb 15 22:04:03 2015 From: agentzh at gmail.com (Yichun Zhang (agentzh)) Date: Sun, 15 Feb 2015 14:04:03 -0800 Subject: [PATCH] Upstream: fixed $upstream_response_time for filter_finalize + error_page. In-Reply-To: <20150213150512.GG19012@mdounin.ru> References: <20150213150512.GG19012@mdounin.ru> Message-ID: Hello! On Fri, Feb 13, 2015 at 7:05 AM, Maxim Dounin wrote: > Rather, I would suggest something like this: > > --- a/src/http/ngx_http_upstream.c > +++ b/src/http/ngx_http_upstream.c > @@ -3744,10 +3744,13 @@ ngx_http_upstream_finalize_request(ngx_h > ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > "finalize http upstream request: %i", rc); > > - if (u->cleanup) { > - *u->cleanup = NULL; > - u->cleanup = NULL; > - } > + if (u->cleanup == NULL) { > + /* the request was already finalized */ > + ngx_http_finalize_request(r, NGX_DONE); + return > + } > + > + *u->cleanup = NULL; > + u->cleanup = NULL; > This patch works for me and yeah it's better. Will you commit it? Thanks! -agentzh From lists at die-jansens.de Mon Feb 16 14:46:08 2015 From: lists at die-jansens.de (Arne Jansen) Date: Mon, 16 Feb 2015 15:46:08 +0100 Subject: suspected bug in devpoll Message-ID: <54E202B0.5000304@die-jansens.de> Hi, I suspect a bug in the devpoll implementation. What I see is this: recv() failed (134: Transport endpoint is not connected) while reading response header from upstream truss (solaris) tells me that nginx has done a connect() = EINPROGRESS, directly followed by a read() on that socket, getting the ENOTCONN error. What I suspect is as follows: I have two fds waiting for an event. Both get ready at the same time. ngx_devpoll_process_events fetches both from the kernel. While handling the first event, the second fd gets closed, reopened and connected (with EINPROGRESS). Afterwards, ngx_devpoll_process_events handles the already received event for the other fd. This leads to a read() call even though the socket is not connected yet. What I'm missing is code in ngx_devpoll_del_event that deletes revents from the global event_list, but I'm not sure if that's the right way to approach this and how it is supposed to synchronize with ngx_devpoll_process_events. Thanks, Arne From lists at die-jansens.de Tue Feb 17 08:43:29 2015 From: lists at die-jansens.de (Arne Jansen) Date: Tue, 17 Feb 2015 09:43:29 +0100 Subject: suspected bug in devpoll In-Reply-To: <54E202B0.5000304@die-jansens.de> References: <54E202B0.5000304@die-jansens.de> Message-ID: <54E2FF31.5000307@die-jansens.de> As a workaround I now set devpoll_events to 1. This fixes the issue, though it might slightly impact the performance. Nevertheless, if someone can point me to a clean way to fix this, I'd give it a try. -Arne On 02/16/2015 03:46 PM, Arne Jansen wrote: > Hi, > > I suspect a bug in the devpoll implementation. What I see is this: > > recv() failed (134: Transport endpoint is not connected) while reading response > header from upstream > > truss (solaris) tells me that nginx has done a connect() = EINPROGRESS, directly > followed by a read() on that socket, getting the ENOTCONN error. > > What I suspect is as follows: > I have two fds waiting for an event. Both get ready at the same time. > ngx_devpoll_process_events fetches both from the kernel. 
While handling the > first event, the second fd gets closed, reopened and connected (with > EINPROGRESS). Afterwards, ngx_devpoll_process_events handles the already > received event for the other fd. This leads to a read() call even though the > socket is not connected yet. > What I'm missing is code in ngx_devpoll_del_event that deletes revents from > the global event_list, but I'm not sure if that's the right way to approach > this and how it is supposed to synchronize with ngx_devpoll_process_events. > > Thanks, > Arne From arut at nginx.com Tue Feb 17 11:31:52 2015 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 17 Feb 2015 11:31:52 +0000 Subject: [nginx] Core: make ngx_connection_local_sockaddr() always assign... Message-ID: details: http://hg.nginx.org/nginx/rev/69c4912066a4 branches: changeset: 5983:69c4912066a4 user: Roman Arutyunyan date: Tue Feb 17 14:26:44 2015 +0300 description: Core: make ngx_connection_local_sockaddr() always assign address. Previously, this function checked for connection local address existence and returned error if it was missing. Now a new address is assigned in this case making it possible to call this function not only for accepted connections. diffstat: src/core/ngx_connection.c | 34 +++++++++++++++++----------------- 1 files changed, 17 insertions(+), 17 deletions(-) diffs (54 lines): diff -r f3f25ad09dee -r 69c4912066a4 src/core/ngx_connection.c --- a/src/core/ngx_connection.c Wed Feb 11 20:18:55 2015 +0300 +++ b/src/core/ngx_connection.c Tue Feb 17 14:26:44 2015 +0300 @@ -1073,33 +1073,33 @@ ngx_connection_local_sockaddr(ngx_connec struct sockaddr_in6 *sin6; #endif - if (c->local_socklen == 0) { - return NGX_ERROR; - } + addr = 0; - switch (c->local_sockaddr->sa_family) { + if (c->local_socklen) { + switch (c->local_sockaddr->sa_family) { #if (NGX_HAVE_INET6) - case AF_INET6: - sin6 = (struct sockaddr_in6 *) c->local_sockaddr; + case AF_INET6: + sin6 = (struct sockaddr_in6 *) c->local_sockaddr; - for (addr = 0, i = 0; addr == 0 && i < 16; i++) { - addr |= sin6->sin6_addr.s6_addr[i]; - } + for (i = 0; addr == 0 && i < 16; i++) { + addr |= sin6->sin6_addr.s6_addr[i]; + } - break; + break; #endif #if (NGX_HAVE_UNIX_DOMAIN) - case AF_UNIX: - addr = 1; - break; + case AF_UNIX: + addr = 1; + break; #endif - default: /* AF_INET */ - sin = (struct sockaddr_in *) c->local_sockaddr; - addr = sin->sin_addr.s_addr; - break; + default: /* AF_INET */ + sin = (struct sockaddr_in *) c->local_sockaddr; + addr = sin->sin_addr.s_addr; + break; + } } if (addr == 0) { From ru at nginx.com Tue Feb 17 13:29:01 2015 From: ru at nginx.com (Ruslan Ermilov) Date: Tue, 17 Feb 2015 13:29:01 +0000 Subject: [nginx] Cache: reduced diffs to the plus version of nginx. Message-ID: details: http://hg.nginx.org/nginx/rev/3f568dd68af1 branches: changeset: 5984:3f568dd68af1 user: Ruslan Ermilov date: Tue Feb 17 16:27:52 2015 +0300 description: Cache: reduced diffs to the plus version of nginx. No functional changes. 
diffstat: src/http/ngx_http_file_cache.c | 10 ++++------ 1 files changed, 4 insertions(+), 6 deletions(-) diffs (43 lines): diff -r 69c4912066a4 -r 3f568dd68af1 src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c Tue Feb 17 14:26:44 2015 +0300 +++ b/src/http/ngx_http_file_cache.c Tue Feb 17 16:27:52 2015 +0300 @@ -258,7 +258,7 @@ ngx_int_t ngx_http_file_cache_open(ngx_http_request_t *r) { ngx_int_t rc, rv; - ngx_uint_t cold, test; + ngx_uint_t test; ngx_http_cache_t *c; ngx_pool_cleanup_t *cln; ngx_open_file_info_t of; @@ -300,8 +300,6 @@ ngx_http_file_cache_open(ngx_http_reques return NGX_HTTP_CACHE_SCARCE; } - cold = cache->sh->cold; - if (rc == NGX_OK) { if (c->error) { @@ -314,18 +312,18 @@ ngx_http_file_cache_open(ngx_http_reques } else { /* rc == NGX_DECLINED */ + test = cache->sh->cold ? 1 : 0; + if (c->min_uses > 1) { - if (!cold) { + if (!test) { return NGX_HTTP_CACHE_SCARCE; } - test = 1; rv = NGX_HTTP_CACHE_SCARCE; } else { c->temp_file = 1; - test = cold ? 1 : 0; rv = NGX_DECLINED; } } From ek at kuramoto.org Tue Feb 17 14:25:42 2015 From: ek at kuramoto.org (Kuramoto Eiji) Date: Tue, 17 Feb 2015 23:25:42 +0900 Subject: SSLv3 protocol with LibreSSL Message-ID: # HG changeset patch # User Kuramoto Eiji # Date 1424182447 -32400 # Node ID 2f0279e2d15aa7fd4c8300a99fa323513deaf1ab # Parent f3f25ad09deee27485050a75732e5f46ab1b18b3 SSLv3 protocol is not available with LibreSSL, even if SSLv3 option is supplied in config. LibreSSL-2.1.2/2.1.3 disables SSLv3 by default. diff -r f3f25ad09dee -r 2f0279e2d15a src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Wed Feb 11 20:18:55 2015 +0300 +++ b/src/event/ngx_event_openssl.c Tue Feb 17 23:14:07 2015 +0900 @@ -252,9 +252,17 @@ if (!(protocols & NGX_SSL_SSLv2)) { SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_SSLv2); } +ifdef LIBRESSL_VERSION_NUMBER + if (!(protocols & NGX_SSL_SSLv3)) { + SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_SSLv3); + } else { + SSL_CTX_clear_options(ssl->ctx, SSL_OP_NO_SSLv3); + } +#else if (!(protocols & NGX_SSL_SSLv3)) { SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_SSLv3); } +#endif if (!(protocols & NGX_SSL_TLSv1)) { SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_TLSv1); } - Kuramoto Eiji From waikeen.woon at onapp.com Wed Feb 18 12:07:36 2015 From: waikeen.woon at onapp.com (Wai Keen Woon) Date: Wed, 18 Feb 2015 20:07:36 +0800 Subject: [PATCH] Upstream hash: speedup consistent hash init Message-ID: <54E48088.5070203@onapp.com> # HG changeset patch # User Wai Keen Woon # Date 1424259253 -28800 # Wed Feb 18 19:34:13 2015 +0800 # Node ID 34578fd8055e08db2e7bf1e6637b26e92bb1a89b # Parent 3f568dd68af147b5ba259a27fdc6645f99e87aa7 Upstream hash: speedup consistent hash init Repeatedly calling ngx_http_upstream_add_chash_point() to create the points array in sorted order, is O(n^2) to the total weight. This can cause nginx startup and reconfigure to be substantially delayed. For example, when total weight is 1000, startup takes 4s. Replace this with a linear any-order insertion followed by qsort(). Startup time for total weight of 1000 reduces to 40ms. Note that in the original implementation, if there are points with duplicate hash, only the first is kept. In this change, all are included. 
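In outline, the change replaces per-point sorted insertion with the usual append-then-sort pattern. A minimal standalone sketch of that pattern (point_t, points and n are placeholder names, not the ngx_http_upstream_chash_point_t structure or the ngx_qsort wrapper used in the actual diff below):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct {
        uint32_t     hash;
        const char  *server;
    } point_t;

    static int
    point_cmp(const void *one, const void *two)
    {
        uint32_t  h1 = ((const point_t *) one)->hash;
        uint32_t  h2 = ((const point_t *) two)->hash;

        /* explicit comparison: returning "h1 - h2" would be wrong for
         * uint32_t values, since the difference may not fit the int result */
        return (h1 > h2) - (h1 < h2);
    }

    static void
    sort_points(point_t *points, size_t n)
    {
        /* points[] was filled by appending in whatever order the hashes
         * were produced; one qsort() replaces n sorted insertions */
        qsort(points, n, sizeof(point_t), point_cmp);
    }

Each sorted insertion moves up to the whole tail of the array with ngx_memmove(), so building the array that way is O(n^2) in the total weight, while appending and sorting once is O(n log n), which matches the 4 s vs. 40 ms figures quoted above.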
diff -r 3f568dd68af1 -r 34578fd8055e src/http/modules/ngx_http_upstream_hash_module.c --- a/src/http/modules/ngx_http_upstream_hash_module.c Tue Feb 17 16:27:52 2015 +0300 +++ b/src/http/modules/ngx_http_upstream_hash_module.c Wed Feb 18 19:34:13 2015 +0800 @@ -49,10 +49,10 @@ static ngx_int_t ngx_http_upstream_init_chash(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us); -static void ngx_http_upstream_add_chash_point( - ngx_http_upstream_chash_points_t *points, uint32_t hash, ngx_str_t *server); static ngx_uint_t ngx_http_upstream_find_chash_point( ngx_http_upstream_chash_points_t *points, uint32_t hash); +static int ngx_libc_cdecl + ngx_http_upstream_chash_cmp(const void *one, const void *two); static ngx_int_t ngx_http_upstream_init_chash_peer(ngx_http_request_t *r, ngx_http_upstream_srv_conf_t *us); static ngx_int_t ngx_http_upstream_get_chash_peer(ngx_peer_connection_t *pc, @@ -360,12 +360,19 @@ ngx_crc32_update(&hash, (u_char *) &prev_hash, sizeof(uint32_t)); ngx_crc32_final(hash); - ngx_http_upstream_add_chash_point(points, hash, &peer->server); + points->point[points->number].hash = hash; + points->point[points->number].server = server; + points->number++; prev_hash = hash; } } + ngx_qsort(points->point, points->number, + sizeof(ngx_http_upstream_chash_point_t), + ngx_http_upstream_chash_cmp); + + hcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_hash_module); hcf->points = points; @@ -373,31 +380,6 @@ } -static void -ngx_http_upstream_add_chash_point(ngx_http_upstream_chash_points_t *points, - uint32_t hash, ngx_str_t *server) -{ - size_t size; - ngx_uint_t i; - ngx_http_upstream_chash_point_t *point; - - i = ngx_http_upstream_find_chash_point(points, hash); - point = &points->point[i]; - - if (point->hash == hash) { - return; - } - - size = (points->number - i) * sizeof(ngx_http_upstream_chash_point_t); - - ngx_memmove(point + 1, point, size); - - points->number++; - point->hash = hash; - point->server = server; -} - - static ngx_uint_t ngx_http_upstream_find_chash_point(ngx_http_upstream_chash_points_t *points, uint32_t hash) @@ -430,6 +412,21 @@ } +static int ngx_libc_cdecl +ngx_http_upstream_chash_cmp(const void *one, const void *two) +{ + if (((ngx_http_upstream_chash_point_t *) one)->hash < + ((ngx_http_upstream_chash_point_t *) two)->hash) { + return -1; + } else if (((ngx_http_upstream_chash_point_t *) one)->hash > + ((ngx_http_upstream_chash_point_t *) two)->hash) { + return 1; + } else { + return 0; + } +} + + static ngx_int_t ngx_http_upstream_init_chash_peer(ngx_http_request_t *r, ngx_http_upstream_srv_conf_t *us) From arut at nginx.com Wed Feb 18 12:49:20 2015 From: arut at nginx.com (Roman Arutyunyan) Date: Wed, 18 Feb 2015 15:49:20 +0300 Subject: [PATCH] Upstream hash: speedup consistent hash init In-Reply-To: <54E48088.5070203@onapp.com> References: <54E48088.5070203@onapp.com> Message-ID: <5FDA6B3E-63FC-4A05-A259-9B334923D151@nginx.com> Hello, On 18 Feb 2015, at 15:07, Wai Keen Woon wrote: > # HG changeset patch > # User Wai Keen Woon > # Date 1424259253 -28800 > # Wed Feb 18 19:34:13 2015 +0800 > # Node ID 34578fd8055e08db2e7bf1e6637b26e92bb1a89b > # Parent 3f568dd68af147b5ba259a27fdc6645f99e87aa7 > Upstream hash: speedup consistent hash init > > Repeatedly calling ngx_http_upstream_add_chash_point() to create > the points array in sorted order, is O(n^2) to the total weight. > This can cause nginx startup and reconfigure to be substantially > delayed. For example, when total weight is 1000, startup takes 4s. 
> > Replace this with a linear any-order insertion followed by qsort(). > Startup time for total weight of 1000 reduces to 40ms. Thanks, we?ll look into this. > Note that in the original implementation, if there are points > with duplicate hash, only the first is kept. In this change, all > are included. This is the intended behaviour. Consistent hash array is build over server entries, but not addresses resolved from them. Duplicate points are ignored since most likely they refer to multiple addresses of the same host. [..] -- Roman Arutyunyan From tigran.bayburtsyan at gmail.com Wed Feb 18 15:37:13 2015 From: tigran.bayburtsyan at gmail.com (Tigran Bayburtsyan) Date: Wed, 18 Feb 2015 19:37:13 +0400 Subject: Forking Nginx process Message-ID: Hi. I'm writing simple module for Nginx just to fork process on request and execute some separate process and then exit. *if(fork() == 0)* *{* * // Use r request pointer* *.* *.* *.* * // creating out_chain (could be more than 700kb)* * ngx_http_output_filter ( r , out_chain );* * ngx_http_finalize_request(r, NGX_OK);* *}* *else* *{* * close(r->connection->fd); * * ngx_http_finalize_request(r, NGX_OK);* *}* if I'm sending 1 request per time it works fine , but when I'm sending 2 or more requests at the same time works only the last request , other requests just staying in loading stage. Who can help me figure out what is the problem ? I'm just thinking it could be because ngx_request_t *r pointer is in nginx shared memory so every request overriding another .... but maybe it's not about that. Please let me know if you can give any help. Thanks. -------------- next part -------------- An HTML attachment was scrubbed... URL: From waikeen.woon at onapp.com Thu Feb 19 05:33:03 2015 From: waikeen.woon at onapp.com (Wai Keen Woon) Date: Thu, 19 Feb 2015 13:33:03 +0800 Subject: [PATCH] Upstream hash: speedup consistent hash init In-Reply-To: <5FDA6B3E-63FC-4A05-A259-9B334923D151@nginx.com> References: <54E48088.5070203@onapp.com> <5FDA6B3E-63FC-4A05-A259-9B334923D151@nginx.com> Message-ID: <54E5758F.8070009@onapp.com> On 2/18/2015 8:49 PM, Roman Arutyunyan wrote: >> Note that in the original implementation, if there are points >> with duplicate hash, only the first is kept. In this change, all >> are included. > This is the intended behaviour. Consistent hash array is build over > server entries, but not addresses resolved from them. Duplicate points > are ignored since most likely they refer to multiple addresses of > the same host. I see. I could add a loop to remove duplicate hashes and maybe adjacent points referencing the same server too. Or do you prefer to take some time to look into it in more detail first? From nginx at paulschou.com Sat Feb 21 05:05:22 2015 From: nginx at paulschou.com (Paul Schoh) Date: Sat, 21 Feb 2015 00:05:22 -0500 Subject: HLS example Message-ID: <1E75D283-0570-4580-830D-7D9504CCF66C@paulschou.com> Good day-- Re: http://nginx.org/en/docs/http/ngx_http_hls_module.html I would like to offer an example HLS to add to the documentation / webpage which is license free and can be used for any purpose: http://fish.schou.me Take care, - Paul Schou -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From nginx at paulschou.com Sat Feb 21 05:13:30 2015 From: nginx at paulschou.com (Paul Schou) Date: Sat, 21 Feb 2015 00:13:30 -0500 Subject: HLS example In-Reply-To: <1E75D283-0570-4580-830D-7D9504CCF66C@paulschou.com> References: <1E75D283-0570-4580-830D-7D9504CCF66C@paulschou.com> Message-ID: <5D16D548-B02A-449A-9C22-D2D8A5DEC83A@paulschou.com> Correction: this is a live stream example using the nginx RTMP module with the ffmpeg backend for converting to flv format. - Paul Schou > On Feb 21, 2015, at 12:05 AM, Paul Schou wrote: > > Good day-- > > Re: http://nginx.org/en/docs/http/ngx_http_hls_module.html > > I would like to offer an example HLS to add to the documentation / webpage which is license free and can be used for any purpose: > > http://fish.schou.me > > Take care, > > - Paul Schou > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: From tigran.bayburtsyan at gmail.com Sat Feb 21 20:55:10 2015 From: tigran.bayburtsyan at gmail.com (Tigran Bayburtsyan) Date: Sun, 22 Feb 2015 00:55:10 +0400 Subject: Nginx Timer is killing my request pool Message-ID: Hi All. I've created a function for adding my custom ngx_timer , but after 1st time loop my r->pool is setting to NULL , also it sets NULL my r->connection->log, and after second loop it throws exception on *ngx_palloc *because my r->pool is NULL. I can't find out why, here is my function *void add_aling_event(ngx_event_t *e, ngx_http_request_t *r, void (*handler), int timer)* *{* * if(e)* * {* * ngx_add_timer(e, timer);* * return;* * }* * ngx_event_t *event=ngx_palloc(r->pool,sizeof(ngx_event_t));* * memset(event,0,sizeof(ngx_event_t));* * event->data=r;* * event->handler=handler;* * event->log=r->connection->log;* * ngx_add_timer(event, timer);* *}* And I'm calling this function like this. static ngx_int_t ngx_http_aling_handler(ngx_http_request_t *r) { ........ ........ add_aling_event(NULL, r, hack_func, 50); return NGX_AGAIN; } void hack_func(ngx_event_t *e) { ngx_http_request_t *r = e->data; ........ ........ switch(shmLen[1]) { case 1: // adding chain { .......................... * ..........................* };break; case 2: // adding last chain { .......................... * ..........................* };break; default: { add_aling_event(e, r, hack_func, 50); };break; } } After 1 or 2 calls r->pool is sets to NULL ..... Please help me figure out this issue. Maybe I'm doing something wrong. I've did this code using example here https://github.com/tommywatson/nginx-hello-world-module/blob/master/ngx_http_hello_world_module.c#L104 Thanks. -------------- next part -------------- An HTML attachment was scrubbed... URL: From tommywatson+nginx-devel at gmail.com Sat Feb 21 21:02:05 2015 From: tommywatson+nginx-devel at gmail.com (tommy watson) Date: Sat, 21 Feb 2015 15:02:05 -0600 Subject: Nginx Timer is killing my request pool In-Reply-To: References: Message-ID: Tigran, are you still using fork() ? As you describe here: http://mailman.nginx.org/pipermail/nginx-devel/2015-February/006554.html If so, as already explained, the other process is probably closing your request. Cheers. On Sat, Feb 21, 2015 at 2:55 PM, Tigran Bayburtsyan < tigran.bayburtsyan at gmail.com> wrote: > Hi All. 
> > I've created a function for adding my custom ngx_timer , but after 1st > time loop my r->pool is setting to NULL , also it sets NULL my > r->connection->log, and after second loop it throws exception on *ngx_palloc > *because my r->pool is NULL. > I can't find out why, here is my function > > *void add_aling_event(ngx_event_t *e, ngx_http_request_t *r, void > (*handler), int timer)* > *{* > * if(e)* > * {* > * ngx_add_timer(e, timer);* > * return;* > * }* > * ngx_event_t *event=ngx_palloc(r->pool,sizeof(ngx_event_t));* > * memset(event,0,sizeof(ngx_event_t));* > * event->data=r;* > * event->handler=handler;* > * event->log=r->connection->log;* > * ngx_add_timer(event, timer);* > *}* > > And I'm calling this function like this. > > static ngx_int_t ngx_http_aling_handler(ngx_http_request_t *r) > { > ........ > ........ > add_aling_event(NULL, r, hack_func, 50); > return NGX_AGAIN; > } > > void hack_func(ngx_event_t *e) > { > ngx_http_request_t *r = e->data; > ........ > ........ > > switch(shmLen[1]) > { > case 1: // adding chain > { > .......................... > * ..........................* > };break; > case 2: // adding last chain > { > .......................... > * ..........................* > };break; > default: > { > add_aling_event(e, r, hack_func, 50); > };break; > } > } > > After 1 or 2 calls r->pool is sets to NULL ..... > > Please help me figure out this issue. > Maybe I'm doing something wrong. > I've did this code using example here > https://github.com/tommywatson/nginx-hello-world-module/blob/master/ngx_http_hello_world_module.c#L104 > > > Thanks. > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From tigran.bayburtsyan at gmail.com Sat Feb 21 21:19:59 2015 From: tigran.bayburtsyan at gmail.com (Tigran Bayburtsyan) Date: Sun, 22 Feb 2015 01:19:59 +0400 Subject: Nginx Timer is killing my request pool In-Reply-To: References: Message-ID: No this time I'm trying to do it with separate process by just sharing memory between Nginx worker and my 3rd party excusable... I'm making ngx_timer to send data when it will be received from shared memory. It works fine, but I can't figure out why my r->pool is null ? I'm using Nginx 1.7 version. Thanks for your help. 2015-02-22 1:02 GMT+04:00 tommy watson : > Tigran, > are you still using fork() ? As you describe here: > http://mailman.nginx.org/pipermail/nginx-devel/2015-February/006554.html > > If so, as already explained, the other process is probably closing your > request. > > Cheers. > > > On Sat, Feb 21, 2015 at 2:55 PM, Tigran Bayburtsyan < > tigran.bayburtsyan at gmail.com> wrote: > >> Hi All. >> >> I've created a function for adding my custom ngx_timer , but after 1st >> time loop my r->pool is setting to NULL , also it sets NULL my >> r->connection->log, and after second loop it throws exception on *ngx_palloc >> *because my r->pool is NULL. 
>> I can't find out why, here is my function >> >> *void add_aling_event(ngx_event_t *e, ngx_http_request_t *r, void >> (*handler), int timer)* >> *{* >> * if(e)* >> * {* >> * ngx_add_timer(e, timer);* >> * return;* >> * }* >> * ngx_event_t *event=ngx_palloc(r->pool,sizeof(ngx_event_t));* >> * memset(event,0,sizeof(ngx_event_t));* >> * event->data=r;* >> * event->handler=handler;* >> * event->log=r->connection->log;* >> * ngx_add_timer(event, timer);* >> *}* >> >> And I'm calling this function like this. >> >> static ngx_int_t ngx_http_aling_handler(ngx_http_request_t *r) >> { >> ........ >> ........ >> add_aling_event(NULL, r, hack_func, 50); >> return NGX_AGAIN; >> } >> >> void hack_func(ngx_event_t *e) >> { >> ngx_http_request_t *r = e->data; >> ........ >> ........ >> >> switch(shmLen[1]) >> { >> case 1: // adding chain >> { >> .......................... >> * ..........................* >> };break; >> case 2: // adding last chain >> { >> .......................... >> * ..........................* >> };break; >> default: >> { >> add_aling_event(e, r, hack_func, 50); >> };break; >> } >> } >> >> After 1 or 2 calls r->pool is sets to NULL ..... >> >> Please help me figure out this issue. >> Maybe I'm doing something wrong. >> I've did this code using example here >> https://github.com/tommywatson/nginx-hello-world-module/blob/master/ngx_http_hello_world_module.c#L104 >> >> >> Thanks. >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From tommywatson+nginx-devel at gmail.com Sat Feb 21 21:23:58 2015 From: tommywatson+nginx-devel at gmail.com (tommy watson) Date: Sat, 21 Feb 2015 15:23:58 -0600 Subject: Nginx Timer is killing my request pool In-Reply-To: References: Message-ID: My money is on your request has been cleaned up in ngx_http_free_request(): https://github.com/nginx/nginx/blob/master/src/http/ngx_http_request.c#L3493 On Sat, Feb 21, 2015 at 3:19 PM, Tigran Bayburtsyan < tigran.bayburtsyan at gmail.com> wrote: > No this time I'm trying to do it with separate process by just sharing > memory between Nginx worker and my 3rd party excusable... > I'm making ngx_timer to send data when it will be received from shared > memory. > > It works fine, but I can't figure out why my r->pool is null ? > > I'm using Nginx 1.7 version. > Thanks for your help. > > > 2015-02-22 1:02 GMT+04:00 tommy watson > : > >> Tigran, >> are you still using fork() ? As you describe here: >> http://mailman.nginx.org/pipermail/nginx-devel/2015-February/006554.html >> >> If so, as already explained, the other process is probably closing your >> request. >> >> Cheers. >> >> >> On Sat, Feb 21, 2015 at 2:55 PM, Tigran Bayburtsyan < >> tigran.bayburtsyan at gmail.com> wrote: >> >>> Hi All. >>> >>> I've created a function for adding my custom ngx_timer , but after 1st >>> time loop my r->pool is setting to NULL , also it sets NULL my >>> r->connection->log, and after second loop it throws exception on *ngx_palloc >>> *because my r->pool is NULL. 
>>> I can't find out why, here is my function >>> >>> *void add_aling_event(ngx_event_t *e, ngx_http_request_t *r, void >>> (*handler), int timer)* >>> *{* >>> * if(e)* >>> * {* >>> * ngx_add_timer(e, timer);* >>> * return;* >>> * }* >>> * ngx_event_t *event=ngx_palloc(r->pool,sizeof(ngx_event_t));* >>> * memset(event,0,sizeof(ngx_event_t));* >>> * event->data=r;* >>> * event->handler=handler;* >>> * event->log=r->connection->log;* >>> * ngx_add_timer(event, timer);* >>> *}* >>> >>> And I'm calling this function like this. >>> >>> static ngx_int_t ngx_http_aling_handler(ngx_http_request_t *r) >>> { >>> ........ >>> ........ >>> add_aling_event(NULL, r, hack_func, 50); >>> return NGX_AGAIN; >>> } >>> >>> void hack_func(ngx_event_t *e) >>> { >>> ngx_http_request_t *r = e->data; >>> ........ >>> ........ >>> >>> switch(shmLen[1]) >>> { >>> case 1: // adding chain >>> { >>> .......................... >>> * ..........................* >>> };break; >>> case 2: // adding last chain >>> { >>> .......................... >>> * ..........................* >>> };break; >>> default: >>> { >>> add_aling_event(e, r, hack_func, 50); >>> };break; >>> } >>> } >>> >>> After 1 or 2 calls r->pool is sets to NULL ..... >>> >>> Please help me figure out this issue. >>> Maybe I'm doing something wrong. >>> I've did this code using example here >>> https://github.com/tommywatson/nginx-hello-world-module/blob/master/ngx_http_hello_world_module.c#L104 >>> >>> >>> Thanks. >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From tigran.bayburtsyan at gmail.com Sat Feb 21 21:28:21 2015 From: tigran.bayburtsyan at gmail.com (Tigran Bayburtsyan) Date: Sun, 22 Feb 2015 01:28:21 +0400 Subject: Nginx Timer is killing my request pool In-Reply-To: References: Message-ID: Ok I sow that code during debugging Nginx request , but why my request is freeing if I'm not calling ngx_http_finalize_request ? What should I do to keep my request alive ? Thanks. 2015-02-22 1:23 GMT+04:00 tommy watson : > My money is on your request has been cleaned up in ngx_http_free_request(): > > > https://github.com/nginx/nginx/blob/master/src/http/ngx_http_request.c#L3493 > > On Sat, Feb 21, 2015 at 3:19 PM, Tigran Bayburtsyan < > tigran.bayburtsyan at gmail.com> wrote: > >> No this time I'm trying to do it with separate process by just sharing >> memory between Nginx worker and my 3rd party excusable... >> I'm making ngx_timer to send data when it will be received from shared >> memory. >> >> It works fine, but I can't figure out why my r->pool is null ? >> >> I'm using Nginx 1.7 version. >> Thanks for your help. >> >> >> 2015-02-22 1:02 GMT+04:00 tommy watson > >: >> >>> Tigran, >>> are you still using fork() ? As you describe here: >>> http://mailman.nginx.org/pipermail/nginx-devel/2015-February/006554.html >>> >>> If so, as already explained, the other process is probably closing your >>> request. >>> >>> Cheers. 
>>> >>> >>> On Sat, Feb 21, 2015 at 2:55 PM, Tigran Bayburtsyan < >>> tigran.bayburtsyan at gmail.com> wrote: >>> >>>> Hi All. >>>> >>>> I've created a function for adding my custom ngx_timer , but after 1st >>>> time loop my r->pool is setting to NULL , also it sets NULL my >>>> r->connection->log, and after second loop it throws exception on *ngx_palloc >>>> *because my r->pool is NULL. >>>> I can't find out why, here is my function >>>> >>>> *void add_aling_event(ngx_event_t *e, ngx_http_request_t *r, void >>>> (*handler), int timer)* >>>> *{* >>>> * if(e)* >>>> * {* >>>> * ngx_add_timer(e, timer);* >>>> * return;* >>>> * }* >>>> * ngx_event_t *event=ngx_palloc(r->pool,sizeof(ngx_event_t));* >>>> * memset(event,0,sizeof(ngx_event_t));* >>>> * event->data=r;* >>>> * event->handler=handler;* >>>> * event->log=r->connection->log;* >>>> * ngx_add_timer(event, timer);* >>>> *}* >>>> >>>> And I'm calling this function like this. >>>> >>>> static ngx_int_t ngx_http_aling_handler(ngx_http_request_t *r) >>>> { >>>> ........ >>>> ........ >>>> add_aling_event(NULL, r, hack_func, 50); >>>> return NGX_AGAIN; >>>> } >>>> >>>> void hack_func(ngx_event_t *e) >>>> { >>>> ngx_http_request_t *r = e->data; >>>> ........ >>>> ........ >>>> >>>> switch(shmLen[1]) >>>> { >>>> case 1: // adding chain >>>> { >>>> .......................... >>>> * ..........................* >>>> };break; >>>> case 2: // adding last chain >>>> { >>>> .......................... >>>> * ..........................* >>>> };break; >>>> default: >>>> { >>>> add_aling_event(e, r, hack_func, 50); >>>> };break; >>>> } >>>> } >>>> >>>> After 1 or 2 calls r->pool is sets to NULL ..... >>>> >>>> Please help me figure out this issue. >>>> Maybe I'm doing something wrong. >>>> I've did this code using example here >>>> https://github.com/tommywatson/nginx-hello-world-module/blob/master/ngx_http_hello_world_module.c#L104 >>>> >>>> >>>> Thanks. >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>> >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From savetherbtz at gmail.com Sun Feb 22 03:05:31 2015 From: savetherbtz at gmail.com (Alexey Ivanov) Date: Sat, 21 Feb 2015 19:05:31 -0800 Subject: [PATCH] Locality-based Least connection with optional randomization Message-ID: <210E8E71-BFB4-4BBD-9015-ED9ED51DC567@gmail.com> Good weekend, everyone! Let me start by describing my problem first and then moving to proposed solution. Problem: Currently we have number of PoPs (Points-of-Presence) around the world with Linux/nginx doing TCP/TLS/HTTP termination. There we re-encrypt traffic and proxy_pass it to the upstream block with HUGE set of servers. Whole idea of those PoP nginxes is to have pool of keepalive connections with enormous tcp windows to upstreams. 
But in reality we can not use any of nginx?es connection balancing methods because they almost never reuse connections (yet again, our upstream list is huge). Also each worker has it?s own keepalive pool which makes situation even worse. Of cause we can generate per-server config files and give each server in each PoP different(and small) set of upstream servers, but that solution sounds awfully ?clunky?. Solution: IPVS for example, among it's numerous job scheduling modes has Locality-Based Least-Connection Scheduling[1], that looks quite close to what we want. The only problem is that if all the worker processes on all our boxes around the world will use same list of upstreams they will quickly overload first upstream, then second, etc, therefore I?ve added randomized mode in which each worker starts by filling upstreams w.r.t. some random starting point. That should give good locality for tcp connection reuse and as law of large numbers implies - good enough load distribution across upstreams globally. Implementation: PoC: coloured: https://gist.github.com/SaveTheRbtz/d6a505555cd02cb6aee6 raw: https://gist.githubusercontent.com/SaveTheRbtz/d6a505555cd02cb6aee6/raw/5aba3b0709777d2a6e99217bd3e06e2178846dc4/least_conn_locality_randomized.diff It basically tries to find first(starting from per-worker-random for randomized variant) not fully loaded peer and if it fails then it falls back to normal least_conn. Followup questions: Does anyone in the community have similar use cases? CloudFlare maybe? Is Nginx Inc interested in incorporating something patch like that, or is that too specific to our workflow? Should I prettify that PoC or should I just throw the ball your way? Alternative solution: Original upstream keepalive module[2] had ?single? keyword, that also suites our needs, though it was removed because, let me quote Maxim Dounin: The original idea was to optimize edge cases in case of interchangeable backends, i.e. don't establish a new connection if we have any one cached. This causes more harm than good though, as it screws up underlying balancer's idea about backends used and may result in various unexpected problems. [1] http://kb.linuxvirtualserver.org/wiki/Locality-Based_Least-Connection_Scheduling [2] http://mdounin.ru/hg/ngx_http_upstream_keepalive/ -------------- next part -------------- A non-text attachment was scrubbed... Name: signature.asc Type: application/pgp-signature Size: 842 bytes Desc: Message signed with OpenPGP using GPGMail URL: From tigran.bayburtsyan at gmail.com Sun Feb 22 08:21:52 2015 From: tigran.bayburtsyan at gmail.com (Tigran Bayburtsyan) Date: Sun, 22 Feb 2015 12:21:52 +0400 Subject: Nginx Timer is killing my request pool In-Reply-To: References: Message-ID: Hi. I can't find out why Nginx calling ngx_http_free_request function after ngx_add_timer , if I'm not calling ngx_http_finalize_request or something relevant ? Should I do something to keep request in memory ? Thanks. 2015-02-22 1:28 GMT+04:00 Tigran Bayburtsyan : > Ok I sow that code during debugging Nginx request , but why my request is > freeing if I'm not calling ngx_http_finalize_request ? > What should I do to keep my request alive ? > > Thanks. 
> > 2015-02-22 1:23 GMT+04:00 tommy watson > : > >> My money is on your request has been cleaned up >> in ngx_http_free_request(): >> >> >> https://github.com/nginx/nginx/blob/master/src/http/ngx_http_request.c#L3493 >> >> On Sat, Feb 21, 2015 at 3:19 PM, Tigran Bayburtsyan < >> tigran.bayburtsyan at gmail.com> wrote: >> >>> No this time I'm trying to do it with separate process by just sharing >>> memory between Nginx worker and my 3rd party excusable... >>> I'm making ngx_timer to send data when it will be received from shared >>> memory. >>> >>> It works fine, but I can't figure out why my r->pool is null ? >>> >>> I'm using Nginx 1.7 version. >>> Thanks for your help. >>> >>> >>> 2015-02-22 1:02 GMT+04:00 tommy watson < >>> tommywatson+nginx-devel at gmail.com>: >>> >>>> Tigran, >>>> are you still using fork() ? As you describe here: >>>> http://mailman.nginx.org/pipermail/nginx-devel/2015-February/006554.html >>>> >>>> If so, as already explained, the other process is probably closing >>>> your request. >>>> >>>> Cheers. >>>> >>>> >>>> On Sat, Feb 21, 2015 at 2:55 PM, Tigran Bayburtsyan < >>>> tigran.bayburtsyan at gmail.com> wrote: >>>> >>>>> Hi All. >>>>> >>>>> I've created a function for adding my custom ngx_timer , but after 1st >>>>> time loop my r->pool is setting to NULL , also it sets NULL my >>>>> r->connection->log, and after second loop it throws exception on *ngx_palloc >>>>> *because my r->pool is NULL. >>>>> I can't find out why, here is my function >>>>> >>>>> *void add_aling_event(ngx_event_t *e, ngx_http_request_t *r, void >>>>> (*handler), int timer)* >>>>> *{* >>>>> * if(e)* >>>>> * {* >>>>> * ngx_add_timer(e, timer);* >>>>> * return;* >>>>> * }* >>>>> * ngx_event_t *event=ngx_palloc(r->pool,sizeof(ngx_event_t));* >>>>> * memset(event,0,sizeof(ngx_event_t));* >>>>> * event->data=r;* >>>>> * event->handler=handler;* >>>>> * event->log=r->connection->log;* >>>>> * ngx_add_timer(event, timer);* >>>>> *}* >>>>> >>>>> And I'm calling this function like this. >>>>> >>>>> static ngx_int_t ngx_http_aling_handler(ngx_http_request_t *r) >>>>> { >>>>> ........ >>>>> ........ >>>>> add_aling_event(NULL, r, hack_func, 50); >>>>> return NGX_AGAIN; >>>>> } >>>>> >>>>> void hack_func(ngx_event_t *e) >>>>> { >>>>> ngx_http_request_t *r = e->data; >>>>> ........ >>>>> ........ >>>>> >>>>> switch(shmLen[1]) >>>>> { >>>>> case 1: // adding chain >>>>> { >>>>> .......................... >>>>> * ..........................* >>>>> };break; >>>>> case 2: // adding last chain >>>>> { >>>>> .......................... >>>>> * ..........................* >>>>> };break; >>>>> default: >>>>> { >>>>> add_aling_event(e, r, hack_func, 50); >>>>> };break; >>>>> } >>>>> } >>>>> >>>>> After 1 or 2 calls r->pool is sets to NULL ..... >>>>> >>>>> Please help me figure out this issue. >>>>> Maybe I'm doing something wrong. >>>>> I've did this code using example here >>>>> https://github.com/tommywatson/nginx-hello-world-module/blob/master/ngx_http_hello_world_module.c#L104 >>>>> >>>>> >>>>> Thanks. 
>>>>> >>>>> _______________________________________________ >>>>> nginx-devel mailing list >>>>> nginx-devel at nginx.org >>>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>>> >>>> >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>> >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > -------------- next part -------------- An HTML attachment was scrubbed... URL: From toshikuni-fukaya at cybozu.co.jp Tue Feb 24 09:36:47 2015 From: toshikuni-fukaya at cybozu.co.jp (Toshikuni Fukaya) Date: Tue, 24 Feb 2015 18:36:47 +0900 Subject: fix error message for auth basic module. Message-ID: <54EC462F.3040909@cybozu.co.jp> Hi, I found a little bug on error logging for ngx_http_auth_basic_module. My config is following: location / { set $file passwd; auth_basic "closed"; auth_basic_user_file /etc/nginx/$file; } When access to the location with wrong user or password, nginx logged a user file name and it contains null character. The reason of this is using format '%V' to print user_file. I think it is a bug because '%s' is used for the variable in other positions. The patch attached below. Thanks, Toshikuni Fukaya # HG changeset patch # User Toshikuni Fukaya # Date 1424766762 -32400 # Tue Feb 24 17:32:42 2015 +0900 # Node ID 902c4eda6d80b960991ae05ea2c3d2db8dfdccf0 # Parent 3f568dd68af147b5ba259a27fdc6645f99e87aa7 fix error logging to print file name without null char. diff -r 3f568dd68af1 -r 902c4eda6d80 src/http/modules/ngx_http_auth_basic_module.c --- a/src/http/modules/ngx_http_auth_basic_module.c Tue Feb 17 16:27:52 2015 +0300 +++ b/src/http/modules/ngx_http_auth_basic_module.c Tue Feb 24 17:32:42 2015 +0900 @@ -280,8 +280,8 @@ } ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, - "user \"%V\" was not found in \"%V\"", - &r->headers_in.user, &user_file); + "user \"%V\" was not found in \"%s\"", + &r->headers_in.user, user_file.data); return ngx_http_auth_basic_set_realm(r, &realm); } From mdounin at mdounin.ru Tue Feb 24 15:38:35 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Feb 2015 15:38:35 +0000 Subject: [nginx] Core: fixed potential buffer overrun when initializing h... Message-ID: details: http://hg.nginx.org/nginx/rev/f961c719fb09 branches: changeset: 5985:f961c719fb09 user: Maxim Dounin date: Tue Feb 24 18:37:14 2015 +0300 description: Core: fixed potential buffer overrun when initializing hash. Initial size as calculated from the number of elements may be bigger than max_size. If this happens, make sure to set size to max_size. Reported by Chris West. 
diffstat: src/core/ngx_hash.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff --git a/src/core/ngx_hash.c b/src/core/ngx_hash.c --- a/src/core/ngx_hash.c +++ b/src/core/ngx_hash.c @@ -312,7 +312,7 @@ ngx_hash_init(ngx_hash_init_t *hinit, ng continue; } - size--; + size = hinit->max_size; ngx_log_error(NGX_LOG_WARN, hinit->pool->log, 0, "could not build optimal %s, you should increase " From mdounin at mdounin.ru Tue Feb 24 16:34:33 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Feb 2015 19:34:33 +0300 Subject: fix error message for auth basic module. In-Reply-To: <54EC462F.3040909@cybozu.co.jp> References: <54EC462F.3040909@cybozu.co.jp> Message-ID: <20150224163433.GA19012@mdounin.ru> Hello! On Tue, Feb 24, 2015 at 06:36:47PM +0900, Toshikuni Fukaya wrote: > Hi, > > I found a little bug on error logging for ngx_http_auth_basic_module. > My config is following: > > location / { > set $file passwd; > auth_basic "closed"; > auth_basic_user_file /etc/nginx/$file; > } > > When access to the location with wrong user or password, > nginx logged a user file name and it contains null character. > > The reason of this is using format '%V' to print user_file. > I think it is a bug because '%s' is used for the variable in other > positions. Normally, variables which are nginx strings and at the same time point to files do have a NULL character at the end (because it's required to work with system calls), but it doesn't included in the len field. That is, one can use either %s with user_file.data or %V with &user_file. The %s variant was used in syscall-related messages (to make sure to print the name used by syscalls), and %V in normal code. In this particular case the problem seems to be introduced by the revision a6954ce88b80 (http://hg.nginx.org/nginx/rev/a6954ce88b80) during conversion to complex values. Previously, the invariant outlined above was held, but after a6954ce88b80 if auth_basic_user_file contains variables, then user_files.len includes a NULL character. While using %s in all cases as in your patch will fix the problem, I would rather prefer to see the invariant restored. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Tue Feb 24 18:53:21 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Feb 2015 21:53:21 +0300 Subject: SSLv3 protocol with LibreSSL In-Reply-To: References: Message-ID: <20150224185320.GF19012@mdounin.ru> Hello! On Tue, Feb 17, 2015 at 11:25:42PM +0900, Kuramoto Eiji wrote: > # HG changeset patch > # User Kuramoto Eiji > # Date 1424182447 -32400 > # Node ID 2f0279e2d15aa7fd4c8300a99fa323513deaf1ab > # Parent f3f25ad09deee27485050a75732e5f46ab1b18b3 > SSLv3 protocol is not available with LibreSSL, > even if SSLv3 option is supplied in config. > > LibreSSL-2.1.2/2.1.3 disables SSLv3 by default. > > diff -r f3f25ad09dee -r 2f0279e2d15a src/event/ngx_event_openssl.c > --- a/src/event/ngx_event_openssl.c Wed Feb 11 20:18:55 2015 +0300 > +++ b/src/event/ngx_event_openssl.c Tue Feb 17 23:14:07 2015 +0900 > @@ -252,9 +252,17 @@ > if (!(protocols & NGX_SSL_SSLv2)) { > SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_SSLv2); > } > +ifdef LIBRESSL_VERSION_NUMBER > + if (!(protocols & NGX_SSL_SSLv3)) { > + SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_SSLv3); > + } else { > + SSL_CTX_clear_options(ssl->ctx, SSL_OP_NO_SSLv3); > + } > +#else I don't think we want LibreSSL-specific code like this. Rather, I see two possible options: 1) Respect LibreSSL decision to disable SSLv3 and don't do anything. That is, keep it as is. 
This basically means that there will be no SSLv3 support if you are using nginx with LibreSSL. Much like there is no SSLv2 support either, because it was removed from LibreSSL. 2) Clear all protocol options we know about. This will ensure that future changes like the one in LibreSSL will not affect nginx: --- a/src/event/ngx_event_openssl.c +++ b/src/event/ngx_event_openssl.c @@ -249,6 +249,11 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_ SSL_CTX_set_options(ssl->ctx, SSL_OP_SINGLE_DH_USE); +#ifdef SSL_CTRL_CLEAR_OPTIONS + SSL_clear_options(ssl->ctx, + SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3|SSL_OP_NO_TLSv1); +#endif + if (!(protocols & NGX_SSL_SSLv2)) { SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_SSLv2); } @@ -259,11 +264,13 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_ SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_TLSv1); } #ifdef SSL_OP_NO_TLSv1_1 + SSL_clear_options(ssl->ctx, SSL_OP_NO_TLSv1_1); if (!(protocols & NGX_SSL_TLSv1_1)) { SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_TLSv1_1); } #endif #ifdef SSL_OP_NO_TLSv1_2 + SSL_clear_options(ssl->ctx, SSL_OP_NO_TLSv1_2); if (!(protocols & NGX_SSL_TLSv1_2)) { SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_TLSv1_2); } Not sure which of the above I would prefer, as both variants have their pros and cons. -- Maxim Dounin http://nginx.org/ From ru at nginx.com Tue Feb 24 20:53:13 2015 From: ru at nginx.com (Ruslan Ermilov) Date: Tue, 24 Feb 2015 20:53:13 +0000 Subject: [nginx] SSL: account sent bytes in ngx_ssl_write(). Message-ID: details: http://hg.nginx.org/nginx/rev/c2f309fb7ad2 branches: changeset: 5986:c2f309fb7ad2 user: Ruslan Ermilov date: Tue Feb 24 23:52:47 2015 +0300 description: SSL: account sent bytes in ngx_ssl_write(). diffstat: src/event/ngx_event_openssl.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (28 lines): diff -r f961c719fb09 -r c2f309fb7ad2 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Feb 24 18:37:14 2015 +0300 +++ b/src/event/ngx_event_openssl.c Tue Feb 24 23:52:47 2015 +0300 @@ -1516,7 +1516,6 @@ ngx_ssl_send_chain(ngx_connection_t *c, } in->buf->pos += n; - c->sent += n; if (in->buf->pos == in->buf->last) { in = in->next; @@ -1617,7 +1616,6 @@ ngx_ssl_send_chain(ngx_connection_t *c, } buf->pos += n; - c->sent += n; if (n < size) { break; @@ -1675,6 +1673,8 @@ ngx_ssl_write(ngx_connection_t *c, u_cha ngx_post_event(c->read, &ngx_posted_events); } + c->sent += n; + return n; } From toshikuni-fukaya at cybozu.co.jp Wed Feb 25 02:19:22 2015 From: toshikuni-fukaya at cybozu.co.jp (Toshikuni Fukaya) Date: Wed, 25 Feb 2015 11:19:22 +0900 Subject: fix error message for auth basic module. In-Reply-To: <20150224163433.GA19012@mdounin.ru> References: <54EC462F.3040909@cybozu.co.jp> <20150224163433.GA19012@mdounin.ru> Message-ID: <54ED312A.9020201@cybozu.co.jp> Hi, On 2015/02/25 1:34, Maxim Dounin wrote: > Hello! > > On Tue, Feb 24, 2015 at 06:36:47PM +0900, Toshikuni Fukaya wrote: > >> Hi, >> >> I found a little bug on error logging for ngx_http_auth_basic_module. >> My config is following: >> >> location / { >> set $file passwd; >> auth_basic "closed"; >> auth_basic_user_file /etc/nginx/$file; >> } >> >> When access to the location with wrong user or password, >> nginx logged a user file name and it contains null character. >> >> The reason of this is using format '%V' to print user_file. >> I think it is a bug because '%s' is used for the variable in other >> positions. 
> > Normally, variables which are nginx strings and at the same time > point to files do have a NULL character at the end (because it's > required to work with system calls), but it doesn't included in > the len field. That is, one can use either %s with user_file.data > or %V with &user_file. The %s variant was used in syscall-related > messages (to make sure to print the name used by syscalls), and %V > in normal code. > > In this particular case the problem seems to be introduced by > the revision a6954ce88b80 (http://hg.nginx.org/nginx/rev/a6954ce88b80) > during conversion to complex values. Previously, the invariant > outlined above was held, but after a6954ce88b80 if auth_basic_user_file > contains variables, then user_files.len includes a NULL character. > > While using %s in all cases as in your patch will fix the problem, > I would rather prefer to see the invariant restored. > I understand the real reason of the problem. To solve this, I should add a null char to ngx_str_t.data but should not add a length of such null char to ngx_str_t.len. ok? If true, will I need to fix ngx_http_script_done and ngx_http_script_add_copy_code not to add a length of null char? Thanks, Toshikuni Fukaya From mdounin at mdounin.ru Wed Feb 25 14:58:28 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 25 Feb 2015 14:58:28 +0000 Subject: [nginx] Mail: fixed buffer allocation for CRLF after Auth-SMTP-*... Message-ID: details: http://hg.nginx.org/nginx/rev/62c098eb4509 branches: changeset: 5987:62c098eb4509 user: Maxim Dounin date: Wed Feb 25 17:47:43 2015 +0300 description: Mail: fixed buffer allocation for CRLF after Auth-SMTP-* headers. There were no buffer overruns in real life as there is extra space allocated for the Auth-Login-Attempt counter. diffstat: src/mail/ngx_mail_auth_http_module.c | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diffs (16 lines): diff --git a/src/mail/ngx_mail_auth_http_module.c b/src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c +++ b/src/mail/ngx_mail_auth_http_module.c @@ -1170,9 +1170,9 @@ ngx_mail_auth_http_create_request(ngx_ma + sizeof("Client-IP: ") - 1 + s->connection->addr_text.len + sizeof(CRLF) - 1 + sizeof("Client-Host: ") - 1 + s->host.len + sizeof(CRLF) - 1 - + sizeof("Auth-SMTP-Helo: ") - 1 + s->smtp_helo.len - + sizeof("Auth-SMTP-From: ") - 1 + s->smtp_from.len - + sizeof("Auth-SMTP-To: ") - 1 + s->smtp_to.len + + sizeof("Auth-SMTP-Helo: ") - 1 + s->smtp_helo.len + sizeof(CRLF) - 1 + + sizeof("Auth-SMTP-From: ") - 1 + s->smtp_from.len + sizeof(CRLF) - 1 + + sizeof("Auth-SMTP-To: ") - 1 + s->smtp_to.len + sizeof(CRLF) - 1 + ahcf->header.len + sizeof(CRLF) - 1; From mdounin at mdounin.ru Wed Feb 25 14:58:31 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 25 Feb 2015 14:58:31 +0000 Subject: [nginx] Mail: added Auth-SSL header to indicate SSL. Message-ID: details: http://hg.nginx.org/nginx/rev/3b3f789655dc branches: changeset: 5988:3b3f789655dc user: Maxim Dounin date: Wed Feb 25 17:47:49 2015 +0300 description: Mail: added Auth-SSL header to indicate SSL. Based on a patch by Filipe da Silva. 
diffstat: src/mail/ngx_mail_auth_http_module.c | 12 ++++++++++++ 1 files changed, 12 insertions(+), 0 deletions(-) diffs (29 lines): diff --git a/src/mail/ngx_mail_auth_http_module.c b/src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c +++ b/src/mail/ngx_mail_auth_http_module.c @@ -1173,6 +1173,9 @@ ngx_mail_auth_http_create_request(ngx_ma + sizeof("Auth-SMTP-Helo: ") - 1 + s->smtp_helo.len + sizeof(CRLF) - 1 + sizeof("Auth-SMTP-From: ") - 1 + s->smtp_from.len + sizeof(CRLF) - 1 + sizeof("Auth-SMTP-To: ") - 1 + s->smtp_to.len + sizeof(CRLF) - 1 +#if (NGX_MAIL_SSL) + + sizeof("Auth-SSL: on" CRLF) - 1 +#endif + ahcf->header.len + sizeof(CRLF) - 1; @@ -1255,6 +1258,15 @@ ngx_mail_auth_http_create_request(ngx_ma } +#if (NGX_MAIL_SSL) + + if (s->connection->ssl) { + b->last = ngx_cpymem(b->last, "Auth-SSL: on" CRLF, + sizeof("Auth-SSL: on" CRLF) - 1); + } + +#endif + if (ahcf->header.len) { b->last = ngx_copy(b->last, ahcf->header.data, ahcf->header.len); } From mdounin at mdounin.ru Wed Feb 25 14:58:34 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 25 Feb 2015 14:58:34 +0000 Subject: [nginx] Mail: client SSL certificates support. Message-ID: details: http://hg.nginx.org/nginx/rev/ec01b1d1fff1 branches: changeset: 5989:ec01b1d1fff1 user: Maxim Dounin date: Wed Feb 25 17:48:05 2015 +0300 description: Mail: client SSL certificates support. The "ssl_verify_client", "ssl_verify_depth", "ssl_client_certificate", "ssl_trusted_certificate", and "ssl_crl" directives introduced to control SSL client certificate verification in mail proxy module. If there is a certificate, detail of the certificate are passed to the auth_http script configured via Auth-SSL-Verify, Auth-SSL-Subject, Auth-SSL-Issuer, Auth-SSL-Serial, Auth-SSL-Fingerprint headers. If the auth_http_pass_client_cert directive is set, client certificate in PEM format will be passed in the Auth-SSL-Cert header (urlencoded). If there is no required certificate provided during an SSL handshake or certificate verification fails then a protocol-specific error is returned after the SSL handshake and the connection is closed. Based on previous work by Sven Peter, Franck Levionnois and Filipe Da Silva. 
diffstat: src/mail/ngx_mail.h | 2 + src/mail/ngx_mail_auth_http_module.c | 120 ++++++++++++++++++++++++++++++++++- src/mail/ngx_mail_handler.c | 71 ++++++++++++++++++++ src/mail/ngx_mail_imap_module.c | 4 +- src/mail/ngx_mail_pop3_module.c | 4 +- src/mail/ngx_mail_smtp_module.c | 4 +- src/mail/ngx_mail_ssl_module.c | 87 +++++++++++++++++++++++++ src/mail/ngx_mail_ssl_module.h | 6 + 8 files changed, 294 insertions(+), 4 deletions(-) diffs (truncated from 492 to 300 lines): diff --git a/src/mail/ngx_mail.h b/src/mail/ngx_mail.h --- a/src/mail/ngx_mail.h +++ b/src/mail/ngx_mail.h @@ -336,6 +336,8 @@ struct ngx_mail_protocol_s { ngx_mail_auth_state_pt auth_state; ngx_str_t internal_server_error; + ngx_str_t cert_error; + ngx_str_t no_cert; }; diff --git a/src/mail/ngx_mail_auth_http_module.c b/src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c +++ b/src/mail/ngx_mail_auth_http_module.c @@ -16,6 +16,7 @@ typedef struct { ngx_addr_t *peer; ngx_msec_t timeout; + ngx_flag_t pass_client_cert; ngx_str_t host_header; ngx_str_t uri; @@ -106,6 +107,13 @@ static ngx_command_t ngx_mail_auth_http 0, NULL }, + { ngx_string("auth_http_pass_client_cert"), + NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_MAIL_SRV_CONF_OFFSET, + offsetof(ngx_mail_auth_http_conf_t, pass_client_cert), + NULL }, + ngx_null_command }; @@ -1143,6 +1151,11 @@ ngx_mail_auth_http_create_request(ngx_ma size_t len; ngx_buf_t *b; ngx_str_t login, passwd; +#if (NGX_MAIL_SSL) + ngx_str_t verify, subject, issuer, serial, fingerprint, + raw_cert, cert; + ngx_connection_t *c; +#endif ngx_mail_core_srv_conf_t *cscf; if (ngx_mail_auth_http_escape(pool, &s->login, &login) != NGX_OK) { @@ -1153,6 +1166,61 @@ ngx_mail_auth_http_create_request(ngx_ma return NULL; } +#if (NGX_MAIL_SSL) + + c = s->connection; + + if (c->ssl) { + + /* certificate details */ + + if (ngx_ssl_get_client_verify(c, pool, &verify) != NGX_OK) { + return NULL; + } + + if (ngx_ssl_get_subject_dn(c, pool, &subject) != NGX_OK) { + return NULL; + } + + if (ngx_ssl_get_issuer_dn(c, pool, &issuer) != NGX_OK) { + return NULL; + } + + if (ngx_ssl_get_serial_number(c, pool, &serial) != NGX_OK) { + return NULL; + } + + if (ngx_ssl_get_fingerprint(c, pool, &fingerprint) != NGX_OK) { + return NULL; + } + + if (ahcf->pass_client_cert) { + + /* certificate itself, if configured */ + + if (ngx_ssl_get_raw_certificate(c, pool, &raw_cert) != NGX_OK) { + return NULL; + } + + if (ngx_mail_auth_http_escape(pool, &raw_cert, &cert) != NGX_OK) { + return NULL; + } + + } else { + ngx_str_null(&cert); + } + + } else { + ngx_str_null(&verify); + ngx_str_null(&subject); + ngx_str_null(&issuer); + ngx_str_null(&serial); + ngx_str_null(&fingerprint); + ngx_str_null(&cert); + } + +#endif + cscf = ngx_mail_get_module_srv_conf(s, ngx_mail_core_module); len = sizeof("GET ") - 1 + ahcf->uri.len + sizeof(" HTTP/1.0" CRLF) - 1 @@ -1175,6 +1243,13 @@ ngx_mail_auth_http_create_request(ngx_ma + sizeof("Auth-SMTP-To: ") - 1 + s->smtp_to.len + sizeof(CRLF) - 1 #if (NGX_MAIL_SSL) + sizeof("Auth-SSL: on" CRLF) - 1 + + sizeof("Auth-SSL-Verify: ") - 1 + verify.len + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Subject: ") - 1 + subject.len + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Issuer: ") - 1 + issuer.len + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Serial: ") - 1 + serial.len + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Fingerprint: ") - 1 + fingerprint.len + + sizeof(CRLF) - 1 + + sizeof("Auth-SSL-Cert: ") - 1 + cert.len + sizeof(CRLF) - 1 #endif + ahcf->header.len + 
sizeof(CRLF) - 1; @@ -1260,9 +1335,49 @@ ngx_mail_auth_http_create_request(ngx_ma #if (NGX_MAIL_SSL) - if (s->connection->ssl) { + if (c->ssl) { b->last = ngx_cpymem(b->last, "Auth-SSL: on" CRLF, sizeof("Auth-SSL: on" CRLF) - 1); + + b->last = ngx_cpymem(b->last, "Auth-SSL-Verify: ", + sizeof("Auth-SSL-Verify: ") - 1); + b->last = ngx_copy(b->last, verify.data, verify.len); + *b->last++ = CR; *b->last++ = LF; + + if (subject.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Subject: ", + sizeof("Auth-SSL-Subject: ") - 1); + b->last = ngx_copy(b->last, subject.data, subject.len); + *b->last++ = CR; *b->last++ = LF; + } + + if (issuer.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Issuer: ", + sizeof("Auth-SSL-Issuer: ") - 1); + b->last = ngx_copy(b->last, issuer.data, issuer.len); + *b->last++ = CR; *b->last++ = LF; + } + + if (serial.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Serial: ", + sizeof("Auth-SSL-Serial: ") - 1); + b->last = ngx_copy(b->last, serial.data, serial.len); + *b->last++ = CR; *b->last++ = LF; + } + + if (fingerprint.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Fingerprint: ", + sizeof("Auth-SSL-Fingerprint: ") - 1); + b->last = ngx_copy(b->last, fingerprint.data, fingerprint.len); + *b->last++ = CR; *b->last++ = LF; + } + + if (cert.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Cert: ", + sizeof("Auth-SSL-Cert: ") - 1); + b->last = ngx_copy(b->last, cert.data, cert.len); + *b->last++ = CR; *b->last++ = LF; + } } #endif @@ -1328,6 +1443,7 @@ ngx_mail_auth_http_create_conf(ngx_conf_ } ahcf->timeout = NGX_CONF_UNSET_MSEC; + ahcf->pass_client_cert = NGX_CONF_UNSET; ahcf->file = cf->conf_file->file.name.data; ahcf->line = cf->conf_file->line; @@ -1363,6 +1479,8 @@ ngx_mail_auth_http_merge_conf(ngx_conf_t ngx_conf_merge_msec_value(conf->timeout, prev->timeout, 60000); + ngx_conf_merge_value(conf->pass_client_cert, prev->pass_client_cert, 0); + if (conf->headers == NULL) { conf->headers = prev->headers; conf->header = prev->header; diff --git a/src/mail/ngx_mail_handler.c b/src/mail/ngx_mail_handler.c --- a/src/mail/ngx_mail_handler.c +++ b/src/mail/ngx_mail_handler.c @@ -16,6 +16,8 @@ static void ngx_mail_init_session(ngx_co #if (NGX_MAIL_SSL) static void ngx_mail_ssl_init_connection(ngx_ssl_t *ssl, ngx_connection_t *c); static void ngx_mail_ssl_handshake_handler(ngx_connection_t *c); +static ngx_int_t ngx_mail_verify_cert(ngx_mail_session_t *s, + ngx_connection_t *c); #endif @@ -247,6 +249,10 @@ ngx_mail_ssl_handshake_handler(ngx_conne s = c->data; + if (ngx_mail_verify_cert(s, c) != NGX_OK) { + return; + } + if (s->starttls) { cscf = ngx_mail_get_module_srv_conf(s, ngx_mail_core_module); @@ -267,6 +273,71 @@ ngx_mail_ssl_handshake_handler(ngx_conne ngx_mail_close_connection(c); } + +static ngx_int_t +ngx_mail_verify_cert(ngx_mail_session_t *s, ngx_connection_t *c) +{ + long rc; + X509 *cert; + ngx_mail_ssl_conf_t *sslcf; + ngx_mail_core_srv_conf_t *cscf; + + sslcf = ngx_mail_get_module_srv_conf(s, ngx_mail_ssl_module); + + if (!sslcf->verify) { + return NGX_OK; + } + + rc = SSL_get_verify_result(c->ssl->connection); + + if (rc != X509_V_OK + && (sslcf->verify != 3 || !ngx_ssl_verify_error_optional(rc))) + { + ngx_log_error(NGX_LOG_INFO, c->log, 0, + "client SSL certificate verify error: (%l:%s)", + rc, X509_verify_cert_error_string(rc)); + + ngx_ssl_remove_cached_session(sslcf->ssl.ctx, + (SSL_get0_session(c->ssl->connection))); + + cscf = ngx_mail_get_module_srv_conf(s, ngx_mail_core_module); + + s->out = cscf->protocol->cert_error; + s->quit = 1; + + 
c->write->handler = ngx_mail_send; + + ngx_mail_send(s->connection->write); + return NGX_ERROR; + } + + if (sslcf->verify == 1) { + cert = SSL_get_peer_certificate(c->ssl->connection); + + if (cert == NULL) { + ngx_log_error(NGX_LOG_INFO, c->log, 0, + "client sent no required SSL certificate"); + + ngx_ssl_remove_cached_session(sslcf->ssl.ctx, + (SSL_get0_session(c->ssl->connection))); + + cscf = ngx_mail_get_module_srv_conf(s, ngx_mail_core_module); + + s->out = cscf->protocol->no_cert; + s->quit = 1; + + c->write->handler = ngx_mail_send; + + ngx_mail_send(s->connection->write); + return NGX_ERROR; + } + + X509_free(cert); + } + + return NGX_OK; +} + #endif diff --git a/src/mail/ngx_mail_imap_module.c b/src/mail/ngx_mail_imap_module.c --- a/src/mail/ngx_mail_imap_module.c +++ b/src/mail/ngx_mail_imap_module.c @@ -52,7 +52,9 @@ static ngx_mail_protocol_t ngx_mail_ima ngx_mail_imap_parse_command, ngx_mail_imap_auth_state, - ngx_string("* BAD internal server error" CRLF) + ngx_string("* BAD internal server error" CRLF), + ngx_string("* BYE SSL certificate error" CRLF), + ngx_string("* BYE No required SSL certificate" CRLF) }; From mdounin at mdounin.ru Wed Feb 25 15:24:07 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 25 Feb 2015 18:24:07 +0300 Subject: [PATCH] Locality-based Least connection with optional randomization In-Reply-To: <210E8E71-BFB4-4BBD-9015-ED9ED51DC567@gmail.com> References: <210E8E71-BFB4-4BBD-9015-ED9ED51DC567@gmail.com> Message-ID: <20150225152406.GJ19012@mdounin.ru> Hello! On Sat, Feb 21, 2015 at 07:05:31PM -0800, Alexey Ivanov wrote: > Good weekend, everyone! > Let me start by describing my problem first and then moving to proposed solution. > > Problem: > Currently we have number of PoPs (Points-of-Presence) around the world with Linux/nginx doing TCP/TLS/HTTP termination. There we re-encrypt traffic and proxy_pass it to the upstream block with HUGE set of servers. Whole idea of those PoP nginxes is to have pool of keepalive connections with enormous tcp windows to upstreams. > But in reality we can not use any of nginx?es connection balancing methods because they almost never reuse connections (yet again, our upstream list is huge). Also each worker has it?s own keepalive pool which makes situation even worse. Of cause we can generate per-server config files and give each server in each PoP different(and small) set of upstream servers, but that solution sounds awfully ?clunky?. > > Solution: > IPVS for example, among it's numerous job scheduling modes has Locality-Based Least-Connection Scheduling[1], that looks quite close to what we want. The only problem is that if all the worker processes on all our boxes around the world will use same list of upstreams they will quickly overload first upstream, then second, etc, therefore I?ve added randomized mode in which each worker starts by filling upstreams w.r.t. some random starting point. That should give good locality for tcp connection reuse and as law of large numbers implies - good enough load distribution across upstreams globally. > > Implementation: > PoC: > coloured: https://gist.github.com/SaveTheRbtz/d6a505555cd02cb6aee6 > raw: https://gist.githubusercontent.com/SaveTheRbtz/d6a505555cd02cb6aee6/raw/5aba3b0709777d2a6e99217bd3e06e2178846dc4/least_conn_locality_randomized.diff > > It basically tries to find first(starting from per-worker-random for randomized variant) not fully loaded peer and if it fails then it falls back to normal least_conn. 
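For illustration, a minimal standalone sketch in plain C of the selection loop described above; it is not the actual PoC from the gist, and the peer structure, the "fully loaded" threshold and the per-worker offset are all assumptions:

#include <stddef.h>

typedef struct {
    unsigned  conns;      /* currently active connections */
    unsigned  max_conns;  /* assumed "fully loaded" threshold */
} peer_t;

/*
 * Scan the peers starting from this worker's random offset and return
 * the index of the first peer that is not fully loaded; return -1 so
 * the caller can fall back to the usual least_conn selection.
 */
static long
pick_local_peer(peer_t *peers, size_t n, size_t worker_offset)
{
    size_t  i, idx;

    for (i = 0; i < n; i++) {
        idx = (worker_offset + i) % n;

        if (peers[idx].conns < peers[idx].max_conns) {
            return (long) idx;
        }
    }

    return -1;  /* all peers loaded: fall back to least_conn */
}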
> > Followup questions: > Does anyone in the community have similar use cases? CloudFlare maybe? > Is Nginx Inc interested in incorporating something patch like that, or is that too specific to our workflow? Should I prettify that PoC or should I just throw the ball your way? I can't say I like the balancing approach proposed - it looks too hacky. But the problem itself may be interesting - not sure how common is it though. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Wed Feb 25 15:25:18 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 25 Feb 2015 18:25:18 +0300 Subject: fix error message for auth basic module. In-Reply-To: <54ED312A.9020201@cybozu.co.jp> References: <54EC462F.3040909@cybozu.co.jp> <20150224163433.GA19012@mdounin.ru> <54ED312A.9020201@cybozu.co.jp> Message-ID: <20150225152518.GK19012@mdounin.ru> Hello! On Wed, Feb 25, 2015 at 11:19:22AM +0900, Toshikuni Fukaya wrote: > Hi, > > On 2015/02/25 1:34, Maxim Dounin wrote: > >Hello! > > > >On Tue, Feb 24, 2015 at 06:36:47PM +0900, Toshikuni Fukaya wrote: > > > >>Hi, > >> > >>I found a little bug on error logging for ngx_http_auth_basic_module. > >>My config is following: > >> > >>location / { > >> set $file passwd; > >> auth_basic "closed"; > >> auth_basic_user_file /etc/nginx/$file; > >>} > >> > >>When access to the location with wrong user or password, > >>nginx logged a user file name and it contains null character. > >> > >>The reason of this is using format '%V' to print user_file. > >>I think it is a bug because '%s' is used for the variable in other > >>positions. > > > >Normally, variables which are nginx strings and at the same time > >point to files do have a NULL character at the end (because it's > >required to work with system calls), but it doesn't included in > >the len field. That is, one can use either %s with user_file.data > >or %V with &user_file. The %s variant was used in syscall-related > >messages (to make sure to print the name used by syscalls), and %V > >in normal code. > > > >In this particular case the problem seems to be introduced by > >the revision a6954ce88b80 (http://hg.nginx.org/nginx/rev/a6954ce88b80) > >during conversion to complex values. Previously, the invariant > >outlined above was held, but after a6954ce88b80 if auth_basic_user_file > >contains variables, then user_files.len includes a NULL character. > > > >While using %s in all cases as in your patch will fix the problem, > >I would rather prefer to see the invariant restored. > > > > I understand the real reason of the problem. > To solve this, I should add a null char to ngx_str_t.data but should not add > a length of such null char to ngx_str_t.len. ok? > If true, will I need to fix ngx_http_script_done and > ngx_http_script_add_copy_code not to add a length of null char? Yes, something like this. Not sure if it would be something easy to fix though. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Wed Feb 25 15:28:24 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 25 Feb 2015 18:28:24 +0300 Subject: [PATCH] Mail: send starttls flag value to auth script In-Reply-To: <20150205130027.GE99511@mdounin.ru> References: <51fd90f96449c23af007.1394099969@HPC> <20140306162718.GL34696@mdounin.ru> <877FD2F6-57CD-4C14-9F2B-4C9E909C3488@phpgangsta.de> <53D9AAB0.5060501@phpgangsta.de> <20140801185919.GU1849@mdounin.ru> <53DBF531.2010308@phpgangsta.de> <54D28A26.60903@phpgangsta.de> <20150205130027.GE99511@mdounin.ru> Message-ID: <20150225152823.GL19012@mdounin.ru> Hello! 
On Thu, Feb 05, 2015 at 04:00:28PM +0300, Maxim Dounin wrote: > Hello! > > On Wed, Feb 04, 2015 at 10:07:50PM +0100, Michael Kliewe wrote: > > > Hi Maxim, > > > > I would like to remind again this feature patch. It would help a lot to get > > this information about transport encryption into the auth script. It does > > not hurt the performance, and is a very tiny patch. > > > > You can rename the header name and values as you like. It would be very nice > > if you could please merge it into nginx. > > I'm planning to look into this patch and other mail SSL > improvements once I've done with unbuffered upload feature I'm > currently working on. Just an update: a patch to address this was committed, see http://hg.nginx.org/nginx/rev/3b3f789655dc. Thanks Filipe for the original patch, and thanks Michael for prodding this. -- Maxim Dounin http://nginx.org/ From info at phpgangsta.de Wed Feb 25 15:31:46 2015 From: info at phpgangsta.de (Michael Kliewe) Date: Wed, 25 Feb 2015 16:31:46 +0100 Subject: [PATCH] Mail: send starttls flag value to auth script In-Reply-To: <20150225152823.GL19012@mdounin.ru> References: <51fd90f96449c23af007.1394099969@HPC> <20140306162718.GL34696@mdounin.ru> <877FD2F6-57CD-4C14-9F2B-4C9E909C3488@phpgangsta.de> <53D9AAB0.5060501@phpgangsta.de> <20140801185919.GU1849@mdounin.ru> <53DBF531.2010308@phpgangsta.de> <54D28A26.60903@phpgangsta.de> <20150205130027.GE99511@mdounin.ru> <20150225152823.GL19012@mdounin.ru> Message-ID: <54EDEAE2.80904@phpgangsta.de> Hi Maxim, thank you very much, that helps a lot! Then we can use the unpatched nginx version again instead of self-compiling it every time ;-) Michael Am 25.02.2015 um 16:28 schrieb Maxim Dounin: > Hello! > > On Thu, Feb 05, 2015 at 04:00:28PM +0300, Maxim Dounin wrote: > >> Hello! >> >> On Wed, Feb 04, 2015 at 10:07:50PM +0100, Michael Kliewe wrote: >> >>> Hi Maxim, >>> >>> I would like to remind again this feature patch. It would help a lot to get >>> this information about transport encryption into the auth script. It does >>> not hurt the performance, and is a very tiny patch. >>> >>> You can rename the header name and values as you like. It would be very nice >>> if you could please merge it into nginx. >> I'm planning to look into this patch and other mail SSL >> improvements once I've done with unbuffered upload feature I'm >> currently working on. > Just an update: a patch to address this was committed, see > http://hg.nginx.org/nginx/rev/3b3f789655dc. > > Thanks Filipe for the original patch, and thanks Michael for > prodding this. > From ek at kuramoto.org Thu Feb 26 11:00:57 2015 From: ek at kuramoto.org (Kuramoto Eiji) Date: Thu, 26 Feb 2015 20:00:57 +0900 Subject: SSLv3 protocol with LibreSSL In-Reply-To: <20150224185320.GF19012@mdounin.ru> References: <20150224185320.GF19012@mdounin.ru> Message-ID: Hello, At Tue, 24 Feb 2015 21:53:21 +0300, Maxim Dounin wrote: > I don't think we want LibreSSL-specific code like this. > Rather, I see two possible options: > > 1) Respect LibreSSL decision to disable SSLv3 and don't do > anything. That is, keep it as is. This basically means that > there will be no SSLv3 support if you are using nginx with > LibreSSL. Much like there is no SSLv2 support either, because it > was removed from LibreSSL. > > 2) Clear all protocol options we know about. 
This will ensure > that future changes like the one in LibreSSL will not affect > nginx: > > --- a/src/event/ngx_event_openssl.c > +++ b/src/event/ngx_event_openssl.c > @@ -249,6 +249,11 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_ > > SSL_CTX_set_options(ssl->ctx, SSL_OP_SINGLE_DH_USE); > > +#ifdef SSL_CTRL_CLEAR_OPTIONS > + SSL_clear_options(ssl->ctx, > + SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3|SSL_OP_NO_TLSv1); > +#endif > + > if (!(protocols & NGX_SSL_SSLv2)) { > SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_SSLv2); > } > @@ -259,11 +264,13 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_ > SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_TLSv1); > } > #ifdef SSL_OP_NO_TLSv1_1 > + SSL_clear_options(ssl->ctx, SSL_OP_NO_TLSv1_1); > if (!(protocols & NGX_SSL_TLSv1_1)) { > SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_TLSv1_1); > } > #endif > #ifdef SSL_OP_NO_TLSv1_2 > + SSL_clear_options(ssl->ctx, SSL_OP_NO_TLSv1_2); > if (!(protocols & NGX_SSL_TLSv1_2)) { > SSL_CTX_set_options(ssl->ctx, SSL_OP_NO_TLSv1_2); > } > > Not sure which of the above I would prefer, as both variants have > their pros and cons. Of course, I don't want to use SSLv2 and SSLv3 protocol, but ... Some old mobile phones in Japan, can only use SSLv2 or SSLv3 protocol, SSLv3 option is still required. I would prefer #2 option, clear all protocol option like your patch. Thanks. - Kuramoto Eiji From Kabirova.Nailya at cg.ru Thu Feb 26 12:41:28 2015 From: Kabirova.Nailya at cg.ru (kabirova) Date: Thu, 26 Feb 2015 15:41:28 +0300 Subject: How to make a subrequest from content handler? In-Reply-To: References: Message-ID: <54EF1478.206@cg.ru> How to make a subrequest from content handler? Hi, I have a problem when using subrequest in content handler. The content handler (my_content_handler) calls ngx_http_read_client_request_body with callback handler (my_callback). my_callback() makes a subrequest: ngx_int_t my_content_handler() { rc = ngx_http_read_client_request_body(r, my_callback); return rc; } void my_callback() { ps->handler = my_post_subrequest; ps->data = ctx; ngx_http_subrequest(r, ..., ps); } void my_post_subrequest() { /* required to return back to my_content_handler after completion of subrequest in order to be able to handle resulting response of subrequest */ } I have 2 questions: 1. how to return back to my_content_handler, if it invokes a subrequest? 2. how to proxy subrequest's response to client? Thanks in advance! Best regards, Kabirova Nailya. -------------- next part -------------- An HTML attachment was scrubbed... URL: From agentzh at gmail.com Thu Feb 26 20:07:52 2015 From: agentzh at gmail.com (Yichun Zhang (agentzh)) Date: Thu, 26 Feb 2015 12:07:52 -0800 Subject: How to make a subrequest from content handler? In-Reply-To: <54EF1478.206@cg.ru> References: <54EF1478.206@cg.ru> Message-ID: Hello! On Thu, Feb 26, 2015 at 4:41 AM, kabirova wrote: > I have a problem when using subrequest in content handler. > The content handler (my_content_handler) calls > ngx_http_read_client_request_body with callback handler (my_callback). > my_callback() makes a subrequest: > Just check out how my ngx_echo module implements its echo_read_request_body [1] and echo_subrequest [2]. Similarly, our ngx_lua module implements the ngx.req.read_body() and ngx.location.capture in the content phase (via content_by_lua). Maybe you can directly use ngx_echo or ngx_lua for your purposes to save all the pain on the nginx C land. > I have 2 questions: > 1. how to return back to my_content_handler, if it invokes a subrequest? 
> This depends on whether you're doing sync or async subrequests (that is, whether your content handler needs to wait for the subrequest's completion). It's hard to explain well in just a few sentences and your best bet is the existing proven working code out there (see above). > 2. how to proxy subrequest's response to client? > Both ngx_echo and ngx_lua can do this and the latter has more control over the subrequest response. In addition, my ngx_srcache module also uses subrequest extensively though in an earlier phase (access phase): https://github.com/openresty/srcache-nginx-module Still worth a look if you still insist wrestling on the nginx C land :) Well, good luck! Best regards, -agentzh [1] http://wiki.nginx.org/HttpEchoModule#echo_read_request_body [2] http://wiki.nginx.org/HttpEchoModule#echo_subrequest From mail at isix.nl Fri Feb 27 08:16:27 2015 From: mail at isix.nl (Jeffrey K.) Date: Fri, 27 Feb 2015 09:16:27 +0100 Subject: cache revalidation bug Message-ID: <1A3D6C2F-F553-420A-A387-112DED7EDDB5@isix.nl> I?m experiencing an issue that cached 404 responses are revalidated when the requested file are available again on the backend server with an older time stamp. Hereby the details of my issue. Nginx version/build details # nginx -V nginx version: nginx/1.7.10 built by gcc 4.8.2 (Ubuntu 4.8.2-19ubuntu1) TLS SNI support enabled configure arguments: --prefix=/usr/share/nginx --sbin-path=/usr/sbin/nginx --conf-path=/etc/nginx/nginx.conf --error-log-path=/var/log/nginx/error.log --http-client-body-temp-path=/var/lib/nginx/body --http-log-path=/var/log/nginx/access.log --http-proxy-temp-path=/tmp --lock-path=/var/lock/nginx.lock --pid-path=/var/run/nginx.pid --without-http_fastcgi_module --without-http_uwsgi_module --without-http_scgi_module --with-http_flv_module --with-http_geoip_module --with-http_gzip_static_module --with-http_mp4_module --with-http_realip_module --with-http_secure_link_module --with-http_stub_status_module --with-http_ssl_module --with-ipv6 --with-sha1=/usr/include/openssl --with-md5=/usr/include/openssl --with-aio_module --with-file-aio --with-http_spdy_module --with-debug vhost is configured with server { listen 80; server_name test.domain.tld; set $origin backend.domain.tld; expires off; location / { client_max_body_size 0; client_body_buffer_size 8k; proxy_connect_timeout 60; proxy_send_timeout 60; proxy_read_timeout 60; proxy_buffer_size 16k; proxy_buffers 256 16k; proxy_buffering on; proxy_max_temp_file_size 1m; proxy_ignore_client_abort on; proxy_intercept_errors on; proxy_next_upstream error timeout invalid_header; proxy_cache one; proxy_cache_min_uses 1; proxy_cache_lock off; proxy_cache_lock_timeout 5s; proxy_cache_valid 200 302 301 1m; proxy_cache_valid 404 5s; proxy_cache_revalidate on; proxy_set_header Host $origin; proxy_pass_header Set-Cookie; proxy_set_header Range ""; proxy_set_header Request-Range ""; proxy_set_header If-Range ""; proxy_cache_key "$scheme://$host$uri"; proxy_pass http://$origin$uri; proxy_redirect off; } } Log format used ?$bytes_sent?$remote_addr?$msec?$status?$http_referer?$http_user_agent?$request_time?$request_method $request_uri $server_protocol?$server_port?$upstream_cache_status?$upstream_status?$upstream_response_time?$request_completion?$backend_server? Requesting non-existen file. 404 will be cached for 5 second after 5 seconds its expires, file is fetched from backend that gives 404 again ?469?[remote.ip.addr]?1424963103.586?404?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?MISS?404?0.027?OK?[backend.server]? 
?469?[remote.ip.addr]?1424963104.605?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963108.679?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963109.724?404?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?EXPIRED?404?0.027?OK?[backend.server]? ?469?[remote.ip.addr]?1424963110.742?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963114.815?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963115.860?404?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?EXPIRED?404?0.027?OK?[backend.server]? ?469?[remote.ip.addr]?1424963116.879?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963120.952?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? [Placed file on backend server (move), file has timestamp of year 2013] 404 expires, file is fetched from backend and cached ?49166?[remote.ip.addr]?1424963122.033?200?-?curl/7.35.0?0.063?GET /pica.jpg HTTP/1.1?80?EXPIRED?200?0.063?OK?[backend.server]? ?49162?[remote.ip.addr]?1424963123.037?200?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?49162?[remote.ip.addr]?1424963181.036?200?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? File expires, revalidation is send to backend and cached file is updated ?49170?[remote.ip.addr]?1424963182.081?200?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?REVALIDATED?304?0.027?OK?[backend.server]? ?49162?[remote.ip.addr]?1424963183.098?200?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?49162?[remote.ip.addr]?1424963242.103?200?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? [File is removed from backend] File expires, file is fetched from backend that gives 404 ?469?[remote.ip.addr]?1424963243.148?404?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?EXPIRED?404?0.027?OK?[backend.server]? ?469?[remote.ip.addr]?1424963244.167?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963248.240?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963249.285?404?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?EXPIRED?404?0.027?OK?[backend.server]? ?469?[remote.ip.addr]?1424963250.304?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963254.376?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? [Placed file back on backend server (move), file has timestamp of year 2013] 404 expires, revalidation is done, because times stamp of file is older then time the 404 was fetched/cached it revalidates?? - bug? it should not revalidate 404, just expire and fetch actual file! ?469?[remote.ip.addr]?1424963255.421?404?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?REVALIDATED?304?0.027?OK?[backend.server]? ?469?[remote.ip.addr]?1424963256.439?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963260.514?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963261.559?404?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?REVALIDATED?304?0.027?OK?[backend.server]? 
?469?[remote.ip.addr]?1424963262.577?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963266.650?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963267.697?404?-?curl/7.35.0?0.029?GET /pica.jpg HTTP/1.1?80?REVALIDATED?304?0.029?OK?[backend.server]? ?469?[remote.ip.addr]?1424963268.715?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963272.788?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? [File is removed from backend] Now the File expires, file is fetched from backend that gives 404 (probably because timestamp is newer?) ?469?[remote.ip.addr]?1424963291.224?404?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?EXPIRED?404?0.027?OK?[backend.server]? ?469?[remote.ip.addr]?1424963292.242?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963296.315?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963297.360?404?-?curl/7.35.0?0.027?GET /pica.jpg HTTP/1.1?80?EXPIRED?404?0.027?OK?[backend.server]? ?469?[remote.ip.addr]?1424963298.379?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? ?469?[remote.ip.addr]?1424963302.457?404?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? [Placed file back on backend server (copy), file has timestamp of current time 404 expires, revalidation is done??, because times stamp of file is newer then time the 404 was fetched/cached it actually fetches the file ?49166?[remote.ip.addr]?1424963303.537?200?-?curl/7.35.0?0.059?GET /pica.jpg HTTP/1.1?80?EXPIRED?200?0.059?OK?[backend.server]? ?49162?[remote.ip.addr]?1424963304.544?200?-?curl/7.35.0?0.000?GET /pica.jpg HTTP/1.1?80?HIT?-?-?OK?[backend.server]? Regards, Jeffrey K. From tigran.bayburtsyan at gmail.com Fri Feb 27 10:56:46 2015 From: tigran.bayburtsyan at gmail.com (Tigran Bayburtsyan) Date: Fri, 27 Feb 2015 14:56:46 +0400 Subject: Get ngx_http_request_t as a char array Message-ID: <1e1801d0527c$1562e750$4028b5f0$@gmail.com> Hi. I'm trying to make a smart logging module for Nginx and I need to get all HTTP request from client as a string (char *). I know that ngx_http_request_t contains all HTTP request data , but I don't need to make a loop through all headers_in parameters or request structure parameters. I want to get all request with body as a char * array, like Nginx is receiving from tcp socket. How can I do that ? Thanks. --- This email has been checked for viruses by Avast antivirus software. http://www.avast.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Feb 27 12:23:44 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 27 Feb 2015 15:23:44 +0300 Subject: Get ngx_http_request_t as a char array In-Reply-To: <1e1801d0527c$1562e750$4028b5f0$@gmail.com> References: <1e1801d0527c$1562e750$4028b5f0$@gmail.com> Message-ID: <20150227122344.GC19012@mdounin.ru> Hello! On Fri, Feb 27, 2015 at 02:56:46PM +0400, Tigran Bayburtsyan wrote: > Hi. > > I'm trying to make a smart logging module for Nginx and I need to get all > HTTP request from client as a string (char *). > > I know that ngx_http_request_t contains all HTTP request data , but I don't > need to make a loop through all headers_in parameters or request structure > parameters. 
> > I want to get all request with body as a char * array, like Nginx is > receiving from tcp socket. There are two problems here: - nginx is not receiving a request as a string from tcp socket, even if you talk about headers only; - consequently, it is not available as a string within nginx. -- Maxim Dounin http://nginx.org/ From tigran.bayburtsyan at gmail.com Fri Feb 27 12:40:14 2015 From: tigran.bayburtsyan at gmail.com (Tigran Bayburtsyan) Date: Fri, 27 Feb 2015 16:40:14 +0400 Subject: Get ngx_http_request_t as a char array In-Reply-To: <20150227122344.GC19012@mdounin.ru> References: <1e1801d0527c$1562e750$4028b5f0$@gmail.com> <20150227122344.GC19012@mdounin.ru> Message-ID: <1e2901d0528a$897e88c0$9c7b9a40$@gmail.com> So there is now way of getting or transforming ngx_http_request_t to char * or byte array ? In any case C/C++ socket receiving data as a byte array .... For example here we can log full request to file as a char * (or string) https://gist.github.com/morhekil/1ff0e902ed4de2adcb7a When I'm saying "string" I don't mean string type, I mean something with a pointer and size where I can read all request from start to end .... I think I need something very similar to file logging. Thanks. -----Original Message----- From: nginx-devel-bounces at nginx.org [mailto:nginx-devel-bounces at nginx.org] On Behalf Of Maxim Dounin Sent: Friday, February 27, 2015 4:24 PM To: nginx-devel at nginx.org Subject: Re: Get ngx_http_request_t as a char array Hello! On Fri, Feb 27, 2015 at 02:56:46PM +0400, Tigran Bayburtsyan wrote: > Hi. > > I'm trying to make a smart logging module for Nginx and I need to get > all HTTP request from client as a string (char *). > > I know that ngx_http_request_t contains all HTTP request data , but I > don't need to make a loop through all headers_in parameters or request > structure parameters. > > I want to get all request with body as a char * array, like Nginx is > receiving from tcp socket. There are two problems here: - nginx is not receiving a request as a string from tcp socket, even if you talk about headers only; - consequently, it is not available as a string within nginx. -- Maxim Dounin http://nginx.org/ _______________________________________________ nginx-devel mailing list nginx-devel at nginx.org http://mailman.nginx.org/mailman/listinfo/nginx-devel --- This email has been checked for viruses by Avast antivirus software. http://www.avast.com From pluknet at nginx.com Fri Feb 27 20:23:52 2015 From: pluknet at nginx.com (Sergey Kandaurov) Date: Fri, 27 Feb 2015 20:23:52 +0000 Subject: [nginx] Mail: don't emit Auth-SSL-Verify with disabled ssl_verif... Message-ID: details: http://hg.nginx.org/nginx/rev/6a7c6973d6fc branches: changeset: 5990:6a7c6973d6fc user: Sergey Kandaurov date: Fri Feb 27 16:28:31 2015 +0300 description: Mail: don't emit Auth-SSL-Verify with disabled ssl_verify_client. Previously, the Auth-SSL-Verify header with the "NONE" value was always passed to the auth_http script if verification of client certificates is disabled. 
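Put differently, with illustrative headers only: before this change, an SSL session with client certificate verification disabled still produced an auth_http request containing

Auth-SSL: on
Auth-SSL-Verify: NONE

while after it only

Auth-SSL: on

is sent; Auth-SSL-Verify, like the other certificate detail headers, now appears only when ssl_verify_client is enabled.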
diffstat: src/mail/ngx_mail_auth_http_module.c | 14 +++++++++----- 1 files changed, 9 insertions(+), 5 deletions(-) diffs (39 lines): diff -r ec01b1d1fff1 -r 6a7c6973d6fc src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c Wed Feb 25 17:48:05 2015 +0300 +++ b/src/mail/ngx_mail_auth_http_module.c Fri Feb 27 16:28:31 2015 +0300 @@ -1155,6 +1155,7 @@ ngx_mail_auth_http_create_request(ngx_ma ngx_str_t verify, subject, issuer, serial, fingerprint, raw_cert, cert; ngx_connection_t *c; + ngx_mail_ssl_conf_t *sslcf; #endif ngx_mail_core_srv_conf_t *cscf; @@ -1169,8 +1170,9 @@ ngx_mail_auth_http_create_request(ngx_ma #if (NGX_MAIL_SSL) c = s->connection; + sslcf = ngx_mail_get_module_srv_conf(s, ngx_mail_ssl_module); - if (c->ssl) { + if (c->ssl && sslcf->verify) { /* certificate details */ @@ -1339,10 +1341,12 @@ ngx_mail_auth_http_create_request(ngx_ma b->last = ngx_cpymem(b->last, "Auth-SSL: on" CRLF, sizeof("Auth-SSL: on" CRLF) - 1); - b->last = ngx_cpymem(b->last, "Auth-SSL-Verify: ", - sizeof("Auth-SSL-Verify: ") - 1); - b->last = ngx_copy(b->last, verify.data, verify.len); - *b->last++ = CR; *b->last++ = LF; + if (verify.len) { + b->last = ngx_cpymem(b->last, "Auth-SSL-Verify: ", + sizeof("Auth-SSL-Verify: ") - 1); + b->last = ngx_copy(b->last, verify.data, verify.len); + *b->last++ = CR; *b->last++ = LF; + } if (subject.len) { b->last = ngx_cpymem(b->last, "Auth-SSL-Subject: ",