From flygoast at 126.com Sun Jun 2 01:34:58 2013 From: flygoast at 126.com (flygoast) Date: Sun, 2 Jun 2013 09:34:58 +0800 (CST) Subject: [PATCH]upstream server directive support variable Message-ID: <233169bb.a3e3.13f0285eb67.Coremail.flygoast@126.com> Hi, guys In my business, I need dynamicly to find the backend ip address according to the request. However, I also want to use the upstream to take advantage of load balance. So I add the variable support in server directive. For sake of avoiding blocking the whole worker due to resolving domain, at present, only dotted decimal IP address should be parsed in the variables. Anyone can help to check or improve it? Thanks. The patch is based on 1.2.7. diff -ruNp nginx-1.2.7/src/http/ngx_http_upstream.c nginx-1.2.7.m/src/http/ngx_http_upstream.c --- nginx-1.2.7/src/http/ngx_http_upstream.c2013-02-11 22:39:49.000000000 +0800 +++ nginx-1.2.7.m/src/http/ngx_http_upstream.c2013-05-27 20:33:17.000000000 +0800 @@ -4220,8 +4220,9 @@ ngx_http_upstream_server(ngx_conf_t *cf, ngx_str_t *value, s; ngx_url_t u; ngx_int_t weight, max_fails; - ngx_uint_t i; + ngx_uint_t i, n; ngx_http_upstream_server_t *us; + ngx_http_script_compile_t sc; if (uscf->servers == NULL) { uscf->servers = ngx_array_create(cf->pool, 4, @@ -4245,13 +4246,32 @@ ngx_http_upstream_server(ngx_conf_t *cf, u.url = value[1]; u.default_port = 80; - if (ngx_parse_url(cf->pool, &u) != NGX_OK) { - if (u.err) { - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, - "%s in upstream \"%V\"", u.err, &u.url); + n = ngx_http_script_variables_count(&value[1]); + + if (n) { + ngx_memzero(&sc, sizeof(ngx_http_script_compile_t)); + + sc.cf = cf; + sc.source = &value[1]; + sc.lengths = &us->proxy_lengths; + sc.values = &us->proxy_values; + sc.variables = n; + sc.complete_lengths = 1; + sc.complete_values = 1; + + if (ngx_http_script_compile(&sc) != NGX_OK) { + return NGX_CONF_ERROR; } - return NGX_CONF_ERROR; + } else { + if (ngx_parse_url(cf->pool, &u) != NGX_OK) { + if (u.err) { + 
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "%s in upstream \"%V\"", u.err, &u.url); + } + + return NGX_CONF_ERROR; + } } weight = 1; @@ -4333,8 +4353,15 @@ ngx_http_upstream_server(ngx_conf_t *cf, goto invalid; } - us->addrs = u.addrs; - us->naddrs = u.naddrs; + if (n) { + us->addrs = NULL; + us->naddrs = 1; + + } else { + us->addrs = u.addrs; + us->naddrs = u.naddrs; + } + us->weight = weight; us->max_fails = max_fails; us->fail_timeout = fail_timeout; diff -ruNp nginx-1.2.7/src/http/ngx_http_upstream.h nginx-1.2.7.m/src/http/ngx_http_upstream.h --- nginx-1.2.7/src/http/ngx_http_upstream.h2012-02-13 19:01:58.000000000 +0800 +++ nginx-1.2.7.m/src/http/ngx_http_upstream.h2013-05-27 20:38:58.000000000 +0800 @@ -91,6 +91,9 @@ typedef struct { ngx_uint_t max_fails; time_t fail_timeout; + ngx_array_t *proxy_lengths; + ngx_array_t *proxy_values; + unsigned down:1; unsigned backup:1; } ngx_http_upstream_server_t; @@ -116,6 +119,8 @@ struct ngx_http_upstream_srv_conf_s { ngx_uint_t line; in_port_t port; in_port_t default_port; + ngx_uint_t variable_peer; + ngx_uint_t variable_backup; }; diff -ruNp nginx-1.2.7/src/http/ngx_http_upstream_round_robin.c nginx-1.2.7.m/src/http/ngx_http_upstream_round_robin.c --- nginx-1.2.7/src/http/ngx_http_upstream_round_robin.c2013-02-11 22:56:14.000000000 +0800 +++ nginx-1.2.7.m/src/http/ngx_http_upstream_round_robin.c2013-05-27 20:59:49.000000000 +0800 @@ -78,9 +78,19 @@ ngx_http_upstream_init_round_robin(ngx_c continue; } - peers->peer[n].sockaddr = server[i].addrs[j].sockaddr; - peers->peer[n].socklen = server[i].addrs[j].socklen; - peers->peer[n].name = server[i].addrs[j].name; + if (server[i].addrs == NULL) { + us->variable_peer++; + peers->peer[n].sockaddr = NULL; + peers->peer[n].socklen = 0; + ngx_str_null(&peers->peer[n].name); + peers->peer[n].server = &server[i]; + + } else { + peers->peer[n].sockaddr = server[i].addrs[j].sockaddr; + peers->peer[n].socklen = server[i].addrs[j].socklen; + peers->peer[n].name = 
server[i].addrs[j].name; + } + peers->peer[n].max_fails = server[i].max_fails; peers->peer[n].fail_timeout = server[i].fail_timeout; peers->peer[n].down = server[i].down; @@ -136,9 +146,20 @@ ngx_http_upstream_init_round_robin(ngx_c continue; } - backup->peer[n].sockaddr = server[i].addrs[j].sockaddr; - backup->peer[n].socklen = server[i].addrs[j].socklen; - backup->peer[n].name = server[i].addrs[j].name; + if (server[i].addrs == NULL) { + us->variable_backup++; + backup->peer[n].sockaddr = NULL; + backup->peer[n].socklen = 0; + ngx_str_null(&backup->peer[n].name); + backup->peer[n].server = &server[i]; + + } else { + + backup->peer[n].sockaddr = server[i].addrs[j].sockaddr; + backup->peer[n].socklen = server[i].addrs[j].socklen; + backup->peer[n].name = server[i].addrs[j].name; + } + backup->peer[n].weight = server[i].weight; backup->peer[n].effective_weight = server[i].weight; backup->peer[n].current_weight = 0; @@ -228,6 +249,73 @@ ngx_http_upstream_cmp_servers(const void } +static ngx_int_t +ngx_http_upstream_get_round_robin_variable_peer(ngx_http_request_t *r, + ngx_http_upstream_rr_peers_t *peers, ngx_uint_t variable_cnt) +{ + ngx_uint_t i, j, n; + ngx_url_t url; + ngx_http_upstream_rr_peer_t *peer; + + n = peers->number; + + for (i = 0, j = 0; j < variable_cnt && i < n; i++) { + + peer = &peers->peer[i]; + + if (peer->server) { + + peer->sockaddr = NULL; + peer->socklen = 0; + ngx_str_null(&peer->name); + + if (ngx_http_script_run(r, &peer->name, + peer->server->proxy_lengths->elts, 0, + peer->server->proxy_values->elts) + == NULL) + { + return NGX_ERROR; + } + + ngx_memzero(&url, sizeof(ngx_url_t)); + url.url = peer->name; + url.default_port = 80; + url.no_resolve = 1; + + if (ngx_parse_url(r->pool, &url) != NGX_OK) { + if (url.err) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "%s in upstream \"%V\"", url.err, &url.url); + } + + return NGX_ERROR; + } + + url.one_addr = 1; + if (url.no_port) { + url.port = url.default_port; + } + + if 
(ngx_inet_resolve_host(r->pool, &url) != NGX_OK) { + if (url.err) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "%s in upstream \"%V\"", url.err, &url.url); + } + + return NGX_ERROR; + } + + peer->sockaddr = url.addrs[0].sockaddr; + peer->socklen = url.addrs[0].socklen; + + j++; + } + } + + return NGX_OK; +} + + ngx_int_t ngx_http_upstream_init_round_robin_peer(ngx_http_request_t *r, ngx_http_upstream_srv_conf_t *us) @@ -251,6 +339,25 @@ ngx_http_upstream_init_round_robin_peer( n = rrp->peers->number; + if (us->variable_peer > 0) { + if (ngx_http_upstream_get_round_robin_variable_peer(r, rrp->peers, + us->variable_peer) + == NGX_ERROR) + { + return NGX_ERROR; + } + } + + if (us->variable_backup > 0) { + if (ngx_http_upstream_get_round_robin_variable_peer(r, + rrp->peers->next, + us->variable_backup) + == NGX_ERROR) + { + return NGX_ERROR; + } + } + if (rrp->peers->next && rrp->peers->next->number > n) { n = rrp->peers->next->number; } diff -ruNp nginx-1.2.7/src/http/ngx_http_upstream_round_robin.h nginx-1.2.7.m/src/http/ngx_http_upstream_round_robin.h --- nginx-1.2.7/src/http/ngx_http_upstream_round_robin.h2012-07-03 00:41:13.000000000 +0800 +++ nginx-1.2.7.m/src/http/ngx_http_upstream_round_robin.h2013-05-24 13:57:52.000000000 +0800 @@ -18,6 +18,7 @@ typedef struct { struct sockaddr *sockaddr; socklen_t socklen; ngx_str_t name; + ngx_http_upstream_server_t *server; ngx_int_t current_weight; ngx_int_t effective_weight; -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... 
Name: nginx-1.2.7-upstream.patch Type: application/octet-stream Size: 9256 bytes Desc: not available URL: From mdounin at mdounin.ru Sun Jun 2 11:33:34 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 2 Jun 2013 15:33:34 +0400 Subject: [PATCH]upstream server directive support variable In-Reply-To: <233169bb.a3e3.13f0285eb67.Coremail.flygoast@126.com> References: <233169bb.a3e3.13f0285eb67.Coremail.flygoast@126.com> Message-ID: <20130602113334.GL72282@mdounin.ru> Hello! On Sun, Jun 02, 2013 at 09:34:58AM +0800, flygoast wrote: > Hi, guys > > > In my business, I need dynamicly to find the backend ip address > according to the request. However, I also want to use the > upstream to take advantage of load balance. So I add the > variable support in server directive. For sake of avoiding > blocking the whole worker due to resolving domain, at present, > only dotted decimal IP address should be parsed in > the variables. Anyone can help to check or improve it? Thanks. Load balancing requires a state for a group of servers to be kept between requests. This somewhat contradicts an idea of dynamically finding a backend's ip address for each request - instead of balancing backends the code will balance backend "placeholders". Have you tried dynamically finding an upstream{} block name instead, and configuring appropriate upstream{} blocks? This is available out of the box. -- Maxim Dounin http://nginx.org/en/donation.html From flygoast at 126.com Sun Jun 2 12:46:33 2013 From: flygoast at 126.com (flygoast) Date: Sun, 2 Jun 2013 20:46:33 +0800 (CST) Subject: [PATCH]upstream server directive support variable In-Reply-To: <20130602113334.GL72282@mdounin.ru> References: <233169bb.a3e3.13f0285eb67.Coremail.flygoast@126.com> <20130602113334.GL72282@mdounin.ru> Message-ID: <53462f9d.ba95.13f04ecc5f7.Coremail.flygoast@126.com> Thanks for your response. I've tried to dynamicly find upstream block name. 
However, now my backend ips are stored in some storage like REDIS, or memcache. In each request, I first to get the ips from the storage, then proxy the request to the backend. Some backends have not only one ip, so I used the usptream to process the innormal situation like 502 in someone of them. Main objective is to retry another backend ip instead of load balance. Sorry for my english. At 2013-06-02 19:33:34,"Maxim Dounin" wrote: >Hello! > >On Sun, Jun 02, 2013 at 09:34:58AM +0800, flygoast wrote: > >> Hi, guys >> >> >> In my business, I need dynamicly to find the backend ip address >> according to the request. However, I also want to use the >> upstream to take advantage of load balance. So I add the >> variable support in server directive. For sake of avoiding >> blocking the whole worker due to resolving domain, at present, >> only dotted decimal IP address should be parsed in >> the variables. Anyone can help to check or improve it? Thanks. > >Load balancing requires a state for a group of servers to be kept >between requests. This somewhat contradicts an idea of >dynamically finding a backend's ip address for each request - >instead of balancing backends the code will balance backend >"placeholders". > >Have you tried dynamically finding an upstream{} block name >instead, and configuring appropriate upstream{} blocks? This is >available out of the box. > >-- >Maxim Dounin >http://nginx.org/en/donation.html > >_______________________________________________ >nginx-devel mailing list >nginx-devel at nginx.org >http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From mdounin at mdounin.ru Sun Jun 2 16:22:51 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 2 Jun 2013 20:22:51 +0400 Subject: [PATCH]upstream server directive support variable In-Reply-To: <53462f9d.ba95.13f04ecc5f7.Coremail.flygoast@126.com> References: <233169bb.a3e3.13f0285eb67.Coremail.flygoast@126.com> <20130602113334.GL72282@mdounin.ru> <53462f9d.ba95.13f04ecc5f7.Coremail.flygoast@126.com> Message-ID: <20130602162251.GN72282@mdounin.ru> Hello! On Sun, Jun 02, 2013 at 08:46:33PM +0800, flygoast wrote: > I've tried to dynamicly find upstream block name. However, now > my backend ips are stored in some storage like REDIS, or > memcache. In each request, I first to get the ips from the > storage, then proxy the request to the backend. Some backends > have not only one ip, so I used the usptream to process the > innormal situation like 502 in someone of them. Main objective > is to retry another backend ip instead of load balance. To retry with another backend there is error_page directive. -- Maxim Dounin http://nginx.org/en/donation.html From jzefip at gmail.com Mon Jun 3 02:29:02 2013 From: jzefip at gmail.com (Julien Zefi) Date: Sun, 2 Jun 2013 20:29:02 -0600 Subject: API question: large data processing handler Message-ID: hi, i am writing a module that creates and returns large amount of data, obviously this is blocking the worker but my main function that create the data can be called many times to get the result by parts. I cannot find what's the right way to setup my handler, the intention is to invoke my function many times as required and send out the results by chunks, whats the best approach ? thanks J.Z. -------------- next part -------------- An HTML attachment was scrubbed... URL: From jzefip at gmail.com Tue Jun 4 06:23:33 2013 From: jzefip at gmail.com (Julien Zefi) Date: Tue, 4 Jun 2013 00:23:33 -0600 Subject: API question: large data processing handler In-Reply-To: References: Message-ID: anyone? J.Z. 
On Sun, Jun 2, 2013 at 8:29 PM, Julien Zefi wrote: > hi, > > i am writing a module that creates and returns large amount of data, > obviously this is blocking the worker but my main function that create the > data can be called many times to get the result by parts. > > I cannot find what's the right way to setup my handler, the intention is > to invoke my function many times as required and send out the results by > chunks, whats the best approach ? > > thanks > > J.Z. > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Tue Jun 4 13:10:06 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 04 Jun 2013 13:10:06 +0000 Subject: [nginx] Updated zlib used for win32 builds. Message-ID: details: http://hg.nginx.org/nginx/rev/21e07bf09dd6 branches: changeset: 5236:21e07bf09dd6 user: Maxim Dounin date: Tue Jun 04 16:16:51 2013 +0400 description: Updated zlib used for win32 builds. diffstat: misc/GNUmakefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff --git a/misc/GNUmakefile b/misc/GNUmakefile --- a/misc/GNUmakefile +++ b/misc/GNUmakefile @@ -6,7 +6,7 @@ TEMP = tmp OBJS = objs.msvc8 OPENSSL = openssl-1.0.1e -ZLIB = zlib-1.2.7 +ZLIB = zlib-1.2.8 PCRE = pcre-8.32 From mdounin at mdounin.ru Tue Jun 4 14:29:10 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 4 Jun 2013 18:29:10 +0400 Subject: API question: large data processing handler In-Reply-To: References: Message-ID: <20130604142910.GX72282@mdounin.ru> Hello! On Tue, Jun 04, 2013 at 12:23:33AM -0600, Julien Zefi wrote: > On Sun, Jun 2, 2013 at 8:29 PM, Julien Zefi wrote: > > > hi, > > > > i am writing a module that creates and returns large amount of data, > > obviously this is blocking the worker but my main function that create the > > data can be called many times to get the result by parts. 
> > > > I cannot find what's the right way to setup my handler, the intention is > > to invoke my function many times as required and send out the results by > > chunks, whats the best approach ? I would recommend to set r->write_event_handler, and use ngx_add_timer() with 1ms timeout to schedule another call to your handler. -- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Tue Jun 4 15:31:19 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 04 Jun 2013 15:31:19 +0000 Subject: [nginx] nginx-1.5.1-RELEASE Message-ID: details: http://hg.nginx.org/nginx/rev/99eed1a88fc3 branches: changeset: 5237:99eed1a88fc3 user: Maxim Dounin date: Tue Jun 04 17:21:52 2013 +0400 description: nginx-1.5.1-RELEASE diffstat: docs/xml/nginx/changes.xml | 123 +++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 123 insertions(+), 0 deletions(-) diffs (133 lines): diff --git a/docs/xml/nginx/changes.xml b/docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml +++ b/docs/xml/nginx/changes.xml @@ -5,6 +5,129 @@ + + + + +????????? ssi_last_modified, sub_filter_last_modified ? +xslt_last_modified.
+??????? ??????? ?????????. +
+ +the "ssi_last_modified", "sub_filter_last_modified", and +"xslt_last_modified" directives.
+Thanks to Alexey Kolpakov. +
+
+ + + +???????? http_403 ? ?????????? proxy_next_upstream, fastcgi_next_upstream, +scgi_next_upstream ? uwsgi_next_upstream. + + +the "http_403" parameter of the "proxy_next_upstream", "fastcgi_next_upstream", +"scgi_next_upstream", and "uwsgi_next_upstream" directives. + + + + + +????????? allow ? deny ?????? ???????????? unix domain ??????. + + +the "allow" and "deny" directives now support unix domain sockets. + + + + + +nginx ?? ????????? ? ??????? ngx_mail_ssl_module, +?? ??? ?????? ngx_http_ssl_module; +?????? ????????? ? 1.3.14. + + +nginx could not be built with the ngx_mail_ssl_module, +but without ngx_http_ssl_module; +the bug had appeared in 1.3.14. + + + + + +? ????????? proxy_set_body.
+??????? Lanshun Zhou. +
+ +in the "proxy_set_body" directive.
+Thanks to Lanshun Zhou. +
+
+ + + +? ????????? lingering_time.
+??????? Lanshun Zhou. +
+ +in the "lingering_time" directive.
+Thanks to Lanshun Zhou. +
+
+ + + +???????? fail_timeout ????????? server +? ????? upstream ??? ?? ????????, +???? ????????????? ???????? max_fails; +?????? ????????? ? 1.3.0. + + +the "fail_timeout" parameter of the "server" directive +in the "upstream" context might not work +if "max_fails" parameter was used; +the bug had appeared in 1.3.0. + + + + + +? ??????? ???????? ??? ????????? segmentation fault, +???? ?????????????? ????????? ssl_stapling.
+??????? Piotr Sikora. +
+ +a segmentation fault might occur in a worker process +if the "ssl_stapling" directive was used.
+Thanks to Piotr Sikora. +
+
+ + + +? ???????? ??????-???????.
+??????? Filipe Da Silva. +
+ +in the mail proxy server.
+Thanks to Filipe Da Silva. +
+
+ + + +nginx/Windows ??? ????????? ????????? ??????????, +???? ?????????????? ????????? ??????? ?????????. + + +nginx/Windows might stop accepting connections +if several worker processes were used. + + + +
+ + From mdounin at mdounin.ru Tue Jun 4 15:31:20 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 04 Jun 2013 15:31:20 +0000 Subject: [nginx] release-1.5.1 tag Message-ID: details: http://hg.nginx.org/nginx/rev/a15e2ef8d73d branches: changeset: 5238:a15e2ef8d73d user: Maxim Dounin date: Tue Jun 04 17:21:52 2013 +0400 description: release-1.5.1 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -356,3 +356,4 @@ cd84e467c72967b9f5fb4d96bfc708c93edeb634 23159600bdea695db8f9d2890aaf73424303e49c release-1.3.16 7809529022b83157067e7d1e2fb65d57db5f4d99 release-1.4.0 48a84bc3ff074a65a63e353b9796ff2b14239699 release-1.5.0 +99eed1a88fc33f32d66e2ec913874dfef3e12fcc release-1.5.1 From vl at nginx.com Tue Jun 4 15:32:20 2013 From: vl at nginx.com (Homutov Vladimir) Date: Tue, 04 Jun 2013 15:32:20 +0000 Subject: [nginx] Core: fixed stderr redirection on win32 in ngx_reopen_fi... Message-ID: details: http://hg.nginx.org/nginx/rev/85e72ea8fbfd branches: changeset: 5239:85e72ea8fbfd user: Vladimir Homutov date: Mon Jun 03 16:54:28 2013 +0400 description: Core: fixed stderr redirection on win32 in ngx_reopen_files(). On win32 stderr was not redirected into a file specified by "error_log" while reopening files. Fix is to use platform-independent functions to work with stderr, as already used by ngx_init_cycle() and main() since rev. d8316f307b6a. 
diffstat: src/core/ngx_cycle.c | 11 ++++------- 1 files changed, 4 insertions(+), 7 deletions(-) diffs (24 lines): diff -r a15e2ef8d73d -r 85e72ea8fbfd src/core/ngx_cycle.c --- a/src/core/ngx_cycle.c Tue Jun 04 17:21:52 2013 +0400 +++ b/src/core/ngx_cycle.c Mon Jun 03 16:54:28 2013 +0400 @@ -1228,16 +1228,13 @@ ngx_reopen_files(ngx_cycle_t *cycle, ngx file[i].fd = fd; } -#if !(NGX_WIN32) + if (cycle->log->file->fd != ngx_stderr) { - if (cycle->log->file->fd != STDERR_FILENO) { - if (dup2(cycle->log->file->fd, STDERR_FILENO) == -1) { - ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno, - "dup2(STDERR) failed"); + if (ngx_set_stderr(cycle->log->file->fd) == NGX_FILE_ERROR) { + ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, + ngx_set_stderr_n " failed"); } } - -#endif } From vl at nginx.com Tue Jun 4 15:32:20 2013 From: vl at nginx.com (Homutov Vladimir) Date: Tue, 04 Jun 2013 15:32:20 +0000 Subject: [nginx] Core: fixed handling of "stderr" in error_log. Message-ID: details: http://hg.nginx.org/nginx/rev/d8af1005e886 branches: changeset: 5240:d8af1005e886 user: Vladimir Homutov date: Tue Jun 04 11:27:36 2013 +0400 description: Core: fixed handling of "stderr" in error_log. If "stderr" was specified in one of the "error_log" directives, stderr is not redirected to the first error_log on startup, configuration reload, and reopening log files. 
diffstat: src/core/nginx.c | 2 +- src/core/ngx_cycle.c | 7 ++++--- src/core/ngx_cycle.h | 2 ++ src/core/ngx_log.c | 1 + src/http/ngx_http_core_module.c | 1 + 5 files changed, 9 insertions(+), 4 deletions(-) diffs (70 lines): diff -r 85e72ea8fbfd -r d8af1005e886 src/core/nginx.c --- a/src/core/nginx.c Mon Jun 03 16:54:28 2013 +0400 +++ b/src/core/nginx.c Tue Jun 04 11:27:36 2013 +0400 @@ -387,7 +387,7 @@ main(int argc, char *const *argv) return 1; } - if (cycle->log->file->fd != ngx_stderr) { + if (!cycle->log_use_stderr && cycle->log->file->fd != ngx_stderr) { if (ngx_set_stderr(cycle->log->file->fd) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno, diff -r 85e72ea8fbfd -r d8af1005e886 src/core/ngx_cycle.c --- a/src/core/ngx_cycle.c Mon Jun 03 16:54:28 2013 +0400 +++ b/src/core/ngx_cycle.c Tue Jun 04 11:27:36 2013 +0400 @@ -582,8 +582,9 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) /* commit the new cycle configuration */ - if (!ngx_use_stderr && cycle->log->file->fd != ngx_stderr) { - + if (!ngx_use_stderr && !cycle->log_use_stderr + && cycle->log->file->fd != ngx_stderr) + { if (ngx_set_stderr(cycle->log->file->fd) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_set_stderr_n " failed"); @@ -1228,7 +1229,7 @@ ngx_reopen_files(ngx_cycle_t *cycle, ngx file[i].fd = fd; } - if (cycle->log->file->fd != ngx_stderr) { + if (!cycle->log_use_stderr && cycle->log->file->fd != ngx_stderr) { if (ngx_set_stderr(cycle->log->file->fd) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, diff -r 85e72ea8fbfd -r d8af1005e886 src/core/ngx_cycle.h --- a/src/core/ngx_cycle.h Mon Jun 03 16:54:28 2013 +0400 +++ b/src/core/ngx_cycle.h Tue Jun 04 11:27:36 2013 +0400 @@ -41,6 +41,8 @@ struct ngx_cycle_s { ngx_log_t *log; ngx_log_t new_log; + ngx_uint_t log_use_stderr; /* unsigned log_use_stderr:1; */ + ngx_connection_t **files; ngx_connection_t *free_connections; ngx_uint_t free_connection_n; diff -r 85e72ea8fbfd -r 
d8af1005e886 src/core/ngx_log.c --- a/src/core/ngx_log.c Mon Jun 03 16:54:28 2013 +0400 +++ b/src/core/ngx_log.c Tue Jun 04 11:27:36 2013 +0400 @@ -438,6 +438,7 @@ ngx_error_log(ngx_conf_t *cf, ngx_comman if (ngx_strcmp(value[1].data, "stderr") == 0) { ngx_str_null(&name); + cf->cycle->log_use_stderr = 1; } else { name = value[1]; diff -r 85e72ea8fbfd -r d8af1005e886 src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c Mon Jun 03 16:54:28 2013 +0400 +++ b/src/http/ngx_http_core_module.c Tue Jun 04 11:27:36 2013 +0400 @@ -4898,6 +4898,7 @@ ngx_http_core_error_log(ngx_conf_t *cf, if (ngx_strcmp(value[1].data, "stderr") == 0) { ngx_str_null(&name); + cf->cycle->log_use_stderr = 1; } else { name = value[1]; From mdounin at mdounin.ru Tue Jun 4 15:41:27 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 04 Jun 2013 15:41:27 +0000 Subject: [nginx] Version bump. Message-ID: details: http://hg.nginx.org/nginx/rev/03eb70798821 branches: changeset: 5241:03eb70798821 user: Maxim Dounin date: Tue Jun 04 19:38:20 2013 +0400 description: Version bump. diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff --git a/src/core/nginx.h b/src/core/nginx.h --- a/src/core/nginx.h +++ b/src/core/nginx.h @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1005001 -#define NGINX_VERSION "1.5.1" +#define nginx_version 1005002 +#define NGINX_VERSION "1.5.2" #define NGINX_VER "nginx/" NGINX_VERSION #define NGINX_VAR "NGINX" From jzefip at gmail.com Tue Jun 4 15:45:19 2013 From: jzefip at gmail.com (Julien Zefi) Date: Tue, 4 Jun 2013 09:45:19 -0600 Subject: API question: large data processing handler In-Reply-To: <20130604142910.GX72282@mdounin.ru> References: <20130604142910.GX72282@mdounin.ru> Message-ID: On Tue, Jun 4, 2013 at 8:29 AM, Maxim Dounin wrote: > Hello! 
> > On Tue, Jun 04, 2013 at 12:23:33AM -0600, Julien Zefi wrote: > > > On Sun, Jun 2, 2013 at 8:29 PM, Julien Zefi wrote: > > > > > hi, > > > > > > i am writing a module that creates and returns large amount of data, > > > obviously this is blocking the worker but my main function that create > the > > > data can be called many times to get the result by parts. > > > > > > I cannot find what's the right way to setup my handler, the intention > is > > > to invoke my function many times as required and send out the results > by > > > chunks, whats the best approach ? > > I would recommend to set r->write_event_handler, and use > ngx_add_timer() with 1ms timeout to schedule another call to your > handler. > > hi Maxim, thanks for your response. Why a timer is required ?, my expectation is that my function be called when the client socket is ready for write again. My function is encoding video packets so i expect this: 1. initial handler called, setup the write event callback 2. once the client socket is ready for write, invoke my write_event callback 3. write the packet 4. repeat #2 and #3 until the callback decide to finish the request is that possible ? J.Z. -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Tue Jun 4 16:08:31 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 4 Jun 2013 20:08:31 +0400 Subject: API question: large data processing handler In-Reply-To: References: <20130604142910.GX72282@mdounin.ru> Message-ID: <20130604160831.GY72282@mdounin.ru> Hello! On Tue, Jun 04, 2013 at 09:45:19AM -0600, Julien Zefi wrote: > On Tue, Jun 4, 2013 at 8:29 AM, Maxim Dounin wrote: > > > Hello! 
> > > > On Tue, Jun 04, 2013 at 12:23:33AM -0600, Julien Zefi wrote: > > > > > On Sun, Jun 2, 2013 at 8:29 PM, Julien Zefi wrote: > > > > > > > hi, > > > > > > > > i am writing a module that creates and returns large amount of data, > > > > obviously this is blocking the worker but my main function that create > > the > > > > data can be called many times to get the result by parts. > > > > > > > > I cannot find what's the right way to setup my handler, the intention > > is > > > > to invoke my function many times as required and send out the results > > by > > > > chunks, whats the best approach ? > > > > I would recommend to set r->write_event_handler, and use > > ngx_add_timer() with 1ms timeout to schedule another call to your > > handler. > > > > > hi Maxim, > > thanks for your response. Why a timer is required ?, my expectation is that > my function be called when the client socket is ready for write again. My > function is encoding video packets so i expect this: > > 1. initial handler called, setup the write event callback > 2. once the client socket is ready for write, invoke my write_event > callback > 3. write the packet > 4. repeat #2 and #3 until the callback decide to finish the request > > is that possible ? This will work, but only if you'll send enough data to fill socket's send buffer. A timer is needed if you want nginx to call your write event handler again while not filing socket's send buffer. (You may also try to emulate filing socket's send buffer by setting c->write->ready to 0 before calling the ngx_handle_write_event(), but not sure if it's safe/will actually work will all event methods.) -- Maxim Dounin http://nginx.org/en/donation.html From vbart at nginx.com Tue Jun 4 16:29:57 2013 From: vbart at nginx.com (Valentin V. 
Bartenev) Date: Tue, 4 Jun 2013 20:29:57 +0400 Subject: API question: large data processing handler In-Reply-To: <20130604160831.GY72282@mdounin.ru> References: <20130604160831.GY72282@mdounin.ru> Message-ID: <201306042029.57511.vbart@nginx.com> On Tuesday 04 June 2013 20:08:31 Maxim Dounin wrote: [...] > (You may also try to emulate filing socket's send buffer by > setting c->write->ready to 0 before calling the > ngx_handle_write_event(), but not sure if it's safe/will actually > work will all event methods.) It's certainly not safe. In particular it will break spdy. wbr, Valentin V. Bartenev From vbart at nginx.com Tue Jun 4 22:18:08 2013 From: vbart at nginx.com (Valentin Bartenev) Date: Tue, 04 Jun 2013 22:18:08 +0000 Subject: [nginx] SPDY: use proper macros for value length and headers cou... Message-ID: details: http://hg.nginx.org/nginx/rev/5776804fff04 branches: changeset: 5242:5776804fff04 user: Valentin Bartenev date: Wed Jun 05 02:13:52 2013 +0400 description: SPDY: use proper macros for value length and headers counter. Currently these macros are synonyms, but this may change in the future (in particular, spdy/3 uses 4 bytes for lengths). 
diffstat: src/http/ngx_http_spdy_filter_module.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (21 lines): diff -r 03eb70798821 -r 5776804fff04 src/http/ngx_http_spdy_filter_module.c --- a/src/http/ngx_http_spdy_filter_module.c Tue Jun 04 19:38:20 2013 +0400 +++ b/src/http/ngx_http_spdy_filter_module.c Wed Jun 05 02:13:52 2013 +0400 @@ -304,7 +304,7 @@ ngx_http_spdy_header_filter(ngx_http_req last = ngx_http_spdy_nv_write_val(last, "HTTP/1.1"); last = ngx_http_spdy_nv_write_name(last, "status"); - last = ngx_spdy_frame_write_uint16(last, 3); + last = ngx_http_spdy_nv_write_vlen(last, 3); last = ngx_sprintf(last, "%03ui", r->headers_out.status); count = 2; @@ -500,7 +500,7 @@ ngx_http_spdy_header_filter(ngx_http_req count++; } - (void) ngx_spdy_frame_write_uint16(buf, count); + (void) ngx_http_spdy_nv_write_num(buf, count); stream = r->spdy_stream; sc = stream->connection; From vbart at nginx.com Wed Jun 5 13:42:01 2013 From: vbart at nginx.com (Valentin Bartenev) Date: Wed, 05 Jun 2013 13:42:01 +0000 Subject: [nginx] Status: the "last_in_chain" flag must be set. Message-ID: details: http://hg.nginx.org/nginx/rev/ee739104d164 branches: changeset: 5243:ee739104d164 user: Valentin Bartenev date: Wed Jun 05 16:22:40 2013 +0400 description: Status: the "last_in_chain" flag must be set. The module always produces only one and obviously the last buffer in chain. diffstat: src/http/modules/ngx_http_stub_status_module.c | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (11 lines): diff -r 5776804fff04 -r ee739104d164 src/http/modules/ngx_http_stub_status_module.c --- a/src/http/modules/ngx_http_stub_status_module.c Wed Jun 05 02:13:52 2013 +0400 +++ b/src/http/modules/ngx_http_stub_status_module.c Wed Jun 05 16:22:40 2013 +0400 @@ -145,6 +145,7 @@ static ngx_int_t ngx_http_status_handler r->headers_out.content_length_n = b->last - b->pos; b->last_buf = (r == r->main) ? 
1 : 0; + b->last_in_chain = 1; rc = ngx_http_send_header(r); From vbart at nginx.com Wed Jun 5 14:33:41 2013 From: vbart at nginx.com (Valentin V. Bartenev) Date: Wed, 5 Jun 2013 18:33:41 +0400 Subject: [PATCH/v2] SPDY: Allow returning the full status line In-Reply-To: <20130531045241.GA12348@home.blackbean.org> References: <20130530213434.GA9044@home.blackbean.org> <201305310421.45269.vbart@nginx.com> <20130531045241.GA12348@home.blackbean.org> Message-ID: <201306051833.41715.vbart@nginx.com> On Friday 31 May 2013 08:52:41 Jim Radford wrote: > On Fri, May 31, 2013 at 04:21:45AM +0400, Valentin V. Bartenev wrote: > > On Friday 31 May 2013 02:24:50 Jim Radford wrote: > > > This is a replacement to my previous patch which actaully includes the > > > buffer length handling. > > > > > > # HG changeset patch > > > # User Jim Radford > > > # Date 1369952377 25200 > > > # Node ID 52d7b6082129c90275579fa3667cce3f537cbd09 > > > # Parent 00dbfac67e48a8fe20802287b6fca50950178b8b > > > SPDY: Allow returning the full status line > > > > [...] > > > > Could you clarify a bit the purpose of this change? > > When using nginx to proxy to http, the response status (code and text) > are passed though unchanged if the request comes in via HTTP or HTTP > over SSL; the text however is currently stripped when the request is > made via SPDY. For us this meant that enabling SPDY broke our > application which expected to receive our custom status text. > > While we could easily work around this, we think that it better for > nginx to be request transport agnostic as much as possible. > Applications that prefer a curt status may still provide one and it > will be passed through unmolested. > Ok, fair enough. 
See the review below: > # HG changeset patch > # User Jim Radford > # Date 1369952377 25200 > # Node ID 52d7b6082129c90275579fa3667cce3f537cbd09 > # Parent 00dbfac67e48a8fe20802287b6fca50950178b8b > SPDY: Allow returning the full status line You should not use capitalization after a colon, and also please end the summary line with a period. A small description of the problem would be nice to see here. > diff -r 00dbfac67e48 -r 52d7b6082129 src/http/ngx_http_spdy_filter_module.c > --- a/src/http/ngx_http_spdy_filter_module.c Thu May 30 18:23:05 2013 +0400 > +++ b/src/http/ngx_http_spdy_filter_module.c Thu May 30 15:19:37 2013 -0700 > @@ -162,7 +162,9 @@ > + ngx_http_spdy_nv_nsize("version") > + ngx_http_spdy_nv_vsize("HTTP/1.1") > + ngx_http_spdy_nv_nsize("status") > - + ngx_http_spdy_nv_vsize("418"); > + + (r->headers_out.status_line.len > + ? NGX_SPDY_NV_VLEN_SIZE + r->headers_out.status_line.len > + : ngx_http_spdy_nv_vsize("418")); > > clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); > > @@ -304,8 +306,14 @@ > last = ngx_http_spdy_nv_write_val(last, "HTTP/1.1"); > > last = ngx_http_spdy_nv_write_name(last, "status"); > - last = ngx_spdy_frame_write_uint16(last, 3); > - last = ngx_sprintf(last, "%03ui", r->headers_out.status); It turns too tightly. Please add an empty line here for better readability. > + if (r->headers_out.status_line.len) { > + last = ngx_http_spdy_nv_write_vlen(last, r->headers_out.status_line.len); > + last = ngx_cpymem(last, r->headers_out.status_line.data, > + r->headers_out.status_line.len); > + } else { > + last = ngx_spdy_frame_write_uint16(last, 3); > + last = ngx_sprintf(last, "%03ui", r->headers_out.status); > + } > > count = 2; > > > Also, please note changes in http://hg.nginx.org/nginx/rev/5776804fff04 wbr, Valentin V. Bartenev From mdounin at mdounin.ru Wed Jun 5 16:10:03 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 05 Jun 2013 16:10:03 +0000 Subject: [nginx] Valgrind: sigaction() failure now ignored. 
Message-ID: details: http://hg.nginx.org/nginx/rev/593d344999f5 branches: changeset: 5244:593d344999f5 user: Maxim Dounin date: Wed Jun 05 19:44:20 2013 +0400 description: Valgrind: sigaction() failure now ignored. Valgrind intercepts SIGUSR2 in some cases, and nginx might not be able to start due to sigaction() failure. If compiled with NGX_VALGRIND defined, we now ignore the failure of sigaction(). diffstat: src/os/unix/ngx_process.c | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (18 lines): diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c --- a/src/os/unix/ngx_process.c +++ b/src/os/unix/ngx_process.c @@ -291,9 +291,14 @@ ngx_init_signals(ngx_log_t *log) sa.sa_handler = sig->handler; sigemptyset(&sa.sa_mask); if (sigaction(sig->signo, &sa, NULL) == -1) { +#if (NGX_VALGRIND) + ngx_log_error(NGX_LOG_ALERT, log, ngx_errno, + "sigaction(%s) failed, ignored", sig->signame); +#else ngx_log_error(NGX_LOG_EMERG, log, ngx_errno, "sigaction(%s) failed", sig->signame); return NGX_ERROR; +#endif } } From mdounin at mdounin.ru Wed Jun 5 16:10:04 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 05 Jun 2013 16:10:04 +0000 Subject: [nginx] Mail: fixed possible uninitialized memory access. Message-ID: details: http://hg.nginx.org/nginx/rev/b6562f98bfd8 branches: changeset: 5246:b6562f98bfd8 user: Maxim Dounin date: Wed Jun 05 19:44:23 2013 +0400 description: Mail: fixed possible uninitialized memory access. Found by Valgrind. 
diffstat: src/mail/ngx_mail_parse.c | 4 ++++ 1 files changed, 4 insertions(+), 0 deletions(-) diffs (14 lines): diff --git a/src/mail/ngx_mail_parse.c b/src/mail/ngx_mail_parse.c --- a/src/mail/ngx_mail_parse.c +++ b/src/mail/ngx_mail_parse.c @@ -842,6 +842,10 @@ ngx_mail_auth_parse(ngx_mail_session_t * } #endif + if (s->args.nelts == 0) { + return NGX_MAIL_PARSE_INVALID_COMMAND; + } + arg = s->args.elts; if (arg[0].len == 5) { From mdounin at mdounin.ru Wed Jun 5 16:10:04 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 05 Jun 2013 16:10:04 +0000 Subject: [nginx] Fixed debug logging in ngx_http_parse_complex_uri(). Message-ID: details: http://hg.nginx.org/nginx/rev/55dc535ae5dc branches: changeset: 5247:55dc535ae5dc user: Maxim Dounin date: Wed Jun 05 19:45:08 2013 +0400 description: Fixed debug logging in ngx_http_parse_complex_uri(). The *u previously logged isn't yet initialized at this point, and Valgrind complains. diffstat: src/http/ngx_http_parse.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff --git a/src/http/ngx_http_parse.c b/src/http/ngx_http_parse.c --- a/src/http/ngx_http_parse.c +++ b/src/http/ngx_http_parse.c @@ -1256,8 +1256,8 @@ ngx_http_parse_complex_uri(ngx_http_requ * the line feed */ - ngx_log_debug4(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "s:%d in:'%Xd:%c', out:'%c'", state, ch, ch, *u); + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "s:%d in:'%Xd:%c'", state, ch, ch); switch (state) { From mdounin at mdounin.ru Wed Jun 5 16:10:04 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 05 Jun 2013 16:10:04 +0000 Subject: [nginx] Valgrind: supressed complaints about uninitialized bytes. Message-ID: details: http://hg.nginx.org/nginx/rev/711fa02afae8 branches: changeset: 5245:711fa02afae8 user: Maxim Dounin date: Wed Jun 05 19:44:22 2013 +0400 description: Valgrind: supressed complaints about uninitialized bytes. 
Valgrind complains if we pass uninitialized memory to a syscall: ==36492== Syscall param sendmsg(msg.msg_iov[0]) points to uninitialised byte(s) ==36492== at 0x6B5E6A: sendmsg (in /usr/lib/system/libsystem_kernel.dylib) ==36492== by 0x10004288E: ngx_signal_worker_processes (ngx_process_cycle.c:527) ==36492== by 0x1000417A7: ngx_master_process_cycle (ngx_process_cycle.c:203) ==36492== by 0x100001F10: main (nginx.c:410) ==36492== Address 0x7fff5fbff71c is on thread 1's stack Even initialization of all members of the structure passed isn't enough, as there is padding which still remains uninitialized and results in Valgrind complaint. Note there is no real problem here as data from uninitialized memory isn't used. diffstat: src/http/ngx_http_file_cache.c | 2 ++ src/os/unix/ngx_process_cycle.c | 8 ++++++++ 2 files changed, 10 insertions(+), 0 deletions(-) diffs (51 lines): diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c +++ b/src/http/ngx_http_file_cache.c @@ -875,6 +875,8 @@ ngx_http_file_cache_set_header(ngx_http_ c = r->cache; + ngx_memzero(h, sizeof(ngx_http_file_cache_header_t)); + h->valid_sec = c->valid_sec; h->last_modified = c->last_modified; h->date = c->date; diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c +++ b/src/os/unix/ngx_process_cycle.c @@ -355,6 +355,8 @@ ngx_start_worker_processes(ngx_cycle_t * ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "start worker processes"); + ngx_memzero(&ch, sizeof(ngx_channel_t)); + ch.command = NGX_CMD_OPEN_CHANNEL; for (i = 0; i < n; i++) { @@ -401,6 +403,8 @@ ngx_start_cache_manager_processes(ngx_cy &ngx_cache_manager_ctx, "cache manager process", respawn ? 
NGX_PROCESS_JUST_RESPAWN : NGX_PROCESS_RESPAWN); + ngx_memzero(&ch, sizeof(ngx_channel_t)); + ch.command = NGX_CMD_OPEN_CHANNEL; ch.pid = ngx_processes[ngx_process_slot].pid; ch.slot = ngx_process_slot; @@ -460,6 +464,8 @@ ngx_signal_worker_processes(ngx_cycle_t ngx_err_t err; ngx_channel_t ch; + ngx_memzero(&ch, sizeof(ngx_channel_t)); + #if (NGX_BROKEN_SCM_RIGHTS) ch.command = 0; @@ -561,6 +567,8 @@ ngx_reap_children(ngx_cycle_t *cycle) ngx_channel_t ch; ngx_core_conf_t *ccf; + ngx_memzero(&ch, sizeof(ngx_channel_t)); + ch.command = NGX_CMD_CLOSE_CHANNEL; ch.fd = -1; From atul at dreamzinfotech.com Thu Jun 6 07:17:14 2013 From: atul at dreamzinfotech.com (Atul Bhouraskar) Date: Thu, 06 Jun 2013 17:17:14 +1000 Subject: Setting headers within process_headers function of module Message-ID: <51B0377A.7060305@dreamzinfotech.com> Hi, I am writing an "upstream" module to implement a custom protocol. I have managed to get most of it working end to end by following the fastcgi module code but am stuck at a problem that should be simple... I need to set certain HTTP headers within the process_header function as the upstream server does not speak HTTP. The two headers that I am most interested in at this time are "X-Accel-Redirect" and "Content-Type" (populated at different times depending upon content received from upstream). The values of both headers are available in the function. I have tried following the example at http://wiki.nginx.org/HeadersManagement (substituting headers_out with headers_in) with no luck, the output headers don't seem to change or there is a math exception thrown when a nginx internal function is performing a hash lookup. Looking at the fastcgi module it seems that there might be some hash initialisation that is needed? I haven't been able to figure this out. Any help would be appreciated! 
Regards, Atul From mdounin at mdounin.ru Thu Jun 6 11:24:11 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 6 Jun 2013 15:24:11 +0400 Subject: Setting headers within process_headers function of module In-Reply-To: <51B0377A.7060305@dreamzinfotech.com> References: <51B0377A.7060305@dreamzinfotech.com> Message-ID: <20130606112411.GO72282@mdounin.ru> Hello! On Thu, Jun 06, 2013 at 05:17:14PM +1000, Atul Bhouraskar wrote: > Hi, > > I am writing an "upstream" module to implement a custom protocol. I > have managed to get most of it working end to end by following the > fastcgi module code but am stuck at a problem that should be > simple... > > I need to set certain HTTP headers within the process_header > function as the upstream server does not speak HTTP. The two headers > that I am most interested in at this time are "X-Accel-Redirect" and > "Content-Type" (populated at different times depending upon content > received from upstream). The values of both headers are available in > the function. > > I have tried following the example at > http://wiki.nginx.org/HeadersManagement (substituting headers_out > with headers_in) with no luck, the output headers don't seem to > change or there is a math exception thrown when a nginx internal > function is performing a hash lookup. Looking at the fastcgi module > it seems that there might be some hash initialisation that is > needed? I haven't been able to figure this out. >From your description it's not clear what exactly fails for you, but I would assume it's hide_headers_hash lookup, which should be initialized using the ngx_http_upstream_hide_headers_hash() function. -- Maxim Dounin http://nginx.org/en/donation.html From dmitry.petroff at gmail.com Thu Jun 6 12:20:21 2013 From: dmitry.petroff at gmail.com (Dmitry Petrov) Date: Thu, 6 Jun 2013 16:20:21 +0400 Subject: Request buffering when uploading big file Message-ID: Hi. I'm using nginx as a front-end on file uploader. 
It gets PUT request from client and then proxy_pass it to custom uploader. The problem is nginx is buffering whole request body (which may be very large - up to several gigabytes) and only after that request is passed to my backend. Is there any ways to force nginx to proxy request immediately without buffering? -- Regards, Dmitry -------------- next part -------------- An HTML attachment was scrubbed... URL: From witekfl at gazeta.pl Thu Jun 6 13:41:53 2013 From: witekfl at gazeta.pl ( Witold Filipczyk) Date: Thu, 06 Jun 2013 15:41:53 +0200 Subject: The addition before sub Message-ID: Hello, I want to inject something before and before , but even if there is no . So used add_after_body /after_body; /after_body returns the text , so there is at least one in the page. I wanted sub_filter to replace to SOMETHING, but the sub_filter is run before the addition_filter. Changing the order in modules doesn't help, because the sub_filter doesn't replace the text from subrequests. I have a few questions: How to replace two different substrings using sub_filter or something else? How to force sub_filter to replace text, even if it comes from the addition filter? From mdounin at mdounin.ru Thu Jun 6 13:57:41 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 6 Jun 2013 17:57:41 +0400 Subject: The addition before sub In-Reply-To: References: Message-ID: <20130606135741.GR72282@mdounin.ru> Hello! On Thu, Jun 06, 2013 at 03:41:53PM +0200, Witold Filipczyk wrote: > Hello, > I want to inject something before and before , but > even if there is no . > So used add_after_body /after_body; > /after_body returns the text , so there is at least one > in the page. > I wanted sub_filter to replace to SOMETHING, but the > sub_filter is run before the addition_filter. > Changing the order in modules doesn't help, because the sub_filter > doesn't replace the text from subrequests. 
Works fine here: location / { add_after_body /after.html; sub_filter 'foo' 'bar'; } $ cat main.html this is main.html: foo $ cat after.html this is in after.html: foo $ fetch -qo - 'http://localhost:8080/main.html' this is main.html: bar this is in after.html: bar The explanation is simple: addition filter uses _subrequests_ to add text, and these subrequests are in turn processed by the whole filter chain, including the sub filter. Hence order of sub and addition filters doesn't matter. Most likely it doesn't work for you because /after_body have some default mime type not matched by sub_filter_types, see http://nginx.org/r/sub_filter_types. > I have a few questions: > How to replace two different substrings using sub_filter or something else? As of now it's not possible. > How to force sub_filter to replace text, even if it comes from the > addition filter? This works out of the box, see above. -- Maxim Dounin http://nginx.org/en/donation.html From witekfl at gazeta.pl Thu Jun 6 15:43:14 2013 From: witekfl at gazeta.pl ( Witold Filipczyk) Date: Thu, 06 Jun 2013 17:43:14 +0200 Subject: The addition before sub In-Reply-To: <20130606135741.GR72282@mdounin.ru> References: <20130606135741.GR72282@mdounin.ru> Message-ID: W dniu 06.06.2013 o 15:57 Maxim Dounin pisze: > Hello! > > On Thu, Jun 06, 2013 at 03:41:53PM +0200, Witold Filipczyk wrote: > >> Hello, >> I want to inject something before and before , but >> even if there is no . >> So used add_after_body /after_body; >> /after_body returns the text , so there is at least one >> in the page. >> I wanted sub_filter to replace to SOMETHING, but the >> sub_filter is run before the addition_filter. >> Changing the order in modules doesn't help, because the sub_filter >> doesn't replace the text from subrequests. 
> > Works fine here: > > location / { > add_after_body /after.html; > sub_filter 'foo' 'bar'; > } > > $ cat main.html > this is main.html: foo > $ cat after.html > this is in after.html: foo > $ fetch -qo - 'http://localhost:8080/main.html' > this is main.html: bar > this is in after.html: bar > > The explanation is simple: addition filter uses _subrequests_ to > add text, and these subrequests are in turn processed by the whole > filter chain, including the sub filter. Hence order of sub and > addition filters doesn't matter. > > Most likely it doesn't work for you because /after_body have some > default mime type not matched by sub_filter_types, see > http://nginx.org/r/sub_filter_types. OK. It works, but: server { listen 8000; sub_filter 'foo' 'bar'; sub_filter_once on; location / { root /; add_after_body /after.html; } location = /after.html { root /; } } /after.html contains "lalala foo" /body.html contains "foo" http://localhost:8000/body.html: bar lalala bar I expected: bar lalala foo How to switch off sub_filter for location = /after.html to get expected result? From mdounin at mdounin.ru Thu Jun 6 17:56:52 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 6 Jun 2013 21:56:52 +0400 Subject: The addition before sub In-Reply-To: References: <20130606135741.GR72282@mdounin.ru> Message-ID: <20130606175652.GS72282@mdounin.ru> Hello! On Thu, Jun 06, 2013 at 05:43:14PM +0200, Witold Filipczyk wrote: > W dniu 06.06.2013 o 15:57 Maxim Dounin pisze: > > >Hello! > > > >On Thu, Jun 06, 2013 at 03:41:53PM +0200, Witold Filipczyk wrote: > > > >>Hello, > >>I want to inject something before and before , but > >>even if there is no . > >>So used add_after_body /after_body; > >>/after_body returns the text , so there is at least one > >> in the page. > >>I wanted sub_filter to replace to SOMETHING, but the > >>sub_filter is run before the addition_filter. 
> >>Changing the order in modules doesn't help, because the sub_filter > >>doesn't replace the text from subrequests. > > > >Works fine here: > > > > location / { > > add_after_body /after.html; > > sub_filter 'foo' 'bar'; > > } > > > >$ cat main.html > >this is main.html: foo > >$ cat after.html > >this is in after.html: foo > >$ fetch -qo - 'http://localhost:8080/main.html' > >this is main.html: bar > >this is in after.html: bar > > > >The explanation is simple: addition filter uses _subrequests_ to > >add text, and these subrequests are in turn processed by the whole > >filter chain, including the sub filter. Hence order of sub and > >addition filters doesn't matter. > > > >Most likely it doesn't work for you because /after_body have some > >default mime type not matched by sub_filter_types, see > >http://nginx.org/r/sub_filter_types. > > OK. It works, but: > > server { > listen 8000; > sub_filter 'foo' 'bar'; > sub_filter_once on; > > location / { > root /; > add_after_body /after.html; > } > > location = /after.html { > root /; > } > } > > /after.html contains "lalala foo" > /body.html contains "foo" > > http://localhost:8000/body.html: > bar lalala bar > > I expected: > bar lalala foo Any reasons for the expectation? > How to switch off sub_filter for location = /after.html to get > expected result? As in most cases sub_filter isn't switched on if it's not needed, there are no special way to turn it off. 
Using an empty search string does the trick though, at least in recent enough versions: location = /after.html { sub_filter "" ""; } Better solution would be to don't switch it on where it's not needed, i.e.: location / { add_after_body /after.html; sub_filter "foo" "bar"; } location = /after.html { # no sub_filter here } -- Maxim Dounin http://nginx.org/en/donation.html From witekfl at gazeta.pl Thu Jun 6 18:25:53 2013 From: witekfl at gazeta.pl ( Witold Filipczyk) Date: Thu, 06 Jun 2013 20:25:53 +0200 Subject: The addition before sub In-Reply-To: <20130606175652.GS72282@mdounin.ru> References: <20130606135741.GR72282@mdounin.ru> <20130606175652.GS72282@mdounin.ru> Message-ID: W dniu 06.06.2013 o 19:56 Maxim Dounin pisze: > Hello! > > On Thu, Jun 06, 2013 at 05:43:14PM +0200, Witold Filipczyk wrote: > >> W dniu 06.06.2013 o 15:57 Maxim Dounin pisze: >> >> >Hello! >> > >> >On Thu, Jun 06, 2013 at 03:41:53PM +0200, Witold Filipczyk wrote: >> > >> >>Hello, >> >>I want to inject something before and before , but >> >>even if there is no . >> >>So used add_after_body /after_body; >> >>/after_body returns the text , so there is at least one >> >> in the page. >> >>I wanted sub_filter to replace to SOMETHING, but the >> >>sub_filter is run before the addition_filter. >> >>Changing the order in modules doesn't help, because the sub_filter >> >>doesn't replace the text from subrequests. >> > >> >Works fine here: >> > >> > location / { >> > add_after_body /after.html; >> > sub_filter 'foo' 'bar'; >> > } >> > >> >$ cat main.html >> >this is main.html: foo >> >$ cat after.html >> >this is in after.html: foo >> >$ fetch -qo - 'http://localhost:8080/main.html' >> >this is main.html: bar >> >this is in after.html: bar >> > >> >The explanation is simple: addition filter uses _subrequests_ to >> >add text, and these subrequests are in turn processed by the whole >> >filter chain, including the sub filter. Hence order of sub and >> >addition filters doesn't matter. 
>> > >> >Most likely it doesn't work for you because /after_body have some >> >default mime type not matched by sub_filter_types, see >> >http://nginx.org/r/sub_filter_types. >> >> OK. It works, but: >> >> server { >> listen 8000; >> sub_filter 'foo' 'bar'; >> sub_filter_once on; >> >> location / { >> root /; >> add_after_body /after.html; >> } >> >> location = /after.html { >> root /; >> } >> } >> >> /after.html contains "lalala foo" >> /body.html contains "foo" >> >> http://localhost:8000/body.html: >> bar lalala bar >> >> I expected: >> bar lalala foo > > Any reasons for the expectation? I wanted to replace given substring once. add_after_body and sub_filter occurs at the "server" level. There is too many locations. Thanks for the reply. I did a workaround: In ngx_http_sub_body_filter: ngx_http_sub_ctx_t *ctx_main = ngx_http_get_module_ctx(r->main, ngx_http_sub_filter_module); if (ctx_main && ctx_main->once) { ctx->once = ctx_main->once; } From atul at dreamzinfotech.com Thu Jun 6 23:42:27 2013 From: atul at dreamzinfotech.com (Atul Bhouraskar) Date: Fri, 07 Jun 2013 09:42:27 +1000 Subject: Setting headers within process_headers function of module In-Reply-To: <20130606112411.GO72282@mdounin.ru> References: <51B0377A.7060305@dreamzinfotech.com> <20130606112411.GO72282@mdounin.ru> Message-ID: <51B11E63.6060309@dreamzinfotech.com> On 06/06/13 21:24, Maxim Dounin wrote: > Hello! > > On Thu, Jun 06, 2013 at 05:17:14PM +1000, Atul Bhouraskar wrote: > >> Hi, >> >> I am writing an "upstream" module to implement a custom protocol. I >> have managed to get most of it working end to end by following the >> fastcgi module code but am stuck at a problem that should be >> simple... >> >> I need to set certain HTTP headers within the process_header >> function as the upstream server does not speak HTTP. 
The two headers >> that I am most interested in at this time are "X-Accel-Redirect" and >> "Content-Type" (populated at different times depending upon content >> received from upstream). The values of both headers are available in >> the function. >> >> I have tried following the example at >> http://wiki.nginx.org/HeadersManagement (substituting headers_out >> with headers_in) with no luck, the output headers don't seem to >> change or there is a math exception thrown when a nginx internal >> function is performing a hash lookup. Looking at the fastcgi module >> it seems that there might be some hash initialisation that is >> needed? I haven't been able to figure this out. > From your description it's not clear what exactly fails for you, > but I would assume it's hide_headers_hash lookup, which should be > initialized using the ngx_http_upstream_hide_headers_hash() > function. > Thanks! That was it. I basically followed the fastcgi module again: static ngx_str_t ngx_http_fastcgi_hide_headers[] = { ngx_string("Status"), ngx_string("X-Accel-Expires"), ngx_string("X-Accel-Redirect"), ngx_string("X-Accel-Limit-Rate"), ngx_string("X-Accel-Buffering"), ngx_string("X-Accel-Charset"), ngx_null_string }; ... if (ngx_http_upstream_hide_headers_hash(cf, &conf->upstream, &prev->upstream, ngx_http_fastcgi_hide_headers, &hash) != NGX_OK) { return NGX_CONF_ERROR; } From radford at galvanix.com Fri Jun 7 20:18:37 2013 From: radford at galvanix.com (Jim Radford) Date: Fri, 7 Jun 2013 13:18:37 -0700 Subject: [PATCH/v3] SPDY: pass though the full status when available. 
In-Reply-To: <201306051833.41715.vbart@nginx.com> References: <20130530213434.GA9044@home.blackbean.org> <201305310421.45269.vbart@nginx.com> <20130531045241.GA12348@home.blackbean.org> <201306051833.41715.vbart@nginx.com> Message-ID: <20130607201837.GA10437@home.blackbean.org> # HG changeset patch # User Jim Radford # Date 1370636160 25200 # Node ID 18018b06ed3103c8a0e841e59913daa8bab1c03c # Parent 55dc535ae5dc9d9ffb9bf9d163f9c7d298a56f87 SPDY: pass though the full status when available. Avoid stripping the status text when proxying for compatibility with http. diff -r 55dc535ae5dc -r 18018b06ed31 src/http/ngx_http_spdy_filter_module.c --- a/src/http/ngx_http_spdy_filter_module.c Wed Jun 05 19:45:08 2013 +0400 +++ b/src/http/ngx_http_spdy_filter_module.c Fri Jun 07 13:16:00 2013 -0700 @@ -162,7 +162,9 @@ + ngx_http_spdy_nv_nsize("version") + ngx_http_spdy_nv_vsize("HTTP/1.1") + ngx_http_spdy_nv_nsize("status") - + ngx_http_spdy_nv_vsize("418"); + + (r->headers_out.status_line.len + ? NGX_SPDY_NV_VLEN_SIZE + r->headers_out.status_line.len + : ngx_http_spdy_nv_vsize("418")); clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); @@ -304,8 +306,15 @@ last = ngx_http_spdy_nv_write_val(last, "HTTP/1.1"); last = ngx_http_spdy_nv_write_name(last, "status"); - last = ngx_http_spdy_nv_write_vlen(last, 3); - last = ngx_sprintf(last, "%03ui", r->headers_out.status); + + if (r->headers_out.status_line.len) { + last = ngx_http_spdy_nv_write_vlen(last, r->headers_out.status_line.len); + last = ngx_cpymem(last, r->headers_out.status_line.data, + r->headers_out.status_line.len); + } else { + last = ngx_http_spdy_nv_write_vlen(last, 3); + last = ngx_sprintf(last, "%03ui", r->headers_out.status); + } count = 2; From danfiala at centrum.cz Sun Jun 9 09:21:39 2013 From: danfiala at centrum.cz (danfiala at centrum.cz) Date: Sun, 09 Jun 2013 11:21:39 +0200 Subject: Proxy module without waiting for a reply Message-ID: <20130609112139.09556BB5@centrum.cz> Hi all, I need to 
implement nginx module that does the following: * It receives http get requests. * For every request it sends information about the request over TCP packet to another server. * When data are send it generates some trivial HTTP reply. The another server does not sent any reply to the module. I studied source code of other modules and some tutorials and it seems that I should implement upstream (proxy) module. But I would like to achieve the following: * Module does generate the reply just after data are sent to another server. It does not wait for any reply from another server. * Socket to another server remains open among requests and is reused for subsequent requests. Is this possible and is implementation of upstream module the right way? Kind regards, Daniel From mdounin at mdounin.ru Sun Jun 9 12:04:32 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 9 Jun 2013 16:04:32 +0400 Subject: Proxy module without waiting for a reply In-Reply-To: <20130609112139.09556BB5@centrum.cz> References: <20130609112139.09556BB5@centrum.cz> Message-ID: <20130609120432.GO72282@mdounin.ru> Hello! On Sun, Jun 09, 2013 at 11:21:39AM +0200, danfiala at centrum.cz wrote: > Hi all, > I need to implement nginx module that does the following: > * It receives http get requests. > * For every request it sends information about the request over TCP packet > to another server. > * When data are send it generates some trivial HTTP reply. The another > server does not sent any reply to the module. > > I studied source code of other modules and some tutorials and it seems that > I should implement upstream (proxy) module. But I would like to achieve the > following: > * Module does generate the reply just after data are sent to another > server. It does not wait for any reply from another server. > * Socket to another server remains open among requests and is reused for > subsequent requests. > > Is this possible and is implementation of upstream module the right way? 
Yes, it looks possible and easy enough. -- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Mon Jun 10 10:37:46 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 10 Jun 2013 10:37:46 +0000 Subject: [nginx] Perl: fixed r->header_in("Cookie") (ticket #351). Message-ID: details: http://hg.nginx.org/nginx/rev/f5626ab8cb87 branches: changeset: 5248:f5626ab8cb87 user: Maxim Dounin date: Mon Jun 10 14:35:00 2013 +0400 description: Perl: fixed r->header_in("Cookie") (ticket #351). It was broken by X-Forwarded-For related changes in f7fe817c92a2 (1.3.14) as hh->offset is no longer 0 for Cookie. diffstat: src/http/modules/perl/nginx.xs | 36 +++++++++++++++++++++++++++--------- 1 files changed, 27 insertions(+), 9 deletions(-) diffs (88 lines): diff --git a/src/http/modules/perl/nginx.xs b/src/http/modules/perl/nginx.xs --- a/src/http/modules/perl/nginx.xs +++ b/src/http/modules/perl/nginx.xs @@ -222,10 +222,11 @@ header_in(r, key) dXSTARG; ngx_http_request_t *r; SV *key; - u_char *p, *lowcase_key, *cookie; + u_char *p, *lowcase_key, *value, sep; STRLEN len; ssize_t size; ngx_uint_t i, n, hash; + ngx_array_t *a; ngx_list_part_t *part; ngx_table_elt_t *h, **ph; ngx_http_header_t *hh; @@ -255,6 +256,19 @@ header_in(r, key) hh = ngx_hash_find(&cmcf->headers_in_hash, hash, lowcase_key, len); if (hh) { + + if (hh->offset == offsetof(ngx_http_headers_in_t, cookies)) { + sep = ';'; + goto multi; + } + + #if (NGX_HTTP_X_FORWARDED_FOR) + if (hh->offset == offsetof(ngx_http_headers_in_t, x_forwarded_for)) { + sep = ','; + goto multi; + } + #endif + if (hh->offset) { ph = (ngx_table_elt_t **) ((char *) &r->headers_in + hh->offset); @@ -268,15 +282,19 @@ header_in(r, key) XSRETURN_UNDEF; } - /* Cookie */ + multi: - n = r->headers_in.cookies.nelts; + /* Cookie, X-Forwarded-For */ + + a = (ngx_array_t *) ((char *) &r->headers_in + hh->offset); + + n = a->nelts; if (n == 0) { XSRETURN_UNDEF; } - ph = r->headers_in.cookies.elts; + ph = a->elts; if (n == 1) { 
ngx_http_perl_set_targ((*ph)->value.data, (*ph)->value.len); @@ -290,12 +308,12 @@ header_in(r, key) size += ph[i]->value.len + sizeof("; ") - 1; } - cookie = ngx_pnalloc(r->pool, size); - if (cookie == NULL) { + value = ngx_pnalloc(r->pool, size); + if (value == NULL) { XSRETURN_UNDEF; } - p = cookie; + p = value; for (i = 0; /* void */ ; i++) { p = ngx_copy(p, ph[i]->value.data, ph[i]->value.len); @@ -304,10 +322,10 @@ header_in(r, key) break; } - *p++ = ';'; *p++ = ' '; + *p++ = sep; *p++ = ' '; } - ngx_http_perl_set_targ(cookie, size); + ngx_http_perl_set_targ(value, size); goto done; } From vbart at nginx.com Mon Jun 10 11:02:50 2013 From: vbart at nginx.com (Valentin V. Bartenev) Date: Mon, 10 Jun 2013 15:02:50 +0400 Subject: [PATCH/v3] SPDY: pass though the full status when available. In-Reply-To: <20130607201837.GA10437@home.blackbean.org> References: <20130530213434.GA9044@home.blackbean.org> <201306051833.41715.vbart@nginx.com> <20130607201837.GA10437@home.blackbean.org> Message-ID: <201306101502.50571.vbart@nginx.com> On Saturday 08 June 2013 00:18:37 Jim Radford wrote: > # HG changeset patch > # User Jim Radford > # Date 1370636160 25200 > # Node ID 18018b06ed3103c8a0e841e59913daa8bab1c03c > # Parent 55dc535ae5dc9d9ffb9bf9d163f9c7d298a56f87 > SPDY: pass though the full status when available. > > Avoid stripping the status text when proxying for compatibility with http. > [...] The patch looks fine for me. I'll push it after Maxim Dounin's approval. wbr, Valentin V. Bartenev From mdounin at mdounin.ru Mon Jun 10 11:18:11 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 10 Jun 2013 15:18:11 +0400 Subject: [PATCH/v3] SPDY: pass though the full status when available. In-Reply-To: <201306101502.50571.vbart@nginx.com> References: <20130530213434.GA9044@home.blackbean.org> <201306051833.41715.vbart@nginx.com> <20130607201837.GA10437@home.blackbean.org> <201306101502.50571.vbart@nginx.com> Message-ID: <20130610111811.GS72282@mdounin.ru> Hello! 
On Mon, Jun 10, 2013 at 03:02:50PM +0400, Valentin V. Bartenev wrote: > On Saturday 08 June 2013 00:18:37 Jim Radford wrote: > > # HG changeset patch > > # User Jim Radford > > # Date 1370636160 25200 > > # Node ID 18018b06ed3103c8a0e841e59913daa8bab1c03c > > # Parent 55dc535ae5dc9d9ffb9bf9d163f9c7d298a56f87 > > SPDY: pass though the full status when available. > > > > Avoid stripping the status text when proxying for compatibility with http. > > > [...] > > The patch looks fine for me. I'll push it after Maxim Dounin's approval. No objections. -- Maxim Dounin http://nginx.org/en/donation.html From vbart at nginx.com Mon Jun 10 11:49:11 2013 From: vbart at nginx.com (Valentin Bartenev) Date: Mon, 10 Jun 2013 11:49:11 +0000 Subject: [nginx] SPDY: pass through the full status when available. Message-ID: details: http://hg.nginx.org/nginx/rev/725fb71ab1a6 branches: changeset: 5249:725fb71ab1a6 user: Jim Radford date: Fri Jun 07 13:16:00 2013 -0700 description: SPDY: pass through the full status when available. Avoid stripping the status text when proxying for compatibility with http. diffstat: src/http/ngx_http_spdy_filter_module.c | 15 ++++++++++++--- 1 files changed, 12 insertions(+), 3 deletions(-) diffs (32 lines): diff -r f5626ab8cb87 -r 725fb71ab1a6 src/http/ngx_http_spdy_filter_module.c --- a/src/http/ngx_http_spdy_filter_module.c Mon Jun 10 14:35:00 2013 +0400 +++ b/src/http/ngx_http_spdy_filter_module.c Fri Jun 07 13:16:00 2013 -0700 @@ -162,7 +162,9 @@ ngx_http_spdy_header_filter(ngx_http_req + ngx_http_spdy_nv_nsize("version") + ngx_http_spdy_nv_vsize("HTTP/1.1") + ngx_http_spdy_nv_nsize("status") - + ngx_http_spdy_nv_vsize("418"); + + (r->headers_out.status_line.len + ? 
NGX_SPDY_NV_VLEN_SIZE + r->headers_out.status_line.len + : ngx_http_spdy_nv_vsize("418")); clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); @@ -304,8 +306,15 @@ ngx_http_spdy_header_filter(ngx_http_req last = ngx_http_spdy_nv_write_val(last, "HTTP/1.1"); last = ngx_http_spdy_nv_write_name(last, "status"); - last = ngx_http_spdy_nv_write_vlen(last, 3); - last = ngx_sprintf(last, "%03ui", r->headers_out.status); + + if (r->headers_out.status_line.len) { + last = ngx_http_spdy_nv_write_vlen(last, r->headers_out.status_line.len); + last = ngx_cpymem(last, r->headers_out.status_line.data, + r->headers_out.status_line.len); + } else { + last = ngx_http_spdy_nv_write_vlen(last, 3); + last = ngx_sprintf(last, "%03ui", r->headers_out.status); + } count = 2; From vbart at nginx.com Mon Jun 10 11:53:34 2013 From: vbart at nginx.com (Valentin V. Bartenev) Date: Mon, 10 Jun 2013 15:53:34 +0400 Subject: [PATCH/v3] SPDY: pass though the full status when available. In-Reply-To: <201306101502.50571.vbart@nginx.com> References: <20130530213434.GA9044@home.blackbean.org> <20130607201837.GA10437@home.blackbean.org> <201306101502.50571.vbart@nginx.com> Message-ID: <201306101553.34770.vbart@nginx.com> On Monday 10 June 2013 15:02:50 Valentin V. Bartenev wrote: > On Saturday 08 June 2013 00:18:37 Jim Radford wrote: > > # HG changeset patch > > # User Jim Radford > > # Date 1370636160 25200 > > # Node ID 18018b06ed3103c8a0e841e59913daa8bab1c03c > > # Parent 55dc535ae5dc9d9ffb9bf9d163f9c7d298a56f87 > > SPDY: pass though the full status when available. > > > > Avoid stripping the status text when proxying for compatibility with > > http. > > [...] > > The patch looks fine for me. I'll push it after Maxim Dounin's approval. > Done: http://hg.nginx.org/nginx/rev/725fb71ab1a6 Thank you. wbr, Valentin V. 
Bartenev From agentzh at gmail.com Mon Jun 10 22:37:09 2013 From: agentzh at gmail.com (agentzh) Date: Mon, 10 Jun 2013 15:37:09 -0700 Subject: [ANN] Test::Nginx 0.22 released Message-ID: Hello guys! I've just uploaded Test::Nginx 0.22 to CPAN: http://search.cpan.org/perldoc?Test::Nginx It will appear on the CPAN mirror near you in the next few hours or so. Special thanks go to all our contributors and users :) Here's the complete change log for this release (compared to the last CPAN release, 0.21): * feature: added new sections --- abort and --- shutdown to test premature client abortions. * feature: implemented new section "--- raw_response_headers_unlike". * feature: added support for custom http_config filters for subclasses. * feature: now we automatically check [alert] and [crit] messages in the nginx error logs and print them as warnings if they do not appear in either --- error_log or --- no_error_log. * feature: added new section "--- stap_out_unlike" for testing patterns which should not appear in the output systemtap. * feature: added support for the special notation "$LIBPCRE_PATH" within --- stap, which is always substituted by the absolute path to libpcre used by the current nginx. * feature: now we check if the nginx process is gone in the "check leak" testing mode (i.e., when TEST_NGINX_CHECK_LEAK=1). * feature: added new section "--- skip_eval: " for skipping tests according to the running result of a piece of Perl code. * feature: now we allow a (unix domain) socket file path specified in the "--- udp_listen" directive value. * feature: in --- stap values, now we support the notation "F(func at file)". * feature: made the environment TEST_NGINX_SERVER_PORT always take the value so that the test cases can always reference it. * feature: added new section "--- wait" to always wait a specified amount of time before checking the systemtap outputs and/or error logs (if any). 
* bugfix: no longer retry connecting to the nginx server when the child process running nginx is already gone. * bugfix: no longer bail out the up to 3 test cases fail all their connecting attempts. * bugfix: TEST_NGINX_USE_HUP=1 could not work with TEST_NGINX_NO_CLEAN=1. * bugfix: the output of systemtap took the value undef when it was in fact empty. * bugfix: protect against division by zero in the "check leak" testing mode. * bugfix: disabled the test report ending when failing to parse chunked response body because the test count is surely out of sync. * bugfix: the code that searches executables in the PATH environment would always look up the current working directory first and might pick up an executable directory with the same name. * bugfix: conflicting testing modes may conflict together. * bugfix: added File::Path 2.06_05 as a minimum requirement. thanks Andreas J. K??nig for reporting this issue. * bugfix: pass --show-possibly-lost=no to valgrind by default because nginx makes clever use of pointers almost everywhere. * bugfix: when --- abort was not specified, http client timeout was not treated as a subtest failure as specified. * bugfix: the child process running ab or weighttp might not be reaped in extreme conditions. * bugfix: we did not close STDOUT and STDERR in the child processes forked, which could confuse the "prove" harness and lead to hang. * optimize: disabled the access log altogether in the "check leak" testing mode. * docs: documented the TEST_NGINX_NO_CLEAN environment. * docs: fixed a small issue in the code sample. thanks smallfish. This Perl module provides a test scaffold based on IO::Socket or LWP for automated testing in Nginx C module or ngx_lua-based Lua library development. This class inherits from Test::Base, thus bringing all its declarative power to the NginxC module testing practices. 
Please check out the full documentation on CPAN: http://search.cpan.org/perldoc?Test::Nginx::Socket All of our Nginx modules (as well as our lua-resty-* libraries) are using Test::Nginx to drive their test suites. And it is also driving my test cluster running on Amazon EC2: http://qa.openresty.org Please note that this module is completely different from the Test::Nginx module created by Maxim Dounin. The git repository for this Perl module is hosted on GitHub: https://github.com/agentzh/test-nginx Enjoy! -agentzh From ykirpichev at gmail.com Tue Jun 11 09:00:42 2013 From: ykirpichev at gmail.com (Yury Kirpichev) Date: Tue, 11 Jun 2013 13:00:42 +0400 Subject: [PATCH] Fix nopush cleanup for SPDY Message-ID: Hi, Could you please take a look at patch below. I've tried to fix problem that TCP_CORK option is not cleaned in SPDY case. # HG changeset patch # User ykirpichev at gmail.com # Date 1370939502 -14400 # Branch nopush_fix_1 # Node ID 58d7a76b975ed7afb6a980b8810051a10dfc96f4 # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 Fix tcp_nopush cleanup for spdy diff -r 725fb71ab1a6 -r 58d7a76b975e src/http/ngx_http.c --- a/src/http/ngx_http.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/http/ngx_http.c Tue Jun 11 12:31:42 2013 +0400 @@ -2106,3 +2106,112 @@ return NGX_OK; } + + +ngx_int_t +ngx_http_check_and_set_nopush(ngx_connection_t* c) +{ + int tcp_nodelay; + + /* the TCP_CORK and TCP_NODELAY are mutually exclusive */ + if (c->tcp_nodelay == NGX_TCP_NODELAY_SET) { + + tcp_nodelay = 0; + + if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, + (const void *) &tcp_nodelay, sizeof(int)) == -1) + { + /* + * there is a tiny chance to be interrupted, however, + * we continue a processing with the TCP_NODELAY + * and without the TCP_CORK + */ + + if (ngx_errno != NGX_EINTR) { + ngx_connection_error(c, ngx_errno, + "setsockopt(TCP_NODELAY) failed"); + return NGX_ERROR; + } + + } else { + c->tcp_nodelay = NGX_TCP_NODELAY_UNSET; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "no 
tcp_nodelay"); + } + } + + if (c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) { + + if (ngx_tcp_nopush(c->fd) == NGX_ERROR) { + + /* + * there is a tiny chance to be interrupted, however, + * we continue a processing without the TCP_CORK + */ + + if (ngx_errno != NGX_EINTR) { + ngx_connection_error(c, ngx_errno, + ngx_tcp_nopush_n " failed"); + return NGX_ERROR; + } + + } else { + c->tcp_nopush = NGX_TCP_NOPUSH_SET; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "tcp_nopush"); + } + } + + return NGX_OK; +} + +ngx_int_t +ngx_http_check_and_restore_nopush(ngx_connection_t* c, + ngx_http_core_loc_conf_t* clcf) +{ + int tcp_nodelay; + + if (c->tcp_nopush == NGX_TCP_NOPUSH_SET) { + if (ngx_tcp_push(c->fd) == -1) { + ngx_connection_error(c, ngx_socket_errno, ngx_tcp_push_n " failed"); + return NGX_ERROR; + } + + c->tcp_nopush = NGX_TCP_NOPUSH_UNSET; + tcp_nodelay = ngx_tcp_nodelay_and_tcp_nopush ? 1 : 0; + + } else { + tcp_nodelay = 1; + } +#if 1 + if (tcp_nodelay + && clcf->tcp_nodelay + && c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) + { + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); + + if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, + (const void *) &tcp_nodelay, sizeof(int)) + == -1) + { +#if (NGX_SOLARIS) + /* Solaris returns EINVAL if a socket has been shut down */ + c->log_error = NGX_ERROR_IGNORE_EINVAL; +#endif + + ngx_connection_error(c, ngx_socket_errno, + "setsockopt(TCP_NODELAY) failed"); + + c->log_error = NGX_ERROR_INFO; + return NGX_ERROR; + } + + c->tcp_nodelay = NGX_TCP_NODELAY_SET; + } +#endif + + return NGX_OK; +} + diff -r 725fb71ab1a6 -r 58d7a76b975e src/http/ngx_http.h --- a/src/http/ngx_http.h Fri Jun 07 13:16:00 2013 -0700 +++ b/src/http/ngx_http.h Tue Jun 11 12:31:42 2013 +0400 @@ -180,5 +180,9 @@ extern ngx_http_output_header_filter_pt ngx_http_top_header_filter; extern ngx_http_output_body_filter_pt ngx_http_top_body_filter; +ngx_int_t ngx_http_check_and_set_nopush(ngx_connection_t* c); + +ngx_int_t 
ngx_http_check_and_restore_nopush(ngx_connection_t* c, + ngx_http_core_loc_conf_t* clcf); #endif /* _NGX_HTTP_H_INCLUDED_ */ diff -r 725fb71ab1a6 -r 58d7a76b975e src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/http/ngx_http_request.c Tue Jun 11 12:31:42 2013 +0400 @@ -2740,7 +2740,6 @@ static void ngx_http_set_keepalive(ngx_http_request_t *r) { - int tcp_nodelay; ngx_int_t i; ngx_buf_t *b, *f; ngx_event_t *rev, *wev; @@ -2913,44 +2912,9 @@ c->log->action = "keepalive"; - if (c->tcp_nopush == NGX_TCP_NOPUSH_SET) { - if (ngx_tcp_push(c->fd) == -1) { - ngx_connection_error(c, ngx_socket_errno, ngx_tcp_push_n " failed"); - ngx_http_close_connection(c); - return; - } - - c->tcp_nopush = NGX_TCP_NOPUSH_UNSET; - tcp_nodelay = ngx_tcp_nodelay_and_tcp_nopush ? 1 : 0; - - } else { - tcp_nodelay = 1; - } - - if (tcp_nodelay - && clcf->tcp_nodelay - && c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) - { - ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); - - if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, - (const void *) &tcp_nodelay, sizeof(int)) - == -1) - { -#if (NGX_SOLARIS) - /* Solaris returns EINVAL if a socket has been shut down */ - c->log_error = NGX_ERROR_IGNORE_EINVAL; -#endif - - ngx_connection_error(c, ngx_socket_errno, - "setsockopt(TCP_NODELAY) failed"); - - c->log_error = NGX_ERROR_INFO; - ngx_http_close_connection(c); - return; - } - - c->tcp_nodelay = NGX_TCP_NODELAY_SET; + if (ngx_http_check_and_restore_nopush(c, clcf) != NGX_OK) { + ngx_http_close_connection(c); + return; } #if 0 diff -r 725fb71ab1a6 -r 58d7a76b975e src/http/ngx_http_spdy.c --- a/src/http/ngx_http_spdy.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/http/ngx_http_spdy.c Tue Jun 11 12:31:42 2013 +0400 @@ -450,7 +450,6 @@ ngx_http_spdy_handle_connection(sc); } - ngx_int_t ngx_http_spdy_send_output_queue(ngx_http_spdy_connection_t *sc) { @@ -490,6 +489,13 @@ } cl = c->send_chain(c, cl, 0); + clcf = 
ngx_http_get_module_loc_conf(sc->http_connection->conf_ctx, + ngx_http_core_module); + + if (ngx_http_check_and_restore_nopush(c, clcf) != NGX_OK) { + c->error = 1; + return NGX_ERROR; + } if (cl == NGX_CHAIN_ERROR) { c->error = 1; @@ -501,8 +507,6 @@ return NGX_ERROR; } - clcf = ngx_http_get_module_loc_conf(sc->http_connection->conf_ctx, - ngx_http_core_module); if (ngx_handle_write_event(wev, clcf->send_lowat) != NGX_OK) { return NGX_ERROR; /* FIXME */ diff -r 725fb71ab1a6 -r 58d7a76b975e src/os/unix/ngx_linux_sendfile_chain.c --- a/src/os/unix/ngx_linux_sendfile_chain.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/os/unix/ngx_linux_sendfile_chain.c Tue Jun 11 12:31:42 2013 +0400 @@ -33,11 +33,13 @@ #define NGX_HEADERS IOV_MAX #endif +ngx_int_t +ngx_http_check_and_set_nopush(ngx_connection_t* c); ngx_chain_t * ngx_linux_sendfile_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit) { - int rc, tcp_nodelay; + int rc; off_t size, send, prev_send, aligned, sent, fprev; u_char *prev; size_t file_size; @@ -154,61 +156,9 @@ && cl && cl->buf->in_file) { - /* the TCP_CORK and TCP_NODELAY are mutually exclusive */ - - if (c->tcp_nodelay == NGX_TCP_NODELAY_SET) { - - tcp_nodelay = 0; - - if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, - (const void *) &tcp_nodelay, sizeof(int)) == -1) - { - err = ngx_errno; - - /* - * there is a tiny chance to be interrupted, however, - * we continue a processing with the TCP_NODELAY - * and without the TCP_CORK - */ - - if (err != NGX_EINTR) { - wev->error = 1; - ngx_connection_error(c, err, - "setsockopt(TCP_NODELAY) failed"); - return NGX_CHAIN_ERROR; - } - - } else { - c->tcp_nodelay = NGX_TCP_NODELAY_UNSET; - - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, - "no tcp_nodelay"); - } - } - - if (c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) { - - if (ngx_tcp_nopush(c->fd) == NGX_ERROR) { - err = ngx_errno; - - /* - * there is a tiny chance to be interrupted, however, - * we continue a processing without the TCP_CORK - */ - - if (err != 
NGX_EINTR) { - wev->error = 1; - ngx_connection_error(c, err, - ngx_tcp_nopush_n " failed"); - return NGX_CHAIN_ERROR; - } - - } else { - c->tcp_nopush = NGX_TCP_NOPUSH_SET; - - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, - "tcp_nopush"); - } + if (ngx_http_check_and_set_nopush(c) != NGX_OK) { + wev->error = 1; + return NGX_CHAIN_ERROR; } } -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: spdy_fix_nopush Type: application/octet-stream Size: 9844 bytes Desc: not available URL: From mdounin at mdounin.ru Tue Jun 11 12:13:01 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 11 Jun 2013 16:13:01 +0400 Subject: [PATCH] Fix nopush cleanup for SPDY In-Reply-To: References: Message-ID: <20130611121301.GE72282@mdounin.ru> Hello! On Tue, Jun 11, 2013 at 01:00:42PM +0400, Yury Kirpichev wrote: > Could you please take a look at patch below. > I've tried to fix problem that TCP_CORK option is not cleaned in SPDY case. The patch looks wrong. - It introduces layering violation and build failure --without-http as a result. You may have better luck focusing on a problem you want to fix, and avoiding unrelated changes as much as possible. - It tries to restore nopush after each c->send_chain() call, which looks suboptimal. It probably should be done if there are no pending data to send. -- Maxim Dounin http://nginx.org/en/donation.html From cubicdaiya at gmail.com Tue Jun 11 14:59:42 2013 From: cubicdaiya at gmail.com (cubicdaiya) Date: Tue, 11 Jun 2013 23:59:42 +0900 Subject: [PATCH]Valgind: a complaint about uninitialized bytes Message-ID: Hi, Could you take a look at the following patch? 
# HG changeset patch # User cubicdaiya # Date 1370962306 -32400 # Node ID 4d97f7d8925f3d10a94b13bbe2cd0923e79e5d65 # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 Valgrind: a complaint about uninitialized bytes Valgrind outputs the following message when NGX_HAVE_MSGHDR_MSG_CONTROL is 1. ==12605== Syscall param socketcall.sendmsg(msg.msg_control) points to uninitialised byte(s) ==12605== at 0x4E37660: __sendmsg_nocancel (syscall-template.S:82) ==12605== by 0x41C9BF: ngx_write_channel (ngx_channel.c:77) ==12605== by 0x41E2DC: ngx_pass_open_channel (ngx_process_cycle.c:454) ==12605== by 0x41E3B6: ngx_start_worker_processes (ngx_process_cycle.c:371) ==12605== by 0x41F368: ngx_master_process_cycle (ngx_process_cycle.c:136) ==12605== by 0x404A19: main (nginx.c:412) ==12605== Address 0x7ff000614 is on thread 1's stack diff -r 725fb71ab1a6 -r 4d97f7d8925f src/os/unix/ngx_channel.c --- a/src/os/unix/ngx_channel.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/os/unix/ngx_channel.c Tue Jun 11 23:51:46 2013 +0900 @@ -26,6 +26,8 @@ char space[CMSG_SPACE(sizeof(int))]; } cmsg; + ngx_memzero(&cmsg, sizeof(cmsg)); + if (ch->fd == -1) { msg.msg_control = NULL; msg.msg_controllen = 0; # my environment ## uname -a Linux a016 2.6.32-5-amd64 #1 SMP Mon Feb 25 00:26:11 UTC 2013 x86_64 GNU/Linux ## nginx -V nginx version: nginx/1.5.2 built by gcc 4.4.5 (Debian 4.4.5-8) configure arguments: --sbin-path=/usr/sbin/nginx --conf-path=/etc/nginx/nginx.conf --error-log-path=/var/log/nginx/error.log --pid-path=/var/run/nginx.pid --lock-path=/var/lock/nginx.lock --http-log-path=/var/log/nginx/access.log --http-client-body-temp-path=/var/lib/nginx/body --with-http_stub_status_module --http-proxy-temp-path=/var/lib/nginx/proxy --with-http_stub_status_module --http-fastcgi-temp-path=/var/lib/nginx/fastcgi --with-pcre -- Tatsuhiko Kubo E-Mail : cubicdaiya at gmail.com HP : http://cccis.jp/index_en.html Twitter : http://twitter.com/cubicdaiya -------------- next part -------------- An HTML attachment 
was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: valgrind_complaint_fix.patch Type: application/octet-stream Size: 1283 bytes Desc: not available URL: From cubicdaiya at gmail.com Tue Jun 11 15:20:10 2013 From: cubicdaiya at gmail.com (cubicdaiya) Date: Wed, 12 Jun 2013 00:20:10 +0900 Subject: [PATCH]Valgind: a complaint about uninitialized bytes Message-ID: Hi, I'm sorry. As I mistook my name in changeset, could you take a look at the following patch, too? # HG changeset patch # User Tatsuhiko Kubo # Date 1370963498 -32400 # Node ID fe954e402fd5b1292f9d5039454f327f8939999a # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 Valgrind: a complaint about uninitialized bytes Valgrind outputs the following message when NGX_HAVE_MSGHDR_MSG_CONTROL is 1. ==12605== Syscall param socketcall.sendmsg(msg.msg_control) points to uninitialised byte(s) ==12605== at 0x4E37660: __sendmsg_nocancel (syscall-template.S:82) ==12605== by 0x41C9BF: ngx_write_channel (ngx_channel.c:77) ==12605== by 0x41E2DC: ngx_pass_open_channel (ngx_process_cycle.c:454) ==12605== by 0x41E3B6: ngx_start_worker_processes (ngx_process_cycle.c:371) ==12605== by 0x41F368: ngx_master_process_cycle (ngx_process_cycle.c:136) ==12605== by 0x404A19: main (nginx.c:412) ==12605== Address 0x7ff000614 is on thread 1's stack diff -r 725fb71ab1a6 -r fe954e402fd5 src/os/unix/ngx_channel.c --- a/src/os/unix/ngx_channel.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/os/unix/ngx_channel.c Wed Jun 12 00:11:38 2013 +0900 @@ -26,6 +26,8 @@ char space[CMSG_SPACE(sizeof(int))]; } cmsg; + ngx_memzero(&cmsg, sizeof(cmsg)); + if (ch->fd == -1) { msg.msg_control = NULL; msg.msg_controllen = 0; 2013/6/11 cubicdaiya > Hi, > > Could you take a look at the following patch? 
> > # HG changeset patch > # User cubicdaiya > # Date 1370962306 -32400 > # Node ID 4d97f7d8925f3d10a94b13bbe2cd0923e79e5d65 > # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 > Valgrind: a complaint about uninitialized bytes > > Valgrind outputs the following message > when NGX_HAVE_MSGHDR_MSG_CONTROL is 1. > > ==12605== Syscall param socketcall.sendmsg(msg.msg_control) points to > uninitialised byte(s) > ==12605== at 0x4E37660: __sendmsg_nocancel (syscall-template.S:82) > ==12605== by 0x41C9BF: ngx_write_channel (ngx_channel.c:77) > ==12605== by 0x41E2DC: ngx_pass_open_channel (ngx_process_cycle.c:454) > ==12605== by 0x41E3B6: ngx_start_worker_processes > (ngx_process_cycle.c:371) > ==12605== by 0x41F368: ngx_master_process_cycle > (ngx_process_cycle.c:136) > ==12605== by 0x404A19: main (nginx.c:412) > ==12605== Address 0x7ff000614 is on thread 1's stack > > diff -r 725fb71ab1a6 -r 4d97f7d8925f src/os/unix/ngx_channel.c > --- a/src/os/unix/ngx_channel.c Fri Jun 07 13:16:00 2013 -0700 > +++ b/src/os/unix/ngx_channel.c Tue Jun 11 23:51:46 2013 +0900 > @@ -26,6 +26,8 @@ > char space[CMSG_SPACE(sizeof(int))]; > } cmsg; > > + ngx_memzero(&cmsg, sizeof(cmsg)); > + > if (ch->fd == -1) { > msg.msg_control = NULL; > msg.msg_controllen = 0; > > > # my environment > > ## uname -a > > Linux a016 2.6.32-5-amd64 #1 SMP Mon Feb 25 00:26:11 UTC 2013 x86_64 > GNU/Linux > > ## nginx -V > > nginx version: nginx/1.5.2 > built by gcc 4.4.5 (Debian 4.4.5-8) > configure arguments: --sbin-path=/usr/sbin/nginx > --conf-path=/etc/nginx/nginx.conf --error-log-path=/var/log/nginx/error.log > --pid-path=/var/run/nginx.pid --lock-path=/var/lock/nginx.lock > --http-log-path=/var/log/nginx/access.log > --http-client-body-temp-path=/var/lib/nginx/body > --with-http_stub_status_module --http-proxy-temp-path=/var/lib/nginx/proxy > --with-http_stub_status_module > --http-fastcgi-temp-path=/var/lib/nginx/fastcgi --with-pcre > > > -- > Tatsuhiko Kubo > > E-Mail : cubicdaiya at gmail.com > HP : 
http://cccis.jp/index_en.html > Twitter : http://twitter.com/cubicdaiya > -- Tatsuhiko Kubo E-Mail : cubicdaiya at gmail.com HP : http://cccis.jp/index_en.html Twitter : http://twitter.com/cubicdaiya -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: valgrind_complaint_fix.patch Type: application/octet-stream Size: 1287 bytes Desc: not available URL: From ykirpichev at gmail.com Tue Jun 11 15:30:08 2013 From: ykirpichev at gmail.com (Yury Kirpichev) Date: Tue, 11 Jun 2013 19:30:08 +0400 Subject: [PATCH] SPDY: fix nopush cleanup Message-ID: Hello, Thanks for your comments. How about second version: # HG changeset patch # User ykirpichev at gmail.com # Date 1370964275 -14400 # Branch nopush_fix_2 # Node ID 14de55787a48327019d549d48abf2631e294b4d8 # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 SPDY: fix nopush cleanup diff -r 725fb71ab1a6 -r 14de55787a48 src/http/ngx_http_spdy.c --- a/src/http/ngx_http_spdy.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/http/ngx_http_spdy.c Tue Jun 11 19:24:35 2013 +0400 @@ -504,6 +504,51 @@ clcf = ngx_http_get_module_loc_conf(sc->http_connection->conf_ctx, ngx_http_core_module); + // all data is sent, can clean nopush if necessary + if (wev->ready) + { + int tcp_nodelay; + + if (c->tcp_nopush == NGX_TCP_NOPUSH_SET) { + if (ngx_tcp_push(c->fd) == -1) { + ngx_connection_error(c, ngx_socket_errno, ngx_tcp_push_n " failed"); + return NGX_ERROR; + } + + c->tcp_nopush = NGX_TCP_NOPUSH_UNSET; + tcp_nodelay = ngx_tcp_nodelay_and_tcp_nopush ? 
1 : 0; + + } else { + tcp_nodelay = 1; + } + + if (tcp_nodelay + && clcf->tcp_nodelay + && c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) + { + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); + + if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, + (const void *) &tcp_nodelay, sizeof(int)) + == -1) + { +#if (NGX_SOLARIS) + /* Solaris returns EINVAL if a socket has been shut down */ + c->log_error = NGX_ERROR_IGNORE_EINVAL; +#endif + + ngx_connection_error(c, ngx_socket_errno, + "setsockopt(TCP_NODELAY) failed"); + + c->log_error = NGX_ERROR_INFO; + return NGX_ERROR; + } + + c->tcp_nodelay = NGX_TCP_NODELAY_SET; + } + + } + if (ngx_handle_write_event(wev, clcf->send_lowat) != NGX_OK) { return NGX_ERROR; /* FIXME */ } 2013/6/11 Maxim Dounin > Hello! > > On Tue, Jun 11, 2013 at 01:00:42PM +0400, Yury Kirpichev wrote: > > > Could you please take a look at patch below. > > I've tried to fix problem that TCP_CORK option is not cleaned in SPDY > case. > > The patch looks wrong. > > - It introduces layering violation and build failure --without-http > as a result. You may have better luck focusing on a problem you > want to fix, and avoiding unrelated changes as much as possible. > > - It tries to restore nopush after each c->send_chain() call, > which looks suboptimal. It probably should be done if there are no > pending data to send. > > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Tue Jun 11 15:30:20 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 11 Jun 2013 19:30:20 +0400 Subject: [PATCH]Valgind: a complaint about uninitialized bytes In-Reply-To: References: Message-ID: <20130611153020.GI72282@mdounin.ru> Hello! 
On Wed, Jun 12, 2013 at 12:20:10AM +0900, cubicdaiya wrote: > Hi, I'm sorry. > > As I mistook my name in changeset, > could you take a look at the following patch, too? > > # HG changeset patch > # User Tatsuhiko Kubo > # Date 1370963498 -32400 > # Node ID fe954e402fd5b1292f9d5039454f327f8939999a > # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 > Valgrind: a complaint about uninitialized bytes > > Valgrind outputs the following message > when NGX_HAVE_MSGHDR_MSG_CONTROL is 1. > > ==12605== Syscall param socketcall.sendmsg(msg.msg_control) points to > uninitialised byte(s) > ==12605== at 0x4E37660: __sendmsg_nocancel (syscall-template.S:82) > ==12605== by 0x41C9BF: ngx_write_channel (ngx_channel.c:77) > ==12605== by 0x41E2DC: ngx_pass_open_channel (ngx_process_cycle.c:454) > ==12605== by 0x41E3B6: ngx_start_worker_processes > (ngx_process_cycle.c:371) > ==12605== by 0x41F368: ngx_master_process_cycle (ngx_process_cycle.c:136) > ==12605== by 0x404A19: main (nginx.c:412) > ==12605== Address 0x7ff000614 is on thread 1's stack > > diff -r 725fb71ab1a6 -r fe954e402fd5 src/os/unix/ngx_channel.c > --- a/src/os/unix/ngx_channel.c Fri Jun 07 13:16:00 2013 -0700 > +++ b/src/os/unix/ngx_channel.c Wed Jun 12 00:11:38 2013 +0900 > @@ -26,6 +26,8 @@ > char space[CMSG_SPACE(sizeof(int))]; > } cmsg; > > + ngx_memzero(&cmsg, sizeof(cmsg)); > + > if (ch->fd == -1) { > msg.msg_control = NULL; > msg.msg_controllen = 0; The memzero call is certainly only needed in case of fd != -1 (i.e., to suppress valgrind warning). -- Maxim Dounin http://nginx.org/en/donation.html From cubicdaiya at gmail.com Tue Jun 11 15:51:56 2013 From: cubicdaiya at gmail.com (cubicdaiya) Date: Wed, 12 Jun 2013 00:51:56 +0900 Subject: [PATCH]Valgind: a complaint about uninitialized bytes In-Reply-To: <20130611153020.GI72282@mdounin.ru> References: <20130611153020.GI72282@mdounin.ru> Message-ID: Hi. 
2013/6/12 Maxim Dounin > The memzero call is certainly only needed in case of fd != -1 > (i.e., to suppress valgrind warning). I see. How is that? # HG changeset patch # User Tatsuhiko Kubo # Date 1370965284 -32400 # Node ID 2bb2b7d3a263d97b148b706407caf16fd0f074eb # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 Valgrind: a complaint about uninitialized bytes Valgrind outputs the following message when NGX_HAVE_MSGHDR_MSG_CONTROL is 1. ==12605== Syscall param socketcall.sendmsg(msg.msg_control) points to uninitialised byte(s) ==12605== at 0x4E37660: __sendmsg_nocancel (syscall-template.S:82) ==12605== by 0x41C9BF: ngx_write_channel (ngx_channel.c:77) ==12605== by 0x41E2DC: ngx_pass_open_channel (ngx_process_cycle.c:454) ==12605== by 0x41E3B6: ngx_start_worker_processes (ngx_process_cycle.c:371) ==12605== by 0x41F368: ngx_master_process_cycle (ngx_process_cycle.c:136) ==12605== by 0x404A19: main (nginx.c:412) ==12605== Address 0x7ff000614 is on thread 1's stack diff -r 725fb71ab1a6 -r 2bb2b7d3a263 src/os/unix/ngx_channel.c --- a/src/os/unix/ngx_channel.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/os/unix/ngx_channel.c Wed Jun 12 00:41:24 2013 +0900 @@ -31,6 +31,7 @@ msg.msg_controllen = 0; } else { + ngx_memzero(&cmsg, sizeof(cmsg)); msg.msg_control = (caddr_t) &cmsg; msg.msg_controllen = sizeof(cmsg); -- Tatsuhiko Kubo E-Mail : cubicdaiya at gmail.com HP : http://cccis.jp/index_en.html Twitter : http://twitter.com/cubicdaiya -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: valgrind_complaint_fix.patch Type: application/octet-stream Size: 1265 bytes Desc: not available URL: From vbart at nginx.com Tue Jun 11 16:28:10 2013 From: vbart at nginx.com (Valentin V. 
Bartenev) Date: Tue, 11 Jun 2013 20:28:10 +0400 Subject: [PATCH] SPDY: fix nopush cleanup In-Reply-To: References: Message-ID: <201306112028.10430.vbart@nginx.com> On Tuesday 11 June 2013 19:30:08 Yury Kirpichev wrote: > Hello, > > Thanks for your comments. > How about second version: > > # HG changeset patch > # User ykirpichev at gmail.com > # Date 1370964275 -14400 > # Branch nopush_fix_2 > # Node ID 14de55787a48327019d549d48abf2631e294b4d8 > # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 > SPDY: fix nopush cleanup > > diff -r 725fb71ab1a6 -r 14de55787a48 src/http/ngx_http_spdy.c > --- a/src/http/ngx_http_spdy.c Fri Jun 07 13:16:00 2013 -0700 > +++ b/src/http/ngx_http_spdy.c Tue Jun 11 19:24:35 2013 +0400 > @@ -504,6 +504,51 @@ > clcf = ngx_http_get_module_loc_conf(sc->http_connection->conf_ctx, > ngx_http_core_module); > > + // all data is sent, can clean nopush if necessary > + if (wev->ready) > + { [...] No, it doesn't indicate that there is no data to send even in a short term. wbr, Valentin V. Bartenev From ykirpichev at gmail.com Thu Jun 13 08:12:18 2013 From: ykirpichev at gmail.com (Yury Kirpichev) Date: Thu, 13 Jun 2013 12:12:18 +0400 Subject: [PATCH/v3] SPDY: fix nopush cleanup Message-ID: Hello, Thanks for your comments. I tried to analyze it and came up with new patch. 
# HG changeset patch # User ykirpichev at gmail.com # Date 1371110599 -14400 # Branch nopush_fix_3 # Node ID f0a2291a34dae24a78bf3e97d64d2d0f9e37c09e # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 SPDY: fix nopush cleanup for SPDY connection diff -r 725fb71ab1a6 -r f0a2291a34da src/http/ngx_http_spdy.c --- a/src/http/ngx_http_spdy.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/http/ngx_http_spdy.c Thu Jun 13 12:03:19 2013 +0400 @@ -378,6 +378,58 @@ sc->blocked = 0; + // It is better to use NGX_SPDY_WRITE_BUFFERED here, but + // it is defined in ngx_http_spdy_filter_module.c + // So, just use !c->buffered + if (!c->buffered && !c->error) { + //no buffered data, so, we should clean nopush if needed + int tcp_nodelay; + ngx_http_core_loc_conf_t *clcf; + + clcf = ngx_http_get_module_loc_conf(sc->http_connection->conf_ctx, + ngx_http_core_module); + + if (c->tcp_nopush == NGX_TCP_NOPUSH_SET) { + if (ngx_tcp_push(c->fd) == -1) { + ngx_connection_error(c, ngx_socket_errno, ngx_tcp_push_n " failed"); + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + + c->tcp_nopush = NGX_TCP_NOPUSH_UNSET; + tcp_nodelay = ngx_tcp_nodelay_and_tcp_nopush ? 
1 : 0; + + } else { + tcp_nodelay = 1; + } + + if (tcp_nodelay + && clcf->tcp_nodelay + && c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) + { + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); + + if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, + (const void *) &tcp_nodelay, sizeof(int)) + == -1) + { +#if (NGX_SOLARIS) + /* Solaris returns EINVAL if a socket has been shut down */ + c->log_error = NGX_ERROR_IGNORE_EINVAL; +#endif + + ngx_connection_error(c, ngx_socket_errno, + "setsockopt(TCP_NODELAY) failed"); + + c->log_error = NGX_ERROR_INFO; + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + + c->tcp_nodelay = NGX_TCP_NODELAY_SET; + } + } + if (sc->processing) { if (rev->timer_set) { ngx_del_timer(rev); Did I understand you correctly that previous version was incorrect, because even though write is successful, there are might be more incoming data which need to be handled thus there is no sense to clear nopush option, since we might send more data within the same read iteration. BR/ Yury 2013/6/11 Yury Kirpichev > Hello, > > Thanks for your comments. 
> How about second version: > > # HG changeset patch > # User ykirpichev at gmail.com > # Date 1370964275 -14400 > # Branch nopush_fix_2 > # Node ID 14de55787a48327019d549d48abf2631e294b4d8 > # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 > SPDY: fix nopush cleanup > > diff -r 725fb71ab1a6 -r 14de55787a48 src/http/ngx_http_spdy.c > --- a/src/http/ngx_http_spdy.c Fri Jun 07 13:16:00 2013 -0700 > +++ b/src/http/ngx_http_spdy.c Tue Jun 11 19:24:35 2013 +0400 > @@ -504,6 +504,51 @@ > clcf = ngx_http_get_module_loc_conf(sc->http_connection->conf_ctx, > ngx_http_core_module); > > + // all data is sent, can clean nopush if necessary > + if (wev->ready) > + { > + int tcp_nodelay; > + > + if (c->tcp_nopush == NGX_TCP_NOPUSH_SET) { > + if (ngx_tcp_push(c->fd) == -1) { > + ngx_connection_error(c, ngx_socket_errno, ngx_tcp_push_n > " failed"); > + return NGX_ERROR; > + } > + > + c->tcp_nopush = NGX_TCP_NOPUSH_UNSET; > + tcp_nodelay = ngx_tcp_nodelay_and_tcp_nopush ? 1 : 0; > + > + } else { > + tcp_nodelay = 1; > + } > + > + if (tcp_nodelay > + && clcf->tcp_nodelay > + && c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) > + { > + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); > + > + if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, > + (const void *) &tcp_nodelay, sizeof(int)) > + == -1) > + { > +#if (NGX_SOLARIS) > + /* Solaris returns EINVAL if a socket has been shut down > */ > + c->log_error = NGX_ERROR_IGNORE_EINVAL; > +#endif > + > + ngx_connection_error(c, ngx_socket_errno, > + "setsockopt(TCP_NODELAY) failed"); > + > + c->log_error = NGX_ERROR_INFO; > + return NGX_ERROR; > + } > + > + c->tcp_nodelay = NGX_TCP_NODELAY_SET; > + } > + > + } > + > if (ngx_handle_write_event(wev, clcf->send_lowat) != NGX_OK) { > return NGX_ERROR; /* FIXME */ > } > > > 2013/6/11 Maxim Dounin > >> Hello! >> >> On Tue, Jun 11, 2013 at 01:00:42PM +0400, Yury Kirpichev wrote: >> >> > Could you please take a look at patch below. 
>> > I've tried to fix problem that TCP_CORK option is not cleaned in SPDY >> case. >> >> The patch looks wrong. >> >> - It introduces layering violation and build failure --without-http >> as a result. You may have better luck focusing on a problem you >> want to fix, and avoiding unrelated changes as much as possible. >> >> - It tries to restore nopush after each c->send_chain() call, >> which looks suboptimal. It probably should be done if there are no >> pending data to send. >> >> -- >> Maxim Dounin >> http://nginx.org/en/donation.html >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > -------------- next part -------------- An HTML attachment was scrubbed... URL: From dronnikov at gmail.com Thu Jun 13 08:15:54 2013 From: dronnikov at gmail.com (Vladimir Dronnikov) Date: Thu, 13 Jun 2013 12:15:54 +0400 Subject: Fwd: WEBDAV: accomodate MKCOL for somewhat broken clients In-Reply-To: References: Message-ID: Hello! I wonder if https://github.com/dvv/nginx/commit/3a8cdadea196a594fd1940be02818f51d1b1769fis feasible? The rationale is to be more liberal to the zoo of webdav clients. TIA, --Vladimir -------------- next part -------------- An HTML attachment was scrubbed... URL: From vl at nginx.com Thu Jun 13 10:56:42 2013 From: vl at nginx.com (Homutov Vladimir) Date: Thu, 13 Jun 2013 10:56:42 +0000 Subject: [nginx] Core: moved initialization of log level. Message-ID: details: http://hg.nginx.org/nginx/rev/3bb51e5afa9e branches: changeset: 5250:3bb51e5afa9e user: Vladimir Homutov date: Thu Jun 13 14:50:10 2013 +0400 description: Core: moved initialization of log level. The cycle->new_log->log_level should only be initialized by ngx_init_cycle() if no error logs were found in the configuration. This move allows to get rid of extra initialization in ngx_error_log(). 
diffstat: src/core/ngx_cycle.c | 3 ++- src/core/ngx_log.c | 2 -- 2 files changed, 2 insertions(+), 3 deletions(-) diffs (31 lines): diff -r 725fb71ab1a6 -r 3bb51e5afa9e src/core/ngx_cycle.c --- a/src/core/ngx_cycle.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/core/ngx_cycle.c Thu Jun 13 14:50:10 2013 +0400 @@ -84,7 +84,6 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) cycle->pool = pool; cycle->log = log; - cycle->new_log.log_level = NGX_LOG_ERR; cycle->old_cycle = old_cycle; cycle->conf_prefix.len = old_cycle->conf_prefix.len; @@ -344,6 +343,8 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) if (cycle->new_log.file == NULL) { goto failed; } + + cycle->new_log.log_level = NGX_LOG_ERR; } /* open the new files */ diff -r 725fb71ab1a6 -r 3bb51e5afa9e src/core/ngx_log.c --- a/src/core/ngx_log.c Fri Jun 07 13:16:00 2013 -0700 +++ b/src/core/ngx_log.c Thu Jun 13 14:50:10 2013 +0400 @@ -454,7 +454,5 @@ ngx_error_log(ngx_conf_t *cf, ngx_comman return NGX_CONF_OK; } - cf->cycle->new_log.log_level = 0; - return ngx_log_set_levels(cf, &cf->cycle->new_log); } From mdounin at mdounin.ru Thu Jun 13 12:21:44 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 13 Jun 2013 16:21:44 +0400 Subject: Fwd: WEBDAV: accomodate MKCOL for somewhat broken clients In-Reply-To: References: Message-ID: <20130613122144.GO72282@mdounin.ru> Hello! On Thu, Jun 13, 2013 at 12:15:54PM +0400, Vladimir Dronnikov wrote: > Hello! > > I wonder if > https://github.com/dvv/nginx/commit/3a8cdadea196a594fd1940be02818f51d1b1769fis > feasible? The rationale is to be more liberal to the zoo of webdav > clients. Doesn't looks like a good change to me. 
-- Maxim Dounin http://nginx.org/en/donation.html From dronnikov at gmail.com Thu Jun 13 12:28:26 2013 From: dronnikov at gmail.com (Vladimir Dronnikov) Date: Thu, 13 Jun 2013 16:28:26 +0400 Subject: Fwd: WEBDAV: accomodate MKCOL for somewhat broken clients In-Reply-To: <20130613122144.GO72282@mdounin.ru> References: <20130613122144.GO72282@mdounin.ru> Message-ID: Any chance to have this trailing slash controlled by an option? Or would you people mind to share a chunk of nginx config which would append that slash? TIA On Thu, Jun 13, 2013 at 4:21 PM, Maxim Dounin wrote: > Hello! > > On Thu, Jun 13, 2013 at 12:15:54PM +0400, Vladimir Dronnikov wrote: > > > Hello! > > > > I wonder if > > > https://github.com/dvv/nginx/commit/3a8cdadea196a594fd1940be02818f51d1b1769fis > > feasible? The rationale is to be more liberal to the zoo of webdav > > clients. > > Doesn't looks like a good change to me. > > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From vbart at nginx.com Thu Jun 13 12:29:54 2013 From: vbart at nginx.com (Valentin Bartenev) Date: Thu, 13 Jun 2013 12:29:54 +0000 Subject: [nginx] SPDY: fixed code style, no functional changes. Message-ID: details: http://hg.nginx.org/nginx/rev/4ef49081ef4c branches: changeset: 5251:4ef49081ef4c user: Valentin Bartenev date: Thu Jun 13 16:28:42 2013 +0400 description: SPDY: fixed code style, no functional changes. 
diffstat: src/http/ngx_http_spdy_filter_module.c | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diffs (13 lines): diff -r 3bb51e5afa9e -r 4ef49081ef4c src/http/ngx_http_spdy_filter_module.c --- a/src/http/ngx_http_spdy_filter_module.c Thu Jun 13 14:50:10 2013 +0400 +++ b/src/http/ngx_http_spdy_filter_module.c Thu Jun 13 16:28:42 2013 +0400 @@ -308,7 +308,8 @@ ngx_http_spdy_header_filter(ngx_http_req last = ngx_http_spdy_nv_write_name(last, "status"); if (r->headers_out.status_line.len) { - last = ngx_http_spdy_nv_write_vlen(last, r->headers_out.status_line.len); + last = ngx_http_spdy_nv_write_vlen(last, + r->headers_out.status_line.len); last = ngx_cpymem(last, r->headers_out.status_line.data, r->headers_out.status_line.len); } else { From mdounin at mdounin.ru Thu Jun 13 12:36:52 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 13 Jun 2013 12:36:52 +0000 Subject: [nginx] Valgrind: another complaint about uninitialized bytes. Message-ID: details: http://hg.nginx.org/nginx/rev/982678c5c270 branches: changeset: 5252:982678c5c270 user: Tatsuhiko Kubo date: Wed Jun 12 00:41:24 2013 +0900 description: Valgrind: another complaint about uninitialized bytes. 
diffstat: src/os/unix/ngx_channel.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diffs (12 lines): diff --git a/src/os/unix/ngx_channel.c b/src/os/unix/ngx_channel.c --- a/src/os/unix/ngx_channel.c +++ b/src/os/unix/ngx_channel.c @@ -34,6 +34,8 @@ ngx_write_channel(ngx_socket_t s, ngx_ch msg.msg_control = (caddr_t) &cmsg; msg.msg_controllen = sizeof(cmsg); + ngx_memzero(&cmsg, sizeof(cmsg)); + cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int)); cmsg.cm.cmsg_level = SOL_SOCKET; cmsg.cm.cmsg_type = SCM_RIGHTS; From mdounin at mdounin.ru Thu Jun 13 12:36:59 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 13 Jun 2013 16:36:59 +0400 Subject: [PATCH]Valgind: a complaint about uninitialized bytes In-Reply-To: References: <20130611153020.GI72282@mdounin.ru> Message-ID: <20130613123659.GQ72282@mdounin.ru> Hello! On Wed, Jun 12, 2013 at 12:51:56AM +0900, cubicdaiya wrote: > Hi. > > 2013/6/12 Maxim Dounin > > The memzero call is certainly only needed in case of fd != -1 > > (i.e., to suppress valgrind warning). > > I see. How is that? > > # HG changeset patch > # User Tatsuhiko Kubo > # Date 1370965284 -32400 > # Node ID 2bb2b7d3a263d97b148b706407caf16fd0f074eb > # Parent 725fb71ab1a60bd48b0afb8b001b5349f5054cb1 > Valgrind: a complaint about uninitialized bytes Slightly modified patch pushed, thanks. -- Maxim Dounin http://nginx.org/en/donation.html From toshic.toshic at gmail.com Fri Jun 14 17:44:46 2013 From: toshic.toshic at gmail.com (ToSHiC) Date: Fri, 14 Jun 2013 21:44:46 +0400 Subject: IPv6 support in resolver Message-ID: Hello, We needed this feature in our company, I found that it is in milestones of version 1.5 but doesn't exist yet. So I've implemented it based in 1.3 code and merged in current 1.5 code. When I wrote this code I mostly cared about minimum intrusion into other parts of nginx. IPv6 fallback logic is not a straightforward implementation of suggested by RFC. 
RFC states that IPv6 resolving have priority over IPv4, and it's not very good for Internet we have currently. With this patch you can specify priority, and in upstream and mail modules I've set IPv4 as preferred address family. Patch is pretty big and I hope it'll not break mailing list or mail clients. >From b98c8cd3bd0bca9df88a8d6d660015a502b9727c Mon Sep 17 00:00:00 2001 From: Anton Kortunov Date: Fri, 14 Jun 2013 20:38:41 +0400 Subject: [PATCH] IPv6 resolver --- src/core/ngx_inet.h | 10 + src/core/ngx_resolver.c | 436 +++++++++++++++++++++++++----- src/core/ngx_resolver.h | 23 ++- src/http/ngx_http_upstream.c | 16 +- src/http/ngx_http_upstream.h | 2 +- src/http/ngx_http_upstream_round_robin.c | 49 +++- src/mail/ngx_mail_smtp_handler.c | 95 ++++++-- 7 files changed, 524 insertions(+), 107 deletions(-) diff --git a/src/core/ngx_inet.h b/src/core/ngx_inet.h index 6a5a368..077ed34 100644 --- a/src/core/ngx_inet.h +++ b/src/core/ngx_inet.h @@ -68,6 +68,16 @@ typedef struct { typedef struct { + ngx_uint_t family; + union { + in_addr_t v4; +#if (NGX_HAVE_INET6) + struct in6_addr v6; +#endif + } u; +} ngx_ipaddr_t; + +typedef struct { struct sockaddr *sockaddr; socklen_t socklen; ngx_str_t name; diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c index d59d0c4..5953b9c 100644 --- a/src/core/ngx_resolver.c +++ b/src/core/ngx_resolver.c @@ -71,12 +71,12 @@ static void ngx_resolver_process_response(ngx_resolver_t *r, u_char *buf, size_t n); static void ngx_resolver_process_a(ngx_resolver_t *r, u_char *buf, size_t n, ngx_uint_t ident, ngx_uint_t code, ngx_uint_t nan, ngx_uint_t ans); -static void ngx_resolver_process_ptr(ngx_resolver_t *r, u_char *buf, size_t n, +void ngx_resolver_process_ptr(ngx_resolver_t *r, u_char *buf, size_t n, ngx_uint_t ident, ngx_uint_t code, ngx_uint_t nan); static ngx_resolver_node_t *ngx_resolver_lookup_name(ngx_resolver_t *r, ngx_str_t *name, uint32_t hash); static ngx_resolver_node_t *ngx_resolver_lookup_addr(ngx_resolver_t *r, - 
in_addr_t addr); + ngx_ipaddr_t addr, uint32_t hash); static void ngx_resolver_rbtree_insert_value(ngx_rbtree_node_t *temp, ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel); static ngx_int_t ngx_resolver_copy(ngx_resolver_t *r, ngx_str_t *name, @@ -88,7 +88,7 @@ static void *ngx_resolver_calloc(ngx_resolver_t *r, size_t size); static void ngx_resolver_free(ngx_resolver_t *r, void *p); static void ngx_resolver_free_locked(ngx_resolver_t *r, void *p); static void *ngx_resolver_dup(ngx_resolver_t *r, void *src, size_t size); -static in_addr_t *ngx_resolver_rotate(ngx_resolver_t *r, in_addr_t *src, +static ngx_ipaddr_t *ngx_resolver_rotate(ngx_resolver_t *r, ngx_ipaddr_t *src, ngx_uint_t n); static u_char *ngx_resolver_log_error(ngx_log_t *log, u_char *buf, size_t len); @@ -270,13 +270,27 @@ ngx_resolver_cleanup_tree(ngx_resolver_t *r, ngx_rbtree_t *tree) ngx_resolver_ctx_t * ngx_resolve_start(ngx_resolver_t *r, ngx_resolver_ctx_t *temp) { - in_addr_t addr; + ngx_ipaddr_t addr; ngx_resolver_ctx_t *ctx; if (temp) { - addr = ngx_inet_addr(temp->name.data, temp->name.len); + addr.family = 0; - if (addr != INADDR_NONE) { + + addr.u.v4 = ngx_inet_addr(temp->name.data, temp->name.len); + + if (addr.u.v4 != INADDR_NONE) { + + addr.family = AF_INET; + +#if (NGX_HAVE_INET6) + } else if (ngx_inet6_addr(temp->name.data, temp->name.len, addr.u.v6.s6_addr) == NGX_OK) { + + addr.family = AF_INET6; +#endif + } + + if (addr.family) { temp->resolver = r; temp->state = NGX_OK; temp->naddrs = 1; @@ -417,7 +431,7 @@ static ngx_int_t ngx_resolve_name_locked(ngx_resolver_t *r, ngx_resolver_ctx_t *ctx) { uint32_t hash; - in_addr_t addr, *addrs; + ngx_ipaddr_t addr, *addrs; ngx_int_t rc; ngx_uint_t naddrs; ngx_resolver_ctx_t *next; @@ -429,7 +443,11 @@ ngx_resolve_name_locked(ngx_resolver_t *r, ngx_resolver_ctx_t *ctx) if (rn) { - if (rn->valid >= ngx_time()) { + if (rn->valid >= ngx_time() +#if (NGX_HAVE_INET6) + && rn->qtype != NGX_RESOLVE_RETRY +#endif + ) { 
ngx_log_debug0(NGX_LOG_DEBUG_CORE, r->log, 0, "resolve cached"); @@ -446,7 +464,6 @@ ngx_resolve_name_locked(ngx_resolver_t *r, ngx_resolver_ctx_t *ctx) /* NGX_RESOLVE_A answer */ if (naddrs != 1) { - addr = 0; addrs = ngx_resolver_rotate(r, rn->u.addrs, naddrs); if (addrs == NULL) { return NGX_ERROR; @@ -506,6 +523,8 @@ ngx_resolve_name_locked(ngx_resolver_t *r, ngx_resolver_ctx_t *ctx) } while (ctx); return NGX_OK; + } else { + rn->qtype = ctx->type; } if (rn->waiting) { @@ -552,6 +571,7 @@ ngx_resolve_name_locked(ngx_resolver_t *r, ngx_resolver_ctx_t *ctx) rn->node.key = hash; rn->nlen = (u_short) ctx->name.len; rn->query = NULL; + rn->qtype = ctx->type; ngx_rbtree_insert(&r->name_rbtree, &rn->node); } @@ -629,17 +649,40 @@ failed: ngx_int_t ngx_resolve_addr(ngx_resolver_ctx_t *ctx) { + uint32_t hash; u_char *name; ngx_resolver_t *r; ngx_resolver_node_t *rn; r = ctx->resolver; + rn = NULL; + + hash = ctx->addr.family; - ctx->addr = ntohl(ctx->addr); + switch(ctx->addr.family) { + + case AF_INET: + ctx->addr.u.v4 = ntohl(ctx->addr.u.v4); + ngx_crc32_update(&hash, (u_char *)&ctx->addr.u.v4, sizeof(in_addr_t)); +ngx_log_debug3(NGX_LOG_DEBUG_CORE, r->log, 0, + "resolve addr hash: %xd, addr:%xd, family: %d", hash, ctx->addr.u.v4, ctx->addr.family); + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + ngx_crc32_update(&hash, (u_char *)&ctx->addr.u.v6, sizeof(struct in6_addr)); + break; +#endif + + default: + goto failed; + } /* lock addr mutex */ - rn = ngx_resolver_lookup_addr(r, ctx->addr); + rn = ngx_resolver_lookup_addr(r, ctx->addr, hash); + ngx_log_error(r->log_level, r->log, 0, + "resolve: in resolve_addr searching, hash = %xd, rn = %p", hash, rn); if (rn) { @@ -694,8 +737,10 @@ ngx_resolve_addr(ngx_resolver_ctx_t *ctx) goto failed; } - rn->node.key = ctx->addr; + rn->node.key = hash; rn->query = NULL; + rn->qtype = ctx->type; + rn->u.addr = ctx->addr; ngx_rbtree_insert(&r->addr_rbtree, &rn->node); } @@ -768,10 +813,11 @@ failed: void 
ngx_resolve_addr_done(ngx_resolver_ctx_t *ctx) { - in_addr_t addr; + uint32_t hash; ngx_resolver_t *r; ngx_resolver_ctx_t *w, **p; ngx_resolver_node_t *rn; + char text[NGX_SOCKADDR_STRLEN]; r = ctx->resolver; @@ -786,7 +832,25 @@ ngx_resolve_addr_done(ngx_resolver_ctx_t *ctx) if (ctx->state == NGX_AGAIN || ctx->state == NGX_RESOLVE_TIMEDOUT) { - rn = ngx_resolver_lookup_addr(r, ctx->addr); + hash = ctx->addr.family; + + switch(ctx->addr.family) { + + case AF_INET: + ngx_crc32_update(&hash, (u_char *)&ctx->addr.u.v4, sizeof(in_addr_t)); + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + ngx_crc32_update(&hash, (u_char *)&ctx->addr.u.v6, sizeof(struct in6_addr)); + break; +#endif + + default: + goto failed; + } + + rn = ngx_resolver_lookup_addr(r, ctx->addr, hash); if (rn) { p = &rn->waiting; @@ -804,12 +868,13 @@ ngx_resolve_addr_done(ngx_resolver_ctx_t *ctx) } } - addr = ntohl(ctx->addr); +failed: + + //addr = ntohl(ctx->addr); + inet_ntop(ctx->addr.family, &ctx->addr.u, text, NGX_SOCKADDR_STRLEN); ngx_log_error(NGX_LOG_ALERT, r->log, 0, - "could not cancel %ud.%ud.%ud.%ud resolving", - (addr >> 24) & 0xff, (addr >> 16) & 0xff, - (addr >> 8) & 0xff, addr & 0xff); + "could not cancel %s resolving", text); } done: @@ -1130,6 +1195,9 @@ found: switch (qtype) { case NGX_RESOLVE_A: +#if (NGX_HAVE_INET6) + case NGX_RESOLVE_AAAA: +#endif ngx_resolver_process_a(r, buf, n, ident, code, nan, i + sizeof(ngx_resolver_qs_t)); @@ -1178,7 +1246,7 @@ ngx_resolver_process_a(ngx_resolver_t *r, u_char *buf, size_t last, size_t len; int32_t ttl; uint32_t hash; - in_addr_t addr, *addrs; + ngx_ipaddr_t addr, *addrs; ngx_str_t name; ngx_uint_t qtype, qident, naddrs, a, i, n, start; ngx_resolver_an_t *an; @@ -1212,12 +1280,55 @@ ngx_resolver_process_a(ngx_resolver_t *r, u_char *buf, size_t last, goto failed; } - ngx_resolver_free(r, name.data); - if (code == 0 && nan == 0) { + +#if (NGX_HAVE_INET6) + // If it was required dual type v4|v6 resolv create one more request + if (rn->qtype == 
NGX_RESOLVE_A_AAAA + || rn->qtype == NGX_RESOLVE_AAAA_A) { + + ngx_queue_remove(&rn->queue); + + rn->valid = ngx_time() + (r->valid ? r->valid : ttl); + rn->expire = ngx_time() + r->expire; + + ngx_queue_insert_head(&r->name_expire_queue, &rn->queue); + + ctx = rn->waiting; + rn->waiting = NULL; + + if (ctx) { + ctx->name = name; + + switch (rn->qtype) { + + case NGX_RESOLVE_A_AAAA: + ctx->type = NGX_RESOLVE_AAAA; + break; + + case NGX_RESOLVE_AAAA_A: + ctx->type = NGX_RESOLVE_A; + break; + } + + ngx_log_debug2(NGX_LOG_DEBUG_CORE, r->log, 0, + "restarting request for name %V, with type %ud", + &name, ctx->type); + + rn->qtype = NGX_RESOLVE_RETRY; + + (void) ngx_resolve_name_locked(r, ctx); + } + + return; + } +#endif + code = 3; /* NXDOMAIN */ } + ngx_resolver_free(r, name.data); + if (code) { next = rn->waiting; rn->waiting = NULL; @@ -1243,7 +1354,7 @@ ngx_resolver_process_a(ngx_resolver_t *r, u_char *buf, size_t last, i = ans; naddrs = 0; - addr = 0; + addr.family = 0; addrs = NULL; cname = NULL; qtype = 0; @@ -1302,13 +1413,30 @@ ngx_resolver_process_a(ngx_resolver_t *r, u_char *buf, size_t last, goto short_response; } - addr = htonl((buf[i] << 24) + (buf[i + 1] << 16) + addr.family = AF_INET; + addr.u.v4 = htonl((buf[i] << 24) + (buf[i + 1] << 16) + (buf[i + 2] << 8) + (buf[i + 3])); naddrs++; i += len; +#if (NGX_HAVE_INET6) + } else if (qtype == NGX_RESOLVE_AAAA) { + + i += sizeof(ngx_resolver_an_t); + + if (i + len > last) { + goto short_response; + } + + addr.family = AF_INET6; + ngx_memcpy(&addr.u.v6.s6_addr, &buf[i], 16); + + naddrs++; + + i += len; +#endif } else if (qtype == NGX_RESOLVE_CNAME) { cname = &buf[i] + sizeof(ngx_resolver_an_t); i += sizeof(ngx_resolver_an_t) + len; @@ -1333,7 +1461,7 @@ ngx_resolver_process_a(ngx_resolver_t *r, u_char *buf, size_t last, } else { - addrs = ngx_resolver_alloc(r, naddrs * sizeof(in_addr_t)); + addrs = ngx_resolver_alloc(r, naddrs * sizeof(ngx_ipaddr_t)); if (addrs == NULL) { return; } @@ -1369,12 +1497,23 @@ 
ngx_resolver_process_a(ngx_resolver_t *r, u_char *buf, size_t last, if (qtype == NGX_RESOLVE_A) { - addrs[n++] = htonl((buf[i] << 24) + (buf[i + 1] << 16) + addrs[n].family = AF_INET; + addrs[n++].u.v4 = htonl((buf[i] << 24) + (buf[i + 1] << 16) + (buf[i + 2] << 8) + (buf[i + 3])); if (n == naddrs) { break; } +#if (NGX_HAVE_INET6) + } else if (qtype == NGX_RESOLVE_AAAA) { + + addrs[n].family = AF_INET6; + ngx_memcpy(&addrs[n++].u.v6.s6_addr, &buf[i], 16); + + if (n == naddrs) { + break; + } +#endif } i += len; @@ -1383,7 +1522,7 @@ ngx_resolver_process_a(ngx_resolver_t *r, u_char *buf, size_t last, rn->u.addrs = addrs; addrs = ngx_resolver_dup(r, rn->u.addrs, - naddrs * sizeof(in_addr_t)); + naddrs * sizeof(ngx_ipaddr_t)); if (addrs == NULL) { return; } @@ -1486,13 +1625,14 @@ failed: } -static void +void ngx_resolver_process_ptr(ngx_resolver_t *r, u_char *buf, size_t n, ngx_uint_t ident, ngx_uint_t code, ngx_uint_t nan) { - char *err; + char *err = NULL; + uint32_t hash = 0; size_t len; - in_addr_t addr; + ngx_ipaddr_t addr; int32_t ttl; ngx_int_t digit; ngx_str_t name; @@ -1500,12 +1640,16 @@ ngx_resolver_process_ptr(ngx_resolver_t *r, u_char *buf, size_t n, ngx_resolver_an_t *an; ngx_resolver_ctx_t *ctx, *next; ngx_resolver_node_t *rn; + char text[NGX_SOCKADDR_STRLEN]; if (ngx_resolver_copy(r, NULL, buf, &buf[12], &buf[n]) != NGX_OK) { goto invalid_in_addr_arpa; } - addr = 0; + ngx_memzero(&addr, sizeof(ngx_ipaddr_t)); + + /* Try to parse request as in-addr.arpa */ + addr.family = AF_INET; i = 12; for (mask = 0; mask < 32; mask += 8) { @@ -1516,7 +1660,7 @@ ngx_resolver_process_ptr(ngx_resolver_t *r, u_char *buf, size_t n, goto invalid_in_addr_arpa; } - addr += digit << mask; + addr.u.v4 += digit << mask; i += len; } @@ -1524,15 +1668,79 @@ ngx_resolver_process_ptr(ngx_resolver_t *r, u_char *buf, size_t n, goto invalid_in_addr_arpa; } + i += sizeof("\7in-addr\4arpa") + sizeof(ngx_resolver_qs_t); + + goto found; + +invalid_in_addr_arpa: + +#if (NGX_HAVE_INET6) + 
/* Try to parse request as ip6.arpa */ + addr.family = AF_INET6; + i = 12; + + for (len = 15; len < 16; len--) { + if (buf[i++] != 1) + goto invalid_arpa; + + digit = ngx_hextoi(&buf[i++], 1); + if (digit == NGX_ERROR || digit > 16) { + goto invalid_arpa; + } + + addr.u.v6.s6_addr[len] = digit; + + if (buf[i++] != 1) + goto invalid_arpa; + + + digit = ngx_hextoi(&buf[i++], 1); + if (digit == NGX_ERROR || digit > 16) { + goto invalid_arpa; + } + + addr.u.v6.s6_addr[len] += digit << 4; + } + + if (ngx_strcmp(&buf[i], "\3ip6\4arpa") != 0) { + goto invalid_arpa; + } + + i += sizeof("\3ip6\4arpa") + sizeof(ngx_resolver_qs_t); + +#else /* NGX_HAVE_INET6 */ + goto invalid_arpa; +#endif + +found: + /* lock addr mutex */ - rn = ngx_resolver_lookup_addr(r, addr); + hash = addr.family; + + switch(addr.family) { + + case AF_INET: + ngx_crc32_update(&hash, (u_char *)&addr.u.v4, sizeof(in_addr_t)); + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + ngx_crc32_update(&hash, (u_char *)&addr.u.v6, sizeof(struct in6_addr)); + break; +#endif + + default: + goto invalid; + } + + rn = ngx_resolver_lookup_addr(r, addr, hash); + + inet_ntop(addr.family, &addr.u, text, NGX_SOCKADDR_STRLEN); if (rn == NULL || rn->query == NULL) { ngx_log_error(r->log_level, r->log, 0, - "unexpected response for %ud.%ud.%ud.%ud", - (addr >> 24) & 0xff, (addr >> 16) & 0xff, - (addr >> 8) & 0xff, addr & 0xff); + "unexpected response for %s", text); goto failed; } @@ -1540,12 +1748,15 @@ ngx_resolver_process_ptr(ngx_resolver_t *r, u_char *buf, size_t n, if (ident != qident) { ngx_log_error(r->log_level, r->log, 0, - "wrong ident %ui response for %ud.%ud.%ud.%ud, expect %ui", - ident, (addr >> 24) & 0xff, (addr >> 16) & 0xff, - (addr >> 8) & 0xff, addr & 0xff, qident); + "wrong ident %ui response for %s, expect %ui", + ident, text, qident); goto failed; } + ngx_log_error(r->log_level, r->log, 0, + "code: %d, nan: %d", + code, nan); + if (code == 0 && nan == 0) { code = 3; /* NXDOMAIN */ } @@ -1573,8 +1784,6 @@ 
ngx_resolver_process_ptr(ngx_resolver_t *r, u_char *buf, size_t n, return; } - i += sizeof("\7in-addr\4arpa") + sizeof(ngx_resolver_qs_t); - if (i + 2 + sizeof(ngx_resolver_an_t) > (ngx_uint_t) n) { goto short_response; } @@ -1654,10 +1863,10 @@ ngx_resolver_process_ptr(ngx_resolver_t *r, u_char *buf, size_t n, return; -invalid_in_addr_arpa: +invalid_arpa: ngx_log_error(r->log_level, r->log, 0, - "invalid in-addr.arpa name in DNS response"); + "invalid in-addr.arpa or ip6.arpa name in DNS response"); return; short_response: @@ -1722,28 +1931,54 @@ ngx_resolver_lookup_name(ngx_resolver_t *r, ngx_str_t *name, uint32_t hash) static ngx_resolver_node_t * -ngx_resolver_lookup_addr(ngx_resolver_t *r, in_addr_t addr) +ngx_resolver_lookup_addr(ngx_resolver_t *r, ngx_ipaddr_t addr, uint32_t hash) { + ngx_int_t rc; ngx_rbtree_node_t *node, *sentinel; + ngx_resolver_node_t *rn; node = r->addr_rbtree.root; sentinel = r->addr_rbtree.sentinel; while (node != sentinel) { - if (addr < node->key) { + if (hash < node->key) { node = node->left; continue; } - if (addr > node->key) { + if (hash > node->key) { node = node->right; continue; } - /* addr == node->key */ + /* hash == node->key */ + + rn = (ngx_resolver_node_t *) node; + + rc = addr.family - rn->u.addr.family; + + if (rc == 0) { + + switch (addr.family) { + case AF_INET: + rc = ngx_memn2cmp((u_char *)&addr.u.v4, (u_char *)&rn->u.addr.u.v4, sizeof(in_addr_t), sizeof(in_addr_t)); + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + rc = ngx_memn2cmp((u_char *)&addr.u.v6, (u_char *)&rn->u.addr.u.v6, sizeof(struct in6_addr), sizeof(struct in6_addr)); + break; +#endif + } + + if (rc == 0) { + return rn; + } + + } - return (ngx_resolver_node_t *) node; + node = (rc < 0) ? 
node->left : node->right; } /* not found */ @@ -1758,6 +1993,7 @@ ngx_resolver_rbtree_insert_value(ngx_rbtree_node_t *temp, { ngx_rbtree_node_t **p; ngx_resolver_node_t *rn, *rn_temp; + ngx_int_t rc; for ( ;; ) { @@ -1774,8 +2010,29 @@ ngx_resolver_rbtree_insert_value(ngx_rbtree_node_t *temp, rn = (ngx_resolver_node_t *) node; rn_temp = (ngx_resolver_node_t *) temp; - p = (ngx_memn2cmp(rn->name, rn_temp->name, rn->nlen, rn_temp->nlen) - < 0) ? &temp->left : &temp->right; + if (rn->qtype == NGX_RESOLVE_PTR) { + rc = rn->u.addr.family - rn_temp->u.addr.family; + + if (rc == 0) { + + switch (rn->u.addr.family) { + case AF_INET: + rc = ngx_memn2cmp((u_char *)&rn->u.addr.u.v4, (u_char *)&rn_temp->u.addr.u.v4, sizeof(in_addr_t), sizeof(in_addr_t)); + break; + + #if (NGX_HAVE_INET6) + case AF_INET6: + rc = ngx_memn2cmp((u_char *)&rn->u.addr.u.v6, (u_char *)&rn_temp->u.addr.u.v6, sizeof(struct in6_addr), sizeof(struct in6_addr)); + break; + #endif + } + } + + } else { + rc = ngx_memn2cmp(rn->name, rn_temp->name, rn->nlen, rn_temp->nlen); + } + + p = (rc < 0) ? 
&temp->left : &temp->right; } if (*p == sentinel) { @@ -1838,7 +2095,20 @@ ngx_resolver_create_name_query(ngx_resolver_node_t *rn, ngx_resolver_ctx_t *ctx) qs = (ngx_resolver_qs_t *) p; /* query type */ - qs->type_hi = 0; qs->type_lo = (u_char) ctx->type; + qs->type_hi = 0; qs->type_lo = (u_char) rn->qtype; + +#if (NGX_HAVE_INET6) + switch (rn->qtype) { + + case NGX_RESOLVE_A_AAAA: + qs->type_lo = NGX_RESOLVE_A; + break; + + case NGX_RESOLVE_AAAA_A: + qs->type_lo = NGX_RESOLVE_AAAA; + break; + } +#endif /* IP query class */ qs->class_hi = 0; qs->class_lo = 1; @@ -1880,8 +2150,6 @@ ngx_resolver_create_name_query(ngx_resolver_node_t *rn, ngx_resolver_ctx_t *ctx) } -/* AF_INET only */ - static ngx_int_t ngx_resolver_create_addr_query(ngx_resolver_node_t *rn, ngx_resolver_ctx_t *ctx) { @@ -1892,7 +2160,7 @@ ngx_resolver_create_addr_query(ngx_resolver_node_t *rn, ngx_resolver_ctx_t *ctx) ngx_resolver_query_t *query; len = sizeof(ngx_resolver_query_t) - + sizeof(".255.255.255.255.in-addr.arpa.") - 1 + + NGX_PTR_QUERY_LEN + sizeof(ngx_resolver_qs_t); p = ngx_resolver_alloc(ctx->resolver, len); @@ -1919,18 +2187,50 @@ ngx_resolver_create_addr_query(ngx_resolver_node_t *rn, ngx_resolver_ctx_t *ctx) p += sizeof(ngx_resolver_query_t); - for (n = 0; n < 32; n += 8) { - d = ngx_sprintf(&p[1], "%ud", (ctx->addr >> n) & 0xff); - *p = (u_char) (d - &p[1]); - p = d; + switch (ctx->addr.family) { + + case AF_INET: + for (n = 0; n < 32; n += 8) { + d = ngx_sprintf(&p[1], "%ud", (ctx->addr.u.v4 >> n) & 0xff); + *p = (u_char) (d - &p[1]); + p = d; + } + + /* query type "PTR", IP query class */ + ngx_memcpy(p, "\7in-addr\4arpa\0\0\14\0\1", 18); + + rn->qlen = (u_short) + (p + sizeof("\7in-addr\4arpa") + sizeof(ngx_resolver_qs_t) + - rn->query); + + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + for (n = 15; n >= 0; n--) { + p = ngx_sprintf(p, "\1%xd\1%xd", + (ctx->addr.u.v6.s6_addr[n]) & 0xf, + (ctx->addr.u.v6.s6_addr[n] >> 4) & 0xf); + + } + + /* query type "PTR", IP query class */ 
+ ngx_memcpy(p, "\3ip6\4arpa\0\0\14\0\1", 18); + + rn->qlen = (u_short) + (p + sizeof("\3ip6\4arpa") + sizeof(ngx_resolver_qs_t) + - rn->query); + + break; +#endif + + default: + return NGX_ERROR; } - /* query type "PTR", IP query class */ - ngx_memcpy(p, "\7in-addr\4arpa\0\0\14\0\1", 18); +ngx_log_debug2(NGX_LOG_DEBUG_CORE, ctx->resolver->log, 0, + "resolve: query %s, ident %i", (rn->query+12), ident & 0xffff); - rn->qlen = (u_short) - (p + sizeof("\7in-addr\4arpa") + sizeof(ngx_resolver_qs_t) - - rn->query); return NGX_OK; } @@ -2136,13 +2436,13 @@ ngx_resolver_dup(ngx_resolver_t *r, void *src, size_t size) } -static in_addr_t * -ngx_resolver_rotate(ngx_resolver_t *r, in_addr_t *src, ngx_uint_t n) +static ngx_ipaddr_t * +ngx_resolver_rotate(ngx_resolver_t *r, ngx_ipaddr_t *src, ngx_uint_t n) { void *dst, *p; ngx_uint_t j; - dst = ngx_resolver_alloc(r, n * sizeof(in_addr_t)); + dst = ngx_resolver_alloc(r, n * sizeof(ngx_ipaddr_t)); if (dst == NULL) { return dst; @@ -2151,12 +2451,12 @@ ngx_resolver_rotate(ngx_resolver_t *r, in_addr_t *src, ngx_uint_t n) j = ngx_random() % n; if (j == 0) { - ngx_memcpy(dst, src, n * sizeof(in_addr_t)); + ngx_memcpy(dst, src, n * sizeof(ngx_ipaddr_t)); return dst; } - p = ngx_cpymem(dst, &src[j], (n - j) * sizeof(in_addr_t)); - ngx_memcpy(p, src, j * sizeof(in_addr_t)); + p = ngx_cpymem(dst, &src[j], (n - j) * sizeof(ngx_ipaddr_t)); + ngx_memcpy(p, src, j * sizeof(ngx_ipaddr_t)); return dst; } diff --git a/src/core/ngx_resolver.h b/src/core/ngx_resolver.h index ae34ca5..a45b244 100644 --- a/src/core/ngx_resolver.h +++ b/src/core/ngx_resolver.h @@ -20,6 +20,15 @@ #define NGX_RESOLVE_TXT 16 #define NGX_RESOLVE_DNAME 39 +#if (NGX_HAVE_INET6) + +#define NGX_RESOLVE_AAAA 28 +#define NGX_RESOLVE_A_AAAA 1000 +#define NGX_RESOLVE_AAAA_A 1001 +#define NGX_RESOLVE_RETRY 1002 + +#endif + #define NGX_RESOLVE_FORMERR 1 #define NGX_RESOLVE_SERVFAIL 2 #define NGX_RESOLVE_NXDOMAIN 3 @@ -32,6 +41,11 @@ #define NGX_RESOLVER_MAX_RECURSION 50 +#if 
(NGX_HAVE_INET6) +#define NGX_PTR_QUERY_LEN (sizeof(".f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.f.ip6.arpa.") - 1) +#else +#define NGX_PTR_QUERY_LEN (sizeof(".255.255.255.255.in-addr.arpa.") - 1) +#endif typedef struct { ngx_connection_t *connection; @@ -58,10 +72,11 @@ typedef struct { u_short qlen; u_char *query; + ngx_int_t qtype; union { - in_addr_t addr; - in_addr_t *addrs; + ngx_ipaddr_t addr; + ngx_ipaddr_t *addrs; u_char *cname; } u; @@ -121,8 +136,8 @@ struct ngx_resolver_ctx_s { ngx_str_t name; ngx_uint_t naddrs; - in_addr_t *addrs; - in_addr_t addr; + ngx_ipaddr_t *addrs; + ngx_ipaddr_t addr; ngx_resolver_handler_pt handler; void *data; diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c index 16e6602..7a8035c 100644 --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -638,7 +638,11 @@ ngx_http_upstream_init_request(ngx_http_request_t *r) } ctx->name = *host; +#if (NGX_HAVE_INET6) + ctx->type = NGX_RESOLVE_A_AAAA; +#else ctx->type = NGX_RESOLVE_A; +#endif ctx->handler = ngx_http_upstream_resolve_handler; ctx->data = r; ctx->timeout = clcf->resolver_timeout; @@ -912,16 +916,14 @@ ngx_http_upstream_resolve_handler(ngx_resolver_ctx_t *ctx) #if (NGX_DEBUG) { - in_addr_t addr; + char text[NGX_SOCKADDR_STRLEN]; ngx_uint_t i; - for (i = 0; i < ctx->naddrs; i++) { - addr = ntohl(ur->addrs[i]); + for (i = 0; i < ur->naddrs; i++) { + inet_ntop(ur->addrs[i].family, &ur->addrs[i].u, text, NGX_SOCKADDR_STRLEN); - ngx_log_debug4(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "name was resolved to %ud.%ud.%ud.%ud", - (addr >> 24) & 0xff, (addr >> 16) & 0xff, - (addr >> 8) & 0xff, addr & 0xff); + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "name was resolved to %s", text); } } #endif diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h index fd4e36b..9e88a9a 100644 --- a/src/http/ngx_http_upstream.h +++ b/src/http/ngx_http_upstream.h @@ -254,7 +254,7 @@ typedef struct { 
ngx_uint_t no_port; /* unsigned no_port:1 */ ngx_uint_t naddrs; - in_addr_t *addrs; + ngx_ipaddr_t *addrs; struct sockaddr *sockaddr; socklen_t socklen; diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c index e0c6c58..92fa825 100644 --- a/src/http/ngx_http_upstream_round_robin.c +++ b/src/http/ngx_http_upstream_round_robin.c @@ -268,6 +268,9 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r, size_t len; ngx_uint_t i, n; struct sockaddr_in *sin; +#if (NGX_HAVE_INET6) + struct sockaddr_in6 *sin6; +#endif ngx_http_upstream_rr_peers_t *peers; ngx_http_upstream_rr_peer_data_t *rrp; @@ -306,27 +309,52 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r, for (i = 0; i < ur->naddrs; i++) { - len = NGX_INET_ADDRSTRLEN + sizeof(":65536") - 1; + len = NGX_SOCKADDR_STRLEN; p = ngx_pnalloc(r->pool, len); if (p == NULL) { return NGX_ERROR; } - len = ngx_inet_ntop(AF_INET, &ur->addrs[i], p, NGX_INET_ADDRSTRLEN); + len = ngx_inet_ntop(ur->addrs[i].family, &ur->addrs[i].u, p, NGX_SOCKADDR_STRLEN - sizeof(":65535") + 1); len = ngx_sprintf(&p[len], ":%d", ur->port) - p; - sin = ngx_pcalloc(r->pool, sizeof(struct sockaddr_in)); - if (sin == NULL) { + switch (ur->addrs[i].family) { + + case AF_INET: + sin = ngx_pcalloc(r->pool, sizeof(struct sockaddr_in)); + if (sin == NULL) { + return NGX_ERROR; + } + + sin->sin_family = AF_INET; + sin->sin_port = htons(ur->port); + sin->sin_addr.s_addr = ur->addrs[i].u.v4; + + peers->peer[i].sockaddr = (struct sockaddr *) sin; + peers->peer[i].socklen = sizeof(struct sockaddr_in); + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + sin6 = ngx_pcalloc(r->pool, sizeof(struct sockaddr_in6)); + if (sin6 == NULL) { + return NGX_ERROR; + } + + sin6->sin6_family = AF_INET6; + sin6->sin6_port = htons(ur->port); + sin6->sin6_addr = ur->addrs[i].u.v6; + + peers->peer[i].sockaddr = (struct sockaddr *) sin6; + peers->peer[i].socklen = sizeof(struct sockaddr_in6); + break; +#endif + + 
default: return NGX_ERROR; } - sin->sin_family = AF_INET; - sin->sin_port = htons(ur->port); - sin->sin_addr.s_addr = ur->addrs[i]; - - peers->peer[i].sockaddr = (struct sockaddr *) sin; - peers->peer[i].socklen = sizeof(struct sockaddr_in); peers->peer[i].name.len = len; peers->peer[i].name.data = p; peers->peer[i].weight = 1; @@ -334,6 +362,7 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r, peers->peer[i].current_weight = 0; peers->peer[i].max_fails = 1; peers->peer[i].fail_timeout = 10; + } } diff --git a/src/mail/ngx_mail_smtp_handler.c b/src/mail/ngx_mail_smtp_handler.c index 2171423..668424e 100644 --- a/src/mail/ngx_mail_smtp_handler.c +++ b/src/mail/ngx_mail_smtp_handler.c @@ -56,6 +56,9 @@ void ngx_mail_smtp_init_session(ngx_mail_session_t *s, ngx_connection_t *c) { struct sockaddr_in *sin; +#if (NGX_HAVE_INET6) + struct sockaddr_in6 *sin6; +#endif ngx_resolver_ctx_t *ctx; ngx_mail_core_srv_conf_t *cscf; @@ -67,7 +70,11 @@ ngx_mail_smtp_init_session(ngx_mail_session_t *s, ngx_connection_t *c) return; } - if (c->sockaddr->sa_family != AF_INET) { + if (c->sockaddr->sa_family != AF_INET +#if (NGX_HAVE_INET6) + && c->sockaddr->sa_family != AF_INET6 +#endif + ) { s->host = smtp_tempunavail; ngx_mail_smtp_greeting(s, c); return; @@ -81,11 +88,23 @@ ngx_mail_smtp_init_session(ngx_mail_session_t *s, ngx_connection_t *c) return; } - /* AF_INET only */ + ctx->addr.family = c->sockaddr->sa_family; - sin = (struct sockaddr_in *) c->sockaddr; + switch (c->sockaddr->sa_family) { + + case AF_INET: + sin = (struct sockaddr_in *) c->sockaddr; + ctx->addr.u.v4 = sin->sin_addr.s_addr; + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + sin6 = (struct sockaddr_in6 *) c->sockaddr; + ctx->addr.u.v6 = sin6->sin6_addr; + break; +#endif + } - ctx->addr = sin->sin_addr.s_addr; ctx->handler = ngx_mail_smtp_resolve_addr_handler; ctx->data = s; ctx->timeout = cscf->resolver_timeout; @@ -167,11 +186,23 @@ ngx_mail_smtp_resolve_name(ngx_event_t *rev) } ctx->name = 
s->host; - ctx->type = NGX_RESOLVE_A; ctx->handler = ngx_mail_smtp_resolve_name_handler; ctx->data = s; ctx->timeout = cscf->resolver_timeout; + switch (c->sockaddr->sa_family) { + + case AF_INET: + ctx->type = NGX_RESOLVE_A; + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + ctx->type = NGX_RESOLVE_AAAA_A; + break; +#endif + } + if (ngx_resolve_name(ctx) != NGX_OK) { ngx_mail_close_connection(c); } @@ -181,11 +212,15 @@ ngx_mail_smtp_resolve_name(ngx_event_t *rev) static void ngx_mail_smtp_resolve_name_handler(ngx_resolver_ctx_t *ctx) { - in_addr_t addr; + ngx_ipaddr_t addr; ngx_uint_t i; ngx_connection_t *c; struct sockaddr_in *sin; +#if (NGX_HAVE_INET6) + struct sockaddr_in6 *sin6; +#endif ngx_mail_session_t *s; + char text[NGX_SOCKADDR_STRLEN]; s = ctx->data; c = s->connection; @@ -205,23 +240,49 @@ ngx_mail_smtp_resolve_name_handler(ngx_resolver_ctx_t *ctx) } else { - /* AF_INET only */ + addr.family = c->sockaddr->sa_family; - sin = (struct sockaddr_in *) c->sockaddr; + switch (c->sockaddr->sa_family) { + + case AF_INET: + sin = (struct sockaddr_in *) c->sockaddr; + addr.u.v4 = sin->sin_addr.s_addr; + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + sin6 = (struct sockaddr_in6 *) c->sockaddr; + addr.u.v6 = sin6->sin6_addr; + break; +#endif + } for (i = 0; i < ctx->naddrs; i++) { - addr = ctx->addrs[i]; + inet_ntop(ctx->addrs[i].family, &ctx->addrs[i].u, text, NGX_SOCKADDR_STRLEN); - ngx_log_debug4(NGX_LOG_DEBUG_MAIL, c->log, 0, - "name was resolved to %ud.%ud.%ud.%ud", - (ntohl(addr) >> 24) & 0xff, - (ntohl(addr) >> 16) & 0xff, - (ntohl(addr) >> 8) & 0xff, - ntohl(addr) & 0xff); + ngx_log_debug1(NGX_LOG_DEBUG_MAIL, c->log, 0, + "name was resolved to %s", text); - if (addr == sin->sin_addr.s_addr) { - goto found; + if (addr.family != ctx->addrs[i].family) { + continue; + } + + switch (addr.family) { + + case AF_INET: + if (addr.u.v4 == ctx->addrs[i].u.v4) { + goto found; + } + break; + +#if (NGX_HAVE_INET6) + case AF_INET6: + if (!ngx_memcmp(&addr.u.v6, 
&ctx->addrs[i].u.v6, sizeof(addr.u.v6))) { + goto found; + } + break; +#endif } } -- 1.7.0.4 -------------- next part -------------- An HTML attachment was scrubbed... URL: From jon at b0g.us Fri Jun 14 20:12:36 2013 From: jon at b0g.us (Jonathan Kolb) Date: Fri, 14 Jun 2013 16:12:36 -0400 Subject: Fwd: WEBDAV: accomodate MKCOL for somewhat broken clients In-Reply-To: References: <20130613122144.GO72282@mdounin.ru> Message-ID: This worked in a quick test for me: map $uri $missing_slash { default 1; ~/$ 0; } map $request_method $add_slash { default 0; MKCOL $missing_slash; } server { location / { dav_methods MKCOL; if ($add_slash) { rewrite ^ $uri/ break; } } } On Thu, Jun 13, 2013 at 8:28 AM, Vladimir Dronnikov wrote: > Any chance to have this trailing slash controlled by an option? > Or would you people mind to share a chunk of nginx config which would > append that slash? > TIA > > > On Thu, Jun 13, 2013 at 4:21 PM, Maxim Dounin wrote: > >> Hello! >> >> On Thu, Jun 13, 2013 at 12:15:54PM +0400, Vladimir Dronnikov wrote: >> >> > Hello! >> > >> > I wonder if >> > >> https://github.com/dvv/nginx/commit/3a8cdadea196a594fd1940be02818f51d1b1769fis >> > feasible? The rationale is to be more liberal to the zoo of webdav >> > clients. >> >> Doesn't looks like a good change to me. >> >> -- >> Maxim Dounin >> http://nginx.org/en/donation.html >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From dronnikov at gmail.com Sat Jun 15 06:09:24 2013 From: dronnikov at gmail.com (Vladimir Dronnikov) Date: Sat, 15 Jun 2013 10:09:24 +0400 Subject: Fwd: WEBDAV: accomodate MKCOL for somewhat broken clients In-Reply-To: References: <20130613122144.GO72282@mdounin.ru> Message-ID: Thanks. That is elegant. Another stopper is that nginx dav does differentiate folders vs files when performing DELETE/MOVE. Is it possible to append slashes if target is a directory? Tia 15.06.2013 0:12 ???????????? "Jonathan Kolb" ???????: > This worked in a quick test for me: > > map $uri $missing_slash { > default 1; > ~/$ 0; > } > > map $request_method $add_slash { > default 0; > MKCOL $missing_slash; > } > > server { > location / { > dav_methods MKCOL; > if ($add_slash) { rewrite ^ $uri/ break; } > } > } > > > > On Thu, Jun 13, 2013 at 8:28 AM, Vladimir Dronnikov wrote: > >> Any chance to have this trailing slash controlled by an option? >> Or would you people mind to share a chunk of nginx config which would >> append that slash? >> TIA >> >> >> On Thu, Jun 13, 2013 at 4:21 PM, Maxim Dounin wrote: >> >>> Hello! >>> >>> On Thu, Jun 13, 2013 at 12:15:54PM +0400, Vladimir Dronnikov wrote: >>> >>> > Hello! >>> > >>> > I wonder if >>> > >>> https://github.com/dvv/nginx/commit/3a8cdadea196a594fd1940be02818f51d1b1769fis >>> > feasible? The rationale is to be more liberal to the zoo of webdav >>> > clients. >>> >>> Doesn't looks like a good change to me. 
>>> >>> -- >>> Maxim Dounin >>> http://nginx.org/en/donation.html >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From jon at b0g.us Sat Jun 15 23:56:20 2013 From: jon at b0g.us (Jonathan Kolb) Date: Sat, 15 Jun 2013 19:56:20 -0400 Subject: Fwd: WEBDAV: accomodate MKCOL for somewhat broken clients In-Reply-To: References: <20130613122144.GO72282@mdounin.ru> Message-ID: You should be able to just duplicate the MKCOL line in the second map for DELETE, but since MOVE also requires a trailing / on the Destination header, I can't think of a way around it without something much more hacky (I haven't tested it this time, but I think it'll work): map $http_destination $dest_missing_slash { default 1; ~/$ 0; } map $request_method $proxy_hack { default 0; MOVE $dest_missing_slash; COPY $dest_missing_slash; } server { location / { dav_methods MKCOL MOVE COPY DELETE PUT; if ($proxy_hack) { # proxy back to ourselves with a fixed up header # the destination header gets fixed up here, and the uri will get fixed up the second time around # you can also listen on a unix port and proxy_pass there for this part proxy_set_header Destination $http_destination/; proxy_pass http://localhost; break; } if ($add_slash) { rewrite ^ $uri/ break; } } } On Sat, Jun 15, 2013 at 2:09 AM, Vladimir Dronnikov wrote: > Thanks. That is elegant. Another stopper is that nginx dav does > differentiate folders vs files when performing DELETE/MOVE. 
Is it possible > to append slashes if target is a directory? Tia > 15.06.2013 0:12 ???????????? "Jonathan Kolb" ???????: > > This worked in a quick test for me: >> >> map $uri $missing_slash { >> default 1; >> ~/$ 0; >> } >> >> map $request_method $add_slash { >> default 0; >> MKCOL $missing_slash; >> } >> >> server { >> location / { >> dav_methods MKCOL; >> if ($add_slash) { rewrite ^ $uri/ break; } >> } >> } >> >> >> >> On Thu, Jun 13, 2013 at 8:28 AM, Vladimir Dronnikov wrote: >> >>> Any chance to have this trailing slash controlled by an option? >>> Or would you people mind to share a chunk of nginx config which would >>> append that slash? >>> TIA >>> >>> >>> On Thu, Jun 13, 2013 at 4:21 PM, Maxim Dounin wrote: >>> >>>> Hello! >>>> >>>> On Thu, Jun 13, 2013 at 12:15:54PM +0400, Vladimir Dronnikov wrote: >>>> >>>> > Hello! >>>> > >>>> > I wonder if >>>> > >>>> https://github.com/dvv/nginx/commit/3a8cdadea196a594fd1940be02818f51d1b1769fis >>>> > feasible? The rationale is to be more liberal to the zoo of webdav >>>> > clients. >>>> >>>> Doesn't looks like a good change to me. >>>> >>>> -- >>>> Maxim Dounin >>>> http://nginx.org/en/donation.html >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>> >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From danfiala at centrum.cz Sun Jun 16 16:32:57 2013 From: danfiala at centrum.cz (danfiala at centrum.cz) Date: Sun, 16 Jun 2013 18:32:57 +0200 Subject: Proxy module without waiting for a reply In-Reply-To: <20130609120432.GO72282@mdounin.ru> References: <20130609112139.09556BB5@centrum.cz> <20130609120432.GO72282@mdounin.ru> Message-ID: <20130616183257.E2CC733F@centrum.cz> Hello. But when I send data through upstream then Nginx waits until it can read some data from the upstream server, finally it times out. But I would need to finish the upstream request just after data are written without any waiting for the reply from upstream server. Is there any way how to force it to work this way? Thanks, Daniel ______________________________________________________________ > Od: "Maxim Dounin" > Komu: > Datum: 09.06.2013 14:04 > P?edm?t: Re: Proxy module without waiting for a reply > >Hello! > >On Sun, Jun 09, 2013 at 11:21:39AM +0200, danfiala at centrum.cz wrote: > >> Hi all, >> I need to implement nginx module that does the following: >> * It receives http get requests. >> * For every request it sends information about the request over TCP packet >> to another server. >> * When data are send it generates some trivial HTTP reply. The another >> server does not sent any reply to the module. >> >> I studied source code of other modules and some tutorials and it seems that >> I should implement upstream (proxy) module. But I would like to achieve the >> following: >> * Module does generate the reply just after data are sent to another >> server. It does not wait for any reply from another server. >> * Socket to another server remains open among requests and is reused for >> subsequent requests. >> >> Is this possible and is implementation of upstream module the right way? > >Yes, it looks possible and easy enough. 
> >-- >Maxim Dounin >http://nginx.org/en/donation.html > >_______________________________________________ >nginx-devel mailing list >nginx-devel at nginx.org >http://mailman.nginx.org/mailman/listinfo/nginx-devel > From ykirpichev at gmail.com Mon Jun 17 08:52:06 2013 From: ykirpichev at gmail.com (Yury Kirpichev) Date: Mon, 17 Jun 2013 12:52:06 +0400 Subject: [PATCH / v4] SPDY: fix nopush cleanup Message-ID: Hi, Please disregard v3 version of patch. I've found that it does not fix the problem in case if some data was buffered and afterwards was sent by "write" handler. Here is new version: # HG changeset patch # User ykirpichev at yandex-team.ru # Date 1371458549 -14400 # Branch nopush_fix # Node ID 3d463c13dadd70b3f74cbc037938c624db348cc0 # Parent 982678c5c270f93a0c21ab6eb23cb123c0dc3df0 SPDY: fix nopush cleanup diff -r 982678c5c270 -r 3d463c13dadd src/http/ngx_http_spdy.c --- a/src/http/ngx_http_spdy.c Wed Jun 12 00:41:24 2013 +0900 +++ b/src/http/ngx_http_spdy.c Mon Jun 17 12:42:29 2013 +0400 @@ -154,6 +154,8 @@ static void *ngx_http_spdy_zalloc(void *opaque, u_int items, u_int size); static void ngx_http_spdy_zfree(void *opaque, void *address); +static ngx_int_t +ngx_http_spdy_restore_nopush(ngx_connection_t *c, ngx_http_connection_t *hc); static const u_char ngx_http_spdy_dict[] = @@ -378,6 +380,11 @@ sc->blocked = 0; + if (ngx_http_spdy_restore_nopush(c, sc->http_connection) != NGX_OK) { + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + if (sc->processing) { if (rev->timer_set) { ngx_del_timer(rev); @@ -447,6 +454,11 @@ return; } + if (ngx_http_spdy_restore_nopush(c, sc->http_connection) != NGX_OK) { + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + ngx_http_spdy_handle_connection(sc); } @@ -2880,3 +2892,61 @@ "spdy zfree: %p", address); #endif } + +static ngx_int_t +ngx_http_spdy_restore_nopush(ngx_connection_t *c, ngx_http_connection_t *hc) +{ + int tcp_nodelay; + 
ngx_http_core_loc_conf_t *clcf; + + // It is better to use NGX_SPDY_WRITE_BUFFERED here, but + // it is defined in ngx_http_spdy_filter_module.c + // So, just use !c->buffered + if (!c->buffered && !c->error) { + //no buffered data, so, we should clean nopush if needed + clcf = ngx_http_get_module_loc_conf(hc->conf_ctx, + ngx_http_core_module); + + if (c->tcp_nopush == NGX_TCP_NOPUSH_SET) { + if (ngx_tcp_push(c->fd) == -1) { + ngx_connection_error(c, ngx_socket_errno, ngx_tcp_push_n " failed"); + return NGX_ERROR; + } + + c->tcp_nopush = NGX_TCP_NOPUSH_UNSET; + tcp_nodelay = ngx_tcp_nodelay_and_tcp_nopush ? 1 : 0; + + } else { + tcp_nodelay = 1; + } + + if (tcp_nodelay + && clcf->tcp_nodelay + && c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) + { + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); + + if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, + (const void *) &tcp_nodelay, sizeof(int)) + == -1) + { +#if (NGX_SOLARIS) + /* Solaris returns EINVAL if a socket has been shut down */ + c->log_error = NGX_ERROR_IGNORE_EINVAL; +#endif + + ngx_connection_error(c, ngx_socket_errno, + "setsockopt(TCP_NODELAY) failed"); + + c->log_error = NGX_ERROR_INFO; + return NGX_ERROR; + } + + c->tcp_nodelay = NGX_TCP_NODELAY_SET; + } + } + + return NGX_OK; +} + + -------------- next part -------------- An HTML attachment was scrubbed... URL: From zorceta at gmail.com Mon Jun 17 14:42:21 2013 From: zorceta at gmail.com (Zorceta Moshak) Date: Mon, 17 Jun 2013 22:42:21 +0800 Subject: Adding per user traffic stats using iptables Message-ID: <-212719754363614768@unknownmsgid> Hi, I'm trying to let iptables record different users' traffic. Planned to setuid() before worker actually sends out data. Read through modules and filter chain in source codes, still can't figure out who's the real sender func. Could anyone tell? Thanks. Yours, Zorceta Moshak -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From mdounin at mdounin.ru Mon Jun 17 15:04:48 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 17 Jun 2013 19:04:48 +0400 Subject: Adding per user traffic stats using iptables In-Reply-To: <-212719754363614768@unknownmsgid> References: <-212719754363614768@unknownmsgid> Message-ID: <20130617150448.GG72282@mdounin.ru> Hello! On Mon, Jun 17, 2013 at 10:42:21PM +0800, Zorceta Moshak wrote: > Hi, > I'm trying to let iptables record different users' traffic. > Planned to setuid() before worker actually sends out data. > Read through modules and filter chain in source codes, still can't figure > out who's the real sender func. > Could anyone tell? Sending data is done via c->send() or c->send_chain() functions. Assuming no ssl, this will map to ngx_send() or ngx_send_chain() macros, defined as: : #define ngx_send ngx_io.send : #define ngx_send_chain ngx_io.send_chain The ngx_io structure depends on event method, but in all practical cases maps to ngx_os_io. In it's turn ngx_os_io is a platform-dependant structure, which is set during platform init. E.g. on Linux it's set to ngx_linux_io, defined as follows: : static ngx_os_io_t ngx_linux_io = { : ngx_unix_recv, : ngx_readv_chain, : ngx_udp_unix_recv, : ngx_unix_send, : #if (NGX_HAVE_SENDFILE) : ngx_linux_sendfile_chain, : NGX_IO_SENDFILE : #else : ngx_writev_chain, : 0 : #endif : }; That is, ngx_send() maps to ngx_unix_send() (which in turn uses send() to actually send data), while c->send_chain() to either ngx_linux_sendfile_chain() or ngx_writev_chain(). Overral I would recommend you to do what you need around c->send()/c->send_chain() calls (that is, in write filter if you are talking about http). It would be much easier than digging into low level and hacking all the function nginx can use. 
-- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Mon Jun 17 15:30:21 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 17 Jun 2013 19:30:21 +0400 Subject: IPv6 support in resolver In-Reply-To: References: Message-ID: <20130617153021.GH72282@mdounin.ru> Hello! On Fri, Jun 14, 2013 at 09:44:46PM +0400, ToSHiC wrote: > Hello, > > We needed this feature in our company, I found that it is in milestones of > version 1.5 but doesn't exist yet. So I've implemented it based in 1.3 code > and merged in current 1.5 code. When I wrote this code I mostly cared about > minimum intrusion into other parts of nginx. > > IPv6 fallback logic is not a straightforward implementation of suggested by > RFC. RFC states that IPv6 resolving have priority over IPv4, and it's not > very good for Internet we have currently. With this patch you can specify > priority, and in upstream and mail modules I've set IPv4 as preferred > address family. > > Patch is pretty big and I hope it'll not break mailing list or mail clients. You may want to try to split the patch into smaller patches to simplify review. See also some hints here: http://nginx.org/en/docs/contributing_changes.html Some quick comments below. [...] > - addr = ntohl(ctx->addr); > +failed: > + > + //addr = ntohl(ctx->addr); > + inet_ntop(ctx->addr.family, &ctx->addr.u, text, > NGX_SOCKADDR_STRLEN); > > ngx_log_error(NGX_LOG_ALERT, r->log, 0, > - "could not cancel %ud.%ud.%ud.%ud resolving", > - (addr >> 24) & 0xff, (addr >> 16) & 0xff, > - (addr >> 8) & 0xff, addr & 0xff); > + "could not cancel %s resolving", text); 1. Don't use inet_ntop(), there is ngx_sock_ntop() instead. 2. Don't use C++ style ("//") comments. 3. If some data is only needed for debug logging, keep relevant calculations under #if (NGX_DEBUG). [...] 
> @@ -334,6 +362,7 @@ > ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r, > peers->peer[i].current_weight = 0; > peers->peer[i].max_fails = 1; > peers->peer[i].fail_timeout = 10; > + > } > } > Please avoid unrelated changes. [...] -- Maxim Dounin http://nginx.org/en/donation.html From danfiala at centrum.cz Mon Jun 17 19:37:33 2013 From: danfiala at centrum.cz (danfiala at centrum.cz) Date: Mon, 17 Jun 2013 21:37:33 +0200 Subject: Proxy module without waiting for a reply In-Reply-To: <20130616183257.E2CC733F@centrum.cz> References: <20130609112139.09556BB5@centrum.cz>, <20130609120432.GO72282@mdounin.ru> <20130616183257.E2CC733F@centrum.cz> Message-ID: <20130617213733.4E50A73A@centrum.cz> Unfortunately I still cannot find out how to send data to upstream and force nginx to not wait for reply. Is really upstream suitable for my purpose? Regards, Daniel ______________________________________________________________ > Od: danfiala at centrum.cz > Komu: > Datum: 16.06.2013 18:33 > P?edm?t: Re: Proxy module without waiting for a reply > >Hello. >But when I send data through upstream then Nginx waits until it can read some data from the upstream server, finally it times out. But I would need to finish the upstream request just after data are written without any waiting for the reply from upstream server. Is there any way how to force it to work this way? > >Thanks, >Daniel > >______________________________________________________________ >> Od: "Maxim Dounin" >> Komu: >> Datum: 09.06.2013 14:04 >> P?edm?t: Re: Proxy module without waiting for a reply >> >>Hello! >> >>On Sun, Jun 09, 2013 at 11:21:39AM +0200, danfiala at centrum.cz wrote: >> >>> Hi all, >>> I need to implement nginx module that does the following: >>> * It receives http get requests. >>> * For every request it sends information about the request over TCP packet >>> to another server. >>> * When data are send it generates some trivial HTTP reply. 
The another >>> server does not sent any reply to the module. >>> >>> I studied source code of other modules and some tutorials and it seems that >>> I should implement upstream (proxy) module. But I would like to achieve the >>> following: >>> * Module does generate the reply just after data are sent to another >>> server. It does not wait for any reply from another server. >>> * Socket to another server remains open among requests and is reused for >>> subsequent requests. >>> >>> Is this possible and is implementation of upstream module the right way? >> >>Yes, it looks possible and easy enough. >> >>-- >>Maxim Dounin >>http://nginx.org/en/donation.html >> >>_______________________________________________ >>nginx-devel mailing list >>nginx-devel at nginx.org >>http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > >_______________________________________________ >nginx-devel mailing list >nginx-devel at nginx.org >http://mailman.nginx.org/mailman/listinfo/nginx-devel > From leok.trash at gmail.com Tue Jun 18 07:07:39 2013 From: leok.trash at gmail.com (=?utf-8?B?bGVvay50cmFzaEBnbWFpbC5jb20=?=) Date: Tue, 18 Jun 2013 11:07:39 +0400 Subject: =?UTF-8?B?0J3QkDogc3Vic2NyaXB0?= Message-ID: <51c0073c.a11b980a.53c2.1579@mx.google.com> ?????????? ? ????? HTC ----- Reply message ----- ??: "actioncao" ????: "nginx-devel at nginx.org" ????: subscript ????: ??, ??? 10, 2013 05:18 ???? iPhone _______________________________________________ nginx-devel mailing list nginx-devel at nginx.org http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: From ru at nginx.com Wed Jun 19 04:57:51 2013 From: ru at nginx.com (Ruslan Ermilov) Date: Wed, 19 Jun 2013 04:57:51 +0000 Subject: [nginx] Simplified ngx_list_create(). 
Message-ID: details: http://hg.nginx.org/nginx/rev/a82f305487c2 branches: changeset: 5253:a82f305487c2 user: Ruslan Ermilov date: Wed Jun 19 08:55:08 2013 +0400 description: Simplified ngx_list_create(). diffstat: src/core/ngx_list.c | 10 +--------- 1 files changed, 1 insertions(+), 9 deletions(-) diffs (23 lines): diff -r 982678c5c270 -r a82f305487c2 src/core/ngx_list.c --- a/src/core/ngx_list.c Wed Jun 12 00:41:24 2013 +0900 +++ b/src/core/ngx_list.c Wed Jun 19 08:55:08 2013 +0400 @@ -19,18 +19,10 @@ ngx_list_create(ngx_pool_t *pool, ngx_ui return NULL; } - list->part.elts = ngx_palloc(pool, n * size); - if (list->part.elts == NULL) { + if (ngx_list_init(list, pool, n, size) != NGX_OK) { return NULL; } - list->part.nelts = 0; - list->part.next = NULL; - list->last = &list->part; - list->size = size; - list->nalloc = n; - list->pool = pool; - return list; } From ykirpichev at gmail.com Wed Jun 19 11:33:13 2013 From: ykirpichev at gmail.com (Yury Kirpichev) Date: Wed, 19 Jun 2013 15:33:13 +0400 Subject: SPDY: question about PING Message-ID: Hello Nginx Developers, I have a question about how SPDY PING message is handled by current nginx implementation. >From source code of function ngx_http_spdy_state_ping I can find that PING frame is queued like: buf->last = p; ngx_http_spdy_queue_frame(sc, frame); pos += NGX_SPDY_PING_SIZE; return ngx_http_spdy_state_complete(sc, pos, end); But I can not find the code where PING frame is sent to network. (The same is for RST, SETTINGS). (Like there is no call to ngx_http_spdy_send_output_queue function) So, my question is when such frames will be actually sent? BR/ Yury -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From jzefip at gmail.com Thu Jun 20 05:42:57 2013 From: jzefip at gmail.com (Julien Zefi) Date: Wed, 19 Jun 2013 23:42:57 -0600 Subject: API question: large data processing handler In-Reply-To: <201306042029.57511.vbart@nginx.com> References: <20130604160831.GY72282@mdounin.ru> <201306042029.57511.vbart@nginx.com> Message-ID: i tried to follow the suggestion of using a timer and a new handler for the write callback without luck, indeed there is something wrong on my end. if you have some minute to review, I wrote a simple test case, my goal is to make ngx_http_test_stream_handler(..) to be called every 10ms and send some data to the browser until this same function decide to stop working... thanks for your help On Tue, Jun 4, 2013 at 10:29 AM, Valentin V. Bartenev wrote: > On Tuesday 04 June 2013 20:08:31 Maxim Dounin wrote: > [...] > > (You may also try to emulate filing socket's send buffer by > > setting c->write->ready to 0 before calling the > > ngx_handle_write_event(), but not sure if it's safe/will actually > > work will all event methods.) > > It's certainly not safe. In particular it will break spdy. > > wbr, Valentin V. Bartenev > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: test_case.tar.gz Type: application/x-gzip Size: 1122 bytes Desc: not available URL: From mdounin at mdounin.ru Thu Jun 20 09:11:07 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 20 Jun 2013 13:11:07 +0400 Subject: API question: large data processing handler In-Reply-To: References: <20130604160831.GY72282@mdounin.ru> <201306042029.57511.vbart@nginx.com> Message-ID: <20130620091107.GK49779@mdounin.ru> Hello! 
On Wed, Jun 19, 2013 at 11:42:57PM -0600, Julien Zefi wrote: > i tried to follow the suggestion of using a timer and a new handler for the > write callback without luck, indeed there is something wrong on my end. > > if you have some minute to review, I wrote a simple test case, my goal is > to make ngx_http_test_stream_handler(..) to be called every 10ms and send > some data to the browser until this same function decide to stop working... Two obvious problems: 1) You try to send data from stack, which is wrong as nginx might not be able to send data immediately. 2) You set timer only once. Note timers are not periodic, and this will result in only call of the timer handler function. Haven't looked any further. -- Maxim Dounin http://nginx.org/en/donation.html From ykirpichev at gmail.com Thu Jun 20 11:08:38 2013 From: ykirpichev at gmail.com (Yury Kirpichev) Date: Thu, 20 Jun 2013 15:08:38 +0400 Subject: SPDY: fix tcp_nodelay and tcp_nopush handling Message-ID: Hi, Could you please take a look at the following patch. In this patch I tried to fix handling for tcp_nodelay and tcp_nopush. This patch is only applicable for SPDY over TCP connection (but not for SPDY over SSL). # HG changeset patch # User ykirpichev at gmail.com # Date 1371726192 -14400 # Branch fix_spdy_nopush_nodelay # Node ID 71c426451cbe308c8a96cce175d34aeca39a266a # Parent 982678c5c270f93a0c21ab6eb23cb123c0dc3df0 Fix spdy tcp_nodelay and tcp_nopush optioins handling diff -r 982678c5c270 -r 71c426451cbe src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Wed Jun 12 00:41:24 2013 +0900 +++ b/src/http/ngx_http_request.c Thu Jun 20 15:03:12 2013 +0400 @@ -314,6 +314,13 @@ #if (NGX_HTTP_SPDY) if (hc->addr_conf->spdy) { + ngx_http_core_loc_conf_t *clcf; + + clcf = ngx_http_get_module_loc_conf(hc->conf_ctx, + ngx_http_core_module); + + c->tcp_nodelay = clcf->tcp_nodelay ? NGX_TCP_NODELAY_UNSET : NGX_TCP_NODELAY_DISABLED; + c->tcp_nopush = clcf->tcp_nopush ? 
NGX_TCP_NOPUSH_UNSET : NGX_TCP_NOPUSH_DISABLED; rev->handler = ngx_http_spdy_init; } #endif diff -r 982678c5c270 -r 71c426451cbe src/http/ngx_http_spdy.c --- a/src/http/ngx_http_spdy.c Wed Jun 12 00:41:24 2013 +0900 +++ b/src/http/ngx_http_spdy.c Thu Jun 20 15:03:12 2013 +0400 @@ -154,6 +154,8 @@ static void *ngx_http_spdy_zalloc(void *opaque, u_int items, u_int size); static void ngx_http_spdy_zfree(void *opaque, void *address); +static ngx_int_t +ngx_http_spdy_restore_nopush(ngx_connection_t *c, ngx_http_connection_t *hc); static const u_char ngx_http_spdy_dict[] = @@ -378,6 +380,11 @@ sc->blocked = 0; + if (ngx_http_spdy_restore_nopush(c, sc->http_connection) != NGX_OK) { + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + if (sc->processing) { if (rev->timer_set) { ngx_del_timer(rev); @@ -447,6 +454,11 @@ return; } + if (ngx_http_spdy_restore_nopush(c, sc->http_connection) != NGX_OK) { + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + ngx_http_spdy_handle_connection(sc); } @@ -2880,3 +2892,51 @@ "spdy zfree: %p", address); #endif } + +static ngx_int_t +ngx_http_spdy_restore_nopush(ngx_connection_t *c, ngx_http_connection_t *hc) +{ + // It is better to use NGX_SPDY_WRITE_BUFFERED here, but + // it is defined in ngx_http_spdy_filter_module.c + // So, just use !c->buffered + if (!c->buffered && !c->error) { + //no buffered data, so, we should clean nopush if needed + if (c->tcp_nopush == NGX_TCP_NOPUSH_SET) { + if (ngx_tcp_push(c->fd) == -1) { + ngx_connection_error(c, ngx_socket_errno, ngx_tcp_push_n " failed"); + return NGX_ERROR; + } + + c->tcp_nopush = NGX_TCP_NOPUSH_UNSET; + } + + if (c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) + { + int tcp_nodelay = 1; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); + + if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, + (const void *) &tcp_nodelay, sizeof(int)) + == -1) + { +#if (NGX_SOLARIS) + /* Solaris returns EINVAL if a socket has 
been shut down */ + c->log_error = NGX_ERROR_IGNORE_EINVAL; +#endif + + ngx_connection_error(c, ngx_socket_errno, + "setsockopt(TCP_NODELAY) failed"); + + c->log_error = NGX_ERROR_INFO; + return NGX_ERROR; + } + + c->tcp_nodelay = NGX_TCP_NODELAY_SET; + } + } + + return NGX_OK; +} + + BR/ Yury -------------- next part -------------- An HTML attachment was scrubbed... URL: From vl at nginx.com Fri Jun 21 12:38:11 2013 From: vl at nginx.com (Homutov Vladimir) Date: Fri, 21 Jun 2013 12:38:11 +0000 Subject: [nginx] Core: support several "error_log" directives. Message-ID: details: http://hg.nginx.org/nginx/rev/7ecaa9e4bf1b branches: changeset: 5254:7ecaa9e4bf1b user: Vladimir Homutov date: Thu Jun 20 20:47:39 2013 +0400 description: Core: support several "error_log" directives. When several "error_log" directives are specified in the same configuration block, logs are written to all files with a matching log level. All logs are stored in the singly-linked list that is sorted by log level in the descending order. Specific debug levels (NGX_LOG_DEBUG_HTTP,EVENT, etc.) are not supported if several "error_log" directives are specified. In this case all logs will use debug level that has largest absolute value. 
diffstat: src/core/ngx_log.c | 137 ++++++++++++++++++++++++++++----------- src/core/ngx_log.h | 5 +- src/http/ngx_http_core_module.c | 28 +------- src/http/ngx_http_request.h | 1 + src/os/unix/ngx_process_cycle.c | 2 + 5 files changed, 106 insertions(+), 67 deletions(-) diffs (288 lines): diff -r a82f305487c2 -r 7ecaa9e4bf1b src/core/ngx_log.c --- a/src/core/ngx_log.c Wed Jun 19 08:55:08 2013 +0400 +++ b/src/core/ngx_log.c Thu Jun 20 20:47:39 2013 +0400 @@ -10,6 +10,8 @@ static char *ngx_error_log(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +static char *ngx_log_set_levels(ngx_conf_t *cf, ngx_log_t *log); +static void ngx_log_insert(ngx_log_t *log, ngx_log_t *new_log); static ngx_command_t ngx_errlog_commands[] = { @@ -86,14 +88,11 @@ ngx_log_error_core(ngx_uint_t level, ngx #endif { #if (NGX_HAVE_VARIADIC_MACROS) - va_list args; + va_list args; #endif - u_char *p, *last, *msg; - u_char errstr[NGX_MAX_ERROR_STR]; - - if (log->file->fd == NGX_INVALID_FILE) { - return; - } + u_char *p, *last, *msg; + u_char errstr[NGX_MAX_ERROR_STR]; + ngx_uint_t wrote_stderr, debug_connection; last = errstr + NGX_MAX_ERROR_STR; @@ -140,11 +139,27 @@ ngx_log_error_core(ngx_uint_t level, ngx ngx_linefeed(p); - (void) ngx_write_fd(log->file->fd, errstr, p - errstr); + wrote_stderr = 0; + debug_connection = (log->log_level & NGX_LOG_DEBUG_CONNECTION) != 0; + + while (log) { + + if (log->log_level < level && !debug_connection) { + break; + } + + (void) ngx_write_fd(log->file->fd, errstr, p - errstr); + + if (log->file->fd == ngx_stderr) { + wrote_stderr = 1; + } + + log = log->next; + } if (!ngx_use_stderr || level > NGX_LOG_WARN - || log->file->fd == ngx_stderr) + || wrote_stderr) { return; } @@ -348,31 +363,17 @@ ngx_log_init(u_char *prefix) } -ngx_log_t * -ngx_log_create(ngx_cycle_t *cycle, ngx_str_t *name) -{ - ngx_log_t *log; - - log = ngx_pcalloc(cycle->pool, sizeof(ngx_log_t)); - if (log == NULL) { - return NULL; - } - - log->file = ngx_conf_open_file(cycle, name); - if 
(log->file == NULL) { - return NULL; - } - - return log; -} - - -char * +static char * ngx_log_set_levels(ngx_conf_t *cf, ngx_log_t *log) { ngx_uint_t i, n, d, found; ngx_str_t *value; + if (cf->args->nelts == 2) { + log->log_level = NGX_LOG_ERR; + return NGX_CONF_OK; + } + value = cf->args->elts; for (i = 2; i < cf->args->nelts; i++) { @@ -428,10 +429,33 @@ ngx_log_set_levels(ngx_conf_t *cf, ngx_l static char * ngx_error_log(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { + ngx_log_t *dummy; + + dummy = &cf->cycle->new_log; + + return ngx_log_set_log(cf, &dummy); +} + + +char * +ngx_log_set_log(ngx_conf_t *cf, ngx_log_t **head) +{ + ngx_log_t *new_log; ngx_str_t *value, name; - if (cf->cycle->new_log.file) { - return "is duplicate"; + if (*head != NULL && (*head)->log_level == 0) { + new_log = *head; + + } else { + + new_log = ngx_pcalloc(cf->pool, sizeof(ngx_log_t)); + if (new_log == NULL) { + return NGX_CONF_ERROR; + } + + if (*head == NULL) { + *head = new_log; + } } value = cf->args->elts; @@ -444,15 +468,52 @@ ngx_error_log(ngx_conf_t *cf, ngx_comman name = value[1]; } - cf->cycle->new_log.file = ngx_conf_open_file(cf->cycle, &name); - if (cf->cycle->new_log.file == NULL) { - return NULL; + new_log->file = ngx_conf_open_file(cf->cycle, &name); + if (new_log->file == NULL) { + return NGX_CONF_ERROR; } - if (cf->args->nelts == 2) { - cf->cycle->new_log.log_level = NGX_LOG_ERR; - return NGX_CONF_OK; + if (ngx_log_set_levels(cf, new_log) != NGX_CONF_OK) { + return NGX_CONF_ERROR; } - return ngx_log_set_levels(cf, &cf->cycle->new_log); + if (*head != new_log) { + ngx_log_insert(*head, new_log); + } + + return NGX_CONF_OK; } + + +static void +ngx_log_insert(ngx_log_t *log, ngx_log_t *new_log) +{ + ngx_log_t tmp; + + if (new_log->log_level > log->log_level) { + + /* + * list head address is permanent, insert new log after + * head and swap its contents with head + */ + + tmp = *log; + *log = *new_log; + *new_log = tmp; + + log->next = new_log; + return; + } + + 
while (log->next) { + if (new_log->log_level > log->next->log_level) { + new_log->next = log->next; + log->next = new_log; + return; + } + + log = log->next; + } + + log->next = new_log; +} diff -r a82f305487c2 -r 7ecaa9e4bf1b src/core/ngx_log.h --- a/src/core/ngx_log.h Wed Jun 19 08:55:08 2013 +0400 +++ b/src/core/ngx_log.h Thu Jun 20 20:47:39 2013 +0400 @@ -61,6 +61,8 @@ struct ngx_log_s { */ char *action; + + ngx_log_t *next; }; @@ -220,11 +222,10 @@ void ngx_cdecl ngx_log_debug_core(ngx_lo /*********************************/ ngx_log_t *ngx_log_init(u_char *prefix); -ngx_log_t *ngx_log_create(ngx_cycle_t *cycle, ngx_str_t *name); -char *ngx_log_set_levels(ngx_conf_t *cf, ngx_log_t *log); void ngx_cdecl ngx_log_abort(ngx_err_t err, const char *fmt, ...); void ngx_cdecl ngx_log_stderr(ngx_err_t err, const char *fmt, ...); u_char *ngx_log_errno(u_char *buf, u_char *last, ngx_err_t err); +char *ngx_log_set_log(ngx_conf_t *cf, ngx_log_t **head); /* diff -r a82f305487c2 -r 7ecaa9e4bf1b src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c Wed Jun 19 08:55:08 2013 +0400 +++ b/src/http/ngx_http_core_module.c Thu Jun 20 20:47:39 2013 +0400 @@ -4888,33 +4888,7 @@ ngx_http_core_error_log(ngx_conf_t *cf, { ngx_http_core_loc_conf_t *clcf = conf; - ngx_str_t *value, name; - - if (clcf->error_log) { - return "is duplicate"; - } - - value = cf->args->elts; - - if (ngx_strcmp(value[1].data, "stderr") == 0) { - ngx_str_null(&name); - cf->cycle->log_use_stderr = 1; - - } else { - name = value[1]; - } - - clcf->error_log = ngx_log_create(cf->cycle, &name); - if (clcf->error_log == NULL) { - return NGX_CONF_ERROR; - } - - if (cf->args->nelts == 2) { - clcf->error_log->log_level = NGX_LOG_ERR; - return NGX_CONF_OK; - } - - return ngx_log_set_levels(cf, clcf->error_log); + return ngx_log_set_log(cf, &clcf->error_log); } diff -r a82f305487c2 -r 7ecaa9e4bf1b src/http/ngx_http_request.h --- a/src/http/ngx_http_request.h Wed Jun 19 08:55:08 2013 +0400 +++ 
b/src/http/ngx_http_request.h Thu Jun 20 20:47:39 2013 +0400 @@ -585,6 +585,7 @@ extern ngx_http_header_out_t ngx_http_ #define ngx_http_set_connection_log(c, l) \ \ c->log->file = l->file; \ + c->log->next = l->next; \ if (!(c->log->log_level & NGX_LOG_DEBUG_CONNECTION)) { \ c->log->log_level = l->log_level; \ } diff -r a82f305487c2 -r 7ecaa9e4bf1b src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c Wed Jun 19 08:55:08 2013 +0400 +++ b/src/os/unix/ngx_process_cycle.c Thu Jun 20 20:47:39 2013 +0400 @@ -714,6 +714,7 @@ ngx_master_process_exit(ngx_cycle_t *cyc ngx_exit_log = *ngx_cycle->log; ngx_exit_log.file = &ngx_exit_log_file; + ngx_exit_log.next = NULL; ngx_exit_cycle.log = &ngx_exit_log; ngx_exit_cycle.files = ngx_cycle->files; @@ -1066,6 +1067,7 @@ ngx_worker_process_exit(ngx_cycle_t *cyc ngx_exit_log = *ngx_cycle->log; ngx_exit_log.file = &ngx_exit_log_file; + ngx_exit_log.next = NULL; ngx_exit_cycle.log = &ngx_exit_log; ngx_exit_cycle.files = ngx_cycle->files; From anshukk at gmail.com Sun Jun 23 05:46:21 2013 From: anshukk at gmail.com (anshuk kumar) Date: Sun, 23 Jun 2013 11:16:21 +0530 Subject: Why nginx's http parser doesnt use regular expressions? Message-ID: This is in reference to the following code https://github.com/joyent/http-parser What could be the main reason for such a design decision? I guess I could write few regular expressions to parse HTTP req & res this would be a lot less complex than Igor's version of the parser. What am I missing here? On some discussion with over nginx's IRC i got the following points * HTTP parsing requires recursive exp resolution which is not possible using regexp * State Machines are faster? * A tool like ragel is used to create such state machine? -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From mdounin at mdounin.ru Mon Jun 24 10:51:02 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 24 Jun 2013 14:51:02 +0400 Subject: Why nginx's http parser doesnt use regular expressions? In-Reply-To: References: Message-ID: <20130624105102.GB20717@mdounin.ru> Hello! On Sun, Jun 23, 2013 at 11:16:21AM +0530, anshuk kumar wrote: > This is in reference to the following code > > https://github.com/joyent/http-parser > > What could be the main reason for such a design decision? I guess I could > write few regular expressions to parse HTTP req & res this would be a lot > less complex than Igor's version of the parser. > > What am I missing here? Regular expressions isn't something readily available when you code in C, nor something which can be easily used to parse data available in chunks. It's also highly unlikely that even carefully coded regular expressions will be able to beat C code in terms of performance. Of course if you are coding some simple http server in perl or javascript - using regular expressions is a way to go. But it's unlikely a good choise if you are coding high performance web server in C. -- Maxim Dounin http://nginx.org/en/donation.html From manowar at gsc-game.kiev.ua Mon Jun 24 13:01:24 2013 From: manowar at gsc-game.kiev.ua (Serguei I. Ivantsov) Date: Mon, 24 Jun 2013 16:01:24 +0300 Subject: build failed on Linux with x32 ABI Message-ID: <1ae0168681b2c371b8fbbe0de3f7d57a.squirrel@webmail.gsc-game.kiev.ua> Hi, Nginx failed to build on Linux with x32 ABI. >In file included from /usr/include/sys/sysctl.h:63:0, > from src/os/unix/ngx_linux_config.h:54, > from src/core/ngx_config.h:26, > from src/core/nginx.c:8: >/usr/include/bits/sysctl.h:19:3: error: #error "sysctl system call is >unsupported in x32 kernel" > # error "sysctl system call is unsupported in x32 kernel" sysctl() is only used within RTSIG module, but included anyway in ngx_linux_config.h. Please find the patch attached to address the issue. -- Serguei I. 
Ivantsov -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx_linux_sysctl.patch Type: application/octet-stream Size: 1294 bytes Desc: not available URL: From manowar at gsc-game.kiev.ua Mon Jun 24 13:01:33 2013 From: manowar at gsc-game.kiev.ua (Serguei I. Ivantsov) Date: Mon, 24 Jun 2013 16:01:33 +0300 Subject: build failed on Linux with x32 ABI Message-ID: <93aea50754f0bd99ee448301db5fa049.squirrel@webmail.gsc-game.kiev.ua> Hi, Nginx failed to build on Linux with x32 ABI. >In file included from /usr/include/sys/sysctl.h:63:0, > from src/os/unix/ngx_linux_config.h:54, > from src/core/ngx_config.h:26, > from src/core/nginx.c:8: >/usr/include/bits/sysctl.h:19:3: error: #error "sysctl system call is >unsupported in x32 kernel" > # error "sysctl system call is unsupported in x32 kernel" sysctl() is only used within RTSIG module, but included anyway in ngx_linux_config.h. Please find the patch attached to address the issue. -- Serguei I. Ivantsov -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx_linux_sysctl.patch Type: application/octet-stream Size: 1294 bytes Desc: not available URL: From ykirpichev at gmail.com Mon Jun 24 15:04:52 2013 From: ykirpichev at gmail.com (Yury Kirpichev) Date: Mon, 24 Jun 2013 19:04:52 +0400 Subject: SPDY: split big data frames Message-ID: Hi, It is limitation of current nginx spdy implementation that whole data chain is sent by single spdy data frame. Below are changes where I tried to change this behavior and use 32kb data frames at most in order to send outgoing spdy data. Any comments are welcome. 
# HG changeset patch # User ykirpichev at gmail.com # Date 1372085474 -14400 # Branch spdy_split_big_frame_default # Node ID c8660bcdd8d3fb2ef12fa1edc1d0f39b771de51e # Parent 982678c5c270f93a0c21ab6eb23cb123c0dc3df0 SPDY: split big frames diff -r 982678c5c270 -r c8660bcdd8d3 src/core/ngx_buf.h --- a/src/core/ngx_buf.h Wed Jun 12 00:41:24 2013 +0900 +++ b/src/core/ngx_buf.h Mon Jun 24 18:51:14 2013 +0400 @@ -158,5 +158,8 @@ void ngx_chain_update_chains(ngx_pool_t *p, ngx_chain_t **free, ngx_chain_t **busy, ngx_chain_t **out, ngx_buf_tag_t tag); + ngx_int_t +ngx_split_buf_in_chain(ngx_pool_t *pool, ngx_chain_t **chain, + ngx_chain_t *in, off_t buf_size); #endif /* _NGX_BUF_H_INCLUDED_ */ diff -r 982678c5c270 -r c8660bcdd8d3 src/core/ngx_output_chain.c --- a/src/core/ngx_output_chain.c Wed Jun 12 00:41:24 2013 +0900 +++ b/src/core/ngx_output_chain.c Mon Jun 24 18:51:14 2013 +0400 @@ -672,3 +672,59 @@ return NGX_AGAIN; } + + + ngx_int_t +ngx_split_buf_in_chain(ngx_pool_t *pool, ngx_chain_t **chain, + ngx_chain_t *in, off_t buf_size) +{ + ngx_chain_t *cl, **il; + ngx_buf_t *b, *buf; + +#if 0 + if (!in->buf->in_file) { + return NGX_OK; + } +#endif + + il = chain; + + while (ngx_buf_size(in->buf) > buf_size) { + cl = ngx_alloc_chain_link(pool); + if (cl == NULL) { + return NGX_ERROR; + } + + buf = in->buf; + + /* split a file buf on bufs by the buf_size limit */ + + b = ngx_calloc_buf(pool); + if (b == NULL) { + return NGX_ERROR; + } + + ngx_memcpy(b, buf, sizeof(ngx_buf_t)); + b->start = NULL; + b->end = NULL; + b->last_buf = 0; + b->last_in_chain = 0; + + if (ngx_buf_in_memory(buf)) { + buf->pos += buf_size; + b->last = buf->pos; + } + + buf->file_pos += buf_size; + b->file_last = buf->file_pos; + + cl->buf = b; + + cl->next = in; + *il = cl; + il = &cl->next; + } + + return NGX_OK; +} + diff -r 982678c5c270 -r c8660bcdd8d3 src/http/ngx_http_spdy_filter_module.c --- a/src/http/ngx_http_spdy_filter_module.c Wed Jun 12 00:41:24 2013 +0900 +++ 
b/src/http/ngx_http_spdy_filter_module.c Mon Jun 24 18:51:14 2013 +0400 @@ -15,6 +15,9 @@ #define NGX_SPDY_WRITE_BUFFERED NGX_HTTP_WRITE_BUFFERED +/* it is subject for change */ +/* consider to use NGX_SPDY_MAX_FRAME_SIZE instead ??? */ +#define NGX_SPDY_MAX_FRAME_LENGTH (1024 * 32) #define ngx_http_spdy_nv_nsize(h) (NGX_SPDY_NV_NLEN_SIZE + sizeof(h) - 1) #define ngx_http_spdy_nv_vsize(h) (NGX_SPDY_NV_VLEN_SIZE + sizeof(h) - 1) @@ -676,6 +679,10 @@ cl->buf = b; *ln = cl; + if (ngx_buf_size(b) > NGX_SPDY_MAX_FRAME_LENGTH) { + ngx_split_buf_in_chain(r->pool, ln, cl, NGX_SPDY_MAX_FRAME_LENGTH); + } + ln = &cl->next; if (ll->next == NULL) { @@ -685,24 +692,51 @@ ll = ll->next; } - if (size > NGX_SPDY_MAX_FRAME_SIZE) { - ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, - "FIXME: chain too big in spdy filter: %O", size); - return NGX_ERROR; + cl->next = NULL; + ll = cl = out; + size = 0; + + for ( ;; ) { + if (size + ngx_buf_size(cl->buf) > NGX_SPDY_MAX_FRAME_LENGTH) { + ll->buf->last_in_chain = 1; + frame = ngx_http_spdy_filter_get_data_frame(stream, (size_t)size, + ll->buf->last_buf, out, ll); + + ngx_http_spdy_queue_frame(stream->connection, frame); + stream->waiting++; + r->main->blocked++; + + + size = ngx_buf_size(cl->buf); + out = cl; + ll->next = NULL; + ll = cl; + if (cl->next == NULL) { + break; + } + cl = cl->next; + } + else { + size += ngx_buf_size(cl->buf); + ll = cl; + if (cl->next == NULL) { + break; + } + cl = cl->next; + + } } - frame = ngx_http_spdy_filter_get_data_frame(stream, (size_t) size, - b->last_buf, out, cl); - if (frame == NULL) { - return NGX_ERROR; + if (size > 0) { + ll->buf->last_in_chain = 1; + + frame = ngx_http_spdy_filter_get_data_frame(stream, (size_t)size, + ll->buf->last_buf, out, ll); + ngx_http_spdy_queue_frame(stream->connection, frame); + stream->waiting++; + r->main->blocked++; } - ngx_http_spdy_queue_frame(stream->connection, frame); - - stream->waiting++; - - r->main->blocked++; - return 
ngx_http_spdy_filter_send(r->connection, stream); } BR/ Yury -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Mon Jun 24 15:36:08 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 24 Jun 2013 19:36:08 +0400 Subject: build failed on Linux with x32 ABI In-Reply-To: <93aea50754f0bd99ee448301db5fa049.squirrel@webmail.gsc-game.kiev.ua> References: <93aea50754f0bd99ee448301db5fa049.squirrel@webmail.gsc-game.kiev.ua> Message-ID: <20130624153608.GH20717@mdounin.ru> Hello! On Mon, Jun 24, 2013 at 04:01:33PM +0300, Serguei I. Ivantsov wrote: > Hi, > > Nginx failed to build on Linux with x32 ABI. > > >In file included from /usr/include/sys/sysctl.h:63:0, > > from src/os/unix/ngx_linux_config.h:54, > > from src/core/ngx_config.h:26, > > from src/core/nginx.c:8: > >/usr/include/bits/sysctl.h:19:3: error: #error "sysctl system call is > >unsupported in x32 kernel" > > # error "sysctl system call is unsupported in x32 kernel" > > sysctl() is only used within RTSIG module, but included anyway in > ngx_linux_config.h. > > Please find the patch attached to address the issue. > > -- > Serguei I. Ivantsov > diff -ru a/auto/os/linux b/auto/os/linux > --- a/auto/os/linux 2013-06-04 16:21:53.000000000 +0300 > +++ b/auto/os/linux 2013-06-24 15:40:14.519059918 +0300 > @@ -28,7 +28,17 @@ > > # enable the rt signals on Linux between 2.2.19 and 2.6.17 > > -if [ \( $version -ge 131603 -a $version -lt 132626 \) -o $EVENT_RTSIG = YES ] > +ngx_feature="sysctl" > +ngx_feature_name="NGX_HAVE_SYSCTL" > +ngx_feature_run=yes > +ngx_feature_incs="#include " > +ngx_feature_path= > +ngx_feature_libs= > +ngx_feature_test= > + > +. auto/feature > + > +if [ $ngx_found = yes -a \( \( $version -ge 131603 -a $version -lt 132626 \) -o $EVENT_RTSIG = YES \) ] > then > echo " + rt signals found" > have=NGX_HAVE_RTSIG . auto/have This part looks unneeded. We are probably ok with a build failure without sysctl() if rtsig was explicitly requested. 
> diff -ru a/src/os/unix/ngx_linux_config.h b/src/os/unix/ngx_linux_config.h > --- a/src/os/unix/ngx_linux_config.h 2013-06-04 16:21:53.000000000 +0300 > +++ b/src/os/unix/ngx_linux_config.h 2013-06-24 15:38:47.689061321 +0300 > @@ -51,13 +51,16 @@ > #include /* memalign() */ > #include /* IOV_MAX */ > #include > -#include > #include > #include /* uname() */ > > > #include > > +#if (NGX_HAVE_RTSIG) > +#include > +#endif > + > > #if (NGX_HAVE_POSIX_SEM) > #include This probably needs to be moved to other event method related conditional includes (and needs another empty line to match style). See also here for some basic tips about submitting patches: http://nginx.org/en/docs/contributing_changes.html -- Maxim Dounin http://nginx.org/en/donation.html From anshukk at gmail.com Tue Jun 25 04:11:54 2013 From: anshukk at gmail.com (anshuk kumar) Date: Tue, 25 Jun 2013 09:41:54 +0530 Subject: Why nginx's http parser doesnt use regular expressions? In-Reply-To: <20130624105102.GB20717@mdounin.ru> References: <20130624105102.GB20717@mdounin.ru> Message-ID: Thanks for the reply. I have another question, was any state machine generators like ragel used to code this or this is completely hand written? Anshuk On Mon, Jun 24, 2013 at 4:21 PM, Maxim Dounin wrote: > Hello! > > On Sun, Jun 23, 2013 at 11:16:21AM +0530, anshuk kumar wrote: > > > This is in reference to the following code > > > > https://github.com/joyent/http-parser > > > > What could be the main reason for such a design decision? I guess I could > > write few regular expressions to parse HTTP req & res this would be a lot > > less complex than Igor's version of the parser. > > > > What am I missing here? > > Regular expressions isn't something readily available when you > code in C, nor something which can be easily used to parse data > available in chunks. It's also highly unlikely that even > carefully coded regular expressions will be able to beat C code in > terms of performance. 
> > Of course if you are coding some simple http server in perl or > javascript - using regular expressions is a way to go. But it's > unlikely a good choise if you are coding high performance web > server in C. > > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From jzefip at gmail.com Tue Jun 25 07:03:38 2013 From: jzefip at gmail.com (Julien Zefi) Date: Tue, 25 Jun 2013 01:03:38 -0600 Subject: API question: large data processing handler In-Reply-To: <20130620091107.GK49779@mdounin.ru> References: <20130604160831.GY72282@mdounin.ru> <201306042029.57511.vbart@nginx.com> <20130620091107.GK49779@mdounin.ru> Message-ID: hi, On Thu, Jun 20, 2013 at 3:11 AM, Maxim Dounin wrote: > Hello! > > On Wed, Jun 19, 2013 at 11:42:57PM -0600, Julien Zefi wrote: > > > i tried to follow the suggestion of using a timer and a new handler for > the > > write callback without luck, indeed there is something wrong on my end. > > > > if you have some minute to review, I wrote a simple test case, my goal is > > to make ngx_http_test_stream_handler(..) to be called every 10ms and send > > some data to the browser until this same function decide to stop > working... > > Two obvious problems: > > 1) You try to send data from stack, which is wrong as nginx might > not be able to send data immediately. > > 2) You set timer only once. Note timers are not periodic, and > this will result in only call of the timer handler function. > > Haven't looked any further. > thanks for your comments. 
Taking into account the changes provided, I still face this problem:
Here is log snippet which shows the problem: 2013/06/25 07:35:04 [debug] 4045#0: *245 free: 0000000002090650, unused: 0 2013/06/25 07:35:04 [debug] 4045#0: *245 free: 0000000002090760, unused: 128 2013/06/25 07:35:04 [debug] 4045#0: *1 post event 00007FA2EC947280 2013/06/25 07:35:04 [debug] 4045#0: *1 delete posted event 00007FA2EC947280 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy write handler 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000022B17D8 sid:243 prio:3 bl:0 size:8 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000022B16C0 sid:243 prio:3 bl:0 size:5008 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000022AB2F8 sid:243 prio:3 bl:1 size:269 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000022A8748 sid:239 prio:3 bl:0 size:8 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000022A8630 sid:239 prio:3 bl:0 size:5008 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000022AC690 sid:241 prio:1 bl:0 size:8 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000021184B8 sid:241 prio:1 bl:0 size:1008 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 0000000002118278 sid:241 prio:1 bl:1 size:268 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000020B3C38 sid:239 prio:3 bl:1 size:269 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000022A4718 sid:237 prio:3 bl:0 size:8 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000022A4600 sid:237 prio:3 bl:0 size:5008 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 000000000229D228 sid:237 prio:3 bl:1 size:269 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 000000000229F6C8 sid:235 prio:3 bl:0 size:8 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 000000000229F5B0 sid:235 prio:3 bl:0 size:5008 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 0000000002290158 sid:235 prio:3 bl:1 size:269 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 000000000229A678 sid:233 prio:3 bl:0 
size:8 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 000000000229A560 sid:233 prio:3 bl:0 size:5008 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 0000000002294198 sid:233 prio:3 bl:1 size:269 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 0000000002297648 sid:229 prio:3 bl:0 size:8 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 0000000002297530 sid:229 prio:3 bl:0 size:5008 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 0000000002296638 sid:231 prio:3 bl:0 size:8 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 0000000002296520 sid:231 prio:3 bl:0 size:5008 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000020B5C58 sid:231 prio:3 bl:1 size:269 2013/06/25 07:35:04 [debug] 4045#0: *1 spdy frame out: 00000000020ACBC8 sid:229 prio:3 bl:1 size:269 As you can see farm with priority 1 was added somewhere in the middle of queue because of we have 00000000020B3C38 sid:239 prio:3 bl:1 size:269 BR/ Yury -------------- next part -------------- An HTML attachment was scrubbed... URL: From vbart at nginx.com Tue Jun 25 11:05:43 2013 From: vbart at nginx.com (Valentin V. Bartenev) Date: Tue, 25 Jun 2013 15:05:43 +0400 Subject: Why nginx's http parser doesnt use regular expressions? In-Reply-To: References: <20130624105102.GB20717@mdounin.ru> Message-ID: <201306251505.44002.vbart@nginx.com> On Tuesday 25 June 2013 08:11:54 anshuk kumar wrote: > Thanks for the reply. > > I have another question, was any state machine generators like ragel used > to code this or this is completely hand written? > [..] It's all written by hand. We don't use generators. wbr, Valentin V. Bartenev From vbart at nginx.com Tue Jun 25 16:31:45 2013 From: vbart at nginx.com (Valentin V. 
Bartenev) Date: Tue, 25 Jun 2013 20:31:45 +0400 Subject: SPDY: what is the purpose of blocked frame In-Reply-To: References: Message-ID: <201306252031.45298.vbart@nginx.com> On Tuesday 25 June 2013 12:51:17 Yury Kirpichev wrote: > Hi Nginx Developers, > > > Could someone explain what is the purpose to use blocked frame for > SYN_REPLY frame in spdy implementation? > > According to our investigation it makes it impossible to use spdy > priorities because of blocked frames (since each stream is started with > SYN_REPLY which is blocked there is no way how frames from subsequent > requests can outrun previous request with lower priority in spdy output > queue). > [...] SPDY uses zlib compression for output headers in SYN_REPLY frames. In fact zlib is just a wrapper over deflate compression that consists of LZ77 and Huffman coding. Both client and server must keep LZ77 window in sync between each other across a whole SPDY session, so the order of SYN_REPLY frames cannot be changed after the compression has done. There is a way to improve things a bit. We may postpone compression to the latest phase (right before sending of queue), but it requires more code and we have no ETA for this yet. wbr, Valentin V. Bartenev From manowar at gsc-game.kiev.ua Tue Jun 25 16:32:04 2013 From: manowar at gsc-game.kiev.ua (Serguei I. Ivantsov) Date: Tue, 25 Jun 2013 19:32:04 +0300 Subject: build failed on Linux with x32 ABI Message-ID: <3fc412953da46be5287b6e486ac14072.squirrel@webmail.gsc-game.kiev.ua> >> +#if (NGX_HAVE_RTSIG) >> +#include >> +#endif >> + >> >> #if (NGX_HAVE_POSIX_SEM) >> #include >This probably needs to be moved to other event method related >conditional includes I did not find an example how and where conditionally include system header. 
BTW, I just make the same way I found in ngx_linux_config.h : #if (NGX_HAVE_POLL || NGX_HAVE_RTSIG) #include #endif From mdounin at mdounin.ru Tue Jun 25 17:49:10 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 25 Jun 2013 21:49:10 +0400 Subject: API question: large data processing handler In-Reply-To: References: <20130604160831.GY72282@mdounin.ru> <201306042029.57511.vbart@nginx.com> <20130620091107.GK49779@mdounin.ru> Message-ID: <20130625174910.GM20717@mdounin.ru> Hello! On Tue, Jun 25, 2013 at 01:03:38AM -0600, Julien Zefi wrote: > hi, > > > On Thu, Jun 20, 2013 at 3:11 AM, Maxim Dounin wrote: > > > Hello! > > > > On Wed, Jun 19, 2013 at 11:42:57PM -0600, Julien Zefi wrote: > > > > > i tried to follow the suggestion of using a timer and a new handler for > > the > > > write callback without luck, indeed there is something wrong on my end. > > > > > > if you have some minute to review, I wrote a simple test case, my goal is > > > to make ngx_http_test_stream_handler(..) to be called every 10ms and send > > > some data to the browser until this same function decide to stop > > working... > > > > Two obvious problems: > > > > 1) You try to send data from stack, which is wrong as nginx might > > not be able to send data immediately. > > > > 2) You set timer only once. Note timers are not periodic, and > > this will result in only call of the timer handler function. > > > > Haven't looked any further. > > > > thanks for your comments. 
Taking in count tha changes provided i still face > this problem: > > #0 0x00000000004065d6 in ngx_palloc (pool=0x0, size=16) at > src/core/ngx_palloc.c:122 > #1 0x0000000000406a73 in ngx_pcalloc (pool=0x0, size=16) at > src/core/ngx_palloc.c:305 > #2 0x000000000046b76d in ngx_http_chunked_header_filter (r=0x6eebb0) > at src/http/modules/ngx_http_chunked_filter_module.c:82 > #3 0x000000000046bdc4 in ngx_http_range_header_filter (r=0x6eebb0) > at src/http/modules/ngx_http_range_filter_module.c:160 > > why my pool is always NULL ? do i am missing some initialization somewhere ? Part of the backtrace shown suggests you trigger request activity after the request was freed. Most likely you've forgot r->main->count++. -- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Tue Jun 25 19:21:01 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 25 Jun 2013 23:21:01 +0400 Subject: build failed on Linux with x32 ABI In-Reply-To: <3fc412953da46be5287b6e486ac14072.squirrel@webmail.gsc-game.kiev.ua> References: <3fc412953da46be5287b6e486ac14072.squirrel@webmail.gsc-game.kiev.ua> Message-ID: <20130625192101.GQ20717@mdounin.ru> Hello! On Tue, Jun 25, 2013 at 07:32:04PM +0300, Serguei I. Ivantsov wrote: > >> +#if (NGX_HAVE_RTSIG) > >> +#include > >> +#endif > >> + > >> > >> #if (NGX_HAVE_POSIX_SEM) > >> #include > > >This probably needs to be moved to other event method related > >conditional includes > > I did not find an example how and where conditionally include system > header. BTW, I just make the same way I found in ngx_linux_config.h : > > #if (NGX_HAVE_POLL || NGX_HAVE_RTSIG) > #include > #endif Yes, that's fine. But you placed the include added before semaphore.h instead of adding it to other event method related includes. 
I mean to do something like this: --- a/src/os/unix/ngx_linux_config.h +++ b/src/os/unix/ngx_linux_config.h @@ -51,7 +51,6 @@ #include /* memalign() */ #include /* IOV_MAX */ #include -#include #include #include /* uname() */ @@ -77,6 +76,11 @@ extern ssize_t sendfile(int s, int fd, #endif +#if (NGX_HAVE_RTSIG) +#include +#endif + + #if (NGX_HAVE_POLL || NGX_HAVE_RTSIG) #include #endif -- Maxim Dounin http://nginx.org/en/donation.html From ykirpichev at gmail.com Wed Jun 26 08:34:17 2013 From: ykirpichev at gmail.com (Yury Kirpichev) Date: Wed, 26 Jun 2013 12:34:17 +0400 Subject: SPDY: what is the purpose of blocked frame In-Reply-To: <201306252031.45298.vbart@nginx.com> References: <201306252031.45298.vbart@nginx.com> Message-ID: Hello, Thanks for analysis and explanation. Then how about the following workaround - - queue blocked frames at the begining of queue in FIFO order. (just remove from ngx_http_spdy_queue_blocked_frame the code: if (frame->priority >= (*out)->priority) { break; } ) - queue non-blocked frames after blocked in priority order: static ngx_inline void ngx_http_spdy_queue_frame(ngx_http_spdy_connection_t *sc, ngx_http_spdy_out_frame_t *frame) { ngx_http_spdy_out_frame_t **out; for (out = &sc->last_out; *out *&& !(*out)->blocked*; out = &(*out)->next) { if (frame->priority >= (*out)->priority) { break; } } frame->next = *out; *out = frame; } Do you foresee any obvious drawback of such approach? BR/ Yury 2013/6/25 Valentin V. Bartenev > On Tuesday 25 June 2013 12:51:17 Yury Kirpichev wrote: > > Hi Nginx Developers, > > > > > > Could someone explain what is the purpose to use blocked frame for > > SYN_REPLY frame in spdy implementation? > > > > According to our investigation it makes it impossible to use spdy > > priorities because of blocked frames (since each stream is started with > > SYN_REPLY which is blocked there is no way how frames from subsequent > > requests can outrun previous request with lower priority in spdy output > > queue). 
> > > [...] > > SPDY uses zlib compression for output headers in SYN_REPLY frames. > In fact zlib is just a wrapper over deflate compression that consists > of LZ77 and Huffman coding. > > Both client and server must keep the LZ77 window in sync with each > other across a whole SPDY session, so the order of SYN_REPLY frames > cannot be changed after the compression has been done. > > There is a way to improve things a bit. We may postpone compression > to the latest phase (right before sending the queue), but it requires > more code and we have no ETA for this yet. > > wbr, Valentin V. Bartenev > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From manowar at gsc-game.kiev.ua Wed Jun 26 10:00:52 2013 From: manowar at gsc-game.kiev.ua (Serguei I. Ivantsov) Date: Wed, 26 Jun 2013 13:00:52 +0300 Subject: build failed on Linux with x32 ABI In-Reply-To: <20130625192101.GQ20717@mdounin.ru> References: <3fc412953da46be5287b6e486ac14072.squirrel@webmail.gsc-game.kiev.ua> <20130625192101.GQ20717@mdounin.ru> Message-ID: <5ccb39d952b4e7c0d80ace00af952a74.squirrel@webmail.gsc-game.kiev.ua> Well, please apply the proper patch. It will also speed up building on Linux, because rt signals (and the included sysctl.h) are very rare nowadays. > Yes, that's fine. But you placed the include added before > semaphore.h instead of adding it to other event method related > includes. 
I mean to do something like this: > > --- a/src/os/unix/ngx_linux_config.h > +++ b/src/os/unix/ngx_linux_config.h > @@ -51,7 +51,6 @@ > #include /* memalign() */ > #include /* IOV_MAX */ > #include > -#include > #include > #include /* uname() */ > > @@ -77,6 +76,11 @@ extern ssize_t sendfile(int s, int fd, > #endif > > > +#if (NGX_HAVE_RTSIG) > +#include > +#endif > + > + > #if (NGX_HAVE_POLL || NGX_HAVE_RTSIG) > #include > #endif > > > -- > Maxim Dounin > http://nginx.org/en/donation.html > From mdounin at mdounin.ru Wed Jun 26 12:32:58 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 26 Jun 2013 12:32:58 +0000 Subject: [nginx] Fixed build on Linux with x32 ABI. Message-ID: details: http://hg.nginx.org/nginx/rev/7f4ec5bfb715 branches: changeset: 5255:7f4ec5bfb715 user: Maxim Dounin date: Wed Jun 26 15:47:27 2013 +0400 description: Fixed build on Linux with x32 ABI. On Linux x32 inclusion of sys/sysctl.h produces an error. As sysctl() is only used by rtsig event method code, which is legacy and not compiled in by default on modern linuxes, the sys/sysctl.h file now only included if rtsig support is enabled. Based on patch by Serguei I. Ivantsov. 
diffstat: src/os/unix/ngx_linux_config.h | 9 +++++++-- 1 files changed, 7 insertions(+), 2 deletions(-) diffs (30 lines): diff --git a/src/os/unix/ngx_linux_config.h b/src/os/unix/ngx_linux_config.h --- a/src/os/unix/ngx_linux_config.h +++ b/src/os/unix/ngx_linux_config.h @@ -51,7 +51,6 @@ #include /* memalign() */ #include /* IOV_MAX */ #include -#include #include #include /* uname() */ @@ -77,11 +76,17 @@ extern ssize_t sendfile(int s, int fd, i #endif -#if (NGX_HAVE_POLL || NGX_HAVE_RTSIG) +#if (NGX_HAVE_POLL) #include #endif +#if (NGX_HAVE_RTSIG) +#include +#include +#endif + + #if (NGX_HAVE_EPOLL) #include #endif From mdounin at mdounin.ru Wed Jun 26 12:37:56 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 26 Jun 2013 16:37:56 +0400 Subject: build failed on Linux with x32 ABI In-Reply-To: <5ccb39d952b4e7c0d80ace00af952a74.squirrel@webmail.gsc-game.kiev.ua> References: <3fc412953da46be5287b6e486ac14072.squirrel@webmail.gsc-game.kiev.ua> <20130625192101.GQ20717@mdounin.ru> <5ccb39d952b4e7c0d80ace00af952a74.squirrel@webmail.gsc-game.kiev.ua> Message-ID: <20130626123756.GX20717@mdounin.ru> Hello! On Wed, Jun 26, 2013 at 01:00:52PM +0300, Serguei I. Ivantsov wrote: > Well, please apply the proper patch. > It will also speeds up building on Linux, because rt signals (and > included sysctl.h) are very rare nowadays. I've committed slightly different version (with minor changes as requested by Igor), thanks. -- Maxim Dounin http://nginx.org/en/donation.html From dp at highloadlab.com Thu Jun 27 19:20:56 2013 From: dp at highloadlab.com (Dmitry Popov) Date: Thu, 27 Jun 2013 23:20:56 +0400 Subject: [PATCH] ngx_http_parse_chunked might request wrong number of bytes Message-ID: <20130627232056.a19b1638a4c10aa76e17fce2@highloadlab.com> Consider a case when we've just read chunk size (but nothing else): case sw_chunk_size: ctx->length = 2 /* LF LF */ + (ctx->size ? 
ctx->size + 4 /* LF "0" LF LF */ : 0); break; ctx->length will be equal to 6 + ctx->size, but actually we need 5 + ctx->size bytes: LF LF 0 LF LF. It may lead to a deadlock (peer waits for a response from us while we wait for that last byte). * IIRC, RFC states that CRLF should be used after chunk size, not LF, so it's not so critical, but I think it should be fixed anyway. Signed-off-by: Dmitry Popov diff -ur old/src/http/ngx_http_parse.c new/src/http/ngx_http_parse.c --- old/src/http/ngx_http_parse.c 2013-06-04 17:21:53.000000000 +0400 +++ new/src/http/ngx_http_parse.c 2013-06-27 23:00:27.091638084 +0400 @@ -2180,8 +2180,10 @@ ctx->length = 3 /* "0" LF LF */; break; case sw_chunk_size: - ctx->length = 2 /* LF LF */ - + (ctx->size ? ctx->size + 4 /* LF "0" LF LF */ : 0); + ctx->length = 1 /* LF */ + + (ctx->size + ? ctx->size + 4 /* LF "0" LF LF */ + : 1 /* LF */); break; case sw_chunk_extension: case sw_chunk_extension_almost_done: From pranay.kanwar at gmail.com Fri Jun 28 07:55:20 2013 From: pranay.kanwar at gmail.com (Pranay Kanwar) Date: Fri, 28 Jun 2013 13:25:20 +0530 Subject: Accessing request body in log phase Message-ID: Hi, What is the correct way to get request body in a log phase handler module ? The usual method of ngx_http_read_client_request_body via a handler doesn't work, the r->request_body->bufs is always NULL. The handler works fine in other phases (for example the pre access phase). -pk -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Jun 28 08:46:16 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 28 Jun 2013 12:46:16 +0400 Subject: Accessing request body in log phase In-Reply-To: References: Message-ID: <20130628084616.GI20717@mdounin.ru> Hello! On Fri, Jun 28, 2013 at 01:25:20PM +0530, Pranay Kanwar wrote: > Hi, > > What is the correct way to get request body in a log phase handler module ? 
> > The usual method of ngx_http_read_client_request_body via a handler doesn't > work, > the r->request_body->bufs is always NULL. > > The handler works fine in other phases (for example the pre access phase). At the log phase it's already too late to read body - it's either read or discarded by previous code (and the connection might be already closed). -- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Fri Jun 28 10:24:04 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 28 Jun 2013 10:24:04 +0000 Subject: [nginx] Fixed ngx_http_parse_chunked() minimal length calculation. Message-ID: details: http://hg.nginx.org/nginx/rev/b66ec10e901a branches: changeset: 5256:b66ec10e901a user: Maxim Dounin date: Fri Jun 28 13:55:05 2013 +0400 description: Fixed ngx_http_parse_chunked() minimal length calculation. Minimal data length we expect for further calls was calculated incorrectly if parsing stopped right after parsing chunk size. This might in theory affect clients and/or backends using LF instead of CRLF. Patch by Dmitry Popov. diffstat: src/http/ngx_http_parse.c | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diffs (15 lines): diff --git a/src/http/ngx_http_parse.c b/src/http/ngx_http_parse.c --- a/src/http/ngx_http_parse.c +++ b/src/http/ngx_http_parse.c @@ -2180,8 +2180,9 @@ data: ctx->length = 3 /* "0" LF LF */; break; case sw_chunk_size: - ctx->length = 2 /* LF LF */ - + (ctx->size ? ctx->size + 4 /* LF "0" LF LF */ : 0); + ctx->length = 1 /* LF */ + + (ctx->size ? 
ctx->size + 4 /* LF "0" LF LF */ + : 1 /* LF */); break; case sw_chunk_extension: case sw_chunk_extension_almost_done: From mdounin at mdounin.ru Fri Jun 28 10:24:45 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 28 Jun 2013 14:24:45 +0400 Subject: [PATCH] ngx_http_parse_chunked might request wrong number of bytes In-Reply-To: <20130627232056.a19b1638a4c10aa76e17fce2@highloadlab.com> References: <20130627232056.a19b1638a4c10aa76e17fce2@highloadlab.com> Message-ID: <20130628102444.GJ20717@mdounin.ru> Hello! On Thu, Jun 27, 2013 at 11:20:56PM +0400, Dmitry Popov wrote: > Consider a case when we've just read chunk size (but nothing else): > case sw_chunk_size: > ctx->length = 2 /* LF LF */ > + (ctx->size ? ctx->size + 4 /* LF "0" LF LF */ : 0); > break; > ctx->length will be equal to 6 + ctx->size, but actually we need 5 + ctx->size > bytes: LF LF 0 LF LF. It may lead to a deadlock (peer waits for a > response from us while we wait for that last byte). > > * IIRC, RFC states that CRLF should be used after chunk size, not LF, so it's > not so critical, but I think it should be fixed anyway. Thanks, patch committed (with minor changes). -- Maxim Dounin http://nginx.org/en/donation.html From wandenberg at gmail.com Sat Jun 29 01:36:39 2013 From: wandenberg at gmail.com (Wandenberg Peixoto) Date: Fri, 28 Jun 2013 22:36:39 -0300 Subject: Help with shared memory usage Message-ID: Hi, I'm trying to understand how the shared memory pool works inside the Nginx. To do that, I made a very small module which create a shared memory zone with 2097152 bytes, and allocating and freeing blocks of memory, starting from 0 and increasing by 1kb until the allocation fails. 
The strange parts to me were: - the maximum block I could allocate was 128000 bytes - each time the allocation fails, I started again from 0, but the maximum allocated block changed with the following profile 128000 87040 70656 62464 58368 54272 50176 46080 41984 37888 33792 29696 Is this the expected behavior? Can anyone help me by explaining how shared memory works? I have another module which makes intensive use of shared memory, and understanding this can help me improve it by solving some "no memory" messages. I attached the code. Thanks, Wandenberg -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-shm-fragmentation-module.tar.gz Type: application/x-gzip Size: 2063 bytes Desc: not available URL: From dakota at brokenpipe.ru Sun Jun 30 11:04:12 2013 From: dakota at brokenpipe.ru (Marat Dakota) Date: Sun, 30 Jun 2013 15:04:12 +0400 Subject: How to abort subrequest properly? Message-ID: Hi, I am parsing a subrequest's body as it arrives. At some point I could decide that the subrequest's body is not well-formed. I want to stop receiving the rest of the subrequest's body and close its connection. My main request and all other subrequests should continue working. I've tried something like ngx_http_finalize_request(sr, NGX_ABORT). It looks like it's not the thing. What steps should be applied to abort a subrequest? Thanks. -- Marat -------------- next part -------------- An HTML attachment was scrubbed... URL: