From shawgoff at amazon.com Sat Jan 3 00:02:50 2015 From: shawgoff at amazon.com (Shawn J. Goff) Date: Fri, 02 Jan 2015 16:02:50 -0800 Subject: [PATCH 2 of 3] Tests: http_end - make getline() work with chunked messages In-Reply-To: References: Message-ID: <7e0cf8051ecb5c358dfd.1420243370@uecf4bb4ec21f540e4949.ant.amazon.com> # HG changeset patch # User Shawn J. Goff # Date 1419638613 28800 # Node ID 7e0cf8051ecb5c358dfd0bea94aa0aefa744ee29 # Parent 130a511d3faa26f79225dff7ef6796f4d39bc482 Tests: http_end - make getline() work with chunked messages When testing HTTP/1.1 keep-alive requests, this getline() will hang forever as it never gets EOF. This change avoids clearing $/ so that getline() will return for each line. It looks for a last-chunk (CRLF0CRLF) followed by a CRLF to delimit the message. This means it won't work for a test that uses a chunk-extension or a trailer, but there are no such tests yet. diff -r 130a511d3faa -r 7e0cf8051ecb lib/Test/Nginx.pm --- a/lib/Test/Nginx.pm Wed Dec 17 17:34:14 2014 -0800 +++ b/lib/Test/Nginx.pm Fri Dec 26 16:03:33 2014 -0800 @@ -531,9 +531,14 @@ local $SIG{PIPE} = sub { die "sigpipe\n" }; alarm(5); - local $/; - $reply = $s->getline(); - + while (defined(my $line = $s->getline())){ + $reply = $reply.$line; + # finish if we get the last chunk of a chunked-encoding + last if ($reply =~ qr/\x0d\x0a0\x0d\x0a\x0d\x0a$/) + # Note: for the sake of a simpler test, this does not + # account for a chunk-extension or for a trailer. See + # RFC2616#sec3.6.1 for details. + } alarm(0); }; alarm(0); From shawgoff at amazon.com Sat Jan 3 00:03:58 2015 From: shawgoff at amazon.com (Shawn J. Goff) Date: Fri, 02 Jan 2015 16:03:58 -0800 Subject: [PATCH 0 of 1] Upstream: add propagate_connection_close directive Message-ID: This patch adds a new directive to the upstream module: propagate_connection_close. It puts a "Connection: close" header in the downstream response if the upstream server sent one. 
This is useful when Nginx is used as HTTP/HTTPS endpoints with load balancers in front of them. It allows the upstream server to close connections in order to shed load. I can submit a documentation patch if this patch is accepted. In addition to being useful for us, I've found several examples of people asking for this type of behavior in public forums. https://github.com/openresty/headers-more-nginx-module/issues/22 http://stackoverflow.com/questions/5100971/nginx-and-proxy-pass-send-connection-close-headers http://serverfault.com/questions/480171/is-it-possible-to-tell-nginx-over-fastcgi-to-pass-a-connection-close-header-thr From shawgoff at amazon.com Sat Jan 3 00:02:49 2015 From: shawgoff at amazon.com (Shawn J. Goff) Date: Fri, 02 Jan 2015 16:02:49 -0800 Subject: [PATCH 1 of 3] Tests: add test library function for simple HTTP/1.1 request In-Reply-To: References: Message-ID: <130a511d3faa26f79225.1420243369@uecf4bb4ec21f540e4949.ant.amazon.com> # HG changeset patch # User Shawn J. Goff # Date 1418866454 28800 # Node ID 130a511d3faa26f79225dff7ef6796f4d39bc482 # Parent b2c3d509b2f90c97bfc79f64cb93e3d12ec6e40a Tests: add test library function for simple HTTP/1.1 request. This is for the upcoming propagate_connection_close tests. 
diff -r b2c3d509b2f9 -r 130a511d3faa lib/Test/Nginx.pm --- a/lib/Test/Nginx.pm Wed Dec 24 14:15:30 2014 +0300 +++ b/lib/Test/Nginx.pm Wed Dec 17 17:34:14 2014 -0800 @@ -11,7 +11,7 @@ use base qw/ Exporter /; -our @EXPORT = qw/ log_in log_out http http_get http_head /; +our @EXPORT = qw/ log_in log_out http http_get http_get_v11 http_head /; our @EXPORT_OK = qw/ http_gzip_request http_gzip_like http_start http_end /; our %EXPORT_TAGS = ( gzip => [ qw/ http_gzip_request http_gzip_like / ] @@ -450,15 +450,23 @@ ############################################################################### -sub http_get($;%) { - my ($url, %extra) = @_; +sub http_get_v($;%) { + my ($ver, $url, %extra) = @_; return http(< References: Message-ID: <739882cc36a6b52b6340.1420243439@uecf4bb4ec21f540e4949.ant.amazon.com> # HG changeset patch # User Shawn J. Goff # Date 1418438144 28800 # Node ID 739882cc36a6b52b6340d2f1b63e98fb47f4ab8b # Parent e9effef98874c619326ec11e25b11333225cf797 Upstream: add propagate_connection_close directive. This directive ensures that if the upstream response includes a "Connection: close" header, the response sent back to the downstream client also has the "Connection: close" header and the downstream connection is closed. This is useful where the upstream service handles closing connections to keep the load balanced with the rest of the fleet. propagate_connection_close is off by default to keep existing behavior. 
diff -r e9effef98874 -r 739882cc36a6 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Fri Dec 26 16:22:59 2014 +0300 +++ b/src/http/ngx_http_upstream.c Fri Dec 12 18:35:44 2014 -0800 @@ -159,6 +159,9 @@ static void *ngx_http_upstream_create_main_conf(ngx_conf_t *cf); static char *ngx_http_upstream_init_main_conf(ngx_conf_t *cf, void *conf); +static void *ngx_http_upstream_create_loc_conf(ngx_conf_t *cf); +static char *ngx_http_upstream_merge_loc_conf(ngx_conf_t *cf, + void *parent, void *child); #if (NGX_HTTP_SSL) static void ngx_http_upstream_ssl_init_connection(ngx_http_request_t *, @@ -314,6 +317,13 @@ 0, NULL }, + { ngx_string("propagate_connection_close"), + NGX_HTTP_LOC_CONF|NGX_HTTP_UPS_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_upstream_loc_conf_t, propagate_close), + NULL }, + ngx_null_command }; @@ -328,8 +338,8 @@ NULL, /* create server configuration */ NULL, /* merge server configuration */ - NULL, /* create location configuration */ - NULL /* merge location configuration */ + ngx_http_upstream_create_loc_conf, /* create location configuration */ + ngx_http_upstream_merge_loc_conf /* merge location configuration */ }; @@ -1834,9 +1844,12 @@ static void ngx_http_upstream_process_header(ngx_http_request_t *r, ngx_http_upstream_t *u) { - ssize_t n; - ngx_int_t rc; - ngx_connection_t *c; + ssize_t n; + ngx_int_t rc; + ngx_connection_t *c; + ngx_http_upstream_loc_conf_t *ulcf; + + ulcf = ngx_http_get_module_loc_conf(r, ngx_http_upstream_module); c = u->peer.connection; @@ -1967,6 +1980,10 @@ } } + if (ulcf->propagate_close == 1 && u->headers_in.connection_close == 1){ + r->keepalive = 0; + } + if (ngx_http_upstream_process_headers(r, u) != NGX_OK) { return; } @@ -5778,3 +5795,30 @@ return NGX_CONF_OK; } + + +static void * +ngx_http_upstream_create_loc_conf(ngx_conf_t *cf) +{ + ngx_http_upstream_loc_conf_t *conf; + + conf = ngx_palloc(cf->pool, sizeof(ngx_http_upstream_loc_conf_t)); + if (conf 
== NULL) { + return NULL; + } + + conf->propagate_close = NGX_CONF_UNSET; + return conf; +} + + +static char * +ngx_http_upstream_merge_loc_conf(ngx_conf_t *cf, void *parent, void *child) +{ + ngx_http_upstream_loc_conf_t *prev = parent; + ngx_http_upstream_loc_conf_t *conf = child; + + ngx_conf_merge_value(conf->propagate_close, prev->propagate_close, 0); + + return NGX_CONF_OK; +} diff -r e9effef98874 -r 739882cc36a6 src/http/ngx_http_upstream.h --- a/src/http/ngx_http_upstream.h Fri Dec 26 16:22:59 2014 +0300 +++ b/src/http/ngx_http_upstream.h Fri Dec 12 18:35:44 2014 -0800 @@ -72,6 +72,11 @@ /* ngx_http_upstream_srv_conf_t */ } ngx_http_upstream_main_conf_t; + +typedef struct { + ngx_flag_t propagate_close; +} ngx_http_upstream_loc_conf_t; + typedef struct ngx_http_upstream_srv_conf_s ngx_http_upstream_srv_conf_t; typedef ngx_int_t (*ngx_http_upstream_init_pt)(ngx_conf_t *cf, From shawgoff at amazon.com Sat Jan 3 00:41:41 2015 From: shawgoff at amazon.com (Shawn J. Goff) Date: Fri, 02 Jan 2015 16:41:41 -0800 Subject: [PATCH 0 of 3] Tests: add propagate_connection_close tests Message-ID: These patches add tests for my propagate_connection_close patch for the upstream module. Due to the nature of this change, there were a couple modifications required to the lib/Test/Nginx.pm to work with chunked responses. I reran the test suite with all features compiled in and it still passes, but some tests were skipped because I don't have the needed libraries installed. From shawgoff at amazon.com Sat Jan 3 00:54:14 2015 From: shawgoff at amazon.com (Shawn J. Goff) Date: Fri, 02 Jan 2015 16:54:14 -0800 Subject: [PATCH 3 of 3] Tests: add propagate_connection_close tests In-Reply-To: References: Message-ID: # HG changeset patch # User Shawn J. Goff # Date 1419637716 28800 # Node ID fdf48082ebc389a967b9d4064f1b8cc447427c78 # Parent 7e0cf8051ecb5c358dfd0bea94aa0aefa744ee29 Tests: add propagate_connection_close tests. 
propagate_connection_close causes nginx to close the downstream connection if the upstream returns a "Connection: close" header. This change adds tests for the existing/default behavior as well as enabled and disabled. For each setting, it does two tests: one that will cause the upstream server to close the connection and one that will cause the upstream server to keep the connection alive. Conflicts: lib/Test/Nginx.pm diff -r 7e0cf8051ecb -r fdf48082ebc3 upstream.t --- a/upstream.t Fri Dec 26 16:03:33 2014 -0800 +++ b/upstream.t Fri Dec 26 15:48:36 2014 -0800 @@ -21,7 +21,7 @@ select STDERR; $| = 1; select STDOUT; $| = 1; -my $t = Test::Nginx->new()->has(qw/http proxy/)->plan(3); +my $t = Test::Nginx->new()->has(qw/http proxy/)->plan(9); $t->write_file_expand('nginx.conf', <<'EOF'); @@ -55,6 +55,29 @@ location /close2 { proxy_pass http://u2; } + + location /close_pcc_default { + proxy_pass http://u2; + } + location /keep-alive_pcc_default { + proxy_pass http://u2; + } + location /close_pcc_on { + proxy_pass http://u2; + propagate_connection_close on; + } + location /keep-alive_pcc_on { + proxy_pass http://u2; + propagate_connection_close on; + } + location /close_pcc_off { + proxy_pass http://u2; + propagate_connection_close off; + } + location /keep-alive_pcc_off { + proxy_pass http://u2; + propagate_connection_close off; + } } } @@ -77,6 +100,18 @@ is(many('/close', 30), '8081: 6, 8082: 24', 'failures'); +# test propagate_connection_close=default behavior +like(http_get_v11('/keep-alive_pcc_default'), qr/Connection: keep-alive/, 'pcc keep-alive default'); +like(http_get_v11('/close_pcc_default'), qr/Connection: keep-alive/, 'pcc close default'); + +# test propagate_connection_close=on behavior +like(http_get_v11('/keep-alive_pcc_on'), qr/Connection: keep-alive/, 'pcc keep-alive on'); +like(http_get_v11('/close_pcc_on'), qr/Connection: close/, 'pcc close on'); + +# test propagate_connection_close=off behavior +like(http_get_v11('/keep-alive_pcc_off'), 
qr/Connection: keep-alive/, 'pcc keep-alive off'); +like(http_get_v11('/close_pcc_off'), qr/Connection: keep-alive/, 'pcc close off'); + SKIP: { skip 'long test', 1 unless $ENV{TEST_NGINX_UNSAFE}; @@ -129,6 +164,7 @@ my $headers = ''; my $uri = ''; + my $connection_header = 'close'; while (<$client>) { $headers .= $_; @@ -141,9 +177,13 @@ next; } + if ($uri =~ 'keep-alive') { + $connection_header = 'keep-alive' + } + print $client < References: Message-ID: <54A73FC9.4030302@amazon.com> On 01/02/2015 04:41 PM, Shawn J. Goff wrote: > These patches add tests for my propagate_connection_close patch for > the upstream module. Due to the nature of this change, there were a > couple modifications required to the lib/Test/Nginx.pm to work with > chunked responses. I reran the test suite with all features compiled > in and it still passes, but some tests were skipped because I don't > have the needed libraries installed. > These went through weird. I sent them using patchbomb, but 2/4 of the e-mails were rejected from the list. I resent just the two that were rejected using sendmail, so if you use hg mbox to pull these in, it may not work. Let me know if you need me to resend them. From yugal.mullick at gmail.com Sat Jan 3 04:47:18 2015 From: yugal.mullick at gmail.com (Yugal Mullick) Date: Sat, 3 Jan 2015 10:17:18 +0530 Subject: Help test Message-ID: -------------- next part -------------- An HTML attachment was scrubbed... URL: From yugal.mullick at gmail.com Sat Jan 3 05:07:25 2015 From: yugal.mullick at gmail.com (Yugal Mullick) Date: Sat, 3 Jan 2015 10:37:25 +0530 Subject: Help test In-Reply-To: References: Message-ID: Hi Team, Getting below error while downloading and extracting Nginx. techmahindra at techmahindra-ThinkCentre-M93p:~/yugal/src$ wget http://nginx.org/download/nginx-0.7.66.tar.gz --2015-01-02 17:59:58-- http://nginx.org/download/nginx-0.7.66.tar.gz Connecting to 127.0.0.1:3128... connected. Proxy request sent, awaiting response... 
302 Moved Location: http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 [following] --2015-01-02 17:59:58-- http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 Reusing existing connection to 127.0.0.1:3128. Proxy request sent, awaiting response... Read error (Connection reset by peer) in headers. Retrying. --2015-01-02 17:59:59-- (try: 2) http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 Connecting to 127.0.0.1:3128... connected. Proxy request sent, awaiting response... 200 OK Length: 1511 (1.5K) [text/html] Saving to: ?nginx-0.7.66.tar.gz? 100%[==============================================================================================================================>] 1,511 --.-K/s in 0s 2015-01-02 17:59:59 (535 MB/s) - ?nginx-0.7.66.tar.gz? saved [1511/1511] techmahindra at techmahindra-ThinkCentre-M93p:~/yugal/src$ tar zxf nginx-0.7.66.tar.gz gzip: stdin: not in gzip format tar: Child returned status 1 tar: Error is not recoverable: exiting now techmahindra at techmahindra-ThinkCentre-M93p:~/yugal/src$ Regards, Yugal -7798784888 On Sat, Jan 3, 2015 at 10:17 AM, Yugal Mullick wrote: > > -------------- next part -------------- An HTML attachment was scrubbed... URL: From yugal.mullick at gmail.com Sat Jan 3 05:10:29 2015 From: yugal.mullick at gmail.com (Yugal Mullick) Date: Sat, 3 Jan 2015 10:40:29 +0530 Subject: Help test In-Reply-To: References: Message-ID: Hi Team, Getting below error while downloading and extracting Nginx. techmahindra at techmahindra-ThinkCentre-M93p:~/yugal/src$ wget http://nginx.org/download/nginx-0.7.66.tar.gz --2015-01-02 17:59:58-- http://nginx.org/download/nginx-0.7.66.tar.gz Connecting to 127.0.0.1:3128... connected. Proxy request sent, awaiting response... 
302 Moved Location: http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 [following] --2015-01-02 17:59:58-- http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 Reusing existing connection to 127.0.0.1:3128. Proxy request sent, awaiting response... Read error (Connection reset by peer) in headers. Retrying. --2015-01-02 17:59:59-- (try: 2) http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 Connecting to 127.0.0.1:3128... connected. Proxy request sent, awaiting response... 200 OK Length: 1511 (1.5K) [text/html] Saving to: ?nginx-0.7.66.tar.gz? 100%[==============================================================================================================================>] 1,511 --.-K/s in 0s 2015-01-02 17:59:59 (535 MB/s) - ?nginx-0.7.66.tar.gz? saved [1511/1511] techmahindra at techmahindra-ThinkCentre-M93p:~/yugal/src$ tar zxf nginx-0.7.66.tar.gz gzip: stdin: not in gzip format tar: Child returned status 1 tar: Error is not recoverable: exiting now techmahindra at techmahindra-ThinkCentre-M93p:~/yugal/src$ Regards, Yugal -7798784888 On Sat, Jan 3, 2015 at 10:17 AM, Yugal Mullick wrote: > > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mat999 at gmail.com Sat Jan 3 05:16:21 2015 From: mat999 at gmail.com (SplitIce) Date: Sat, 3 Jan 2015 16:16:21 +1100 Subject: Help test In-Reply-To: References: Message-ID: You appear to be behind a proxy, look at the output - http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 Nothing really to do with nginx. On Sat, Jan 3, 2015 at 4:10 PM, Yugal Mullick wrote: > Hi Team, > > > > Getting below error while downloading and extracting Nginx. > > > > > > techmahindra at techmahindra-ThinkCentre-M93p:~/yugal/src$ wget > http://nginx.org/download/nginx-0.7.66.tar.gz > > --2015-01-02 17:59:58-- http://nginx.org/download/nginx-0.7.66.tar.gz > > Connecting to 127.0.0.1:3128... connected. 
> > Proxy request sent, awaiting response... 302 Moved > > Location: > http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 > [following] > > --2015-01-02 17:59:58-- > http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 > > Reusing existing connection to 127.0.0.1:3128. > > Proxy request sent, awaiting response... Read error (Connection reset by > peer) in headers. > > Retrying. > > > > --2015-01-02 17:59:59-- (try: 2) > http://10.254.40.54:15871/cgi-bin/blockpage.cgi?ws-session=3162698151 > > Connecting to 127.0.0.1:3128... connected. > > Proxy request sent, awaiting response... 200 OK > > Length: 1511 (1.5K) [text/html] > > Saving to: ?nginx-0.7.66.tar.gz? > > > > 100%[==============================================================================================================================>] > 1,511 --.-K/s in 0s > > > > 2015-01-02 17:59:59 (535 MB/s) - ?nginx-0.7.66.tar.gz? saved [1511/1511] > > > > techmahindra at techmahindra-ThinkCentre-M93p:~/yugal/src$ tar zxf > nginx-0.7.66.tar.gz > > > > gzip: stdin: not in gzip format > > tar: Child returned status 1 > > tar: Error is not recoverable: exiting now > > techmahindra at techmahindra-ThinkCentre-M93p:~/yugal/src$ > > > > Regards, > Yugal -7798784888 > > On Sat, Jan 3, 2015 at 10:17 AM, Yugal Mullick > wrote: > >> >> > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From wandenberg at gmail.com Sat Jan 3 23:57:32 2015 From: wandenberg at gmail.com (Wandenberg Peixoto) Date: Sat, 3 Jan 2015 21:57:32 -0200 Subject: handle NGX_AGAIN properly In-Reply-To: References: Message-ID: Hi Julien, I was having the same problem with NGX_AGAIN and could solved it. Did you fix your problem? I can try to help you. 
Regards, Wandenberg On Thu, Jul 18, 2013 at 1:37 AM, Julien Zefi wrote: > Hi all, > > thanks for the help but after more changes and taking in count your > suggestions i am still stuck with the problem (it cannot be in Lua, must be > done in C as i am streaming binary data). > > If anyone of you is interested, i will put a budget of 100USD for who is > interested into fix the test case as required, for more details send me a > private email to discuss the requirements and what is expected as result. > > thanks, > > > > On Sun, Jul 14, 2013 at 10:57 PM, Yichun Zhang (agentzh) < > agentzh at gmail.com> wrote: > >> Hello! >> >> On Sun, Jul 14, 2013 at 8:43 PM, Julien Zefi wrote: >> > >> > Sorry by bother you again but i still cannot figure out how some >> internals >> > are not working as i expect. I have take in count your suggestions and >> wrote >> > a new test case (file attached). >> > >> >> 1. You should simply call ngx_http_output_filter(r, NULL); in your >> r->write_event_handler, but you set r->write_event_handler to >> ngx_http_test_stream_handler which always emits brand new data. I'm >> guessing you don't really understand how the ngx_http_writer and >> ngx_http_set_write_handler functions are implemented in the Nginx >> core. Look harder. >> >> 2. You should not set r->header_only = 1 in your case because you're >> actually sending out the response body. Ensure that you know how a >> flag works before you start using it. >> >> 3. Another obvious mistake is that you incorrectly perform >> >> r->main->count++; >> >> without decrementing it by calling ngx_http_finalize_request, which >> will certainly lead to request hang. Ensure that you understand this >> flag before using it. >> >> > The test case writes 12.3KB of data every 1ms, at some point it will >> raise >> > NGX_AGAIN but from there is not recovering, it keeps in the same state >> > forever, do you see any specific problem when handling the exception ? 
>> > >> >> This is trivial to implement by writing some Lua code using ngx_lua >> module: >> >> location /t { >> content_by_lua ' >> local message = "..." >> for i = 1, 100 do >> ngx.print(message) >> ngx.flush(true) >> ngx.sleep(0.001) >> end >> '; >> } >> >> Maybe you can just use ngx_lua for your purposes without all the pain >> of understanding the nginx internals (you seem to lack a lot of >> knowledge here). If you insist in writing your own nginx C module, >> then just check out how ngx_lua implements all the APIs demonstrated >> in the example above. You can also check out the official >> documentation of ngx_lua: >> >> http://wiki.nginx.org/HttpLuaModule >> >> Best regards, >> -agentzh >> > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Mon Jan 5 03:47:44 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 5 Jan 2015 06:47:44 +0300 Subject: [PATCH 0 of 1] Upstream: add propagate_connection_close directive In-Reply-To: References: Message-ID: <20150105034744.GA47350@mdounin.ru> Hello! On Fri, Jan 02, 2015 at 04:03:58PM -0800, Shawn J. Goff wrote: > This patch adds a new directive to the upstream module: > propagate_connection_close. It puts a "Connection: close" header in > the downstream response if the upstream server sent one. This is > useful when Nginx is used as HTTP/HTTPS endpoints with load blancers > in front of them. It allows the upstream server to close connections > in order to shed load. You may try to better elaborate on the problem you are trying to solve and why existing mechanisms do not work for you. As of now, nginx can: - Disable keepalive completely ("keepalive_timeout 0") on a per-location basis (http://nginx.org/r/keepalive_timeout). 
- Disable keepalive based on a number of requests served in a connection (http://nginx.org/r/keepalive_requests). - Disable keepalive in some specific conditions (http://nginx.org/r/keepalive_disable). - Disable chunked transfer encoding on a per-location basis (http://nginx.org/r/chunked_transfer_encoding). I think this addresses most, if not all, problems in the links you provided. In particular, low enough keepalive_requests can be used to distribute load if needed. > I can submit a documentation patch if this patch is accepted. The approach taken in the patch looks wrong, for multiple reasons, in particular: - The upstream module isn't expected to contain its own directives, except ones used to define upstream{} blocks. Instead, there should be directives in modules implementing protocols, like "proxy_foo_bar...". - The "Connection: close" header is a hop-by-hop http header, and "propagating" it looks like a bad idea. It mixes control of the nginx-to-backend connection with control of the client-to-nginx connection. Instead, there should be a way to control these connections separately. It may be an option to add X-Accel-... header instead, similar to X-Accel-Limit-Rate. Though this approach has its own problems too, see below. - It is not possible to control connections that aren't proxied to backends but are handled locally - e.g., when using embedded perl or even just serving static files. If there is a need to allow dynamic control of keepalive, I think that proper way would be to extend the "keepalive_disable" directive with variables support. 
-- Maxim Dounin http://nginx.org/ From piotr at cloudflare.com Mon Jan 5 22:12:04 2015 From: piotr at cloudflare.com (Piotr Sikora) Date: Mon, 5 Jan 2015 14:12:04 -0800 Subject: [PATCH] Add strict Host validation In-Reply-To: References: <20141219163705.GU79300@mdounin.ru> Message-ID: Hey Maxim, > While I agree that there is no real reason for forbidding some of > those characters, I think that Host still should be restricted to at > least printable ASCII characters (minus space and path separators). > > I can't think of any reason why would you intentionally allow control > characters in there. Ping... or is it still a "no"? Best regards, Piotr Sikora From shawgoff at amazon.com Tue Jan 6 01:47:28 2015 From: shawgoff at amazon.com (Shawn J. Goff) Date: Mon, 5 Jan 2015 17:47:28 -0800 Subject: [PATCH 0 of 1] Upstream: add propagate_connection_close directive In-Reply-To: <20150105034744.GA47350@mdounin.ru> References: <20150105034744.GA47350@mdounin.ru> Message-ID: <54AB3EB0.8040500@amazon.com> On 01/04/2015 07:47 PM, Maxim Dounin wrote: > Hello! Hi, thanks for taking the time to review the patch! > On Fri, Jan 02, 2015 at 04:03:58PM -0800, Shawn J. Goff wrote: > >> This patch adds a new directive to the upstream module: >> propagate_connection_close. It puts a "Connection: close" header in >> the downstream response if the upstream server sent one. This is >> useful when Nginx is used as HTTP/HTTPS endpoints with load blancers >> in front of them. It allows the upstream server to close connections >> in order to shed load. > You may try to better elaborate on the problem you are trying to > solve and why existing mechanisms do not work for you. We have HTTP servers that sit behind TCP load balancers. The servers currently have a protocol for making sure long-lived connections are balanced among them. This involves closing specific connections at appropriate times; this causes the client to open a new connection, which will most-likely be handled by another host. 
We now want those hosts to accept HTTPS connections in a well-understood, reliable way that has acceptable performance characteristics. We are hoping to use Nginx. We found that it breaks our load-balancing because connections are not being closed. > As of now, nginx can: > but > - Disable keepalive completely ("keepalive_timeout 0") on a > per-location basis (http://nginx.org/r/keepalive_timeout). This will turn off keep-alive for every connection at the location, which is not what we want - we want to close specific existing connections. > - Disable keepalive based on a number of requests served in a > connection (http://nginx.org/r/keepalive_requests). This is not enough; we already have a way of choosing connections to close, and it's not only based on the number of requests (or on length of idle time, keepalive_timeout). > - Disable keepalive in some specific conditions > (http://nginx.org/r/keepalive_disable). This disables keep-alive per browser, which is very much a different thing, and not our problem. > - Disable chunked transfer encoding on a per-location basis > (http://nginx.org/r/chunked_transfer_encoding). This is also not going to help us. > I think this addresses most, if not all, problems in the links you > provided. In particular, low enough keepalive_requests can be > used to distribute load if needed. It can, at the expense of higher latency. We are extremely latency-sensitive, so we've put a lot of work into ensuring our p99.9+ latencies are consistent and as low as possible. To that end, we have our own method of distributing the load that should really not be part of Nginx, as it's highly specific to our service. > >> I can submit a documentation patch if this patch is accepted. > The approach taken in the patch looks wrong, for multiple reasons, > in particular: > > - The upstream module isn't expected to contain it's own > directives, expect ones used to define upstream{} blocks. 
> Instead, there should be directives in modules implementing > protocols, like "proxy_foo_bar...". I had considered putting it in upstream, but thought that having it in location{} would give more flexibility. I'd be fine putting it in upstream{} instead. As far as putting it in a proxy_foo_bar module, I took a look through the modules here: http://nginx.org/en/docs/http/ngx_http_proxy_module.html . The only one I see that might be appropriate is proxy_pass; are there any others you were referring to? I chose to put this in the upstream module because that is what strips out the Connection header and sets the connection_close field in the headers_in struct that is specific to the upstream module. > > - The "Connection: close" header is a hop-by-hop http header, and > "propogating" it looks like a bad idea. It mixes control of the > nginx-to-backend connection with control of the client-to-nginx > connection. Instead, there should be a way to control these > connections separately. It may be an option to add X-Accel-... > header instead, similart to X-Accel-Limit-Rate. Though this > approach has it's own problems too, see below. It is hop-by-hop, but we're not really wanting Nginx as a separate hop; that is just a byproduct. Nginx is on the same host as the upstream server; it's just there to take care of TLS for us. > > - It is not possible to control connections that aren't proxied > to backends but are handled locally - e.g., when using embedded > perl or even just serving static files. > > If there is a need to allow dynamic control of keepalive, I think > that proper way would be to extend the "keepalive_disable" > directive with variables support. > How would this work? Should I set a variable depending on whether some X-Accel- header is present, then set keepalive_disable per request depending on that variable? 
From piotr at cloudflare.com Tue Jan 6 02:11:16 2015 From: piotr at cloudflare.com (Piotr Sikora) Date: Mon, 5 Jan 2015 18:11:16 -0800 Subject: [nginx] Core: added prefix-based temporary files. In-Reply-To: References: Message-ID: Hey Valentin, > details: http://hg.nginx.org/nginx/rev/a9138c35120d > branches: > changeset: 5958:a9138c35120d > user: Valentin Bartenev > date: Fri Dec 26 16:22:54 2014 +0300 > description: > Core: added prefix-based temporary files. > > Now, if the "path" parameter is NULL, ngx_create_temp_file() will use > file->name as a predefined file path prefix. This breaks terribly when "levels" parameter is also used, i.e.: proxy_cache_path /tmp/cache keys_zone=x:1m levels=2 use_temp_path=off; Such configuration results in: [debug] 23696#0: *1 hashed path: /tmp/cache/97/a81259cef8e959c624df1d456e5d3297.0000000001 [debug] 23696#0: *1 temp fd:-1 [crit] 23696#0: *1 open() "/tmp/cache/97/a81259cef8e959c624df1d456e5d3297.0000000001" failed (2: No such file or directory) while reading upstream, client: 127.0.0.1, server: , request: "GET /valid1 HTTP/1.1", upstream: "http://127.0.0.1:7071/", host: "localhost:8081" [debug] 23696#0: *1 finalize http upstream request: -1 because "/tmp/cache/97/" doesn't exist and nginx never tries to create it in the prefix-based version. Best regards, Piotr Sikora From piotr at cloudflare.com Tue Jan 6 03:15:52 2015 From: piotr at cloudflare.com (Piotr Sikora) Date: Mon, 05 Jan 2015 19:15:52 -0800 Subject: [PATCH] Upstream: add use_temp_path=tmp to proxy_cache_path and friends Message-ID: # HG changeset patch # User Piotr Sikora # Date 1420514028 28800 # Mon Jan 05 19:13:48 2015 -0800 # Node ID e7596cd5b480c9cbabe583a8f47301e32fdf179a # Parent e9effef98874c619326ec11e25b11333225cf797 Upstream: add use_temp_path=tmp to proxy_cache_path and friends. When set to "tmp", store temporary files in "tmp" directory inside cache path. 
The advantage of this solution is that temporary files are stored inside cache path (same as with "off"), which makes path suitable to be used in multi-disks setups, but at the same time they aren't mixed with complete files, which makes it easier to manage them. Signed-off-by: Piotr Sikora diff -r e9effef98874 -r e7596cd5b480 src/http/ngx_http_cache.h --- a/src/http/ngx_http_cache.h Fri Dec 26 16:22:59 2014 +0300 +++ b/src/http/ngx_http_cache.h Mon Jan 05 19:13:48 2015 -0800 @@ -142,6 +142,7 @@ struct ngx_http_file_cache_s { ngx_slab_pool_t *shpool; ngx_path_t *path; + ngx_path_t *temp; off_t max_size; size_t bsize; @@ -157,7 +158,7 @@ struct ngx_http_file_cache_s { ngx_shm_zone_t *shm_zone; ngx_uint_t use_temp_path; - /* unsigned use_temp_path:1 */ + /* unsigned use_temp_path:2 */ }; diff -r e9effef98874 -r e7596cd5b480 src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c Fri Dec 26 16:22:59 2014 +0300 +++ b/src/http/ngx_http_file_cache.c Mon Jan 05 19:13:48 2015 -0800 @@ -2069,6 +2069,7 @@ ngx_http_file_cache_set_slot(ngx_conf_t off_t max_size; u_char *last, *p; + size_t len; time_t inactive; ssize_t size; ngx_str_t s, name, *value; @@ -2158,10 +2159,30 @@ ngx_http_file_cache_set_slot(ngx_conf_t } else if (ngx_strcmp(&value[i].data[14], "off") == 0) { use_temp_path = 0; + } else if (ngx_strcmp(&value[i].data[14], "tmp") == 0) { + use_temp_path = 2; + + len = cache->path->name.len + sizeof("/tmp") - 1; + + p = ngx_pnalloc(cf->pool, len); + if (p == NULL) { + return NGX_CONF_ERROR; + } + + (void) ngx_sprintf(p, "%V/tmp", &cache->path->name); + + cache->temp = ngx_pcalloc(cf->pool, sizeof(ngx_path_t)); + if (cache->temp == NULL) { + return NGX_CONF_ERROR; + } + + cache->temp->name.data = p; + cache->temp->name.len = len; + } else { ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid use_temp_path value \"%V\", " - "it must be \"on\" or \"off\"", + "it must be \"on\", \"off\" or \"tmp\"", &value[i]); return NGX_CONF_ERROR; } @@ -2291,6 +2312,15 @@ 
ngx_http_file_cache_set_slot(ngx_conf_t return NGX_CONF_ERROR; } + if (cache->temp) { + cache->temp->conf_file = cf->conf_file->file.name.data; + cache->temp->line = cf->conf_file->line; + + if (ngx_add_path(cf, &cache->temp) != NGX_OK) { + return NGX_CONF_ERROR; + } + } + cache->shm_zone = ngx_shared_memory_add(cf, &name, size, cmd->post); if (cache->shm_zone == NULL) { return NGX_CONF_ERROR; diff -r e9effef98874 -r e7596cd5b480 src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Fri Dec 26 16:22:59 2014 +0300 +++ b/src/http/ngx_http_upstream.c Mon Jan 05 19:13:48 2015 -0800 @@ -2678,10 +2678,15 @@ ngx_http_upstream_send_response(ngx_http p->temp_file->persistent = 1; #if (NGX_HTTP_CACHE) - if (r->cache && !r->cache->file_cache->use_temp_path) { - p->temp_file->file.name = r->cache->file.name; - p->temp_file->path = r->cache->file_cache->path; - p->temp_file->prefix = 1; + if (r->cache) { + if (r->cache->file_cache->use_temp_path == 0) { + p->temp_file->file.name = r->cache->file.name; + p->temp_file->path = r->cache->file_cache->path; + p->temp_file->prefix = 1; + + } else if (r->cache->file_cache->use_temp_path == 2) { + p->temp_file->path = r->cache->file_cache->temp; + } } #endif From arut at nginx.com Tue Jan 6 12:53:03 2015 From: arut at nginx.com (Roman Arutyunyan) Date: Tue, 6 Jan 2015 15:53:03 +0300 Subject: [PATCH] Upstream: add use_temp_path=tmp to proxy_cache_path and friends In-Reply-To: References: Message-ID: <6F10FE84-DBBC-4B92-85A3-E7B907093174@nginx.com> On 06 Jan 2015, at 06:15, Piotr Sikora wrote: > # HG changeset patch > # User Piotr Sikora > # Date 1420514028 28800 > # Mon Jan 05 19:13:48 2015 -0800 > # Node ID e7596cd5b480c9cbabe583a8f47301e32fdf179a > # Parent e9effef98874c619326ec11e25b11333225cf797 > Upstream: add use_temp_path=tmp to proxy_cache_path and friends. > > When set to "tmp", store temporary files in "tmp" directory inside cache > path. 
> > The advantage of this solution is that temporary files are stored inside > cache path (same as with "off"), which makes path suitable to be used in > multi-disks setups, but at the same time they aren't mixed with complete > files, which makes it easier to manage them. > > Signed-off-by: Piotr Sikora > > diff -r e9effef98874 -r e7596cd5b480 src/http/ngx_http_cache.h > --- a/src/http/ngx_http_cache.h Fri Dec 26 16:22:59 2014 +0300 > +++ b/src/http/ngx_http_cache.h Mon Jan 05 19:13:48 2015 -0800 > @@ -142,6 +142,7 @@ struct ngx_http_file_cache_s { > ngx_slab_pool_t *shpool; > > ngx_path_t *path; > + ngx_path_t *temp; > > off_t max_size; > size_t bsize; > @@ -157,7 +158,7 @@ struct ngx_http_file_cache_s { > ngx_shm_zone_t *shm_zone; > > ngx_uint_t use_temp_path; > - /* unsigned use_temp_path:1 */ > + /* unsigned use_temp_path:2 */ > }; > > > diff -r e9effef98874 -r e7596cd5b480 src/http/ngx_http_file_cache.c > --- a/src/http/ngx_http_file_cache.c Fri Dec 26 16:22:59 2014 +0300 > +++ b/src/http/ngx_http_file_cache.c Mon Jan 05 19:13:48 2015 -0800 > @@ -2069,6 +2069,7 @@ ngx_http_file_cache_set_slot(ngx_conf_t > > off_t max_size; > u_char *last, *p; > + size_t len; > time_t inactive; > ssize_t size; > ngx_str_t s, name, *value; > @@ -2158,10 +2159,30 @@ ngx_http_file_cache_set_slot(ngx_conf_t > } else if (ngx_strcmp(&value[i].data[14], "off") == 0) { > use_temp_path = 0; > > + } else if (ngx_strcmp(&value[i].data[14], "tmp") == 0) { > + use_temp_path = 2; > + > + len = cache->path->name.len + sizeof("/tmp") - 1; > + > + p = ngx_pnalloc(cf->pool, len); > + if (p == NULL) { > + return NGX_CONF_ERROR; > + } > + > + (void) ngx_sprintf(p, "%V/tmp", &cache->path->name); > + > + cache->temp = ngx_pcalloc(cf->pool, sizeof(ngx_path_t)); > + if (cache->temp == NULL) { > + return NGX_CONF_ERROR; > + } > + > + cache->temp->name.data = p; > + cache->temp->name.len = len; What about level settings for this temp path? 
> + > } else { > ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, > "invalid use_temp_path value \"%V\", " > - "it must be \"on\" or \"off\"", > + "it must be \"on\", \"off\" or \"tmp\"", > &value[i]); > return NGX_CONF_ERROR; > } > @@ -2291,6 +2312,15 @@ ngx_http_file_cache_set_slot(ngx_conf_t > return NGX_CONF_ERROR; > } > > + if (cache->temp) { > + cache->temp->conf_file = cf->conf_file->file.name.data; > + cache->temp->line = cf->conf_file->line; > + > + if (ngx_add_path(cf, &cache->temp) != NGX_OK) { > + return NGX_CONF_ERROR; > + } > + } > + > cache->shm_zone = ngx_shared_memory_add(cf, &name, size, cmd->post); > if (cache->shm_zone == NULL) { > return NGX_CONF_ERROR; > diff -r e9effef98874 -r e7596cd5b480 src/http/ngx_http_upstream.c > --- a/src/http/ngx_http_upstream.c Fri Dec 26 16:22:59 2014 +0300 > +++ b/src/http/ngx_http_upstream.c Mon Jan 05 19:13:48 2015 -0800 > @@ -2678,10 +2678,15 @@ ngx_http_upstream_send_response(ngx_http > p->temp_file->persistent = 1; > > #if (NGX_HTTP_CACHE) > - if (r->cache && !r->cache->file_cache->use_temp_path) { > - p->temp_file->file.name = r->cache->file.name; > - p->temp_file->path = r->cache->file_cache->path; > - p->temp_file->prefix = 1; > + if (r->cache) { > + if (r->cache->file_cache->use_temp_path == 0) { > + p->temp_file->file.name = r->cache->file.name; > + p->temp_file->path = r->cache->file_cache->path; > + p->temp_file->prefix = 1; > + > + } else if (r->cache->file_cache->use_temp_path == 2) { > + p->temp_file->path = r->cache->file_cache->temp; > + } > } > #endif > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > From piotr at cloudflare.com Tue Jan 6 19:10:21 2015 From: piotr at cloudflare.com (Piotr Sikora) Date: Tue, 6 Jan 2015 11:10:21 -0800 Subject: [PATCH] Upstream: add use_temp_path=tmp to proxy_cache_path and friends In-Reply-To: <6F10FE84-DBBC-4B92-85A3-E7B907093174@nginx.com> References: 
<6F10FE84-DBBC-4B92-85A3-E7B907093174@nginx.com> Message-ID: Hey Roman, > What about level settings for this temp path? The level settings for the cache path work normally, but the "tmp" directory is flat - levels could be easily added to it, but to be honest, we never had any need for them and I didn't want to over-complicate this patch. Best regards, Piotr Sikora From ryan at mediapixel.co.nz Thu Jan 8 00:36:25 2015 From: ryan at mediapixel.co.nz (Ryan Johnston) Date: Thu, 8 Jan 2015 13:36:25 +1300 Subject: infinite keepalive_requests Message-ID: Hi, Can you please provide a value for infinite keepalive_requests. There are instances where the keepalive connection should stay open regardless of how many requests go through it for persistent APIs. Kind regards, Ryan -------------- next part -------------- An HTML attachment was scrubbed... URL: From shawgoff at amazon.com Thu Jan 8 03:06:38 2015 From: shawgoff at amazon.com (Shawn J. Goff) Date: Wed, 7 Jan 2015 19:06:38 -0800 Subject: [PATCH 0 of 1] Upstream: add propagate_connection_close directive In-Reply-To: <20150105034744.GA47350@mdounin.ru> References: <20150105034744.GA47350@mdounin.ru> Message-ID: <54ADF43E.1030308@amazon.com> On 01/04/2015 07:47 PM, Maxim Dounin wrote: > If there is a need to allow dynamic control of keepalive, I think > that proper way would be to extend the "keepalive_disable" > directive with variables support. > Thanks for this suggestion. I started exploring this option today. The keepalive directive is used to set the keepalive field on the request struct in the find_config_phase before we have the content. The Connection header is later added in a filter. If I need to make keepalive_disable have variable support that can use the $http_ variables (or something else that depends on the content), I need to do this after we have the content and before the filter. Should I continue down this path, or am I misunderstanding what you mean here? 
From oschaaf at we-amp.com Thu Jan 8 08:55:19 2015 From: oschaaf at we-amp.com (Otto van der Schaaf) Date: Thu, 8 Jan 2015 09:55:19 +0100 Subject: Persisting the module request context across internal redirects Message-ID: Hi, For ngx_pagespeed, I'm looking for a way to persist its module request context and restore it even after request processing has been restarted for a named location or internal redirect. Keeping a single request context during this process would allow us to avoid repeating some work we already did earlier, like cache lookups. For testing, I've achieved this by storing a pointer in the request headers structure for the request and using that after an internal redirect has been processed instead of just using ngx_http_get_module_ctx. This works, but there are lots of reasons why I'd rather prefer not (ab)using the request headers structure for this. Is there a better way to keep a reference to the first module request context in case of internal redirects? Are there reasons why this could be a bad idea in general? Kind regards, Otto -------------- next part -------------- An HTML attachment was scrubbed... URL: From ian.labbe at gmail.com Thu Jan 8 17:34:39 2015 From: ian.labbe at gmail.com (=?UTF-8?Q?Ian_Labb=C3=A9?=) Date: Thu, 8 Jan 2015 12:34:39 -0500 Subject: ngx_hash_init Message-ID: Hello, Maybe i am not in the right mailing list, please refer me to the good one if i am at the wrong one. I just want to understand the " for (size = start; size <= hinit->max_size; size++) " loop in the ngx_hash_init function. I do not understand what "size", "key" and "test[key]" mean in first place. Thank you for your help. -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From mdounin at mdounin.ru Mon Jan 12 12:36:22 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 12 Jan 2015 15:36:22 +0300 Subject: [PATCH] Add strict Host validation In-Reply-To: References: <20141219163705.GU79300@mdounin.ru> Message-ID: <20150112123622.GF47350@mdounin.ru> Hello! On Mon, Jan 05, 2015 at 02:12:04PM -0800, Piotr Sikora wrote: > Hey Maxim, > > > While I agree that there is no real reason for forbidding some of > > those characters, I think that Host still should be restricted to at > > least printable ASCII characters (minus space and path separators). > > > > I can't think of any reason why would you intentionally allow control > > characters in there. > > Ping... or is it still a "no"? I still think it's a "no". If needed, allowed characters can be easily restricted by a configuration. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Mon Jan 12 15:19:06 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 12 Jan 2015 18:19:06 +0300 Subject: ngx_hash_init In-Reply-To: References: Message-ID: <20150112151906.GL47350@mdounin.ru> Hello! On Thu, Jan 08, 2015 at 12:34:39PM -0500, Ian Labbé wrote: > Hello, > > Maybe i am not in the right mailing list, please refer me to the good one > if i am at the wrong one. > > I just want to understand the " for (size = start; size <= hinit->max_size; > size++) " loop in the ngx_hash_init function. > I do not understand what "size", "key" and "test[key]" mean in first place. The ngx_hash_init() function tries to build a hash by using a varying number of buckets. The "size" variable corresponds to the number of buckets we test at the current loop iteration. For each size we iterate over all hash items to check how buckets will be ("key" is a bucket number for a given hash item, "test[key]" stores how many bytes will be stored in the bucket). 
-- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Mon Jan 12 17:48:48 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 12 Jan 2015 20:48:48 +0300 Subject: [PATCH] Upstream: add use_temp_path=tmp to proxy_cache_path and friends In-Reply-To: References: <6F10FE84-DBBC-4B92-85A3-E7B907093174@nginx.com> Message-ID: <20150112174848.GN47350@mdounin.ru> Hello! On Tue, Jan 06, 2015 at 11:10:21AM -0800, Piotr Sikora wrote: > Hey Roman, > > > What about level settings for this temp path? > > The level settings for the cache path work normally, but the "tmp" > directory is flat - levels could be easily added to it, but to be > honest, we never had any need for them and I didn't want to > over-complicate this patch. The downside of this approach is that with a separate temp directory cache can't be effectively spread over multiple file systems. -- Maxim Dounin http://nginx.org/ From piotr at cloudflare.com Mon Jan 12 23:45:03 2015 From: piotr at cloudflare.com (Piotr Sikora) Date: Mon, 12 Jan 2015 15:45:03 -0800 Subject: [PATCH] Add strict Host validation In-Reply-To: <20150112123622.GF47350@mdounin.ru> References: <20141219163705.GU79300@mdounin.ru> <20150112123622.GF47350@mdounin.ru> Message-ID: Hey Maxim, > I still think it's a "no". If needed, allowed characters can be > easily restricted by a configuration. Just to make a point: $ curl -I nginx.org HTTP/1.1 200 OK Server: nginx/1.7.7 Date: Mon, 12 Jan 2015 23:42:27 GMT Content-Type: text/html; charset=utf-8 Content-Length: 8981 Last-Modified: Tue, 23 Dec 2014 15:38:45 GMT Connection: keep-alive Keep-Alive: timeout=15 ETag: "54998c85-2315" Accept-Ranges: bytes $ curl -I nginx.org -H"Host: /" HTTP/1.1 400 Bad Request Server: nginx/1.7.7 Date: Mon, 12 Jan 2015 23:42:38 GMT Content-Type: text/html Content-Length: 172 Connection: close $ curl -I nginx.org -H"Host: \$" curl: (52) Empty reply from server You cannot possibly tell me that's correct and/or expected behavior? 
And that's not even a control character. Best regards, Piotr Sikora From piotr at cloudflare.com Mon Jan 12 23:47:27 2015 From: piotr at cloudflare.com (Piotr Sikora) Date: Mon, 12 Jan 2015 15:47:27 -0800 Subject: [PATCH] Upstream: add use_temp_path=tmp to proxy_cache_path and friends In-Reply-To: <20150112174848.GN47350@mdounin.ru> References: <6F10FE84-DBBC-4B92-85A3-E7B907093174@nginx.com> <20150112174848.GN47350@mdounin.ru> Message-ID: Hey Maxim, > The downside of this approach is that with a separate temp > directory cache can't be effectively spread over multiple file > systems. I disagree, we've been using exactly this structure to spread cache over multiple file systems (via multiple cache paths, each with its own "tmp") for the last 2 years. Best regards, Piotr Sikora From ru at nginx.com Tue Jan 13 12:39:00 2015 From: ru at nginx.com (Ruslan Ermilov) Date: Tue, 13 Jan 2015 15:39:00 +0300 Subject: [PATCH] Add strict Host validation In-Reply-To: References: <20141219163705.GU79300@mdounin.ru> <20150112123622.GF47350@mdounin.ru> Message-ID: <20150113123900.GB38937@lo0.su> On Mon, Jan 12, 2015 at 03:45:03PM -0800, Piotr Sikora wrote: > Hey Maxim, > > > I still think it's a "no". If needed, allowed characters can be > > easily restricted by a configuration. > > Just to make a point: > > $ curl -I nginx.org > HTTP/1.1 200 OK > Server: nginx/1.7.7 > Date: Mon, 12 Jan 2015 23:42:27 GMT > Content-Type: text/html; charset=utf-8 > Content-Length: 8981 > Last-Modified: Tue, 23 Dec 2014 15:38:45 GMT > Connection: keep-alive > Keep-Alive: timeout=15 > ETag: "54998c85-2315" > Accept-Ranges: bytes > > $ curl -I nginx.org -H"Host: /" > HTTP/1.1 400 Bad Request > Server: nginx/1.7.7 > Date: Mon, 12 Jan 2015 23:42:38 GMT > Content-Type: text/html > Content-Length: 172 > Connection: close > > $ curl -I nginx.org -H"Host: \$" > curl: (52) Empty reply from server > > You cannot possibly tell me that's correct and/or expected behavior? > And that's not even a control character. 
That's because this site is configured to reject unknown server names, like here: http://nginx.org/en/docs/http/server_names.html#miscellaneous_names : In catch-all server examples the strange name "_" can be seen: : : server { : listen 80 default_server; : server_name _; : return 444; : } From sb at nginx.com Tue Jan 13 12:43:33 2015 From: sb at nginx.com (Sergey Budnevitch) Date: Tue, 13 Jan 2015 15:43:33 +0300 Subject: [PATCH] Add strict Host validation In-Reply-To: References: <20141219163705.GU79300@mdounin.ru> <20150112123622.GF47350@mdounin.ru> Message-ID: <69EAF756-20B2-46DA-AAE9-131647982A2E@nginx.com> > On 13 Jan 2015, at 02:45, Piotr Sikora wrote: > > $ curl -I nginx.org -H"Host: \$" > curl: (52) Empty reply from server > > You cannot possibly tell me that's correct and/or expected behavior? > And that's not even a control character. Yes, that is expected behaviour, from nginx.conf: server { listen 80 default_server; return 444; } From mdounin at mdounin.ru Tue Jan 13 13:00:08 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 13 Jan 2015 16:00:08 +0300 Subject: [PATCH] Upstream: add use_temp_path=tmp to proxy_cache_path and friends In-Reply-To: References: <6F10FE84-DBBC-4B92-85A3-E7B907093174@nginx.com> <20150112174848.GN47350@mdounin.ru> Message-ID: <20150113130008.GB79857@mdounin.ru> Hello! On Mon, Jan 12, 2015 at 03:47:27PM -0800, Piotr Sikora wrote: > Hey Maxim, > > > The downside of this approach is that with a separate temp > > directory cache can't be effectively spread over multiple file > > systems. > > I disagree, we've been using exactly this structure to spread cache > over multiple file systems (via multiple cache paths, each with its > own "tmp") for the last 2 years. The idea was to allow a _single_ cache to be on multiple file systems (e.g., via symlinking individual directories from different disks). 
On the other hand, may be your approach is simplier and worth considering (especially given the fact that in most cases separate cache path will be required anyway, to maintain max_size, and we have variables support now to facilitate this). Valentin is currently working on fixing the code, and I asked him to consider your approach as well. -- Maxim Dounin http://nginx.org/ From gmm at csdoc.com Tue Jan 13 14:13:11 2015 From: gmm at csdoc.com (Gena Makhomed) Date: Tue, 13 Jan 2015 16:13:11 +0200 Subject: [PATCH] Add strict Host validation In-Reply-To: <20150113123900.GB38937@lo0.su> References: <20141219163705.GU79300@mdounin.ru> <20150112123622.GF47350@mdounin.ru> <20150113123900.GB38937@lo0.su> Message-ID: <54B527F7.40803@csdoc.com> On 13.01.2015 14:39, Ruslan Ermilov wrote: >> $ curl -I nginx.org -H"Host: \$" >> curl: (52) Empty reply from server >> >> You cannot possibly tell me that's correct and/or expected behavior? >> And that's not even a control character. > > That's because this site is configured to reject unknown server > names, like here: > > http://nginx.org/en/docs/http/server_names.html#miscellaneous_names > > : In catch-all server examples the strange name "_" can be seen: > : > : server { > : listen 80 default_server; > : server_name _; > : return 444; > : } As it described in http://tools.ietf.org/html/rfc7230#section-5.4 nginx in this case MUST respond with a 400 (Bad Request) status code to be compliant with HTTP/1.1 spec: : A server MUST respond with a 400 (Bad Request) status code to any : HTTP/1.1 request message that lacks a Host header field and to any : request message that contains more than one Host header field or a : Host header field with an invalid field-value. May be it will be better to fix documentation examples to make these examples compatible with HTTP/1.1 RFCs ? 
-- Best regards, Gena From mdounin at mdounin.ru Tue Jan 13 15:11:35 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 13 Jan 2015 18:11:35 +0300 Subject: [PATCH 0 of 1] Upstream: add propagate_connection_close directive In-Reply-To: <54AB3EB0.8040500@amazon.com> References: <20150105034744.GA47350@mdounin.ru> <54AB3EB0.8040500@amazon.com> Message-ID: <20150113151135.GH79857@mdounin.ru> Hello! On Mon, Jan 05, 2015 at 05:47:28PM -0800, Shawn J. Goff wrote: [...] > >>I can submit a documentation patch if this patch is accepted. > >The approach taken in the patch looks wrong, for multiple reasons, > >in particular: > > > >- The upstream module isn't expected to contain it's own > > directives, expect ones used to define upstream{} blocks. > > Instead, there should be directives in modules implementing > > protocols, like "proxy_foo_bar...". > > I had considered putting it in upstream, but thought the having it in > location{} would give more flexibility. I'd be fine putting it in upstream{} > instead. > > As far as putting it in a proxy_foo_bar module, I took a look through the > modules here: http://nginx.org/en/docs/http/ngx_http_proxy_module.html . The > only one I see that might be appropriate is proxy_pass; are there any others > you were referring to? > > I chose to put this in the upstream module because that is what strips out > the Connection header and sets the connection_close field in the headers_in > struct that is specific to the upstream module. There are multiple modules in nginx that implement various protocols on top of the upstream module: proxy, fastcgi, scgi, uwsgi, memcached. Depending on the protocol, an option may or may not have sense. For example, there are "proxy_ignore_headers" and "fastcgi_ignore_headers" directives, but no "memcached_ignore_headers". > >- The "Connection: close" header is a hop-by-hop http header, and > > "propogating" it looks like a bad idea. 
It mixes control of the > > nginx-to-backend connection with control of the client-to-nginx > > connection. Instead, there should be a way to control these > > connections separately. It may be an option to add X-Accel-... > > header instead, similart to X-Accel-Limit-Rate. Though this > > approach has it's own problems too, see below. > > It is hop-by-hop, but we're not really wanting Nginx as a separate hop; that > is just a byproduct. Nginx on the same host as the upstream server; it's > just there to take care of TLS for us. Sure, in your particular case. But the behaviour you suggests doesn't solve the problem you are trying to solve for ones who do want nginx as a separate hop - and want, e.g., to maintain persistent connections between nginx and backend, while being able to selectively close connections with clients. Or, vice versa, want to be able to don't maintain persistent connections between nginx and a backend, while being able to maintain connections with clients and at the same time being able to close them. > >- It is not possible to control connections that aren't proxied > > to backends but are handled locally - e.g., when using embedded > > perl or even just serving static files. > > > >If there is a need to allow dynamic control of keepalive, I think > >that proper way would be to extend the "keepalive_disable" > >directive with variables support. > > > > How would this work? Should I set a variable depending on whether some > X-Accel- header is present, then set keepalive_disable per request depending > on that variable? All headers returned by the upstream server are available as $upstream_http_* variables. So it should be possible to do something like this: keepalive_disable $upstream_http_x_connection_close; That is, disable keepalive if the "X-Connection-Close" header is present in the response. 
Or it should be possible to test the "Connection" header returned by the upstream server, like this: map $upstream_http_connection $close { default 0; ~close 1; } keepalive_disable $close; Hope this explains the idea. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Tue Jan 13 15:18:36 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 13 Jan 2015 18:18:36 +0300 Subject: [PATCH 0 of 1] Upstream: add propagate_connection_close directive In-Reply-To: <54ADF43E.1030308@amazon.com> References: <20150105034744.GA47350@mdounin.ru> <54ADF43E.1030308@amazon.com> Message-ID: <20150113151836.GI79857@mdounin.ru> Hello! On Wed, Jan 07, 2015 at 07:06:38PM -0800, Shawn J. Goff wrote: > On 01/04/2015 07:47 PM, Maxim Dounin wrote: > >If there is a need to allow dynamic control of keepalive, I think > >that proper way would be to extend the "keepalive_disable" > >directive with variables support. > > > > Thanks for this suggestion. I started exploring this option today. The > keepalive directive is used to set the keepalive field on the request struct > in the find_config_phase before we have the content. The Connection header > is later added in a filter. If I need to make keepalive_disable have > variable support that can use the $http_ variables (or something else that > depends on the content), I need to do this after we have the content and > before the filter. > > Should I continue down this path, or am I misunderstanding what you mean > here? Yes, this approach will require testing of variables specified in the keepalive_disable at some point later, probably in the header filter of the header filter chain. -- Maxim Dounin http://nginx.org/ From mdounin at mdounin.ru Tue Jan 13 16:49:11 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 13 Jan 2015 16:49:11 +0000 Subject: [nginx] Fixed sendfile() trailers on OS X (8e903522c17a, 1.7.8). 
Message-ID: details: http://hg.nginx.org/nginx/rev/7554c83287dc branches: changeset: 5961:7554c83287dc user: Maxim Dounin date: Tue Jan 13 18:58:23 2015 +0300 description: Fixed sendfile() trailers on OS X (8e903522c17a, 1.7.8). The trailer.count variable was not initialized if there was a header, resulting in "sendfile() failed (22: Invalid argument)" alerts on OS X if the "sendfile" directive was used. The bug was introduced in 8e903522c17a (1.7.8). diffstat: src/os/unix/ngx_darwin_sendfile_chain.c | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diffs (13 lines): diff --git a/src/os/unix/ngx_darwin_sendfile_chain.c b/src/os/unix/ngx_darwin_sendfile_chain.c --- a/src/os/unix/ngx_darwin_sendfile_chain.c +++ b/src/os/unix/ngx_darwin_sendfile_chain.c @@ -111,6 +111,9 @@ ngx_darwin_sendfile_chain(ngx_connection } send += trailer.size; + + } else { + trailer.count = 0; } /* From mdounin at mdounin.ru Tue Jan 13 16:54:56 2015 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 13 Jan 2015 16:54:56 +0000 Subject: [nginx] Core: added disk_full_time checks to error log. Message-ID: details: http://hg.nginx.org/nginx/rev/727177743c3c branches: changeset: 5962:727177743c3c user: Maxim Dounin date: Tue Jan 13 19:51:37 2015 +0300 description: Core: added disk_full_time checks to error log. 
diffstat: src/core/ngx_log.c | 25 +++++++++++++++++++++---- src/core/ngx_log.h | 2 ++ 2 files changed, 23 insertions(+), 4 deletions(-) diffs (62 lines): diff --git a/src/core/ngx_log.c b/src/core/ngx_log.c --- a/src/core/ngx_log.c +++ b/src/core/ngx_log.c @@ -91,8 +91,9 @@ ngx_log_error_core(ngx_uint_t level, ngx va_list args; #endif u_char *p, *last, *msg; + ssize_t n; + ngx_uint_t wrote_stderr, debug_connection; u_char errstr[NGX_MAX_ERROR_STR]; - ngx_uint_t wrote_stderr, debug_connection; last = errstr + NGX_MAX_ERROR_STR; @@ -150,16 +151,32 @@ ngx_log_error_core(ngx_uint_t level, ngx if (log->writer) { log->writer(log, level, errstr, p - errstr); - log = log->next; - continue; + goto next; } - (void) ngx_write_fd(log->file->fd, errstr, p - errstr); + if (ngx_time() == log->disk_full_time) { + + /* + * on FreeBSD writing to a full filesystem with enabled softupdates + * may block process for much longer time than writing to non-full + * filesystem, so we skip writing to a log for one second + */ + + goto next; + } + + n = ngx_write_fd(log->file->fd, errstr, p - errstr); + + if (n == -1 && ngx_errno == NGX_ENOSPC) { + log->disk_full_time = ngx_time(); + } if (log->file->fd == ngx_stderr) { wrote_stderr = 1; } + next: + log = log->next; } diff --git a/src/core/ngx_log.h b/src/core/ngx_log.h --- a/src/core/ngx_log.h +++ b/src/core/ngx_log.h @@ -53,6 +53,8 @@ struct ngx_log_s { ngx_atomic_uint_t connection; + time_t disk_full_time; + ngx_log_handler_pt handler; void *data; From vbart at nginx.com Tue Jan 13 17:16:11 2015 From: vbart at nginx.com (Valentin Bartenev) Date: Tue, 13 Jan 2015 17:16:11 +0000 Subject: [nginx] Year 2015. Message-ID: details: http://hg.nginx.org/nginx/rev/e0920ea61632 branches: changeset: 5963:e0920ea61632 user: Valentin Bartenev date: Tue Jan 13 20:13:16 2015 +0300 description: Year 2015. 
diffstat: docs/text/LICENSE | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (12 lines): diff -r 727177743c3c -r e0920ea61632 docs/text/LICENSE --- a/docs/text/LICENSE Tue Jan 13 19:51:37 2015 +0300 +++ b/docs/text/LICENSE Tue Jan 13 20:13:16 2015 +0300 @@ -1,6 +1,6 @@ /* - * Copyright (C) 2002-2014 Igor Sysoev - * Copyright (C) 2011-2014 Nginx, Inc. + * Copyright (C) 2002-2015 Igor Sysoev + * Copyright (C) 2011-2015 Nginx, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without From gyb997 at gmail.com Thu Jan 15 09:46:38 2015 From: gyb997 at gmail.com (=?UTF-8?B?5oiR5LiN54ix5ZCD6aaE6aWo6Z2i?=) Date: Thu, 15 Jan 2015 17:46:38 +0800 Subject: hello ,everyone I have some path to nginx proxy cache subsystem Message-ID: i have a requirement of the proxy cache it's about the path stored in disk because i must do the purge aobut the url path, like that. http://hostname/a/b/c if some chage happened in b,all the subdir of b in cache must be delete. and i change the cache filepath save pattern. in the past save like /path/to/cahce/*/****** (* is the hexdump of key md5) and now /path/to/cache/a/b/c/*/***** for purge b ,you can rm all the subdir of b. And the request is bypass for the new cache. DO this patch is useful for nginx? AND is there some testcases for it,or is some patch standard for nginx ,so I can submit it? -------------- next part -------------- An HTML attachment was scrubbed... URL: From vl at nginx.com Thu Jan 15 13:52:27 2015 From: vl at nginx.com (Homutov Vladimir) Date: Thu, 15 Jan 2015 13:52:27 +0000 Subject: [nginx] Upstream: $upstream_header_time variable. Message-ID: details: http://hg.nginx.org/nginx/rev/0a198a517eaf branches: changeset: 5964:0a198a517eaf user: Vladimir Homutov date: Wed Jan 14 09:03:35 2015 +0300 description: Upstream: $upstream_header_time variable. Keeps time spent on obtaining the header from an upstream server. 
The value is formatted similar to the $upstream_response_time variable. diffstat: src/http/ngx_http_upstream.c | 24 ++++++++++++++++++++++-- src/http/ngx_http_upstream.h | 2 ++ 2 files changed, 24 insertions(+), 2 deletions(-) diffs (74 lines): diff -r e0920ea61632 -r 0a198a517eaf src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c Tue Jan 13 20:13:16 2015 +0300 +++ b/src/http/ngx_http_upstream.c Wed Jan 14 09:03:35 2015 +0300 @@ -359,6 +359,10 @@ static ngx_http_variable_t ngx_http_ups ngx_http_upstream_status_variable, 0, NGX_HTTP_VAR_NOCACHEABLE, 0 }, + { ngx_string("upstream_header_time"), NULL, + ngx_http_upstream_response_time_variable, 1, + NGX_HTTP_VAR_NOCACHEABLE, 0 }, + { ngx_string("upstream_response_time"), NULL, ngx_http_upstream_response_time_variable, 0, NGX_HTTP_VAR_NOCACHEABLE, 0 }, @@ -1315,6 +1319,7 @@ ngx_http_upstream_connect(ngx_http_reque tp = ngx_timeofday(); u->state->response_sec = tp->sec; u->state->response_msec = tp->msec; + u->state->header_sec = (time_t) NGX_ERROR; rc = ngx_event_connect_peer(&u->peer); @@ -1836,6 +1841,7 @@ ngx_http_upstream_process_header(ngx_htt { ssize_t n; ngx_int_t rc; + ngx_time_t *tp; ngx_connection_t *c; c = u->peer.connection; @@ -1956,6 +1962,10 @@ ngx_http_upstream_process_header(ngx_htt /* rc == NGX_OK */ + tp = ngx_timeofday(); + u->state->header_sec = tp->sec - u->state->response_sec; + u->state->header_msec = tp->msec - u->state->response_msec; + if (u->headers_in.status_n >= NGX_HTTP_SPECIAL_RESPONSE) { if (ngx_http_upstream_test_next(r, u) == NGX_OK) { @@ -4822,8 +4832,18 @@ ngx_http_upstream_response_time_variable for ( ;; ) { if (state[i].status) { - ms = (ngx_msec_int_t) - (state[i].response_sec * 1000 + state[i].response_msec); + + if (data + && state[i].header_sec != (time_t) NGX_ERROR) + { + ms = (ngx_msec_int_t) + (state[i].header_sec * 1000 + state[i].header_msec); + + } else { + ms = (ngx_msec_int_t) + (state[i].response_sec * 1000 + state[i].response_msec); + } + ms = 
ngx_max(ms, 0); p = ngx_sprintf(p, "%T.%03M", (time_t) ms / 1000, ms % 1000); diff -r e0920ea61632 -r 0a198a517eaf src/http/ngx_http_upstream.h --- a/src/http/ngx_http_upstream.h Tue Jan 13 20:13:16 2015 +0300 +++ b/src/http/ngx_http_upstream.h Wed Jan 14 09:03:35 2015 +0300 @@ -60,6 +60,8 @@ typedef struct { ngx_uint_t status; time_t response_sec; ngx_uint_t response_msec; + time_t header_sec; + ngx_uint_t header_msec; off_t response_length; ngx_str_t *peer; From richard at fussenegger.info Thu Jan 15 15:21:34 2015 From: richard at fussenegger.info (Richard Fussenegger) Date: Thu, 15 Jan 2015 16:21:34 +0100 Subject: SPDY add_header with Alternate-Protocol Message-ID: <54B7DAFE.2030902@fussenegger.info> I'm often seeing the advice to add the following line to your SPDY configuration: add_header Alternate-Protocol 443:npn-spdy/3; Is this actually necessary? I mean, my Firefox is connecting via SPDY to my nginx and I don't have this in my configuration. For example seen at: https://github.com/h5bp/server-configs-nginx/blob/master/h5bp/directive-only/spdy.conf Best Richard -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 4237 bytes Desc: S/MIME Cryptographic Signature URL: From pluknet at nginx.com Thu Jan 15 16:47:35 2015 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 15 Jan 2015 19:47:35 +0300 Subject: SPDY add_header with Alternate-Protocol In-Reply-To: <54B7DAFE.2030902@fussenegger.info> References: <54B7DAFE.2030902@fussenegger.info> Message-ID: <5B8B0F3C-DB58-4889-AEFA-0A15B78BB316@nginx.com> On Jan 15, 2015, at 6:21 PM, Richard Fussenegger wrote: > I'm often seeing the advice to add the following line to your SPDY configuration: > > add_header Alternate-Protocol 443:npn-spdy/3; > > Is this actually necessary? I mean, my Firefox is connecting via SPDY to my nginx and I don't have this in my configuration. 
The Alternate-Protocol header is used for advertising such capability. It is mentioned in SPDY/2 draft, and later removed. draft-ietf-httpbis-alt-svc was influenced by its design for the Alt-Svc header. -- Sergey Kandaurov From richard at fussenegger.info Thu Jan 15 17:11:46 2015 From: richard at fussenegger.info (Richard Fussenegger) Date: Thu, 15 Jan 2015 18:11:46 +0100 Subject: SPDY add_header with Alternate-Protocol In-Reply-To: <5B8B0F3C-DB58-4889-AEFA-0A15B78BB316@nginx.com> References: <54B7DAFE.2030902@fussenegger.info> <5B8B0F3C-DB58-4889-AEFA-0A15B78BB316@nginx.com> Message-ID: <54B7F4D2.8040500@fussenegger.info> But isn't nginx advertising them without manual adding of such headers? I mean, why configure SPDY on the listen directive when it isn't going to be used by clients (which is not the case, all browsers happily connect via SPDY). I fully understand that I could run an HTTP/2 server listening on a different port and configure nginx to advertise this. add_header Alt-Svc h2=":666"; # I love id software's reserved port. Richard On 1/15/2015 5:47 PM, Sergey Kandaurov wrote: > On Jan 15, 2015, at 6:21 PM, Richard Fussenegger wrote: >> I'm often seeing the advice to add the following line to your SPDY configuration: >> >> add_header Alternate-Protocol 443:npn-spdy/3; >> >> Is this actually necessary? I mean, my Firefox is connecting via SPDY to my nginx and I don't have this in my configuration. > The Alternate-Protocol header is used for advertising such capability. > It is mentioned in SPDY/2 draft, and later removed. > > draft-ietf-httpbis-alt-svc was influenced by its design for the Alt-Svc header. > -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 4237 bytes Desc: S/MIME Cryptographic Signature URL: From vbart at nginx.com Thu Jan 15 18:35:47 2015 From: vbart at nginx.com (Valentin V. 
Bartenev) Date: Thu, 15 Jan 2015 21:35:47 +0300 Subject: SPDY add_header with Alternate-Protocol In-Reply-To: <54B7F4D2.8040500@fussenegger.info> References: <54B7DAFE.2030902@fussenegger.info> <5B8B0F3C-DB58-4889-AEFA-0A15B78BB316@nginx.com> <54B7F4D2.8040500@fussenegger.info> Message-ID: <3331483.zFKrcoyQJe@vbart-workstation> On Thursday 15 January 2015 18:11:46 Richard Fussenegger wrote: > But isn't nginx advertising them without manual adding of such headers? > I mean, why configure SPDY on the listen directive when it isn't going > to be used by clients (which is not the case, all browsers happily > connect via SPDY). [..] They use SPDY because it's advertised during TLS handshake using NPN/ALPN TLS extensions. The "Alternate-Protocol" header was introduced for cases when no other mechanisms available (e.g. for plain HTTP connections). wbr, Valentin V. Bartenev From richard at fussenegger.info Thu Jan 15 18:38:42 2015 From: richard at fussenegger.info (Richard Fussenegger) Date: Thu, 15 Jan 2015 19:38:42 +0100 Subject: SPDY add_header with Alternate-Protocol In-Reply-To: <3331483.zFKrcoyQJe@vbart-workstation> References: <54B7DAFE.2030902@fussenegger.info> <5B8B0F3C-DB58-4889-AEFA-0A15B78BB316@nginx.com> <54B7F4D2.8040500@fussenegger.info> <3331483.zFKrcoyQJe@vbart-workstation> Message-ID: <54B80932.5050008@fussenegger.info> Thanks, this answers my actual question. In this case I never need it, since I only serve encrypted traffic to anyone. Richard On 1/15/2015 7:35 PM, Valentin V. Bartenev wrote: > On Thursday 15 January 2015 18:11:46 Richard Fussenegger wrote: >> But isn't nginx advertising them without manual adding of such headers? >> I mean, why configure SPDY on the listen directive when it isn't going >> to be used by clients (which is not the case, all browsers happily >> connect via SPDY). > [..] > > They use SPDY because it's advertised during TLS handshake using > NPN/ALPN TLS extensions. 
> > The "Alternate-Protocol" header was introduced for cases when no > other mechanisms available (e.g. for plain HTTP connections). > > wbr, Valentin V. Bartenev > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 4237 bytes Desc: S/MIME Cryptographic Signature URL: From richard at fussenegger.info Fri Jan 16 13:03:43 2015 From: richard at fussenegger.info (Richard Fussenegger) Date: Fri, 16 Jan 2015 14:03:43 +0100 Subject: Shared TLS session cache FIFO? In-Reply-To: <3331483.zFKrcoyQJe@vbart-workstation> References: <54B7DAFE.2030902@fussenegger.info> <5B8B0F3C-DB58-4889-AEFA-0A15B78BB316@nginx.com> <54B7F4D2.8040500@fussenegger.info> <3331483.zFKrcoyQJe@vbart-workstation> Message-ID: <54B90C2F.5010908@fussenegger.info> How does the shared session cache of nginx work, does it use FIFO? This is especially interesting with long lived session entries, e.g. 12 hours. Richard -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 4237 bytes Desc: S/MIME Cryptographic Signature URL: From dakota at brokenpipe.ru Sun Jan 18 01:10:13 2015 From: dakota at brokenpipe.ru (Marat Dakota) Date: Sun, 18 Jan 2015 05:10:13 +0400 Subject: Event loop plus another event loop Message-ID: Hi, Please correct me if I'm wrong, but as for my understanding nginx works like this: // Busy loop while (true) { `process events and call callbacks`(); } I'm writing a module for a piece of software which works the same way. So, what I need is a mechanism to call both handlers in a busy loop: while (true) { `process events and call callbacks`(); `process my piece of software events and call callbacks`(); } Is this possible? Thanks. 
-- Marat -------------- next part -------------- An HTML attachment was scrubbed... URL: From poczta at krzysztofgrzadziel.pl Sun Jan 18 16:25:11 2015 From: poczta at krzysztofgrzadziel.pl (Krzysztof Grzadziel) Date: Sun, 18 Jan 2015 17:25:11 +0100 Subject: [PATCH] Upstream: path_access_rights and file_access_rights of proxy_cache_path and friends Message-ID: # HG changeset patch # User Krzysztof Grzadziel # Date 1421595254 -3600 # Sun Jan 18 16:34:14 2015 +0100 # Node ID d43b6b438baf90b38bd6b95986527d40fc9a9d41 # Parent 0a198a517eaf48baad03a76b182698c50496d380 Upstream: path_access_rights and file_access_rights of proxy_cache_path and friends. Add two optional parameters to proxy_cache_path and friends. path_access_rights for set chmod for directories under cache path. file_access_rights for set chmod for files under cache path. diff -r 0a198a517eaf -r d43b6b438baf src/core/ngx_string.c --- a/src/core/ngx_string.c Wed Jan 14 09:03:35 2015 +0300 +++ b/src/core/ngx_string.c Sun Jan 18 16:34:14 2015 +0100 @@ -1085,6 +1085,33 @@ } +/* parse octal number string representation to integer */ +ngx_int_t +ngx_octtoi(u_char *line, size_t n) +{ + ngx_int_t value; + + if (n == 0) { + return NGX_ERROR; + } + + for (value = 0; n--; line++) { + if (*line < '0' || *line > '7') { + return NGX_ERROR; + } + + value = value * 8 + (*line - '0'); + } + + if (value < 0) { + return NGX_ERROR; + + } else { + return value; + } +} + + u_char * ngx_hex_dump(u_char *dst, u_char *src, size_t len) { diff -r 0a198a517eaf -r d43b6b438baf src/core/ngx_string.h --- a/src/core/ngx_string.h Wed Jan 14 09:03:35 2015 +0300 +++ b/src/core/ngx_string.h Sun Jan 18 16:34:14 2015 +0100 @@ -175,6 +175,7 @@ off_t ngx_atoof(u_char *line, size_t n); time_t ngx_atotm(u_char *line, size_t n); ngx_int_t ngx_hextoi(u_char *line, size_t n); +ngx_int_t ngx_octtoi(u_char *line, size_t n); u_char *ngx_hex_dump(u_char *dst, u_char *src, size_t len); diff -r 0a198a517eaf -r d43b6b438baf src/http/ngx_http_cache.h --- 
a/src/http/ngx_http_cache.h Wed Jan 14 09:03:35 2015 +0300 +++ b/src/http/ngx_http_cache.h Sun Jan 18 16:34:14 2015 +0100 @@ -154,6 +154,9 @@ ngx_msec_t loader_sleep; ngx_msec_t loader_threshold; + ngx_int_t file_access_rights; + ngx_int_t path_access_rights; + ngx_shm_zone_t *shm_zone; ngx_uint_t use_temp_path; diff -r 0a198a517eaf -r d43b6b438baf src/http/ngx_http_file_cache.c --- a/src/http/ngx_http_file_cache.c Wed Jan 14 09:03:35 2015 +0300 +++ b/src/http/ngx_http_file_cache.c Sun Jan 18 16:34:14 2015 +0100 @@ -1268,8 +1268,8 @@ "http file cache rename: \"%s\" to \"%s\"", tf->file.name.data, c->file.name.data); - ext.access = NGX_FILE_OWNER_ACCESS; - ext.path_access = NGX_FILE_OWNER_ACCESS; + ext.access = cache->file_access_rights; + ext.path_access = cache->path_access_rights; ext.time = -1; ext.create_path = 1; ext.delete_file = 1; @@ -2075,6 +2075,7 @@ ngx_int_t loader_files; ngx_msec_t loader_sleep, loader_threshold; ngx_uint_t i, n, use_temp_path; + ngx_int_t file_access_rights, path_access_rights; ngx_array_t *caches; ngx_http_file_cache_t *cache, **ce; @@ -2094,6 +2095,8 @@ loader_files = 100; loader_sleep = 50; loader_threshold = 200; + file_access_rights = NGX_FILE_OWNER_ACCESS; + path_access_rights = NGX_FILE_OWNER_ACCESS; name.len = 0; size = 0; @@ -2236,6 +2239,30 @@ continue; } + if (ngx_strncmp(value[i].data, "file_access_rights=", 19) == 0) { + + file_access_rights = ngx_octtoi(value[i].data + 19, value[i].len - 19); + if (file_access_rights == NGX_ERROR) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid file_access_rights value \"%V\"", &value[i]); + return NGX_CONF_ERROR; + } + + continue; + } + + if (ngx_strncmp(value[i].data, "path_access_rights=", 19) == 0) { + + path_access_rights = ngx_octtoi(value[i].data + 19, value[i].len - 19); + if (path_access_rights == NGX_ERROR) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid path_access_rights value \"%V\"", &value[i]); + return NGX_CONF_ERROR; + } + + continue; + } + if 
(ngx_strncmp(value[i].data, "loader_sleep=", 13) == 0) { s.len = value[i].len - 13; @@ -2286,6 +2313,8 @@ cache->loader_files = loader_files; cache->loader_sleep = loader_sleep; cache->loader_threshold = loader_threshold; + cache->file_access_rights = file_access_rights; + cache->path_access_rights = path_access_rights; if (ngx_add_path(cf, &cache->path) != NGX_OK) { return NGX_CONF_ERROR; From ian.labbe at gmail.com Mon Jan 19 14:38:53 2015 From: ian.labbe at gmail.com (=?UTF-8?Q?Ian_Labb=C3=A9?=) Date: Mon, 19 Jan 2015 09:38:53 -0500 Subject: ngx_hash_init In-Reply-To: <20150112151906.GL47350@mdounin.ru> References: <20150112151906.GL47350@mdounin.ru> Message-ID: thank you very much 2015-01-12 10:19 GMT-05:00 Maxim Dounin : > Hello! > > On Thu, Jan 08, 2015 at 12:34:39PM -0500, Ian Labb? wrote: > > > Hello, > > > > Maybe i am not in the right mailing list, please refer me to the good one > > if i am at the wrong one. > > > > I just want to understand the " for (size = start; size <= > hinit->max_size; > > size++) " loop in the ngx_hash_init function. > > I do not understand what "size", "key" and "test[key]" mean in first > place. > > The ngx_hash_init() functions tries to build a hash by using a > varying number of buckets. The "size" variable corresponds to the > number of buckets we test at the current loop iteration. For each > size we iterate over all hash items to check how buckets will be > ("key" is a bucket number for a given hash item, "test[key]" > stores how many bytes will be stored in the bucket). > > -- > Maxim Dounin > http://nginx.org/ > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Ian Labb? Chemin des Quatre-Bourgeois 418-529-0210 G1W 2L1 -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From damien at commerceguys.com Tue Jan 20 23:54:40 2015 From: damien at commerceguys.com (Damien Tournoud) Date: Wed, 21 Jan 2015 00:54:40 +0100 Subject: [PATCH] http_core: Do not match a file for a directory in try_files Message-ID: # HG changeset patch # User Damien Tournoud # Date 1421796392 -3600 # Wed Jan 21 00:26:32 2015 +0100 # Node ID c8f2fbe53f5df811dcaada94d3eca6c34070c610 # Parent 0a198a517eaf48baad03a76b182698c50496d380 http_core: Do not match a file for a directory in try_files. A try_files directive with a file match (i.e. something not ending with a "/") does not match a directory of the same name. But a try_files directive with a directory match like this: try_files $uri/ =404; ... does currently match a *file* of the same name. This doesn't break any test, so I assume this is not the expected behavior. I have a separate changeset to extend the test coverage. This makes it impossible to target generic locations for index-expansion only like this "allow only html files, but perform index expansion": location / { try_files $uri/ =404; index index.html; } location ~ "\.html$" { } diff -r 0a198a517eaf -r c8f2fbe53f5d src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c Wed Jan 14 09:03:35 2015 +0300 +++ b/src/http/ngx_http_core_module.c Wed Jan 21 00:26:32 2015 +0100 @@ -1353,7 +1353,7 @@ continue; } - if (of.is_dir && !test_dir) { + if (of.is_dir != test_dir) { continue; } From damien at commerceguys.com Tue Jan 20 23:57:53 2015 From: damien at commerceguys.com (Damien Tournoud) Date: Wed, 21 Jan 2015 00:57:53 +0100 Subject: [PATCH] http_try_files.t: extend test coverage Message-ID: # HG changeset patch # User Damien Tournoud # Date 1421798229 -3600 # Wed Jan 21 00:57:09 2015 +0100 # Node ID b99eb5bef46a7b7d9e7bcd32967041d287c80fdb # Parent 4bcf8bc2bafe1515154e60710ea6ae674080b26c http_try_files.t: extend test coverage This extends the test coverage for all the cases of files or directories matched against each other. 
The test case "file doesn't match dir" currently fails, fixed in http://mailman.nginx.org/pipermail/nginx-devel/2015-January/006468.html diff -r 4bcf8bc2bafe -r b99eb5bef46a http_try_files.t --- a/http_try_files.t Thu Jan 15 18:23:16 2015 +0300 +++ b/http_try_files.t Wed Jan 21 00:57:09 2015 +0100 @@ -21,7 +21,7 @@ select STDERR; $| = 1; select STDOUT; $| = 1; -my $t = Test::Nginx->new()->has(qw/http proxy rewrite/)->plan(4) +my $t = Test::Nginx->new()->has(qw/http proxy rewrite/)->plan(8) ->write_file_expand('nginx.conf', <<'EOF'); %%TEST_GLOBALS%% @@ -50,6 +50,22 @@ try_files /short $uri =404; } + location /file-file/ { + try_files /found.html =404; + } + + location /file-dir/ { + try_files /found.html/ =404; + } + + location /dir-dir/ { + try_files /directory/ =404; + } + + location /dir-file/ { + try_files /directory =404; + } + location /fallback { proxy_pass http://127.0.0.1:8081/fallback; } @@ -72,6 +88,7 @@ EOF $t->write_file('found.html', 'SEE THIS'); +mkdir $t->{_testdir} . '/directory'; $t->run(); ############################################################################### @@ -80,5 +97,9 @@ like(http_get('/uri/notfound'), qr!X-URI: /fallback!, 'not found uri'); like(http_get('/nouri/notfound'), qr!X-URI: /fallback!, 'not found nouri'); like(http_get('/short/long'), qr!404 Not!, 'short uri in try_files'); +like(http_get('/file-file/'), qr!SEE THIS!, 'file matches file'); +like(http_get('/file-dir/'), qr!404 Not!, 'file doesn\'t match dir'); +like(http_get('/dir-dir/'), qr!301 Moved Permanently!, 'dir matches dir'); +like(http_get('/dir-file/'), qr!404 Not!, 'dir doesn\'t match file'); ############################################################################### From pluknet at nginx.com Wed Jan 21 16:21:32 2015 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 21 Jan 2015 19:21:32 +0300 Subject: [PATCH] http_core: Do not match a file for a directory in try_files In-Reply-To: References: Message-ID: 
<1B0532BF-5827-41FF-A1DE-96455E1DFD69@nginx.com> On Jan 21, 2015, at 2:54 AM, Damien Tournoud wrote: > # HG changeset patch > # User Damien Tournoud > # Date 1421796392 -3600 > # Wed Jan 21 00:26:32 2015 +0100 > # Node ID c8f2fbe53f5df811dcaada94d3eca6c34070c610 > # Parent 0a198a517eaf48baad03a76b182698c50496d380 > http_core: Do not match a file for a directory in try_files. > > A try_files directive with a file match (i.e. something > not ending with a "/") does not match a directory of > the same name. > > But a try_files directive with a directory match like this: > > try_files $uri/ =404; > > ... does currently match a *file* of the same name. > Indeed, it is odd to lookup a regular file with a trailing slash, since this is generally only allowed for directories. > > diff -r 0a198a517eaf -r c8f2fbe53f5d src/http/ngx_http_core_module.c > --- a/src/http/ngx_http_core_module.c Wed Jan 14 09:03:35 2015 +0300 > +++ b/src/http/ngx_http_core_module.c Wed Jan 21 00:26:32 2015 +0100 > @@ -1353,7 +1353,7 @@ > continue; > } > > - if (of.is_dir && !test_dir) { > + if (of.is_dir != test_dir) { > continue; > } > The patch looks good to me. -- Sergey Kandaurov From grantksupport at operamail.com Wed Jan 21 18:20:47 2015 From: grantksupport at operamail.com (grantksupport at operamail.com) Date: Wed, 21 Jan 2015 10:20:47 -0800 Subject: dev plans for pcre2 and sha>256? Message-ID: <1421864447.3917230.216996693.156324FB@webmail.messagingengine.com> (1) current nginx ver 1.7.9 can be easily linked against a local libpcre, ldd /usr/local/sbin/nginx | grep libpcre libpcre.so.1 => /usr/local/lib64/libpcre.so.1 (0x00007f4f6bce6000) no issues/problems. when is support for pcre2 currently slotted in nginx devel? either for libpcre2-posix, or libpcre2_8 directly? 
(2) nginx's current config options include --with-md5=DIR set path to md5 library sources --with-md5-opt=OPTIONS set additional build options for md5 --with-md5-asm use md5 assembler sources --with-sha1=DIR set path to sha1 library sources --with-sha1-opt=OPTIONS set additional build options for sha1 --with-sha1-asm use sha1 assembler sources admitting I'm not clear on the scope of usage of these crypto libs within the app, 'elsewhere' it's widely recommended to abandon use of sha1, and similarly md5 -- tho less widely so. Should there be an option for replacement with sha256/384/512? From agentzh at gmail.com Wed Jan 21 22:00:14 2015 From: agentzh at gmail.com (Yichun Zhang (agentzh)) Date: Wed, 21 Jan 2015 14:00:14 -0800 Subject: Event loop plus another event loop In-Reply-To: References: Message-ID: Hello! On Sat, Jan 17, 2015 at 5:10 PM, Marat Dakota wrote: > I'm writing a module for a piece of software which works the same way. > So, what I need is a mechanism to call both handlers in a busy loop: > > while (true) { > `process events and call callbacks`(); > `process my piece of software events and call callbacks`(); > } > Well, nginx is a single-threaded application (for each worker process) so you can only have one event loop, otherwise other event loops can always block the main event loop in the nginx core, ruining the performance. You can register the fd of the socket created by your 3rd-party libraries into nginx's event model and use nginx's event loop to dispatch read/write events on it on the library's behalf. 
For example, our ngx_drizzle module integrates the 3rd-party libdrizzle library into nginx to talk to MySQL servers nonblockingly: https://github.com/openresty/drizzle-nginx-module#readme And our ngx_postgres module integrates the official libpq library from PostgreSQL in a similar way: https://github.com/FRiCKLE/ngx_postgres#readme Regards, -agentzh From jefftk at google.com Thu Jan 22 13:55:39 2015 From: jefftk at google.com (Jeff Kaufman) Date: Thu, 22 Jan 2015 08:55:39 -0500 Subject: Event loop plus another event loop In-Reply-To: References: Message-ID: On Wed, Jan 21, 2015 at 5:00 PM, Yichun Zhang (agentzh) wrote: > You can register the fd of the socket created by your 3rd-party > libraries into nginx's event model and use nginx's event loop to > dispatch read/write events on it on the library's behalf. > Alternatively, and this is kind of a hack, you can run your other event loop in a different thread and connect the two up with a pipe. You give one end of the pipe to nginx to watch and you write to the other end when you want to run your handler in the main nginx context. From igor at sysoev.ru Tue Jan 27 12:38:47 2015 From: igor at sysoev.ru (Igor Sysoev) Date: Tue, 27 Jan 2015 12:38:47 +0000 Subject: [nginx] A bounds check of %N format on Windows. Message-ID: details: http://hg.nginx.org/nginx/rev/78271500b8de branches: changeset: 5965:78271500b8de user: Igor Sysoev date: Tue Jan 27 15:38:15 2015 +0300 description: A bounds check of %N format on Windows. Thanks to Joe Bialek, Adam Zabrocki and Microsoft Vulnerability Research. 
diffstat: src/core/ngx_string.c | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diffs (17 lines): diff -r 0a198a517eaf -r 78271500b8de src/core/ngx_string.c --- a/src/core/ngx_string.c Wed Jan 14 09:03:35 2015 +0300 +++ b/src/core/ngx_string.c Tue Jan 27 15:38:15 2015 +0300 @@ -429,8 +429,12 @@ ngx_vslprintf(u_char *buf, u_char *last, case 'N': #if (NGX_WIN32) *buf++ = CR; + if (buf < last) { + *buf++ = LF; + } +#else + *buf++ = LF; #endif - *buf++ = LF; fmt++; continue; From alex at cooperi.net Wed Jan 28 04:35:33 2015 From: alex at cooperi.net (Alex Wilson) Date: Wed, 28 Jan 2015 14:35:33 +1000 Subject: [PATCH] set $https for use behind SSL-stripping load-balancer Message-ID: <556B82D2-1214-4F53-9424-8EA18BAB65B1@cooperi.net> Currently when using nginx behind an SSL-stripping load-balancer, there is no way to control the scheme used when generating directory redirects. By this I mean, if you are serving a static directory tree and you visit the URL of a directory without the trailing / (eg https://example.com/foo), you get served a redirect to http://example.com/foo/ (note trailing slash, and "http" instead of "https"). You could argue that the front-end load balancer should rewrite the redirect on its way from the backend to the client in this case, but alas not all hardware/software used for the front-end supports this correctly, and it also reduces the scalability/performance of the front-end if it has to scan for these URLs and correct them. I think nginx should support generating these redirects correctly in the back-end. The code that generates these redirects is in ngx_http_header_filter_module.c, around line 528-529 (in v1.6.2). It only looks at whether connection->ssl is 0 or not before deciding whether to introduce the 's' after 'http'. This patch adds an extra flag to the ngx_connection_t called "ssl_set", and makes this redirect generator, as well as the getters for $scheme and $https, look at this flag as well as ->ssl. 
Then it adds a variable setter for $https that uses this flag where appropriate. After this, you can simply add "set $https on;" to the server block on the backend server and it will generate correct redirects. Conveniently, you can also then use the stock fastcgi.conf / fastcgi_params for a back-end nginx and not have to worry about creating a different variable to make the fastcgi_param HTTPS be set correctly. I thought about some alternatives to the "set $https on;" semantic -- maybe you should "set $scheme"? Or, as I've seen suggested once before, a "real_scheme" directive to pull it out of an HTTP header? It seems like there is not that much difference between the approaches though, and the "set variable" based approach is the most general: you can emulate the real_scheme directive with it by using a "map", but not the other way around. So if there are other ideas for a better approach I'd like to hear them, otherwise here is a patch for "set $https" :) -------------- next part -------------- A non-text attachment was scrubbed... Name: nginx-set-https.patch Type: application/octet-stream Size: 3458 bytes Desc: not available URL: From lh2008999 at gmail.com Wed Jan 28 13:58:33 2015 From: lh2008999 at gmail.com (=?UTF-8?B?5L2V6b6Z?=) Date: Wed, 28 Jan 2015 21:58:33 +0800 Subject: question about ngx_ssl_clear_error() Message-ID: in src/event/ngx_event_openssl.c:1907, i found function ngx_ssl_clear_error() call openssl function ERR_peek_error() in a while loop, intend to clear global error code queue, but according to openssl's documents and source code, ERR_peek_error() will not pop the first error code , and it does not modify the queue. so it looks like this will cause a busy loop. is my understanding correct? 
reference: nginx source code " static void ngx_ssl_clear_error(ngx_log_t *log) { while (ERR_peek_error()) { ngx_ssl_error(NGX_LOG_ALERT, log, 0, "ignoring stale global SSL error"); } ERR_clear_error(); } " openssl source code: https://github.com/openssl/openssl/blob/master/crypto/err/err.c#L770 From lh2008999 at gmail.com Wed Jan 28 14:06:31 2015 From: lh2008999 at gmail.com (=?UTF-8?B?5L2V6b6Z?=) Date: Wed, 28 Jan 2015 22:06:31 +0800 Subject: question about ngx_ssl_clear_error() In-Reply-To: References: Message-ID: sorry, I made a mistake. the nginx code is correct. in the loop, ngx_ssl_error() called ERR_get_error(), which will pop error queue head element. 2015-01-28 21:58 GMT+08:00 何龙 : > in src/event/ngx_event_openssl.c:1907, i found function > ngx_ssl_clear_error() call openssl function ERR_peek_error() in a > while loop, intend to clear global error code queue, > > but according to openssl's documents and source code, ERR_peek_error() will not > pop the first error code , and it does not modify the queue. > > so it looks like this will cause a busy loop. > > is my understanding correct? > > reference: > > nginx source code > " > static void > ngx_ssl_clear_error(ngx_log_t *log) > { > while (ERR_peek_error()) { > ngx_ssl_error(NGX_LOG_ALERT, log, 0, "ignoring stale global SSL error"); > } > > ERR_clear_error(); > } > > " > > openssl source code: > https://github.com/openssl/openssl/blob/master/crypto/err/err.c#L770 From ggarcia at deic.uab.cat Wed Jan 28 16:17:15 2015 From: ggarcia at deic.uab.cat (Gerard) Date: Wed, 28 Jan 2015 17:17:15 +0100 Subject: Send continuous stream of data Message-ID: Hi, I'd like to send a fragmented MP4 while it is being generated, therefore while the file grows. Is there an easy way to do this? Or do I need to do something like create a timer and use that timer to send the data as it is being generated using chained buffers? Thanks, Gerard -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From hungnv at opensource.com.vn Fri Jan 30 04:29:56 2015 From: hungnv at opensource.com.vn (hungnv at opensource.com.vn) Date: Fri, 30 Jan 2015 11:29:56 +0700 Subject: [PATCH] Enable faststart for mp4 module Message-ID: <031db7af488c045fc4f0.1422592196@Hungs-MacBook-Air.local> # HG changeset patch # User Hung Nguyen +#include +#include +#include +#ifdef WIN32 +#include +#include +#define DIR_SEPARATOR '\\' +#define strdup _strdup +#define open _open +#define close _close +#define write _write +#define lseek _lseeki64 +#define stat _stat64 +#else +#include +#include +#include +#include +#include +#include +#include +#endif + + + +#ifdef __MINGW32__ +#define fseeko(x,y,z) fseeko64(x,y,z) +#define ftello(x) ftello64(x) +#endif + +#define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1]) + +#define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \ + (((uint8_t*)(x))[1] << 16) | \ + (((uint8_t*)(x))[2] << 8) | \ + ((uint8_t*)(x))[3]) + +#define BE_64(x) (((uint64_t)(((uint8_t*)(x))[0]) << 56) | \ + ((uint64_t)(((uint8_t*)(x))[1]) << 48) | \ + ((uint64_t)(((uint8_t*)(x))[2]) << 40) | \ + ((uint64_t)(((uint8_t*)(x))[3]) << 32) | \ + ((uint64_t)(((uint8_t*)(x))[4]) << 24) | \ + ((uint64_t)(((uint8_t*)(x))[5]) << 16) | \ + ((uint64_t)(((uint8_t*)(x))[6]) << 8) | \ + ((uint64_t)((uint8_t*)(x))[7])) + +#define BE_FOURCC( ch0, ch1, ch2, ch3 ) \ + ( (uint32_t)(unsigned char)(ch3) | \ + ( (uint32_t)(unsigned char)(ch2) << 8 ) | \ + ( (uint32_t)(unsigned char)(ch1) << 16 ) | \ + ( (uint32_t)(unsigned char)(ch0) << 24 ) ) + +#define QT_ATOM BE_FOURCC + +/* top level atoms */ +#define FREE_ATOM QT_ATOM('f', 'r', 'e', 'e') + +#define JUNK_ATOM QT_ATOM('j', 'u', 'n', 'k') + +#define MDAT_ATOM QT_ATOM('m', 'd', 'a', 't') + +#define MOOV_ATOM QT_ATOM('m', 'o', 'o', 'v') + +#define PNOT_ATOM QT_ATOM('p', 'n', 'o', 't') + +#define SKIP_ATOM QT_ATOM('s', 'k', 'i', 'p') + +#define WIDE_ATOM QT_ATOM('w', 'i', 'd', 'e') + +#define PICT_ATOM QT_ATOM('P', 'I', 'C', 'T') + +#define 
FTYP_ATOM QT_ATOM('f', 't', 'y', 'p') + +#define UUID_ATOM QT_ATOM('u', 'u', 'i', 'd') + +#define CMOV_ATOM QT_ATOM('c', 'm', 'o', 'v') + +#define STCO_ATOM QT_ATOM('s', 't', 'c', 'o') + +#define CO64_ATOM QT_ATOM('c', 'o', '6', '4') + +#define ATOM_PREAMBLE_SIZE 8 + +#define COPY_BUFFER_SIZE 1024 + + +/* we take 2 arguments from ngx_http_mp4_module + * path: to open it in write mode once source file need to be modified. + * file descriptor: nginx already opened the file, we dont have to open again + * ngx_fd_t is actually an integer (see ngx_files.h) + */ + + +int ngx_http_enable_fast_start(ngx_str_t *path, ngx_fd_t +ngx_open_file_cached_fd, ngx_http_request_t *r) { + unsigned char atom_bytes[ATOM_PREAMBLE_SIZE]; + uint32_t atom_type = 0; + uint64_t atom_size = 0; + uint64_t atom_offset = 0; + uint64_t last_offset; + unsigned char *moov_atom = NULL; + unsigned char *ftyp_atom = NULL; + uint64_t moov_atom_size; + uint64_t ftyp_atom_size = 0; + uint64_t i, j; + uint32_t offset_count; + uint64_t current_offset; + uint64_t start_offset = 0; + int outfile_fd = -1; + unsigned char *temp_buf = NULL; + ngx_log_t *log = r->connection->log; + + + /* traverse through the atoms in the file to make sure that 'moov' is + * at the end */ + while (1) { + + if (read(ngx_open_file_cached_fd, atom_bytes, ATOM_PREAMBLE_SIZE) == 0) + break; + + atom_size = (uint32_t) BE_32(&atom_bytes[0]); + atom_type = BE_32(&atom_bytes[4]); + /* keep ftyp atom */ + + if (atom_type == FTYP_ATOM) { + ftyp_atom_size = atom_size; + free(ftyp_atom); + ftyp_atom = ngx_palloc(r->connection->pool, ftyp_atom_size); + // ftyp_atom = malloc(ftyp_atom_size); + + if (!ftyp_atom) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "could not allocate " + "%"PRIu64" byte for ftyp atom\n", atom_size); + goto error_out; + } + + lseek(ngx_open_file_cached_fd, -ATOM_PREAMBLE_SIZE, SEEK_CUR); + ngx_log_debug(NGX_LOG_DEBUG, log, 0, "atom_size: " + "%"PRIu64" \n", atom_size); + + if (read(ngx_open_file_cached_fd, ftyp_atom, 
atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + start_offset = atom_size; + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, + "start_offset to verify: %"PRIu64" \n", start_offset); + + } else { + /* 64-bit special case */ + if (atom_size == 1) { + if (read(ngx_open_file_cached_fd, atom_bytes, + ATOM_PREAMBLE_SIZE) == 0) { + break; + } + + atom_size = BE_64(&atom_bytes[0]); + lseek(ngx_open_file_cached_fd, atom_size - + ATOM_PREAMBLE_SIZE * 2, SEEK_CUR); + + } else { + lseek(ngx_open_file_cached_fd, atom_size - ATOM_PREAMBLE_SIZE, + SEEK_CUR); + } + + } + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, log, 0, "%c%c%c%c %10"PRIu64"" + " %"PRIu64"\n", + (atom_type >> 24) & 255, + (atom_type >> 16) & 255, + (atom_type >> 8) & 255, + (atom_type >> 0) & 255, + atom_offset, + atom_size); + + if ((atom_type != FREE_ATOM) && + (atom_type != JUNK_ATOM) && + (atom_type != MDAT_ATOM) && + (atom_type != MOOV_ATOM) && + (atom_type != PNOT_ATOM) && + (atom_type != SKIP_ATOM) && + (atom_type != WIDE_ATOM) && + (atom_type != PICT_ATOM) && + (atom_type != UUID_ATOM) && + (atom_type != FTYP_ATOM)) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "encountered non-QT " + "top-level atom (is this a Quicktime file?)\n"); + break; + } + + atom_offset += atom_size; + + /* The atom header is 8 (or 16 bytes), if the atom size (which + * includes these 8 or 16 bytes) is less than that, we won't be + * able to continue scanning sensibly after this atom, so break. 
*/ + if (atom_size < 8) + break; + } + + if (atom_type != MOOV_ATOM) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "last atom in file: " + "%s was not a moov atom\n", path->data); + if (ftyp_atom) ngx_pfree(r->connection->pool, ftyp_atom); + // dont close file, not our job + return NGX_OK; + } + + /* moov atom was, in fact, the last atom in the chunk; load the whole + * moov atom */ + last_offset = lseek(ngx_open_file_cached_fd, -atom_size, SEEK_END); + moov_atom_size = atom_size; + moov_atom = ngx_palloc(r->connection->pool, moov_atom_size); + + if (!moov_atom) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "could not allocate " + "%"PRIu64" byte for moov atom\n", atom_size); + goto error_out; + } + + if (read(ngx_open_file_cached_fd, moov_atom, atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + /* this utility does not support compressed atoms yet, so disqualify + * files with compressed QT atoms */ + if (BE_32(&moov_atom[12]) == CMOV_ATOM) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "this module does " + "not support compressed moov atoms yet\n"); + free(ftyp_atom); + if (moov_atom) free(moov_atom); + /* should not return error, if we cannot fix it, + * let player download the whole file then play it*/ + return NGX_OK; + } + + /* read next move_atom_size bytes + * since we read/write file in same time, we must read before write into + * the buffer + */ + temp_buf = ngx_palloc(r->connection->pool, moov_atom_size); + + if (!temp_buf) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, "Cannot allocate %"PRIu64" " + "byte for temp buf \n", moov_atom_size); + goto error_out; + } + + /* seek to after ftyp_atom */ + lseek(ngx_open_file_cached_fd, ftyp_atom_size, SEEK_SET); + + if (read(ngx_open_file_cached_fd, temp_buf, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + start_offset += moov_atom_size; + + /* end read temp buffer bytes */ + + /* crawl through the moov chunk in search of stco or co64 atoms 
*/ + for (i = 4; i < moov_atom_size - 4; i++) { + atom_type = BE_32(&moov_atom[i]); + + if (atom_type == STCO_ATOM) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "%s patching stco " + "atom...\n", path->data); + atom_size = BE_32(&moov_atom[i - 4]); + + if (i + atom_size - 4 > moov_atom_size) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, " bad atom size\n"); + goto error_out; + } + + offset_count = BE_32(&moov_atom[i + 8]); + + for (j = 0; j < offset_count; j++) { + current_offset = BE_32(&moov_atom[i + 12 + j * 4]); + current_offset += moov_atom_size; + moov_atom[i + 12 + j * 4 + 0] = (current_offset >> 24) & 0xFF; + moov_atom[i + 12 + j * 4 + 1] = (current_offset >> 16) & 0xFF; + moov_atom[i + 12 + j * 4 + 2] = (current_offset >> 8) & 0xFF; + moov_atom[i + 12 + j * 4 + 3] = (current_offset >> 0) & 0xFF; + } + + i += atom_size - 4; + + } else if (atom_type == CO64_ATOM) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "%s patching co64 " + "atom...\n", path->data); + atom_size = BE_32(&moov_atom[i - 4]); + + if (i + atom_size - 4 > moov_atom_size) { + ngx_log_error(NGX_LOG_ERR, log, ngx_errno, " bad atom size\n"); + goto error_out; + } + + offset_count = BE_32(&moov_atom[i + 8]); + + for (j = 0; j < offset_count; j++) { + current_offset = BE_64(&moov_atom[i + 12 + j * 8]); + current_offset += moov_atom_size; + moov_atom[i + 12 + j * 8 + 0] = (current_offset >> 56) & 0xFF; + moov_atom[i + 12 + j * 8 + 1] = (current_offset >> 48) & 0xFF; + moov_atom[i + 12 + j * 8 + 2] = (current_offset >> 40) & 0xFF; + moov_atom[i + 12 + j * 8 + 3] = (current_offset >> 32) & 0xFF; + moov_atom[i + 12 + j * 8 + 4] = (current_offset >> 24) & 0xFF; + moov_atom[i + 12 + j * 8 + 5] = (current_offset >> 16) & 0xFF; + moov_atom[i + 12 + j * 8 + 6] = (current_offset >> 8) & 0xFF; + moov_atom[i + 12 + j * 8 + 7] = (current_offset >> 0) & 0xFF; + } + + i += atom_size - 4; + } + } + + + if (start_offset > 0) { /* seek after ftyp atom */ + lseek(ngx_open_file_cached_fd, start_offset, SEEK_SET); 
+ last_offset -= start_offset; + } + + outfile_fd = open((const char *) path->data, O_WRONLY); + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "outfile fd: %d\n", outfile_fd); + + if (outfile_fd < 0) { + perror((const char *) path->data); + goto error_out; + } + + /* dump the same ftyp atom */ + if (ftyp_atom_size > 0) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "%s: writing ftyp atom...\n" + , path->data); + + if (write(outfile_fd, ftyp_atom, ftyp_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + } + + i = 0; + /* + we must use 2 buffer to read/write + */ + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, " moov_atom_size: %"PRIu64" \n" + , moov_atom_size); + + while (last_offset) { + // printf("last offset: %"PRIu64" \n", last_offset); + if (i == 0) { + ngx_log_debug(NGX_LOG_DEBUG, log, 0, " writing moov atom...\n"); + i = 1; + } + + if (write(outfile_fd, moov_atom, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + if (last_offset < moov_atom_size) + moov_atom_size = last_offset; + + if (read(ngx_open_file_cached_fd, moov_atom, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + last_offset -= moov_atom_size; + + if (write(outfile_fd, temp_buf, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + if (last_offset < moov_atom_size) + moov_atom_size = last_offset; + + if (read(ngx_open_file_cached_fd, temp_buf, moov_atom_size) < 0) { + perror((const char *) path->data); + goto error_out; + } + + last_offset -= moov_atom_size; + } + + /* seek to beginning of source file*/ + lseek(ngx_open_file_cached_fd, 0, SEEK_SET); + + close(outfile_fd); + ngx_pfree(r->connection->pool, moov_atom); + ngx_pfree(r->connection->pool, ftyp_atom); + ngx_pfree(r->connection->pool, temp_buf); + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, " finish fixing file: %s\n" + , path->data); + return NGX_OK; + +error_out: + if (outfile_fd > 0) + close(outfile_fd); + + if 
(moov_atom) + ngx_pfree(r->connection->pool, moov_atom); + + if (ftyp_atom) + ngx_pfree(r->connection->pool, ftyp_atom); + + if (temp_buf) + ngx_pfree(r->connection->pool, temp_buf); + + return NGX_ERROR; +} diff -r 78271500b8de -r 031db7af488c src/http/modules/ngx_http_mp4_module.c --- a/src/http/modules/ngx_http_mp4_module.c Tue Jan 27 15:38:15 2015 +0300 +++ b/src/http/modules/ngx_http_mp4_module.c Fri Jan 30 11:11:00 2015 +0700 @@ -7,6 +7,7 @@ #include #include #include +#include "ngx_http_mp4_faststart.h" #define NGX_HTTP_MP4_TRAK_ATOM 0 @@ -43,6 +44,7 @@ typedef struct { size_t buffer_size; size_t max_buffer_size; + ngx_flag_t mp4_enhance; } ngx_http_mp4_conf_t; @@ -332,7 +334,14 @@ offsetof(ngx_http_mp4_conf_t, max_buffer_size), NULL }, - ngx_null_command + { ngx_string("fix_mp4"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_mp4_conf_t, mp4_enhance), + NULL }, + + ngx_null_command }; @@ -429,6 +438,7 @@ ngx_http_mp4_file_t *mp4; ngx_open_file_info_t of; ngx_http_core_loc_conf_t *clcf; + ngx_http_mp4_conf_t *mlcf; if (!(r->method & (NGX_HTTP_GET|NGX_HTTP_HEAD))) { return NGX_HTTP_NOT_ALLOWED; @@ -522,6 +532,18 @@ return NGX_DECLINED; } + /* move atom to beginning of file if it's in the last*/ + mlcf = ngx_http_get_module_loc_conf(r, ngx_http_mp4_module); + if (mlcf->mp4_enhance == 1) { + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, + "examine mp4 filename: \"%V\"", &path); + + if (ngx_http_enable_fast_start(&path, of.fd, r) != NGX_OK) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + } + r->root_tested = !r->error_page; r->allow_ranges = 1; @@ -3495,6 +3517,7 @@ ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, 512 * 1024); ngx_conf_merge_size_value(conf->max_buffer_size, prev->max_buffer_size, 10 * 1024 * 1024); + ngx_conf_merge_off_value(conf->mp4_enhance, prev->mp4_enhance, 0); return NGX_CONF_OK; }