From vbart at nginx.com Fri Mar 1 14:55:43 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Fri, 1 Mar 2013 14:55:43 +0000 Subject: [nginx] svn commit: r5097 - trunk/src/http Message-ID: <20130301145543.C2A8F3FAC79@mail.nginx.com> Author: vbart Date: 2013-03-01 14:55:42 +0000 (Fri, 01 Mar 2013) New Revision: 5097 URL: http://trac.nginx.org/nginx/changeset/5097/nginx Log: Allocate request object from its own pool. Previously, it was allocated from a connection pool and was selectively freed for an idle keepalive connection. The goal is to put coupled things in one chunk of memory, and to simplify handling of request objects. Modified: trunk/src/http/ngx_http_request.c trunk/src/http/ngx_http_request.h Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-02-27 17:41:34 UTC (rev 5096) +++ trunk/src/http/ngx_http_request.c 2013-03-01 14:55:42 UTC (rev 5097) @@ -365,6 +365,7 @@ static void ngx_http_init_request(ngx_event_t *rev) { + ngx_pool_t *pool; ngx_time_t *tp; ngx_connection_t *c; ngx_http_request_t *r; @@ -387,27 +388,25 @@ hc = c->data; - r = hc->request; + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); - if (r) { - ngx_memzero(r, sizeof(ngx_http_request_t)); + pool = ngx_create_pool(cscf->request_pool_size, c->log); + if (pool == NULL) { + ngx_http_close_connection(c); + return; + } - r->pipeline = hc->pipeline; + r = ngx_pcalloc(pool, sizeof(ngx_http_request_t)); + if (r == NULL) { + ngx_destroy_pool(pool); + ngx_http_close_connection(c); + return; + } - if (hc->nbusy) { - r->header_in = hc->busy[0]; - } + r->pool = pool; - } else { - r = ngx_pcalloc(c->pool, sizeof(ngx_http_request_t)); - if (r == NULL) { - ngx_http_close_connection(c); - return; - } + r->pipeline = hc->pipeline; - hc->request = r; - } - c->data = r; r->http_connection = hc; @@ -426,28 +425,18 @@ ngx_http_set_connection_log(r->connection, clcf->error_log); - 
cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module); - if (c->buffer == NULL) { c->buffer = ngx_create_temp_buf(c->pool, cscf->client_header_buffer_size); if (c->buffer == NULL) { + ngx_destroy_pool(r->pool); ngx_http_close_connection(c); return; } } - if (r->header_in == NULL) { - r->header_in = c->buffer; - } + r->header_in = hc->nbusy ? hc->busy[0] : c->buffer; - r->pool = ngx_create_pool(cscf->request_pool_size, c->log); - if (r->pool == NULL) { - ngx_http_close_connection(c); - return; - } - - if (ngx_list_init(&r->headers_out.headers, r->pool, 20, sizeof(ngx_table_elt_t)) != NGX_OK) @@ -2663,6 +2652,7 @@ } } + /* guard against recursive call from ngx_http_finalize_connection() */ r->keepalive = 0; ngx_http_free_request(r, 0); @@ -2694,17 +2684,12 @@ hc->pipeline = 0; /* - * To keep a memory footprint as small as possible for an idle - * keepalive connection we try to free the ngx_http_request_t and - * c->buffer's memory if they were allocated outside the c->pool. - * The large header buffers are always allocated outside the c->pool and - * are freed too. + * To keep a memory footprint as small as possible for an idle keepalive + * connection we try to free c->buffer's memory if it was allocated outside + * the c->pool. The large header buffers are always allocated outside the + * c->pool and are freed too. */ - if (ngx_pfree(c->pool, r) == NGX_OK) { - hc->request = NULL; - } - b = c->buffer; if (ngx_pfree(c->pool, b->start) == NGX_OK) { @@ -3155,6 +3140,7 @@ ngx_http_free_request(ngx_http_request_t *r, ngx_int_t rc) { ngx_log_t *log; + ngx_pool_t *pool; struct linger linger; ngx_http_cleanup_t *cln; ngx_http_log_ctx_t *ctx; @@ -3221,7 +3207,15 @@ r->connection->destroyed = 1; - ngx_destroy_pool(r->pool); + /* + * Setting r->pool to NULL will increase probability to catch double close + * of request since the request object is allocated from its own pool. 
+ */ + + pool = r->pool; + r->pool = NULL; + + ngx_destroy_pool(pool); } Modified: trunk/src/http/ngx_http_request.h =================================================================== --- trunk/src/http/ngx_http_request.h 2013-02-27 17:41:34 UTC (rev 5096) +++ trunk/src/http/ngx_http_request.h 2013-03-01 14:55:42 UTC (rev 5097) @@ -302,8 +302,6 @@ #endif #endif - ngx_http_request_t *request; - ngx_buf_t **busy; ngx_int_t nbusy; From a.marinov at ucdn.com Fri Mar 1 15:41:18 2013 From: a.marinov at ucdn.com (Anatoli Marinov) Date: Fri, 1 Mar 2013 17:41:18 +0200 Subject: problem with cache size Message-ID: Hi, I have a strange problem with cache size on XFS. I tried to debug it and I found something. First there is a test request for 1 file which is 10 MB. In the function ngx_http_file_cache_update I have counted size 17039360 because st_blocks = 33280, st_size = 10486040. After that I am trying to restart the server and cache manager add this size for cache structure but the size is different as follow for cache size the increment is with 10489856, st_size = 10486040, st_blocks = 20488 As could be seen st_blocks is different for one and the same file. I think the second file size is right but why the first is wrong? Thanks in advance Anatoli Marinov -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Mar 1 15:59:55 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 1 Mar 2013 19:59:55 +0400 Subject: problem with cache size In-Reply-To: References: Message-ID: <20130301155955.GJ94127@mdounin.ru> Hello! On Fri, Mar 01, 2013 at 05:41:18PM +0200, Anatoli Marinov wrote: > Hi, > I have a strange problem with cache size on XFS. I tried to debug it and I > found something. > > First there is a test request for 1 file which is 10 MB. > In the function ngx_http_file_cache_update I have counted size 17039360 > because st_blocks = 33280, st_size = 10486040. 
> > After that I am trying to restart the server and cache manager add this > size for cache structure but the size is different as follow > for cache size the increment is with 10489856, st_size = 10486040, > st_blocks = 20488 > > As could be seen st_blocks is different for one and the same file. > I think the second file size is right but why the first is wrong? This is an known XFS feature, it reports preallocated blocks instead of a real size before the file is closed. Some details are here: http://trac.nginx.org/nginx/ticket/157 -- Maxim Dounin http://nginx.org/en/donation.html From nick at marden.org Sat Mar 2 02:23:08 2013 From: nick at marden.org (Nick Marden) Date: Fri, 1 Mar 2013 21:23:08 -0500 Subject: Patch proposal: allow alternatives to 503 status code in limit_req module Message-ID: Hey there, I've been doing some work using limit_req to prevent overzealous clients from DOS'ing my site. Specifically, I wanted to use a different HTTP status code such as 420 or 429 so that it would be straightforward to show a "hey man, chill out" page rather than my generic 503 error page. Attached is a patch that enables this option for the limit_req directive. It still defaults to 503, but you can set it to any 4xx or 5xx value of your choosing by specifying limit_req zone=foo burst=10 status_code=420; for example. I hope I've sent this to the right place. Please let me know where else to send it if I'm in the wrong place. Cheers, -- Nick Marden nick at marden.org -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... 
Name: alternative_limit_req_status_code.patch Type: application/octet-stream Size: 2375 bytes Desc: not available URL: From mdounin at mdounin.ru Sat Mar 2 23:14:12 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 3 Mar 2013 03:14:12 +0400 Subject: Patch proposal: allow alternatives to 503 status code in limit_req module In-Reply-To: References: Message-ID: <20130302231412.GD15378@mdounin.ru> Hello! On Fri, Mar 01, 2013 at 09:23:08PM -0500, Nick Marden wrote: > Hey there, > > I've been doing some work using limit_req to prevent overzealous clients > from DOS'ing my site. Specifically, I wanted to use a different HTTP status > code such as 420 or 429 so that it would be straightforward to show a "hey > man, chill out" page rather than my generic 503 error page. > > Attached is a patch that enables this option for the limit_req directive. > It still defaults to 503, but you can set it to any 4xx or 5xx value of > your choosing by specifying > > limit_req zone=foo burst=10 status_code=420; > > for example. I don't think this should be per-limit settings, for the following reasons in no particular order: - This makes things complicated in case of multiple limits used. Current concept is to pass a request if it satisfies all limits configured. If at least one limit reached - request is rejected (and nothing else happens). With such aproach limit check order isn't significant. Introducing per-limit status code will make check order significant. - There is no way to easily set default code server-wide. I think it should be separate directive to set status, something like limit_req_status 429; Additionally, there should be limit_conn counterpart, limit_conn_status 429; > I hope I've sent this to the right place. Please let me know where else to > send it if I'm in the wrong place. It's the right place. 
-- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Mon Mar 4 15:39:04 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 4 Mar 2013 15:39:04 +0000 Subject: [nginx] svn commit: r5098 - trunk/src/http/modules Message-ID: <20130304153904.5FB743FA125@mail.nginx.com> Author: mdounin Date: 2013-03-04 15:39:03 +0000 (Mon, 04 Mar 2013) New Revision: 5098 URL: http://trac.nginx.org/nginx/changeset/5098/nginx Log: Mp4: fixed handling of too small mdat atoms (ticket #266). Patch by Gernot Vormayr (with minor changes). Modified: trunk/src/http/modules/ngx_http_mp4_module.c Modified: trunk/src/http/modules/ngx_http_mp4_module.c =================================================================== --- trunk/src/http/modules/ngx_http_mp4_module.c 2013-03-01 14:55:42 UTC (rev 5097) +++ trunk/src/http/modules/ngx_http_mp4_module.c 2013-03-04 15:39:03 UTC (rev 5098) @@ -754,6 +754,13 @@ *prev = &mp4->mdat_atom; + if (start_offset > mp4->mdat_data.buf->file_last) { + ngx_log_error(NGX_LOG_ERR, mp4->file.log, 0, + "start time is out mp4 mdat atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + adjustment = mp4->ftyp_size + mp4->moov_size + ngx_http_mp4_update_mdat_atom(mp4, start_offset) - start_offset; From mdounin at mdounin.ru Tue Mar 5 14:36:00 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 5 Mar 2013 14:36:00 +0000 Subject: [nginx] svn commit: r5099 - trunk/docs/xml/nginx Message-ID: <20130305143601.142F73FAB72@mail.nginx.com> Author: mdounin Date: 2013-03-05 14:35:58 +0000 (Tue, 05 Mar 2013) New Revision: 5099 URL: http://trac.nginx.org/nginx/changeset/5099/nginx Log: nginx-1.3.14-RELEASE Modified: trunk/docs/xml/nginx/changes.xml Modified: trunk/docs/xml/nginx/changes.xml =================================================================== --- trunk/docs/xml/nginx/changes.xml 2013-03-04 15:39:03 UTC (rev 5098) +++ trunk/docs/xml/nginx/changes.xml 2013-03-05 14:35:58 UTC (rev 5099) @@ -5,6 +5,79 @@ + + + 
+ +переменные $connections_active, $connections_reading и $connections_writing +в модуле ngx_http_stub_status_module. + + +$connections_active, $connections_reading, and $connections_writing variables +in the ngx_http_stub_status_module. + + + + + +поддержка WebSocket-соединений +в модулях ngx_http_uwsgi_module и ngx_http_scgi_module. + + +support of WebSocket connections +in the ngx_http_uwsgi_module and ngx_http_scgi_module. + + + + + +в обработке виртуальных серверов при использовании SNI. + + +in virtual servers handling with SNI. + + + + + +при использовании директивы "ssl_session_cache shared" +новые сессии могли не сохраняться, +если заканчивалось место в разделяемой памяти.
+Спасибо Piotr Sikora. +
+ +new sessions were not always stored +if the "ssl_session_cache shared" directive was used +and there was no free space in shared memory.
+Thanks to Piotr Sikora. +
+
+ + + +несколько заголовков X-Forwarded-For обрабатывались некорректно.
+Спасибо Neal Poole за спонсирование разработки. +
+ +multiple X-Forwarded-For headers were handled incorrectly.
+Thanks to Neal Poole for sponsoring this work. +
+
+ + + +в модуле ngx_http_mp4_module.
+Спасибо Gernot Vormayr. +
+ +in the ngx_http_mp4_module.
+Thanks to Gernot Vormayr. +
+
+ +
+ + From mdounin at mdounin.ru Tue Mar 5 14:36:21 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 5 Mar 2013 14:36:21 +0000 Subject: [nginx] svn commit: r5100 - tags Message-ID: <20130305143622.117F23FAB81@mail.nginx.com> Author: mdounin Date: 2013-03-05 14:36:20 +0000 (Tue, 05 Mar 2013) New Revision: 5100 URL: http://trac.nginx.org/nginx/changeset/5100/nginx Log: release-1.3.14 tag Added: tags/release-1.3.14/ From nick at marden.org Tue Mar 5 18:28:44 2013 From: nick at marden.org (Nick Marden) Date: Tue, 5 Mar 2013 13:28:44 -0500 Subject: Patch proposal: allow alternatives to 503 status code in limit_req module Message-ID: On Sun, Mar 3, 2013 at 7:00 AM, wrote: > Message: 1 > Date: Sun, 3 Mar 2013 03:14:12 +0400 > From: Maxim Dounin > To: nginx-devel at nginx.org > Subject: Re: Patch proposal: allow alternatives to 503 status code in > limit_req module > Message-ID: <20130302231412.GD15378 at mdounin.ru> > Content-Type: text/plain; charset=us-ascii > > Hello! > > On Fri, Mar 01, 2013 at 09:23:08PM -0500, Nick Marden wrote: > > > Hey there, > > > > I've been doing some work using limit_req to prevent overzealous clients > > from DOS'ing my site. Specifically, I wanted to use a different HTTP > status > > code such as 420 or 429 so that it would be straightforward to show a > "hey > > man, chill out" page rather than my generic 503 error page. > > > > Attached is a patch that enables this option for the limit_req directive. > > It still defaults to 503, but you can set it to any 4xx or 5xx value of > > your choosing by specifying > > > > limit_req zone=foo burst=10 status_code=420; > > > > for example. > > I don't think this should be per-limit settings, for the following > reasons in no particular order: > > - This makes things complicated in case of multiple limits used. > Current concept is to pass a request if it satisfies all limits > configured. If at least one limit reached - request is rejected > (and nothing else happens). 
With such aproach limit check order > isn't significant. Introducing per-limit status code will make > check order significant. > > - There is no way to easily set default code server-wide. > > I think it should be separate directive to set status, something > like > > limit_req_status 429; > > Additionally, there should be limit_conn counterpart, > > limit_conn_status 429; > I understand what you are saying and have made the corresponding changes to my patch (attached). > > I hope I've sent this to the right place. Please let me know where else > to > > send it if I'm in the wrong place. > > It's the right place. > Thanks. Please let me know if there is anything else I can do to help get this patch onto trunk. Cheers, -- Nick Marden nick at marden.org -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: alternative_limit_status_codes.patch Type: application/octet-stream Size: 4319 bytes Desc: not available URL: From alexander_koch_log at lavabit.com Tue Mar 5 23:01:44 2013 From: alexander_koch_log at lavabit.com (alexander_koch_log) Date: Wed, 06 Mar 2013 00:01:44 +0100 Subject: Nginx Module (I/O block) In-Reply-To: <20130209232504.GU66348@mdounin.ru> References: <51166E42.40904@lavabit.com> <20130209232504.GU66348@mdounin.ru> Message-ID: <51367958.9020401@lavabit.com> On 02/10/2013 12:25 AM, Maxim Dounin wrote: > Hello! > > On Sat, Feb 09, 2013 at 04:41:54PM +0100, alexander_koch_log wrote: > >> It is not clear to me how to avoid blocking the nginx reactor loop >> when creating an nginx module which should perform some long I/O >> operations and return the response to the client. Or is this >> handled internally by Nginx? > Correct aproach is to avoid blocking for a long time, and use > non-blocking I/O and event-based notification instead. > > What exactly should (and can) be done depends on the exact case. > E.g. 
to work with sockets there are lots of various functions > available to simplify things. Working with files without blocking > is harder and not always possible, but a common case is handled by > nginx - to send some large file you just have to open the file and > ask nginx to send an in-file buffer. > When sending an in-file buffer, is it still possible to have the read stop when a special byte char is read like \xFF even though the provided bytes to read are larger? One way would be to read to the buffer then truncate the memory, then send the buffer? Or is there an efficient way? Thanks, Alex From mdounin at mdounin.ru Wed Mar 6 12:50:31 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 6 Mar 2013 16:50:31 +0400 Subject: Nginx Module (I/O block) In-Reply-To: <51367958.9020401@lavabit.com> References: <51166E42.40904@lavabit.com> <20130209232504.GU66348@mdounin.ru> <51367958.9020401@lavabit.com> Message-ID: <20130306125031.GT15378@mdounin.ru> Hello! On Wed, Mar 06, 2013 at 12:01:44AM +0100, alexander_koch_log wrote: > On 02/10/2013 12:25 AM, Maxim Dounin wrote: > >Hello! > > > >On Sat, Feb 09, 2013 at 04:41:54PM +0100, alexander_koch_log wrote: > > > >>It is not clear to me how to avoid blocking the nginx reactor loop > >>when creating an nginx module which should perform some long I/O > >>operations and return the response to the client. Or is this > >>handled internally by Nginx? > >Correct aproach is to avoid blocking for a long time, and use > >non-blocking I/O and event-based notification instead. > > > >What exactly should (and can) be done depends on the exact case. > >E.g. to work with sockets there are lots of various functions > >available to simplify things. Working with files without blocking > >is harder and not always possible, but a common case is handled by > >nginx - to send some large file you just have to open the file and > >ask nginx to send an in-file buffer. 
> > > > When sending an in-file buffer, is it still possible to have the > read stop when a special byte char is read like \xFF even though the > provided bytes to read are larger? As e.g. with sendfile used file's data is never available in user memory, the answer is no. > One way would be to read to the buffer then truncate the memory, > then send the buffer? Or is there an efficient way? What you are trying to do is probably beter handled by a filter module. You may ask nginx to read a file into memory buffers (output_buffers), and then inspect/modify all data passed though you filter. -- Maxim Dounin http://nginx.org/en/donation.html From alexander_koch_log at lavabit.com Wed Mar 6 16:56:15 2013 From: alexander_koch_log at lavabit.com (alexander_koch_log) Date: Wed, 06 Mar 2013 17:56:15 +0100 Subject: Nginx Module (I/O block) In-Reply-To: <20130306125031.GT15378@mdounin.ru> References: <51166E42.40904@lavabit.com> <20130209232504.GU66348@mdounin.ru> <51367958.9020401@lavabit.com> <20130306125031.GT15378@mdounin.ru> Message-ID: <5137752F.5050205@lavabit.com> Hi, On 03/06/2013 01:50 PM, Maxim Dounin wrote: > Hello! >> When sending an in-file buffer, is it still possible to have the >> read stop when a special byte char is read like \xFF even though the >> provided bytes to read are larger? > As e.g. with sendfile used file's data is never available in > user memory, the answer is no. > >> One way would be to read to the buffer then truncate the memory, >> then send the buffer? Or is there an efficient way? > What you are trying to do is probably beter handled by a filter > module. You may ask nginx to read a file into memory buffers > (output_buffers), and then inspect/modify all data passed though > you filter. > So the downside of this method would be to not use sendfile and deal with the over of read/write, correct? 
Thanks, Alex From mdounin at mdounin.ru Thu Mar 7 14:09:49 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 7 Mar 2013 18:09:49 +0400 Subject: Nginx Module (I/O block) In-Reply-To: <5137752F.5050205@lavabit.com> References: <51166E42.40904@lavabit.com> <20130209232504.GU66348@mdounin.ru> <51367958.9020401@lavabit.com> <20130306125031.GT15378@mdounin.ru> <5137752F.5050205@lavabit.com> Message-ID: <20130307140949.GG15378@mdounin.ru> Hello! On Wed, Mar 06, 2013 at 05:56:15PM +0100, alexander_koch_log wrote: > Hi, > > On 03/06/2013 01:50 PM, Maxim Dounin wrote: > >Hello! > >>When sending an in-file buffer, is it still possible to have the > >>read stop when a special byte char is read like \xFF even though the > >>provided bytes to read are larger? > >As e.g. with sendfile used file's data is never available in > >user memory, the answer is no. > > > >>One way would be to read to the buffer then truncate the memory, > >>then send the buffer? Or is there an efficient way? > >What you are trying to do is probably beter handled by a filter > >module. You may ask nginx to read a file into memory buffers > >(output_buffers), and then inspect/modify all data passed though > >you filter. > > > > So the downside of this method would be to not use sendfile and deal > with the over of read/write, correct? You can't use sendfile if you have to scan file contents for a special byte. By writing a filter you may avoid reinventing the wheel and doing file reads yourself. Downside is that you have to write filter which is expected to work with a data stream, which is not always feasible. 
-- Maxim Dounin http://nginx.org/en/donation.html From vbart at nginx.com Thu Mar 7 17:07:05 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Thu, 7 Mar 2013 17:07:05 +0000 Subject: [nginx] svn commit: r5101 - in trunk/src: core http/modules/perl Message-ID: <20130307170706.41CDA3F9FE6@mail.nginx.com> Author: vbart Date: 2013-03-07 17:07:04 +0000 (Thu, 07 Mar 2013) New Revision: 5101 URL: http://trac.nginx.org/nginx/changeset/5101/nginx Log: Version bump. Modified: trunk/src/core/nginx.h trunk/src/http/modules/perl/nginx.pm Modified: trunk/src/core/nginx.h =================================================================== --- trunk/src/core/nginx.h 2013-03-05 14:36:20 UTC (rev 5100) +++ trunk/src/core/nginx.h 2013-03-07 17:07:04 UTC (rev 5101) @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1003014 -#define NGINX_VERSION "1.3.14" +#define nginx_version 1003015 +#define NGINX_VERSION "1.3.15" #define NGINX_VER "nginx/" NGINX_VERSION #define NGINX_VAR "NGINX" Modified: trunk/src/http/modules/perl/nginx.pm =================================================================== --- trunk/src/http/modules/perl/nginx.pm 2013-03-05 14:36:20 UTC (rev 5100) +++ trunk/src/http/modules/perl/nginx.pm 2013-03-07 17:07:04 UTC (rev 5101) @@ -50,7 +50,7 @@ HTTP_INSUFFICIENT_STORAGE ); -our $VERSION = '1.3.14'; +our $VERSION = '1.3.15'; require XSLoader; XSLoader::load('nginx', $VERSION); From vbart at nginx.com Thu Mar 7 17:21:51 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Thu, 7 Mar 2013 17:21:51 +0000 Subject: [nginx] svn commit: r5102 - trunk/src/http Message-ID: <20130307172151.4E37C3FABFE@mail.nginx.com> Author: vbart Date: 2013-03-07 17:21:50 +0000 (Thu, 07 Mar 2013) New Revision: 5102 URL: http://trac.nginx.org/nginx/changeset/5102/nginx Log: Create request object only after the first byte was received. 
Previously, we always created an object and logged 400 (Bad Request) in access log if a client closed connection without sending any data. Such a connection was counted as "reading". Since it's common for modern browsers to behave like this, it's no longer considered an error if a client closes connection without sending any data, and such a connection will be counted as "waiting". Now, we do not log 400 (Bad Request) and keep memory footprint as small as possible. Modified: trunk/src/http/ngx_http_request.c Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-03-07 17:07:04 UTC (rev 5101) +++ trunk/src/http/ngx_http_request.c 2013-03-07 17:21:50 UTC (rev 5102) @@ -10,6 +10,7 @@ #include +static void ngx_http_wait_request_handler(ngx_event_t *ev); static void ngx_http_init_request(ngx_event_t *ev); static void ngx_http_process_request_line(ngx_event_t *rev); static void ngx_http_process_request_headers(ngx_event_t *rev); @@ -308,12 +309,12 @@ c->log->connection = c->number; c->log->handler = ngx_http_log_error; c->log->data = ctx; - c->log->action = "reading client request line"; + c->log->action = "waiting for request"; c->log_error = NGX_ERROR_INFO; rev = c->read; - rev->handler = ngx_http_init_request; + rev->handler = ngx_http_wait_request_handler; c->write->handler = ngx_http_empty_handler; #if (NGX_HTTP_SSL) @@ -363,6 +364,99 @@ static void +ngx_http_wait_request_handler(ngx_event_t *rev) +{ + size_t size; + ssize_t n; + ngx_buf_t *b; + ngx_connection_t *c; + ngx_http_connection_t *hc; + ngx_http_core_srv_conf_t *cscf; + + c = rev->data; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http wait request handler"); + + if (rev->timedout) { + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out"); + ngx_http_close_connection(c); + return; + } + + hc = c->data; + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); + + size 
= cscf->client_header_buffer_size; + + b = c->buffer; + + if (b == NULL) { + b = ngx_create_temp_buf(c->pool, size); + if (b == NULL) { + ngx_http_close_connection(c); + return; + } + + c->buffer = b; + + } else if (b->start == NULL) { + + b->start = ngx_palloc(c->pool, size); + if (b->start == NULL) { + ngx_http_close_connection(c); + return; + } + + b->pos = b->start; + b->last = b->start; + b->end = b->last + size; + } + + n = c->recv(c, b->last, size); + + if (n == NGX_AGAIN) { + + if (!rev->timer_set) { + ngx_add_timer(rev, c->listening->post_accept_timeout); + } + + if (ngx_handle_read_event(rev, 0) != NGX_OK) { + ngx_http_close_connection(c); + return; + } + + /* + * We are trying to not hold c->buffer's memory for an idle connection. + */ + + if (ngx_pfree(c->pool, b->start) == NGX_OK) { + b->start = NULL; + } + + return; + } + + if (n == NGX_ERROR) { + ngx_http_close_connection(c); + return; + } + + if (n == 0) { + ngx_log_error(NGX_LOG_INFO, c->log, ngx_socket_errno, + "client closed connection"); + ngx_http_close_connection(c); + return; + } + + b->last += n; + + c->log->action = "reading client request line"; + + ngx_http_init_request(rev); +} + + +static void ngx_http_init_request(ngx_event_t *rev) { ngx_pool_t *pool; @@ -377,13 +471,6 @@ c = rev->data; - if (rev->timedout) { - ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out"); - - ngx_http_close_connection(c); - return; - } - c->requests++; hc = c->data; @@ -425,16 +512,6 @@ ngx_http_set_connection_log(r->connection, clcf->error_log); - if (c->buffer == NULL) { - c->buffer = ngx_create_temp_buf(c->pool, - cscf->client_header_buffer_size); - if (c->buffer == NULL) { - ngx_destroy_pool(r->pool); - ngx_http_close_connection(c); - return; - } - } - r->header_in = hc->nbusy ? 
hc->busy[0] : c->buffer; if (ngx_list_init(&r->headers_out.headers, r->pool, 20, @@ -592,10 +669,10 @@ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, rev->log, 0, "plain http"); - c->log->action = "reading client request line"; + c->log->action = "waiting for request"; - rev->handler = ngx_http_init_request; - ngx_http_init_request(rev); + rev->handler = ngx_http_wait_request_handler; + ngx_http_wait_request_handler(rev); return; } @@ -620,12 +697,12 @@ c->ssl->no_wait_shutdown = 1; - c->log->action = "reading client request line"; + c->log->action = "waiting for request"; - c->read->handler = ngx_http_init_request; + c->read->handler = ngx_http_wait_request_handler; /* STUB: epoll edge */ c->write->handler = ngx_http_empty_handler; - ngx_http_init_request(c->read); + ngx_http_wait_request_handler(c->read); return; } From vbart at nginx.com Thu Mar 7 17:41:40 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Thu, 7 Mar 2013 17:41:40 +0000 Subject: [nginx] svn commit: r5103 - trunk/src/http Message-ID: <20130307174140.677263FA78E@mail.nginx.com> Author: vbart Date: 2013-03-07 17:41:40 +0000 (Thu, 07 Mar 2013) New Revision: 5103 URL: http://trac.nginx.org/nginx/changeset/5103/nginx Log: Use "client_header_timeout" for all requests in a connection. Previously, only the first request in a connection used timeout value from the "client_header_timeout" directive while reading header. All subsequent requests used "keepalive_timeout" for that. It happened because timeout of the read event was set to the value of "keepalive_timeout" in ngx_http_set_keepalive(), but was not removed when the next request arrived. 
Modified: trunk/src/http/ngx_http_request.c Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-03-07 17:21:50 UTC (rev 5102) +++ trunk/src/http/ngx_http_request.c 2013-03-07 17:41:40 UTC (rev 5103) @@ -2736,8 +2736,6 @@ c->data = hc; - ngx_add_timer(rev, clcf->keepalive_timeout); - if (ngx_handle_read_event(rev, 0) != NGX_OK) { ngx_http_close_connection(c); return; @@ -2753,6 +2751,10 @@ hc->pipeline = 1; c->log->action = "reading client pipelined request line"; + if (rev->timer_set) { + ngx_del_timer(rev); + } + rev->handler = ngx_http_init_request; ngx_post_event(rev, &ngx_posted_events); return; @@ -2872,6 +2874,8 @@ c->idle = 1; ngx_reusable_connection(c, 1); + ngx_add_timer(rev, clcf->keepalive_timeout); + if (rev->ready) { ngx_post_event(rev, &ngx_posted_events); } @@ -2993,6 +2997,8 @@ c->idle = 0; ngx_reusable_connection(c, 0); + ngx_del_timer(rev); + ngx_http_init_request(rev); } From vbart at nginx.com Thu Mar 7 17:59:28 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Thu, 7 Mar 2013 17:59:28 +0000 Subject: [nginx] svn commit: r5104 - trunk/src/http Message-ID: <20130307175928.26F2A3FA023@mail.nginx.com> Author: vbart Date: 2013-03-07 17:59:27 +0000 (Thu, 07 Mar 2013) New Revision: 5104 URL: http://trac.nginx.org/nginx/changeset/5104/nginx Log: Respect the new behavior of TCP_DEFER_ACCEPT. In Linux 2.6.32, TCP_DEFER_ACCEPT was changed to accept connections after the deferring period is finished without any data available. (Reading from the socket returns EAGAIN in this case.) Since in nginx TCP_DEFER_ACCEPT is set to "post_accept_timeout", we do not need to wait longer if deferred accept returns with no data. 
Modified: trunk/src/http/ngx_http_request.c Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-03-07 17:41:40 UTC (rev 5103) +++ trunk/src/http/ngx_http_request.c 2013-03-07 17:59:27 UTC (rev 5104) @@ -416,6 +416,20 @@ if (n == NGX_AGAIN) { +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) + if (c->listening->deferred_accept +#if (NGX_HTTP_SSL) + && c->ssl == NULL +#endif + ) + { + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, + "client timed out in deferred accept"); + ngx_http_close_connection(c); + return; + } +#endif + if (!rev->timer_set) { ngx_add_timer(rev, c->listening->post_accept_timeout); } @@ -617,6 +631,15 @@ if (n == -1) { if (err == NGX_EAGAIN) { +#if (NGX_HAVE_DEFERRED_ACCEPT && defined TCP_DEFER_ACCEPT) + if (c->listening->deferred_accept) { + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, + "client timed out in deferred accept"); + ngx_http_close_connection(c); + return; + } +#endif + if (!rev->timer_set) { ngx_add_timer(rev, c->listening->post_accept_timeout); } From vbart at nginx.com Thu Mar 7 18:07:16 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Thu, 7 Mar 2013 18:07:16 +0000 Subject: [nginx] svn commit: r5105 - in trunk/src: core http Message-ID: <20130307180716.B15273FAAD1@mail.nginx.com> Author: vbart Date: 2013-03-07 18:07:16 +0000 (Thu, 07 Mar 2013) New Revision: 5105 URL: http://trac.nginx.org/nginx/changeset/5105/nginx Log: Removed c->single_connection flag. The c->single_connection was intended to be used as lock mechanism to serialize modifications of request object from several threads working with client and upstream connections. The flag is redundant since threads in nginx have never been used that way. 
Modified: trunk/src/core/ngx_connection.c trunk/src/core/ngx_connection.h trunk/src/http/ngx_http_request.c trunk/src/http/ngx_http_upstream.c Modified: trunk/src/core/ngx_connection.c =================================================================== --- trunk/src/core/ngx_connection.c 2013-03-07 17:59:27 UTC (rev 5104) +++ trunk/src/core/ngx_connection.c 2013-03-07 18:07:16 UTC (rev 5105) @@ -900,11 +900,9 @@ c->read->closed = 1; c->write->closed = 1; - if (c->single_connection) { - ngx_unlock(&c->lock); - c->read->locked = 0; - c->write->locked = 0; - } + ngx_unlock(&c->lock); + c->read->locked = 0; + c->write->locked = 0; ngx_mutex_unlock(ngx_posted_events_mutex); Modified: trunk/src/core/ngx_connection.h =================================================================== --- trunk/src/core/ngx_connection.h 2013-03-07 17:59:27 UTC (rev 5104) +++ trunk/src/core/ngx_connection.h 2013-03-07 18:07:16 UTC (rev 5105) @@ -152,7 +152,6 @@ unsigned log_error:3; /* ngx_connection_log_error_e */ - unsigned single_connection:1; unsigned unexpected_eof:1; unsigned timedout:1; unsigned error:1; Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-03-07 17:59:27 UTC (rev 5104) +++ trunk/src/http/ngx_http_request.c 2013-03-07 18:07:16 UTC (rev 5105) @@ -554,7 +554,6 @@ return; } - c->single_connection = 1; c->destroyed = 0; #if (NGX_HTTP_SSL) Modified: trunk/src/http/ngx_http_upstream.c =================================================================== --- trunk/src/http/ngx_http_upstream.c 2013-03-07 17:59:27 UTC (rev 5104) +++ trunk/src/http/ngx_http_upstream.c 2013-03-07 18:07:16 UTC (rev 5105) @@ -1118,8 +1118,6 @@ r->connection->log->action = "connecting to upstream"; - r->connection->single_connection = 0; - if (u->state && u->state->response_sec) { tp = ngx_timeofday(); u->state->response_sec = tp->sec - u->state->response_sec; From vbart at nginx.com Thu Mar 7 
18:14:27 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Thu, 7 Mar 2013 18:14:27 +0000 Subject: [nginx] svn commit: r5106 - trunk/src/http Message-ID: <20130307181427.B18503FAB1A@mail.nginx.com> Author: vbart Date: 2013-03-07 18:14:27 +0000 (Thu, 07 Mar 2013) New Revision: 5106 URL: http://trac.nginx.org/nginx/changeset/5106/nginx Log: Refactored ngx_http_init_request(). Now it can be used as the request object factory with minimal impact on the connection object. Therefore it was renamed to ngx_http_create_request(). Modified: trunk/src/http/ngx_http_request.c trunk/src/http/ngx_http_request.h Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-03-07 18:07:16 UTC (rev 5105) +++ trunk/src/http/ngx_http_request.c 2013-03-07 18:14:27 UTC (rev 5106) @@ -11,7 +11,7 @@ static void ngx_http_wait_request_handler(ngx_event_t *ev); -static void ngx_http_init_request(ngx_event_t *ev); +static ngx_http_request_t *ngx_http_create_request(ngx_connection_t *c); static void ngx_http_process_request_line(ngx_event_t *rev); static void ngx_http_process_request_headers(ngx_event_t *rev); static ssize_t ngx_http_read_request_header(ngx_http_request_t *r); @@ -466,16 +466,22 @@ c->log->action = "reading client request line"; - ngx_http_init_request(rev); + c->data = ngx_http_create_request(c); + if (c->data == NULL) { + ngx_http_close_connection(c); + return; + } + + rev->handler = ngx_http_process_request_line; + ngx_http_process_request_line(rev); } -static void -ngx_http_init_request(ngx_event_t *rev) +static ngx_http_request_t * +ngx_http_create_request(ngx_connection_t *c) { ngx_pool_t *pool; ngx_time_t *tp; - ngx_connection_t *c; ngx_http_request_t *r; ngx_http_log_ctx_t *ctx; ngx_http_connection_t *hc; @@ -483,8 +489,6 @@ ngx_http_core_loc_conf_t *clcf; ngx_http_core_main_conf_t *cmcf; - c = rev->data; - c->requests++; hc = c->data; @@ -493,27 +497,19 @@ pool = 
ngx_create_pool(cscf->request_pool_size, c->log); if (pool == NULL) { - ngx_http_close_connection(c); - return; + return NULL; } r = ngx_pcalloc(pool, sizeof(ngx_http_request_t)); if (r == NULL) { ngx_destroy_pool(pool); - ngx_http_close_connection(c); - return; + return NULL; } r->pool = pool; - r->pipeline = hc->pipeline; - - c->data = r; r->http_connection = hc; - - c->sent = 0; r->signature = NGX_HTTP_MODULE; - r->connection = c; r->main_conf = hc->conf_ctx->main_conf; @@ -533,15 +529,13 @@ != NGX_OK) { ngx_destroy_pool(r->pool); - ngx_http_close_connection(c); - return; + return NULL; } r->ctx = ngx_pcalloc(r->pool, sizeof(void *) * ngx_http_max_module); if (r->ctx == NULL) { ngx_destroy_pool(r->pool); - ngx_http_close_connection(c); - return; + return NULL; } cmcf = ngx_http_get_module_main_conf(r, ngx_http_core_module); @@ -550,12 +544,9 @@ * sizeof(ngx_http_variable_value_t)); if (r->variables == NULL) { ngx_destroy_pool(r->pool); - ngx_http_close_connection(c); - return; + return NULL; } - c->destroyed = 0; - #if (NGX_HTTP_SSL) if (c->ssl) { r->main_filter_need_in_memory = 1; @@ -592,8 +583,7 @@ (void) ngx_atomic_fetch_add(ngx_stat_requests, 1); #endif - rev->handler = ngx_http_process_request_line; - ngx_http_process_request_line(rev); + return r; } @@ -2722,7 +2712,7 @@ /* * If the large header buffers were allocated while the previous * request processing then we do not use c->buffer for - * the pipelined request (see ngx_http_init_request()). + * the pipelined request (see ngx_http_create_request()). * * Now we would move the large header buffers to the free list. 
*/ @@ -2770,20 +2760,30 @@ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "pipelined request"); - hc->pipeline = 1; c->log->action = "reading client pipelined request line"; + r = ngx_http_create_request(c); + if (r == NULL) { + ngx_http_close_connection(c); + return; + } + + r->pipeline = 1; + + c->data = r; + + c->sent = 0; + c->destroyed = 0; + if (rev->timer_set) { ngx_del_timer(rev); } - rev->handler = ngx_http_init_request; + rev->handler = ngx_http_process_request_line; ngx_post_event(rev, &ngx_posted_events); return; } - hc->pipeline = 0; - /* * To keep a memory footprint as small as possible for an idle keepalive * connection we try to free c->buffer's memory if it was allocated outside @@ -3019,9 +3019,19 @@ c->idle = 0; ngx_reusable_connection(c, 0); + c->data = ngx_http_create_request(c); + if (c->data == NULL) { + ngx_http_close_connection(c); + return; + } + + c->sent = 0; + c->destroyed = 0; + ngx_del_timer(rev); - ngx_http_init_request(rev); + rev->handler = ngx_http_process_request_line; + ngx_http_process_request_line(rev); } Modified: trunk/src/http/ngx_http_request.h =================================================================== --- trunk/src/http/ngx_http_request.h 2013-03-07 18:07:16 UTC (rev 5105) +++ trunk/src/http/ngx_http_request.h 2013-03-07 18:14:27 UTC (rev 5106) @@ -308,8 +308,9 @@ ngx_buf_t **free; ngx_int_t nfree; - unsigned pipeline:1; - unsigned ssl:1; +#if (NGX_HTTP_SSL) + ngx_uint_t ssl; /* unsigned ssl:1; */ +#endif } ngx_http_connection_t; From vbart at nginx.com Thu Mar 7 18:21:29 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Thu, 7 Mar 2013 18:21:29 +0000 Subject: [nginx] svn commit: r5107 - trunk/src/http/modules Message-ID: <20130307182130.E86C13FAC53@mail.nginx.com> Author: vbart Date: 2013-03-07 18:21:28 +0000 (Thu, 07 Mar 2013) New Revision: 5107 URL: http://trac.nginx.org/nginx/changeset/5107/nginx Log: SSL: Next Protocol Negotiation extension support. 
Not only this is useful for the upcoming SPDY support, but it can also help to improve HTTPS performance by enabling TLS False Start in Chrome/Chromium browsers [1]. So, we always enable NPN for HTTPS if it is supported by OpenSSL. [1] http://www.imperialviolet.org/2012/04/11/falsestart.html Modified: trunk/src/http/modules/ngx_http_ssl_module.c Modified: trunk/src/http/modules/ngx_http_ssl_module.c =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.c 2013-03-07 18:14:27 UTC (rev 5106) +++ trunk/src/http/modules/ngx_http_ssl_module.c 2013-03-07 18:21:28 UTC (rev 5107) @@ -18,6 +18,11 @@ #define NGX_DEFAULT_ECDH_CURVE "prime256v1" +#ifdef TLSEXT_TYPE_next_proto_neg +static int ngx_http_ssl_npn_advertised(ngx_ssl_conn_t *ssl_conn, + const unsigned char **out, unsigned int *outlen, void *arg); +#endif + static ngx_int_t ngx_http_ssl_static_variable(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_ssl_variable(ngx_http_request_t *r, @@ -262,6 +267,30 @@ static ngx_str_t ngx_http_ssl_sess_id_ctx = ngx_string("HTTP"); +#ifdef TLSEXT_TYPE_next_proto_neg + +#define NGX_HTTP_NPN_ADVERTISE "\x08http/1.1" + +static int +ngx_http_ssl_npn_advertised(ngx_ssl_conn_t *ssl_conn, + const unsigned char **out, unsigned int *outlen, void *arg) +{ +#if (NGX_DEBUG) + ngx_connection_t *c; + + c = ngx_ssl_get_connection(ssl_conn); + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "SSL NPN advertised"); +#endif + + *out = (unsigned char *) NGX_HTTP_NPN_ADVERTISE; + *outlen = sizeof(NGX_HTTP_NPN_ADVERTISE) - 1; + + return SSL_TLSEXT_ERR_OK; +} + +#endif + + static ngx_int_t ngx_http_ssl_static_variable(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data) @@ -490,6 +519,11 @@ #endif +#ifdef TLSEXT_TYPE_next_proto_neg + SSL_CTX_set_next_protos_advertised_cb(conf->ssl.ctx, + ngx_http_ssl_npn_advertised, NULL); +#endif + cln = ngx_pool_cleanup_add(cf->pool, 0); if (cln == 
NULL) { return NGX_CONF_ERROR; From ja.nginx at mailnull.com Thu Mar 7 23:06:48 2013 From: ja.nginx at mailnull.com (SamB) Date: Fri, 08 Mar 2013 00:06:48 +0100 Subject: [PATCH] Return http status code from XSLT Message-ID: <51391D88.4060906@mailnull.com> Hi, this patch provides simple possibility to return http error code from within XSLT transformation result. This is simple way to quickly and correctly return i.e. 404 error codes instead of producing dummy soft-404 pages. Sample XSLT: Thanks Sam src/http/modules/ngx_http_xslt_filter_module.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/http/modules/ngx_http_xslt_filter_module.c b/src/http/modules/ngx_http_xslt_filter_module.c index a6ae1ce..c5710d5 100644 --- a/src/http/modules/ngx_http_xslt_filter_module.c +++ b/src/http/modules/ngx_http_xslt_filter_module.c @@ -27,6 +27,9 @@ #endif +#define NGX_HTTP_XSLT_NS "http://www.w3.org/http" + + typedef struct { u_char *name; void *data; @@ -477,6 +480,7 @@ ngx_http_xslt_apply_stylesheet(ngx_http_request_t *r, ngx_uint_t i; xmlChar *buf; xmlDocPtr doc, res; + xmlNodePtr node; ngx_http_xslt_sheet_t *sheet; ngx_http_xslt_filter_loc_conf_t *conf; @@ -551,6 +555,24 @@ ngx_http_xslt_apply_stylesheet(ngx_http_request_t *r, doc_type, type ? type : (u_char *) "(null)", encoding ? 
encoding : (u_char *) "(null)"); + node = xmlDocGetRootElement(doc); + if (node != NULL) { + xmlChar *attr = NULL; + + attr = xmlGetNsProp(node, (const xmlChar *)"status", (const xmlChar *)NGX_HTTP_XSLT_NS); + if (attr != NULL + && attr[0] != '\0') { + + ngx_uint_t status = strtoul((const char *)attr, NULL, 10); + + if (status > 0 + && r->headers_out.status != status) { + r->headers_out.status = status; + r->headers_out.status_line.len = 0; + } + } + } + rc = xsltSaveResultToString(&buf, &len, doc, sheet[i - 1].stylesheet); xmlFreeDoc(doc); From daisuke.miura at drakontia.com Fri Mar 8 10:16:36 2013 From: daisuke.miura at drakontia.com (=?UTF-8?B?5LiJ5rWm5aSn5L2R?=) Date: Fri, 8 Mar 2013 19:16:36 +0900 Subject: couldn't build Nginx with spdy support on Windows XP Message-ID: Hi, I have a problem about spdy support. I tried building Nginx with SPDY according to the page. http://nginx.org/en/docs/howto_build_on_win32.html but, make command was stopped when making ngx_http_spdy.obj. [Environment] Nginx: 1.3.14(from svn source) uname -a: MINGW32_NT-5.1 EBB007K 1.0.18(0.48/3/2) 2012-11-21 22:34 i686 Msys $ export LIB="C:\\Program Files\\Microsoft Visual Studio 10.0\\VC\\lib;C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\Lib" $ export INCLUDE="C:\\Program Files\\Microsoft Visual Studio 10.0\\VC\\include;C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\Include" $ export PATH="$PATH:/c/strawberryperl/perl/bin:/c/Program Files/Microsoft Visual Studio 10.0/VC/bin:/c/Program Files/Microsoft Visual Studio 10.0/Common7/IDE:/c/Program Files/Microsoft SDKs/Windows/v7.1/Bin" configure command $auto/configure --with-cc=cl --builddir=obj --prefix= --conf-path=conf/nginx.conf --pid-path=logs/ngi nx.pid --http-log-path=logs/access.log --error-log-path=logs/error.log --sbin-path=nginx.exe --http- client-body-temp-path=temp/client_body_temp --http-proxy-temp-path=temp/proxy_temp --http-fastcgi-te mp-path=temp/fastcgi_temp --with-cc-opt=-DFD_SETSIZE=1024 --with-pcre=obj/lib/pcre-8.32 
--with-zlib= obj/lib/zlib-1.2.7 --with-openssl=obj/lib/openssl-1.0.1e --with-select_module --with-http_ssl_module --with-http_spdy_module --with-http_perl_module --with-http_dav_module make command $ nmake -f obj/Makefile ... cl -c -O2 -W4 -WX -nologo -MT -Zi -DFD_SETSIZE=1024 -DNO_SYS_TYPES_H -Yungx_config.h -Fpobj /ngx_config.pch -I src/core -I src/event -I src/event/modules -I src/os/win32 -I obj/lib/pcre-8. 32 -I obj/lib/openssl-1.0.1e/openssl/include -I obj/lib/zlib-1.2.7 -I obj -I src/http -I src/ht tp/modules -I src/http/modules/perl -I src/mail -Foobj/src/http/ngx_http_spdy.obj src/http/ngx_h ttp_spdy.c ngx_http_spdy.c src/http/ngx_http_spdy.c(1264) : error C2220: warning treated as error - no 'object' file generated src/http/ngx_http_spdy.c(1264) : warning C4244: 'function' : conversion from 'off_t' to 'size_t', po ssible loss of data src/http/ngx_http_spdy.c(1642) : warning C4310: cast truncates constant value NMAKE : fatal error U1077: '"\Program Files\Microsoft Visual Studio 10.0\VC\bin\cl.EXE"' : return co de '0x2' Stop. No one try to build Nginx with SPDY on Windows XP? This problem is happened only on Windows? Please help me. BTW, why configure command adds "--prefix=openssl" to openssl directory? I extended zip file to obj/lib/openssl-1.0.1e directory that include "Configure" file and "include" directory. But, configure command said "obj/lib/openssl-1.0.1e/openssl/include". It's prrety strange. So, I create "obj/lib/openssl-1.0.1e/openssl" directory and copy include directory to that. Thanks. -------------- next part -------------- An HTML attachment was scrubbed... URL: From vbart at nginx.com Fri Mar 8 16:51:24 2013 From: vbart at nginx.com (Valentin V. Bartenev) Date: Fri, 8 Mar 2013 20:51:24 +0400 Subject: couldn't build Nginx with spdy support on Windows XP In-Reply-To: References: Message-ID: <201303082051.24156.vbart@nginx.com> On Friday 08 March 2013 14:16:36 ???? wrote: > Hi, > I have a problem about spdy support. 
> > I tried building Nginx with SPDY according to the page. > http://nginx.org/en/docs/howto_build_on_win32.html > > but, make command was stopped when making ngx_http_spdy.obj. > > [Environment] > Nginx: 1.3.14(from svn source) > uname -a: MINGW32_NT-5.1 EBB007K 1.0.18(0.48/3/2) 2012-11-21 22:34 i686 > Msys > > $ export LIB="C:\\Program Files\\Microsoft Visual Studio > 10.0\\VC\\lib;C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\Lib" > $ export INCLUDE="C:\\Program Files\\Microsoft Visual Studio > 10.0\\VC\\include;C:\\Program Files\\Microsoft > SDKs\\Windows\\v7.1\\Include" > $ export PATH="$PATH:/c/strawberryperl/perl/bin:/c/Program Files/Microsoft > Visual Studio 10.0/VC/bin:/c/Program Files/Microsoft Visual Studio > 10.0/Common7/IDE:/c/Program Files/Microsoft SDKs/Windows/v7.1/Bin" > > configure command > $auto/configure --with-cc=cl --builddir=obj --prefix= > --conf-path=conf/nginx.conf --pid-path=logs/ngi > nx.pid --http-log-path=logs/access.log --error-log-path=logs/error.log > --sbin-path=nginx.exe --http- > client-body-temp-path=temp/client_body_temp > --http-proxy-temp-path=temp/proxy_temp --http-fastcgi-te > mp-path=temp/fastcgi_temp --with-cc-opt=-DFD_SETSIZE=1024 > --with-pcre=obj/lib/pcre-8.32 --with-zlib= > obj/lib/zlib-1.2.7 --with-openssl=obj/lib/openssl-1.0.1e > --with-select_module --with-http_ssl_module > --with-http_spdy_module --with-http_perl_module --with-http_dav_module > > make command > $ nmake -f obj/Makefile > > ... > cl -c -O2 -W4 -WX -nologo -MT -Zi -DFD_SETSIZE=1024 > -DNO_SYS_TYPES_H -Yungx_config.h -Fpobj > /ngx_config.pch -I src/core -I src/event -I src/event/modules -I > src/os/win32 -I obj/lib/pcre-8. 
> 32 -I obj/lib/openssl-1.0.1e/openssl/include -I obj/lib/zlib-1.2.7 -I > obj -I src/http -I src/ht > tp/modules -I src/http/modules/perl -I src/mail > -Foobj/src/http/ngx_http_spdy.obj src/http/ngx_h > ttp_spdy.c > ngx_http_spdy.c > src/http/ngx_http_spdy.c(1264) : error C2220: warning treated as error - no > 'object' file generated > src/http/ngx_http_spdy.c(1264) : warning C4244: 'function' : conversion > from 'off_t' to 'size_t', po > ssible loss of data > src/http/ngx_http_spdy.c(1642) : warning C4310: cast truncates constant > value > NMAKE : fatal error U1077: '"\Program Files\Microsoft Visual Studio > 10.0\VC\bin\cl.EXE"' : return co > de '0x2' > Stop. > > No one try to build Nginx with SPDY on Windows XP? > This problem is happened only on Windows? Yes, looks so. > Please help me. > Well, I'll try. wbr, Valentin V. Bartenev -- http://nginx.org/en/donation.html From actioncao2012 at gmail.com Sun Mar 10 01:18:16 2013 From: actioncao2012 at gmail.com (actioncao) Date: Sun, 10 Mar 2013 09:18:16 +0800 Subject: subscript Message-ID: ???? iPhone From vbart at nginx.com Sun Mar 10 17:49:12 2013 From: vbart at nginx.com (Valentin V. Bartenev) Date: Sun, 10 Mar 2013 21:49:12 +0400 Subject: couldn't build Nginx with spdy support on Windows XP In-Reply-To: <201303082051.24156.vbart@nginx.com> References: <201303082051.24156.vbart@nginx.com> Message-ID: <201303102149.12538.vbart@nginx.com> On Friday 08 March 2013 20:51:24 Valentin V. Bartenev wrote: > On Friday 08 March 2013 14:16:36 ???? wrote: > > Hi, > > I have a problem about spdy support. > > > > I tried building Nginx with SPDY according to the page. > > http://nginx.org/en/docs/howto_build_on_win32.html > > > > but, make command was stopped when making ngx_http_spdy.obj. 
> > > > [Environment] > > Nginx: 1.3.14(from svn source) > > uname -a: MINGW32_NT-5.1 EBB007K 1.0.18(0.48/3/2) 2012-11-21 22:34 i686 > > Msys > > > > $ export LIB="C:\\Program Files\\Microsoft Visual Studio > > 10.0\\VC\\lib;C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\Lib" > > $ export INCLUDE="C:\\Program Files\\Microsoft Visual Studio > > 10.0\\VC\\include;C:\\Program Files\\Microsoft > > SDKs\\Windows\\v7.1\\Include" > > $ export PATH="$PATH:/c/strawberryperl/perl/bin:/c/Program > > Files/Microsoft Visual Studio 10.0/VC/bin:/c/Program Files/Microsoft > > Visual Studio 10.0/Common7/IDE:/c/Program Files/Microsoft > > SDKs/Windows/v7.1/Bin" > > > > configure command > > $auto/configure --with-cc=cl --builddir=obj --prefix= > > --conf-path=conf/nginx.conf --pid-path=logs/ngi > > nx.pid --http-log-path=logs/access.log --error-log-path=logs/error.log > > --sbin-path=nginx.exe --http- > > client-body-temp-path=temp/client_body_temp > > --http-proxy-temp-path=temp/proxy_temp --http-fastcgi-te > > mp-path=temp/fastcgi_temp --with-cc-opt=-DFD_SETSIZE=1024 > > --with-pcre=obj/lib/pcre-8.32 --with-zlib= > > obj/lib/zlib-1.2.7 --with-openssl=obj/lib/openssl-1.0.1e > > --with-select_module --with-http_ssl_module > > > > --with-http_spdy_module --with-http_perl_module --with-http_dav_module > > > > make command > > $ nmake -f obj/Makefile > > > > ... > > > > cl -c -O2 -W4 -WX -nologo -MT -Zi -DFD_SETSIZE=1024 > > > > -DNO_SYS_TYPES_H -Yungx_config.h -Fpobj > > /ngx_config.pch -I src/core -I src/event -I src/event/modules -I > > src/os/win32 -I obj/lib/pcre-8. 
> > 32 -I obj/lib/openssl-1.0.1e/openssl/include -I obj/lib/zlib-1.2.7 -I > > obj -I src/http -I src/ht > > tp/modules -I src/http/modules/perl -I src/mail > > > > -Foobj/src/http/ngx_http_spdy.obj src/http/ngx_h > > > > ttp_spdy.c > > ngx_http_spdy.c > > src/http/ngx_http_spdy.c(1264) : error C2220: warning treated as error - > > no 'object' file generated > > src/http/ngx_http_spdy.c(1264) : warning C4244: 'function' : conversion > > from 'off_t' to 'size_t', po > > ssible loss of data > > src/http/ngx_http_spdy.c(1642) : warning C4310: cast truncates constant > > value > > NMAKE : fatal error U1077: '"\Program Files\Microsoft Visual Studio > > 10.0\VC\bin\cl.EXE"' : return co > > de '0x2' > > Stop. > > > > No one try to build Nginx with SPDY on Windows XP? > > This problem is happened only on Windows? > > Yes, looks so. > > > Please help me. > > Well, I'll try. > The issue should be fixed in: http://nginx.org/patches/spdy/patch.spdy-67_1.3.14.txt At least it builds flawlessly now with VC 2010 on WinXP. Thank you for the report. wbr, Valentin V. Bartenev -- http://nginx.org/en/donation.html From vbart at nginx.com Mon Mar 11 11:19:59 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Mon, 11 Mar 2013 11:19:59 +0000 Subject: [nginx] svn commit: r5108 - trunk/src/http/modules Message-ID: <20130311111959.4FED13F9F44@mail.nginx.com> Author: vbart Date: 2013-03-11 11:19:58 +0000 (Mon, 11 Mar 2013) New Revision: 5108 URL: http://trac.nginx.org/nginx/changeset/5108/nginx Log: Gzip: fixed setting of NGX_HTTP_GZIP_BUFFERED. In r2411 setting of NGX_HTTP_GZIP_BUFFERED in c->buffered was moved from ngx_http_gzip_filter_deflate_start() to ngx_http_gzip_filter_buffer() since it was always called first. But in r2543 the "postpone_gzipping" directive was introduced, and if postponed gzipping is disabled (the default setting), ngx_http_gzip_filter_buffer() is not called at all. 
We must always set NGX_HTTP_GZIP_BUFFERED after the start of compression since there is always a trailer that is buffered. There are no known cases when it leads to any problem with current code. But we already had troubles in upcoming SPDY implementation. Modified: trunk/src/http/modules/ngx_http_gzip_filter_module.c Modified: trunk/src/http/modules/ngx_http_gzip_filter_module.c =================================================================== --- trunk/src/http/modules/ngx_http_gzip_filter_module.c 2013-03-07 18:21:28 UTC (rev 5107) +++ trunk/src/http/modules/ngx_http_gzip_filter_module.c 2013-03-11 11:19:58 UTC (rev 5108) @@ -620,6 +620,8 @@ return NGX_ERROR; } + r->connection->buffered |= NGX_HTTP_GZIP_BUFFERED; + ctx->last_out = &ctx->out; ctx->crc32 = crc32(0L, Z_NULL, 0); ctx->flush = Z_NO_FLUSH; From vbart at nginx.com Mon Mar 11 14:44:56 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Mon, 11 Mar 2013 14:44:56 +0000 Subject: [nginx] svn commit: r5109 - trunk/src/http Message-ID: <20130311144456.809FA3F9EA7@mail.nginx.com> Author: vbart Date: 2013-03-11 14:44:56 +0000 (Mon, 11 Mar 2013) New Revision: 5109 URL: http://trac.nginx.org/nginx/changeset/5109/nginx Log: Removed unused prototype of ngx_http_find_server_conf(). This function prototype and its implementation was added in r90, but the implementation was removed in r97. 
Modified: trunk/src/http/ngx_http.h Modified: trunk/src/http/ngx_http.h =================================================================== --- trunk/src/http/ngx_http.h 2013-03-11 11:19:58 UTC (rev 5108) +++ trunk/src/http/ngx_http.h 2013-03-11 14:44:56 UTC (rev 5109) @@ -104,7 +104,6 @@ ngx_http_chunked_t *ctx); -ngx_int_t ngx_http_find_server_conf(ngx_http_request_t *r); void ngx_http_update_location_config(ngx_http_request_t *r); void ngx_http_handler(ngx_http_request_t *r); void ngx_http_run_posted_requests(ngx_connection_t *c); From brian at akins.org Mon Mar 11 17:04:32 2013 From: brian at akins.org (Brian Akins) Date: Mon, 11 Mar 2013 13:04:32 -0400 Subject: 1.3.x if-none-match only works if las-modified set Message-ID: in ngx_http_not_modified_header_filter if (r->headers_out.status != NGX_HTTP_OK || r != r->main || r->headers_out.last_modified_time == -1) { So, it only checks if-match or if-none-match if last-modified is set. Is this on purpose? I'm just wondering if we want to be able to check etime without checking last-modified. From mdounin at mdounin.ru Mon Mar 11 17:40:57 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 11 Mar 2013 21:40:57 +0400 Subject: 1.3.x if-none-match only works if las-modified set In-Reply-To: References: Message-ID: <20130311174057.GE15378@mdounin.ru> Hello! On Mon, Mar 11, 2013 at 01:04:32PM -0400, Brian Akins wrote: > in ngx_http_not_modified_header_filter > > if (r->headers_out.status != NGX_HTTP_OK > || r != r->main > || r->headers_out.last_modified_time == -1) > { > > So, it only checks if-match or if-none-match if last-modified is set. > Is this on purpose? I'm just wondering if we want to be able to check > etime without checking last-modified. The following commit log suggests it was done intentionally, http://trac.nginx.org/nginx/changeset/4745/nginx: : Note that the "r->headers_out.last_modified_time == -1" check in the not : modified filter is left as is intentionally. 
It's to prevent handling : of If-* headers in case of proxy without cache (much like currently : done with If-Modified-Since). Changing the code to work with ETag from cache without Last-Modified being present in a cached response should be possible, but it will require some additional changes. -- Maxim Dounin http://nginx.org/en/donation.html From brian at akins.org Mon Mar 11 18:08:17 2013 From: brian at akins.org (Brian Akins) Date: Mon, 11 Mar 2013 14:08:17 -0400 Subject: 1.3.x if-none-match only works if las-modified set In-Reply-To: <20130311174057.GE15378@mdounin.ru> References: <20130311174057.GE15378@mdounin.ru> Message-ID: On Mon, Mar 11, 2013 at 1:40 PM, Maxim Dounin wrote: > Changing the code to work with ETag from cache without > Last-Modified being present in a cached response should be > possible, but it will require some additional changes. In my case, the modified time changes although the content doesn't actually change. The upstream always reports the generated time as last modified (or includes no last-modified) at all. --Brian From mdounin at mdounin.ru Mon Mar 11 18:20:48 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 11 Mar 2013 22:20:48 +0400 Subject: [PATCH] Return http status code from XSLT In-Reply-To: <51391D88.4060906@mailnull.com> References: <51391D88.4060906@mailnull.com> Message-ID: <20130311182048.GF15378@mdounin.ru> Hello! On Fri, Mar 08, 2013 at 12:06:48AM +0100, SamB wrote: > Hi, > > this patch provides simple possibility to return http error code > from within XSLT transformation result. > This is simple way to quickly and correctly return i.e. 404 error > codes instead of producing dummy soft-404 pages. > > Sample XSLT: > > > > > While an ability to alter status code returned is intresting, I don't think it should be done this way, abusing output attributes. I would rather think of something like an XSLT variable with a predefined name queried after a transformation. 
-- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Tue Mar 12 13:38:05 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 12 Mar 2013 13:38:05 +0000 Subject: [nginx] svn commit: r5110 - trunk/src/http Message-ID: <20130312133805.66F193F9F9C@mail.nginx.com> Author: mdounin Date: 2013-03-12 13:38:04 +0000 (Tue, 12 Mar 2013) New Revision: 5110 URL: http://trac.nginx.org/nginx/changeset/5110/nginx Log: Fixed logging in ngx_http_wait_request_handler(). If c->recv() returns 0 there is no sense in using ngx_socket_errno for logging, its value meaningless. (The code in question was copied from ngx_http_keepalive_handler(), but ngx_socket_errno makes sense there as it's used as a part of ECONNRESET handling, and the c->recv() call is preceeded by the ngx_set_socket_errno(0) call.) Modified: trunk/src/http/ngx_http_request.c Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-03-11 14:44:56 UTC (rev 5109) +++ trunk/src/http/ngx_http_request.c 2013-03-12 13:38:04 UTC (rev 5110) @@ -456,7 +456,7 @@ } if (n == 0) { - ngx_log_error(NGX_LOG_INFO, c->log, ngx_socket_errno, + ngx_log_error(NGX_LOG_INFO, c->log, 0, "client closed connection"); ngx_http_close_connection(c); return; From zls.sogou at gmail.com Tue Mar 12 16:31:00 2013 From: zls.sogou at gmail.com (lanshun zhou) Date: Wed, 13 Mar 2013 00:31:00 +0800 Subject: [BUG] Posted requests not handled after dns resolving Message-ID: in ngx_http_upstream_resolve_handler, posted requests are not handled, so if a run time dns resolving is failed in a subrequest, (for example, the resolver can not be reached, or the domain does not exist) the main request will know nothing about this, until something else attached to this connection happens, like connection broken or a write timeout a patch is attached and hope it helps simple configuration that can reproduce the problem (with addition 
module enabled): addition_types *; resolver your_resolver_here; location /test { set $ihost xxx; # xxx here causes a failed run-time dns resolving proxy_pass http://$ihost; } location /zzz { add_after_body /test; return 200 "test"; } curl -v http://localhost/zzz -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: posted_requests.patch Type: application/octet-stream Size: 1420 bytes Desc: not available URL: From tss at iki.fi Wed Mar 13 13:53:16 2013 From: tss at iki.fi (Timo Sirainen) Date: Wed, 13 Mar 2013 15:53:16 +0200 Subject: imap proxy and untagged commands Message-ID: <1363182796.10326.129.camel@innu> Hi, I noticed that the IMAP proxying code currently doesn't ignore extra untagged replies from the backend server. For example this would be a valid session: S: * OK Server ready S: * NO We're having some load issues C: a login {4} S: * OK foo S: + OK S: * OK bar C: user pass S: * OK almost done S: a OK logged in In real world Dovecot can already send some extra untagged replies if there is some trouble (e.g. heavy load) with its authentication process. What happens is: S: * OK Waiting for authentication process to respond.. C: a LOGIN {3} S: * OK Waiting for authentication process to respond.. and nginx fails: 2013/03/13 15:38:55 [error] 7257#0: *15 upstream sent invalid response: "* OK Waiting for authentication process to respond.." while reading response from upstream, client: 127.0.0.1, server: 0.0.0.0:10143, login: "tss", upstream: 127.0.0.1:143 I attempted to fix this, but it looks like the current code doesn't make this very easy. Especially since it seems to be handling data one "read block" at a time, which can contain multiple lines, each of which should be handled separately (the untagged and non-untagged replies may arrive in the same IP packet). 
From mdounin at mdounin.ru Wed Mar 13 14:48:57 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 13 Mar 2013 18:48:57 +0400 Subject: imap proxy and untagged commands In-Reply-To: <1363182796.10326.129.camel@innu> References: <1363182796.10326.129.camel@innu> Message-ID: <20130313144857.GA15378@mdounin.ru> Hello! First of all, thank you for Dovecot! On Wed, Mar 13, 2013 at 03:53:16PM +0200, Timo Sirainen wrote: > Hi, > > I noticed that the IMAP proxying code currently doesn't ignore extra > untagged replies from the backend server. For example this would be a > valid session: > > S: * OK Server ready > S: * NO We're having some load issues > C: a login {4} > S: * OK foo > S: + OK > S: * OK bar > C: user pass > S: * OK almost done > S: a OK logged in > > In real world Dovecot can already send some extra untagged replies if > there is some trouble (e.g. heavy load) with its authentication process. > What happens is: > > S: * OK Waiting for authentication process to respond.. > C: a LOGIN {3} > S: * OK Waiting for authentication process to respond.. > > and nginx fails: > > 2013/03/13 15:38:55 [error] 7257#0: *15 upstream sent invalid response: > "* OK Waiting for authentication process to respond.." while reading > response from upstream, client: 127.0.0.1, server: 0.0.0.0:10143, login: > "tss", upstream: 127.0.0.1:143 > > I attempted to fix this, but it looks like the current code doesn't make > this very easy. Especially since it seems to be handling data one "read > block" at a time, which can contain multiple lines, each of which should > be handled separately (the untagged and non-untagged replies may arrive > in the same IP packet). Yes, thanks, it's a known issue - untagged responses are not handled properly (and there is a similar issue with SMTP multiline replies, quick and dirty patch at [1]). 
This isn't considered as a major problem since nginx is expected to work with controlled IMAP backends, and it's usually trivial to avoid such untagged responses. It would be good to fix it though. [1] http://mailman.nginx.org/pipermail/nginx/2010-August/021785.html -- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Wed Mar 13 16:29:37 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 13 Mar 2013 20:29:37 +0400 Subject: [BUG] Posted requests not handled after dns resolving In-Reply-To: References: Message-ID: <20130313162937.GD15378@mdounin.ru> Hello! On Wed, Mar 13, 2013 at 12:31:00AM +0800, lanshun zhou wrote: > in ngx_http_upstream_resolve_handler, posted requests are not handled, so > if a run time > dns resolving is failed in a subrequest, (for example, the resolver can not > be reached, or > the domain does not exist) the main request will know nothing about this, > until something > else attached to this connection happens, like connection broken or a write > timeout > > a patch is attached and hope it helps > > simple configuration that can reproduce the problem (with addition module > enabled): > > addition_types *; > > resolver your_resolver_here; > > location /test { > set $ihost xxx; # xxx here causes a failed > run-time dns resolving > proxy_pass http://$ihost; > } > > location /zzz { > add_after_body /test; > return 200 "test"; > } > > curl -v http://localhost/zzz Thank you for your report, see below for comments about the patch. 
> diff -ruNp nginx-1.3.14/src/http/ngx_http_upstream.c nginx-1.3.14_zls/src/http/ngx_http_upstream.c > --- nginx-1.3.14/src/http/ngx_http_upstream.c 2013-02-18 23:08:46.000000000 +0800 > +++ nginx-1.3.14_zls/src/http/ngx_http_upstream.c 2013-03-13 00:01:01.490582380 +0800 > @@ -878,11 +878,13 @@ ngx_http_upstream_cache_send(ngx_http_re > static void > ngx_http_upstream_resolve_handler(ngx_resolver_ctx_t *ctx) > { > + ngx_connection_t *c; > ngx_http_request_t *r; > ngx_http_upstream_t *u; > ngx_http_upstream_resolved_t *ur; > > r = ctx->data; > + c = r->connection; > > u = r->upstream; > ur = u->resolved; > @@ -894,7 +896,8 @@ ngx_http_upstream_resolve_handler(ngx_re > ngx_resolver_strerror(ctx->state)); > > ngx_http_upstream_finalize_request(r, u, NGX_HTTP_BAD_GATEWAY); > - return; > + > + goto posted_requests; > } > > ur->naddrs = ctx->naddrs; > @@ -919,13 +922,17 @@ ngx_http_upstream_resolve_handler(ngx_re > if (ngx_http_upstream_create_round_robin_peer(r, ur) != NGX_OK) { > ngx_http_upstream_finalize_request(r, u, > NGX_HTTP_INTERNAL_SERVER_ERROR); > - return; > + goto posted_requests; > } > > ngx_resolve_name_done(ctx); > ur->ctx = NULL; > > ngx_http_upstream_connect(r, u); > + > +posted_requests: > + > + ngx_http_run_posted_requests(c); > } Any reason to run posted requests on successful resolution? 
For me it looks like something like this should be enough: --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -894,6 +894,7 @@ ngx_http_upstream_resolve_handler(ngx_re ngx_resolver_strerror(ctx->state)); ngx_http_upstream_finalize_request(r, u, NGX_HTTP_BAD_GATEWAY); + ngx_http_run_posted_requests(r->connection); return; } @@ -919,6 +920,7 @@ ngx_http_upstream_resolve_handler(ngx_re if (ngx_http_upstream_create_round_robin_peer(r, ur) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); + ngx_http_run_posted_requests(r->connection); return; } -- Maxim Dounin http://nginx.org/en/donation.html From ibobrik at gmail.com Wed Mar 13 18:04:35 2013 From: ibobrik at gmail.com (ivan babrou) Date: Wed, 13 Mar 2013 22:04:35 +0400 Subject: Support for progressive jpeg in image_filter module Message-ID: This patch adds support for progressive jpeg and png encoding with image_filter_interlace on/off setting. Google suggests to use progressive jpegs so why not with nginx? 
diff --git a/ngx_http_image_filter_module.c b/ngx_http_image_filter_module.c index b086e3c..cc44d90 100644 --- a/ngx_http_image_filter_module.c +++ b/ngx_http_image_filter_module.c @@ -52,6 +52,7 @@ typedef struct { ngx_uint_t offset_y; ngx_flag_t transparency; + ngx_flag_t interlace; ngx_http_complex_value_t *wcv; ngx_http_complex_value_t *hcv; @@ -142,6 +143,13 @@ static ngx_command_t ngx_http_image_filter_commands[] = { 0, NULL }, + { ngx_string("image_filter_interlace"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_image_filter_conf_t, interlace), + NULL }, + { ngx_string("image_filter_sharpen"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, ngx_http_image_filter_sharpen, @@ -1115,10 +1123,13 @@ ngx_http_image_out(ngx_http_request_t *r, ngx_uint_t type, gdImagePtr img, out = NULL; + conf = ngx_http_get_module_loc_conf(r, ngx_http_image_filter_module); + + gdImageInterlace(img, conf->interlace); + switch (type) { case NGX_HTTP_IMAGE_JPEG: - conf = ngx_http_get_module_loc_conf(r, ngx_http_image_filter_module); jq = ngx_http_image_filter_get_value(r, conf->jqcv, conf->jpeg_quality); if (jq <= 0) { @@ -1237,6 +1248,7 @@ ngx_http_image_filter_create_conf(ngx_conf_t *cf) conf->sharpen = NGX_CONF_UNSET_UINT; conf->angle = NGX_CONF_UNSET_UINT; conf->transparency = NGX_CONF_UNSET; + conf->interlace = NGX_CONF_UNSET; conf->buffer_size = NGX_CONF_UNSET_SIZE; conf->offset_x = NGX_CONF_UNSET_UINT; conf->offset_y = NGX_CONF_UNSET_UINT; @@ -1292,6 +1304,8 @@ ngx_http_image_filter_merge_conf(ngx_conf_t *cf, void *parent, void *child) ngx_conf_merge_value(conf->transparency, prev->transparency, 1); + ngx_conf_merge_value(conf->interlace, prev->interlace, 0); + ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, 1 * 1024 * 1024); -- Regards, Ian Babrou http://bobrik.name http://twitter.com/ibobrik skype:i.babrou -------------- next part 
-------------- An HTML attachment was scrubbed... URL: From zls.sogou at gmail.com Wed Mar 13 22:44:48 2013 From: zls.sogou at gmail.com (lanshun zhou) Date: Thu, 14 Mar 2013 06:44:48 +0800 Subject: [BUG] Posted requests not handled after dns resolving In-Reply-To: <20130313162937.GD15378@mdounin.ru> References: <20130313162937.GD15378@mdounin.ru> Message-ID: Because there is chance to call ngx_http_upstream_finalize_request in function ngx_http_upstream_connect or it's sub calls, and i think this makes no difference with failed resolution~ Hello! On Wed, Mar 13, 2013 at 12:31:00AM +0800, lanshun zhou wrote: > in ngx_http_upstream_resolve_handler, posted requests are not handled, so > if a run time > dns resolving is failed in a subrequest, (for example, the resolver can not > be reached, or > the domain does not exist) the main request will know nothing about this, > until something > else attached to this connection happens, like connection broken or a write > timeout > > a patch is attached and hope it helps > > simple configuration that can reproduce the problem (with addition module > enabled): > > addition_types *; > > resolver your_resolver_here; > > location /test { > set $ihost xxx; # xxx here causes a failed > run-time dns resolving > proxy_pass http://$ihost; > } > > location /zzz { > add_after_body /test; > return 200 "test"; > } > > curl -v http://localhost/zzz Thank you for your report, see below for comments about the patch. 
> diff -ruNp nginx-1.3.14/src/http/ngx_http_upstream.c nginx-1.3.14_zls/src/http/ngx_http_upstream.c > --- nginx-1.3.14/src/http/ngx_http_upstream.c 2013-02-18 23:08:46.000000000 +0800 > +++ nginx-1.3.14_zls/src/http/ngx_http_upstream.c 2013-03-13 00:01:01.490582380 +0800 > @@ -878,11 +878,13 @@ ngx_http_upstream_cache_send(ngx_http_re > static void > ngx_http_upstream_resolve_handler(ngx_resolver_ctx_t *ctx) > { > + ngx_connection_t *c; > ngx_http_request_t *r; > ngx_http_upstream_t *u; > ngx_http_upstream_resolved_t *ur; > > r = ctx->data; > + c = r->connection; > > u = r->upstream; > ur = u->resolved; > @@ -894,7 +896,8 @@ ngx_http_upstream_resolve_handler(ngx_re > ngx_resolver_strerror(ctx->state)); > > ngx_http_upstream_finalize_request(r, u, NGX_HTTP_BAD_GATEWAY); > - return; > + > + goto posted_requests; > } > > ur->naddrs = ctx->naddrs; > @@ -919,13 +922,17 @@ ngx_http_upstream_resolve_handler(ngx_re > if (ngx_http_upstream_create_round_robin_peer(r, ur) != NGX_OK) { > ngx_http_upstream_finalize_request(r, u, > NGX_HTTP_INTERNAL_SERVER_ERROR); > - return; > + goto posted_requests; > } > > ngx_resolve_name_done(ctx); > ur->ctx = NULL; > > ngx_http_upstream_connect(r, u); > + > +posted_requests: > + > + ngx_http_run_posted_requests(c); > } Any reason to run posted requests on successful resolution? 
For me it looks like something like this should be enough: --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -894,6 +894,7 @@ ngx_http_upstream_resolve_handler(ngx_re ngx_resolver_strerror(ctx->state)); ngx_http_upstream_finalize_request(r, u, NGX_HTTP_BAD_GATEWAY); + ngx_http_run_posted_requests(r->connection); return; } @@ -919,6 +920,7 @@ ngx_http_upstream_resolve_handler(ngx_re if (ngx_http_upstream_create_round_robin_peer(r, ur) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); + ngx_http_run_posted_requests(r->connection); return; } -- Maxim Dounin http://nginx.org/en/donation.html _______________________________________________ nginx-devel mailing list nginx-devel at nginx.org http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Mar 14 12:28:54 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Thu, 14 Mar 2013 12:28:54 +0000 Subject: [nginx] svn commit: r5111 - trunk/src/http Message-ID: <20130314122854.8FD9D3F9F6E@mail.nginx.com> Author: mdounin Date: 2013-03-14 12:28:53 +0000 (Thu, 14 Mar 2013) New Revision: 5111 URL: http://trac.nginx.org/nginx/changeset/5111/nginx Log: Request body: next upstream fix. After introduction of chunked request body handling in 1.3.9 (r4931), r->request_body->bufs buffers have b->start pointing to original buffer start (and b->pos pointing to real data of this particular buffer). While this is ok as per se, it caused bad things (usually original request headers included before the request body) after reinit of the request chain in ngx_http_upstream_reinit() while sending the request to a next upstream server (which used to do b->pos = b->start for each buffer in the request chain). Patch by Piotr Sikora. 
Modified: trunk/src/http/ngx_http_request_body.c Modified: trunk/src/http/ngx_http_request_body.c =================================================================== --- trunk/src/http/ngx_http_request_body.c 2013-03-12 13:38:04 UTC (rev 5110) +++ trunk/src/http/ngx_http_request_body.c 2013-03-14 12:28:53 UTC (rev 5111) @@ -826,7 +826,7 @@ b->temporary = 1; b->tag = (ngx_buf_tag_t) &ngx_http_read_client_request_body; - b->start = cl->buf->start; + b->start = cl->buf->pos; b->pos = cl->buf->pos; b->last = cl->buf->last; b->end = cl->buf->end; @@ -933,7 +933,7 @@ b->temporary = 1; b->tag = (ngx_buf_tag_t) &ngx_http_read_client_request_body; - b->start = cl->buf->start; + b->start = cl->buf->pos; b->pos = cl->buf->pos; b->last = cl->buf->last; b->end = cl->buf->end; From mdounin at mdounin.ru Thu Mar 14 12:30:26 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Thu, 14 Mar 2013 12:30:26 +0000 Subject: [nginx] svn commit: r5112 - trunk/src/http Message-ID: <20130314123026.E733F3F9F6E@mail.nginx.com> Author: mdounin Date: 2013-03-14 12:30:26 +0000 (Thu, 14 Mar 2013) New Revision: 5112 URL: http://trac.nginx.org/nginx/changeset/5112/nginx Log: Request body: avoid linking rb->buf to r->header_in. Code to reuse of r->request_body->buf in upstream module assumes it's dedicated buffer, hence after 1.3.9 (r4931) it might reuse r->header_in if client_body_in_file_only was set, resulting in original request corruption. It is considered to be safer to always create a dedicated buffer for rb->bufs to avoid such problems. 
Modified: trunk/src/http/ngx_http_request_body.c Modified: trunk/src/http/ngx_http_request_body.c =================================================================== --- trunk/src/http/ngx_http_request_body.c 2013-03-14 12:28:53 UTC (rev 5111) +++ trunk/src/http/ngx_http_request_body.c 2013-03-14 12:30:26 UTC (rev 5112) @@ -104,7 +104,20 @@ { /* the whole request body may be placed in r->header_in */ - rb->buf = r->header_in; + b = ngx_calloc_buf(r->pool); + if (b == NULL) { + rc = NGX_HTTP_INTERNAL_SERVER_ERROR; + goto done; + } + + b->temporary = 1; + b->start = r->header_in->pos; + b->pos = r->header_in->pos; + b->last = r->header_in->last; + b->end = r->header_in->end; + + rb->buf = b; + r->read_event_handler = ngx_http_read_client_request_body_handler; r->write_event_handler = ngx_http_request_empty_handler; From mdounin at mdounin.ru Thu Mar 14 12:37:54 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Thu, 14 Mar 2013 12:37:54 +0000 Subject: [nginx] svn commit: r5113 - trunk/src/http Message-ID: <20130314123754.F36A43F9C3E@mail.nginx.com> Author: mdounin Date: 2013-03-14 12:37:54 +0000 (Thu, 14 Mar 2013) New Revision: 5113 URL: http://trac.nginx.org/nginx/changeset/5113/nginx Log: Upstream: call ngx_http_run_posted_requests() on resolve errors. If proxy_pass to a host with dynamic resolution was used to handle a subrequest, and host resolution failed, the main request wasn't run till something else happened on the connection. E.g. request to "/zzz" with the following configuration hanged: addition_types *; resolver 8.8.8.8; location /test { set $ihost xxx; proxy_pass http://$ihost; } location /zzz { add_after_body /test; return 200 "test"; } Report and original version of the patch by Lanshun Zhou, http://mailman.nginx.org/pipermail/nginx-devel/2013-March/003476.html. 
Modified: trunk/src/http/ngx_http_upstream.c Modified: trunk/src/http/ngx_http_upstream.c =================================================================== --- trunk/src/http/ngx_http_upstream.c 2013-03-14 12:30:26 UTC (rev 5112) +++ trunk/src/http/ngx_http_upstream.c 2013-03-14 12:37:54 UTC (rev 5113) @@ -894,7 +894,7 @@ ngx_resolver_strerror(ctx->state)); ngx_http_upstream_finalize_request(r, u, NGX_HTTP_BAD_GATEWAY); - return; + goto failed; } ur->naddrs = ctx->naddrs; @@ -919,13 +919,17 @@ if (ngx_http_upstream_create_round_robin_peer(r, ur) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); - return; + goto failed; } ngx_resolve_name_done(ctx); ur->ctx = NULL; ngx_http_upstream_connect(r, u); + +failed: + + ngx_http_run_posted_requests(r->connection); } From mdounin at mdounin.ru Thu Mar 14 12:38:42 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 14 Mar 2013 16:38:42 +0400 Subject: [BUG] Posted requests not handled after dns resolving In-Reply-To: References: <20130313162937.GD15378@mdounin.ru> Message-ID: <20130314123842.GJ15378@mdounin.ru> Hello! On Thu, Mar 14, 2013 at 06:44:48AM +0800, lanshun zhou wrote: > Because there is chance to call ngx_http_upstream_finalize_request in > function ngx_http_upstream_connect or it's sub calls, and i think this > makes no difference with failed resolution~ Yes, you are right. I'v committed slightly modified version of the patch. -- Maxim Dounin http://nginx.org/en/donation.html From zls.sogou at gmail.com Thu Mar 14 15:20:17 2013 From: zls.sogou at gmail.com (lanshun zhou) Date: Thu, 14 Mar 2013 23:20:17 +0800 Subject: [BUG] Posted requests not handled after dns resolving In-Reply-To: <20130314123842.GJ15378@mdounin.ru> References: <20130313162937.GD15378@mdounin.ru> <20130314123842.GJ15378@mdounin.ru> Message-ID: Thanks~ And in the original patch, I introduced a variable c to save the connection. 
I did this because I think it may be not safe to continue using the variable r after calling ngx_http_upstream_finalize_request, in which the request struct and pool may be freed. 2013/3/14 Maxim Dounin > Hello! > > On Thu, Mar 14, 2013 at 06:44:48AM +0800, lanshun zhou wrote: > > > Because there is chance to call ngx_http_upstream_finalize_request in > > function ngx_http_upstream_connect or it's sub calls, and i think this > > makes no difference with failed resolution~ > > Yes, you are right. I'v committed slightly modified version of > the patch. > > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Mar 14 16:22:43 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Thu, 14 Mar 2013 16:22:43 +0000 Subject: [nginx] svn commit: r5114 - trunk/src/http Message-ID: <20130314162244.101D83F9C46@mail.nginx.com> Author: mdounin Date: 2013-03-14 16:22:43 +0000 (Thu, 14 Mar 2013) New Revision: 5114 URL: http://trac.nginx.org/nginx/changeset/5114/nginx Log: Upstream: fixed previous commit. Store r->connection on stack to make sure it's still available if request finalization happens to actually free request memory. 
Modified: trunk/src/http/ngx_http_upstream.c Modified: trunk/src/http/ngx_http_upstream.c =================================================================== --- trunk/src/http/ngx_http_upstream.c 2013-03-14 12:37:54 UTC (rev 5113) +++ trunk/src/http/ngx_http_upstream.c 2013-03-14 16:22:43 UTC (rev 5114) @@ -878,11 +878,13 @@ static void ngx_http_upstream_resolve_handler(ngx_resolver_ctx_t *ctx) { + ngx_connection_t *c; ngx_http_request_t *r; ngx_http_upstream_t *u; ngx_http_upstream_resolved_t *ur; r = ctx->data; + c = r->connection; u = r->upstream; ur = u->resolved; @@ -929,7 +931,7 @@ failed: - ngx_http_run_posted_requests(r->connection); + ngx_http_run_posted_requests(c); } From mdounin at mdounin.ru Thu Mar 14 16:22:45 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 14 Mar 2013 20:22:45 +0400 Subject: [BUG] Posted requests not handled after dns resolving In-Reply-To: References: <20130313162937.GD15378@mdounin.ru> <20130314123842.GJ15378@mdounin.ru> Message-ID: <20130314162244.GL15378@mdounin.ru> Hello! On Thu, Mar 14, 2013 at 11:20:17PM +0800, lanshun zhou wrote: > Thanks~ And in the original patch, I introduced a variable c to save the > connection. I did > this because I think it may be not safe to continue using the variable r > after calling > ngx_http_upstream_finalize_request, in which the request struct and pool > may be freed. Yes, you are right again. I've restored this part of the patch. Thanks again! 
-- Maxim Dounin http://nginx.org/en/donation.html From tss at iki.fi Thu Mar 14 16:25:55 2013 From: tss at iki.fi (Timo Sirainen) Date: Thu, 14 Mar 2013 18:25:55 +0200 Subject: imap proxy and untagged commands In-Reply-To: <20130313144857.GA15378@mdounin.ru> References: <1363182796.10326.129.camel@innu> <20130313144857.GA15378@mdounin.ru> Message-ID: <1363278355.10326.172.camel@innu> Hi, On Wed, 2013-03-13 at 18:48 +0400, Maxim Dounin wrote: > > In real world Dovecot can already send some extra untagged replies if > > there is some trouble (e.g. heavy load) with its authentication process. > > What happens is: > > > > S: * OK Waiting for authentication process to respond.. > > C: a LOGIN {3} > > S: * OK Waiting for authentication process to respond.. > > > > and nginx fails: > > > > 2013/03/13 15:38:55 [error] 7257#0: *15 upstream sent invalid response: > > "* OK Waiting for authentication process to respond.." while reading > > response from upstream, client: 127.0.0.1, server: 0.0.0.0:10143, login: > > "tss", upstream: 127.0.0.1:143 > > > > I attempted to fix this, but it looks like the current code doesn't make > > this very easy. Especially since it seems to be handling data one "read > > block" at a time, which can contain multiple lines, each of which should > > be handled separately (the untagged and non-untagged replies may arrive > > in the same IP packet). > > Yes, thanks, it's a known issue - untagged responses are not > handled properly (and there is a similar issue with SMTP multiline > replies, quick and dirty patch at [1]). This isn't considered as > a major problem since nginx is expected to work with controlled > IMAP backends, and it's usually trivial to avoid such untagged > responses. It would be good to fix it though. The other reason why I was hoping for this to get fixed was to be able to add some new functionality to nginx more easily and efficiently. Things that I'd like to have in nginx proxy, in the order of importance: 1. 
If IMAP banner has [CAPABILITY .. ID ..], then send to the server: a ID ("x-originating-ip" "1.2.3.4" "x-originating-port" "12345" "x-connected-ip" "4.3.2.1" "x-connected-port" "993") That could be pipelined directly to server with the LOGIN command without first waiting for server to reply. Except then the server could send both replies in one IP packet.. But yeah, it could be implemented without pipelining also. The command can also send "x-session-id" to give the session a unique ID (in Dovecot unique for 9 years). If each log line related to the session has this ID logged, it makes it easier to track the session through the proxy/backend logs. 2. If POP3 banner has [XCLIENT], send "XCLIENT ADDR=1.2.3.4 PORT=12345" to the server. (And again maybe some other stuff.) 3. After logging in to backend server issue CAPABILITY command once more, and send its result to the client. This is only needed/useful if the client asked for CAPABILITY command before login. Anyway this allows nginx to advertise only minimal capabilities and have client update them automatically after login. (Dovecot v2.x does this too and it works with all the major clients.) 4. Authentication could support master users and have nginx log in using AUTHENTICATE PLAIN. 5. If backend server supports LITERAL+, the LOGIN command could use LITERAL+ to avoid waiting for server. nginx could also support LITERAL+ itself. 6. nginx could support SASL-IR extension. 7. nginx could support ID extension itself also by remembering what the client sent, and sending it to the backend server during login. So .. would anyone want to help with any of this? :) I think these would make my life easier in future, so I'm planning on implementing at least some of them. (In some projects nginx makes SSL configuration easier since it can be used for proxying all the imap/pop3/smtp/http traffic with one config.) 
From mdounin at mdounin.ru Fri Mar 15 12:51:22 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 15 Mar 2013 16:51:22 +0400 Subject: imap proxy and untagged commands In-Reply-To: <1363278355.10326.172.camel@innu> References: <1363182796.10326.129.camel@innu> <20130313144857.GA15378@mdounin.ru> <1363278355.10326.172.camel@innu> Message-ID: <20130315125122.GX15378@mdounin.ru> Hello! On Thu, Mar 14, 2013 at 06:25:55PM +0200, Timo Sirainen wrote: > Hi, > > On Wed, 2013-03-13 at 18:48 +0400, Maxim Dounin wrote: > > > > In real world Dovecot can already send some extra untagged replies if > > > there is some trouble (e.g. heavy load) with its authentication process. > > > What happens is: > > > > > > S: * OK Waiting for authentication process to respond.. > > > C: a LOGIN {3} > > > S: * OK Waiting for authentication process to respond.. > > > > > > and nginx fails: > > > > > > 2013/03/13 15:38:55 [error] 7257#0: *15 upstream sent invalid response: > > > "* OK Waiting for authentication process to respond.." while reading > > > response from upstream, client: 127.0.0.1, server: 0.0.0.0:10143, login: > > > "tss", upstream: 127.0.0.1:143 > > > > > > I attempted to fix this, but it looks like the current code doesn't make > > > this very easy. Especially since it seems to be handling data one "read > > > block" at a time, which can contain multiple lines, each of which should > > > be handled separately (the untagged and non-untagged replies may arrive > > > in the same IP packet). > > > > Yes, thanks, it's a known issue - untagged responses are not > > handled properly (and there is a similar issue with SMTP multiline > > replies, quick and dirty patch at [1]). This isn't considered as > > a major problem since nginx is expected to work with controlled > > IMAP backends, and it's usually trivial to avoid such untagged > > responses. It would be good to fix it though. 
> > The other reason why I was hoping for this to get fixed was to be able > to add some new functionality to nginx more easily and efficiently. > Things that I'd like to have in nginx proxy, in the order of importance: > > 1. If IMAP banner has [CAPABILITY .. ID ..], then send to the server: > > a ID ("x-originating-ip" "1.2.3.4" "x-originating-port" "12345" > "x-connected-ip" "4.3.2.1" "x-connected-port" "993") > > That could be pipelined directly to server with the LOGIN command > without first waiting for server to reply. Except then the server could > send both replies in one IP packet.. But yeah, it could be implemented > without pipelining also. > > The command can also send "x-session-id" to give the session a unique ID > (in Dovecot unique for 9 years). If each log line related to the session > has this ID logged, it makes it easier to track the session through the > proxy/backend logs. > > 2. If POP3 banner has [XCLIENT], send "XCLIENT ADDR=1.2.3.4 PORT=12345" > to the server. (And again maybe some other stuff.) > > 3. After logging in to backend server issue CAPABILITY command once > more, and send its result to the client. This is only needed/useful if > the client asked for CAPABILITY command before login. Anyway this allows > nginx to advertise only minimal capabilities and have client update them > automatically after login. (Dovecot v2.x does this too and it works with > all the major clients.) > > 4. Authentication could support master users and have nginx log in using > AUTHENTICATE PLAIN. > > 5. If backend server supports LITERAL+, the LOGIN command could use > LITERAL+ to avoid waiting for server. nginx could also support LITERAL+ > itself. > > 6. nginx could support SASL-IR extension. > > 7. nginx could support ID extension itself also by remembering what the > client sent, and sending it to the backend server during login. > > So .. would anyone want to help with any of this? 
:) I think these would > make my life easier in future, so I'm planning on implementing at least > some of them. (In some projects nginx makes SSL configuration easier > since it can be used for proxying all the imap/pop3/smtp/http traffic > with one config.) Overall this looks interesting. I likely will commit some of my mail-related patches in the near future (including the one referenced), I think it will improve things a bit and may make further progress a bit easier. Unfortunately, I'm a bit busy and unlikely will be able to actively help with writing code, but feel free to ask question if you have any and/or submit patches for review. -- Maxim Dounin http://nginx.org/en/donation.html From dave at daveb.net Fri Mar 15 16:59:57 2013 From: dave at daveb.net (Dave Bailey) Date: Fri, 15 Mar 2013 09:59:57 -0700 Subject: proxy input filter chain? Message-ID: Hi, I would like to apply a filter to the request body as it is read from the client, but before it is sent upstream. So in the various proxy module input filters, when the input ngx_buf_t are initialized and pushed onto the input buffer chain, I would like to pass them through my filter at that time, so that the input buffer chain contains the buffers that my filter generates from the original input buffers. My filter might also (based on its input) choose to finalize the request with a status code 403 or similar (e.g. if the input is deemed to be malicious, etc) before the upstream connection is made. My questions: 1) Is anything like this currently being developed? 2) If not, could I try to provide a patch? -dave -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Mar 15 17:46:22 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 15 Mar 2013 21:46:22 +0400 Subject: proxy input filter chain? In-Reply-To: References: Message-ID: <20130315174622.GG15378@mdounin.ru> Hello! 
On Fri, Mar 15, 2013 at 09:59:57AM -0700, Dave Bailey wrote: > Hi, > > I would like to apply a filter to the request body as it is read from the > client, but before it is sent upstream. So in the various proxy module > input filters, when the input ngx_buf_t are initialized and pushed onto the > input buffer chain, I would like to pass them through my filter at that > time, so that the input buffer chain contains the buffers that my filter > generates from the original input buffers. My filter might also (based on > its input) choose to finalize the request with a status code 403 or similar > (e.g. if the input is deemed to be malicious, etc) before the upstream > connection is made. > > My questions: > > 1) Is anything like this currently being developed? > 2) If not, could I try to provide a patch? There is an experimental patch which introduces request body filters (attached). It was written with chunked input support, but wasn't committed as there is no clear understanding it should work this way. -- Maxim Dounin http://nginx.org/en/donation.html -------------- next part -------------- # HG changeset patch # User Maxim Dounin # Date 1358156187 -14400 # Node ID 26a111efb3ba16b45a9688dbaf5823f9235e221f # Parent 13c4c155f26f772b0bc1074a05298088d6499218 Request body: filters support (experimental). 
diff --git a/src/http/ngx_http.c b/src/http/ngx_http.c --- a/src/http/ngx_http.c +++ b/src/http/ngx_http.c @@ -69,8 +69,9 @@ static ngx_int_t ngx_http_add_addrs6(ngx ngx_uint_t ngx_http_max_module; -ngx_int_t (*ngx_http_top_header_filter) (ngx_http_request_t *r); -ngx_int_t (*ngx_http_top_body_filter) (ngx_http_request_t *r, ngx_chain_t *ch); +ngx_http_output_header_filter_pt ngx_http_top_header_filter; +ngx_http_output_body_filter_pt ngx_http_top_body_filter; +ngx_http_request_body_filter_pt ngx_http_top_request_body_filter; ngx_str_t ngx_http_html_default_types[] = { diff --git a/src/http/ngx_http.h b/src/http/ngx_http.h --- a/src/http/ngx_http.h +++ b/src/http/ngx_http.h @@ -166,6 +166,7 @@ extern ngx_str_t ngx_http_html_default_ extern ngx_http_output_header_filter_pt ngx_http_top_header_filter; extern ngx_http_output_body_filter_pt ngx_http_top_body_filter; +extern ngx_http_request_body_filter_pt ngx_http_top_request_body_filter; #endif /* _NGX_HTTP_H_INCLUDED_ */ diff --git a/src/http/ngx_http_core_module.c b/src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c +++ b/src/http/ngx_http_core_module.c @@ -26,6 +26,7 @@ static ngx_int_t ngx_http_core_find_stat ngx_http_location_tree_node_t *node); static ngx_int_t ngx_http_core_preconfiguration(ngx_conf_t *cf); +static ngx_int_t ngx_http_core_postconfiguration(ngx_conf_t *cf); static void *ngx_http_core_create_main_conf(ngx_conf_t *cf); static char *ngx_http_core_init_main_conf(ngx_conf_t *cf, void *conf); static void *ngx_http_core_create_srv_conf(ngx_conf_t *cf); @@ -792,7 +793,7 @@ static ngx_command_t ngx_http_core_comm static ngx_http_module_t ngx_http_core_module_ctx = { ngx_http_core_preconfiguration, /* preconfiguration */ - NULL, /* postconfiguration */ + ngx_http_core_postconfiguration, /* postconfiguration */ ngx_http_core_create_main_conf, /* create main configuration */ ngx_http_core_init_main_conf, /* init main configuration */ @@ -3314,6 +3315,15 @@ 
ngx_http_core_preconfiguration(ngx_conf_ } +static ngx_int_t +ngx_http_core_postconfiguration(ngx_conf_t *cf) +{ + ngx_http_top_request_body_filter = ngx_http_request_body_save_filter; + + return NGX_OK; +} + + static void * ngx_http_core_create_main_conf(ngx_conf_t *cf) { diff --git a/src/http/ngx_http_core_module.h b/src/http/ngx_http_core_module.h --- a/src/http/ngx_http_core_module.h +++ b/src/http/ngx_http_core_module.h @@ -506,10 +506,14 @@ ngx_http_cleanup_t *ngx_http_cleanup_add typedef ngx_int_t (*ngx_http_output_header_filter_pt)(ngx_http_request_t *r); typedef ngx_int_t (*ngx_http_output_body_filter_pt) (ngx_http_request_t *r, ngx_chain_t *chain); +typedef ngx_int_t (*ngx_http_request_body_filter_pt) + (ngx_http_request_t *r, ngx_chain_t *chain); ngx_int_t ngx_http_output_filter(ngx_http_request_t *r, ngx_chain_t *chain); ngx_int_t ngx_http_write_filter(ngx_http_request_t *r, ngx_chain_t *chain); +ngx_int_t ngx_http_request_body_save_filter(ngx_http_request_t *r, + ngx_chain_t *chain); ngx_int_t ngx_http_set_disable_symlinks(ngx_http_request_t *r, diff --git a/src/http/ngx_http_request_body.c b/src/http/ngx_http_request_body.c --- a/src/http/ngx_http_request_body.c +++ b/src/http/ngx_http_request_body.c @@ -24,8 +24,6 @@ static ngx_int_t ngx_http_request_body_l ngx_chain_t *in); static ngx_int_t ngx_http_request_body_chunked_filter(ngx_http_request_t *r, ngx_chain_t *in); -static ngx_int_t ngx_http_request_body_save_filter(ngx_http_request_t *r, - ngx_chain_t *in); ngx_int_t @@ -832,7 +830,7 @@ ngx_http_request_body_length_filter(ngx_ ll = &tl->next; } - rc = ngx_http_request_body_save_filter(r, out); + rc = ngx_http_top_request_body_filter(r, out); ngx_chain_update_chains(r->pool, &rb->free, &rb->busy, &out, (ngx_buf_tag_t) &ngx_http_read_client_request_body); @@ -984,7 +982,7 @@ ngx_http_request_body_chunked_filter(ngx } } - rc = ngx_http_request_body_save_filter(r, out); + rc = ngx_http_top_request_body_filter(r, out); ngx_chain_update_chains(r->pool, 
&rb->free, &rb->busy, &out, (ngx_buf_tag_t) &ngx_http_read_client_request_body); @@ -993,7 +991,7 @@ ngx_http_request_body_chunked_filter(ngx } -static ngx_int_t +ngx_int_t ngx_http_request_body_save_filter(ngx_http_request_t *r, ngx_chain_t *in) { #if (NGX_DEBUG) From dave at daveb.net Fri Mar 15 18:04:13 2013 From: dave at daveb.net (Dave Bailey) Date: Fri, 15 Mar 2013 11:04:13 -0700 Subject: proxy input filter chain? In-Reply-To: <20130315174622.GG15378@mdounin.ru> References: <20130315174622.GG15378@mdounin.ru> Message-ID: Hi Maxim, On Fri, Mar 15, 2013 at 10:46 AM, Maxim Dounin wrote: > Hello! > > On Fri, Mar 15, 2013 at 09:59:57AM -0700, Dave Bailey wrote: > > > Hi, > > > > I would like to apply a filter to the request body as it is read from the > > client, but before it is sent upstream. So in the various proxy module > > input filters, when the input ngx_buf_t are initialized and pushed onto > the > > input buffer chain, I would like to pass them through my filter at that > > time, so that the input buffer chain contains the buffers that my filter > > generates from the original input buffers. My filter might also (based > on > > its input) choose to finalize the request with a status code 403 or > similar > > (e.g. if the input is deemed to be malicious, etc) before the upstream > > connection is made. > > > > My questions: > > > > 1) Is anything like this currently being developed? > > 2) If not, could I try to provide a patch? > > There is an experimental patch which introduces request body > filters (attached). It was written with chunked input support, > but wasn't committed as there is no clear understanding it should > work this way. > Thank you for the patch. For what it's worth, it seems ideal to me, as it provides an interface that's consistent with the other filter interfaces for headers and response body, and it is applied to the request body before the buffers may hit the disk. 
I could see a lot of great functionality taking advantage of this new interface, e.g. WAF inspection of the request body, encryption and/or encoding or other transformation of form data, etc. Please consider this email as a vote to commit the patch. In the meantime, I will try it out. -dave > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From vbart at nginx.com Fri Mar 15 19:49:55 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Fri, 15 Mar 2013 19:49:55 +0000 Subject: [nginx] svn commit: r5115 - trunk/src/http Message-ID: <20130315194955.3B1783F9FD5@mail.nginx.com> Author: vbart Date: 2013-03-15 19:49:54 +0000 (Fri, 15 Mar 2013) New Revision: 5115 URL: http://trac.nginx.org/nginx/changeset/5115/nginx Log: Allow to reuse connections that wait their first request. This should improve behavior under deficiency of connections. Since SSL handshake usually takes significant amount of time, we exclude connections from reusable queue during this period to avoid premature flush of them. 
Modified: trunk/src/http/ngx_http_request.c Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-03-14 16:22:43 UTC (rev 5114) +++ trunk/src/http/ngx_http_request.c 2013-03-15 19:49:54 UTC (rev 5115) @@ -355,6 +355,7 @@ } ngx_add_timer(rev, c->listening->post_accept_timeout); + ngx_reusable_connection(c, 1); if (ngx_handle_read_event(rev, 0) != NGX_OK) { ngx_http_close_connection(c); @@ -383,6 +384,11 @@ return; } + if (c->close) { + ngx_http_close_connection(c); + return; + } + hc = c->data; cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module); @@ -432,6 +438,7 @@ if (!rev->timer_set) { ngx_add_timer(rev, c->listening->post_accept_timeout); + ngx_reusable_connection(c, 1); } if (ngx_handle_read_event(rev, 0) != NGX_OK) { @@ -466,6 +473,8 @@ c->log->action = "reading client request line"; + ngx_reusable_connection(c, 0); + c->data = ngx_http_create_request(c); if (c->data == NULL) { ngx_http_close_connection(c); @@ -611,6 +620,11 @@ return; } + if (c->close) { + ngx_http_close_connection(c); + return; + } + n = recv(c->fd, (char *) buf, 1, MSG_PEEK); err = ngx_socket_errno; @@ -631,6 +645,7 @@ if (!rev->timer_set) { ngx_add_timer(rev, c->listening->post_accept_timeout); + ngx_reusable_connection(c, 1); } if (ngx_handle_read_event(rev, 0) != NGX_OK) { @@ -670,6 +685,8 @@ ngx_add_timer(rev, c->listening->post_accept_timeout); } + ngx_reusable_connection(c, 0); + c->ssl->handler = ngx_http_ssl_handshake_handler; return; } @@ -714,6 +731,8 @@ c->read->handler = ngx_http_wait_request_handler; /* STUB: epoll edge */ c->write->handler = ngx_http_empty_handler; + ngx_reusable_connection(c, 1); + ngx_http_wait_request_handler(c->read); return; From vbart at nginx.com Fri Mar 15 20:00:49 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Fri, 15 Mar 2013 20:00:49 +0000 Subject: [nginx] svn commit: r5116 - in trunk/src: core event http/modules 
Message-ID: <20130315200049.D4A473F9FE5@mail.nginx.com> Author: vbart Date: 2013-03-15 20:00:49 +0000 (Fri, 15 Mar 2013) New Revision: 5116 URL: http://trac.nginx.org/nginx/changeset/5116/nginx Log: Status: introduced the "ngx_stat_waiting" counter. And corresponding variable $connections_waiting was added. Previously, waiting connections were counted as the difference between active connections and the sum of reading and writing connections. That made it impossible to count more than one request in one connection as reading or writing (as is the case for SPDY). Also, we no longer count connections in handshake state as waiting. Modified: trunk/src/core/ngx_connection.c trunk/src/event/ngx_event.c trunk/src/event/ngx_event.h trunk/src/http/modules/ngx_http_stub_status_module.c Modified: trunk/src/core/ngx_connection.c =================================================================== --- trunk/src/core/ngx_connection.c 2013-03-15 19:49:54 UTC (rev 5115) +++ trunk/src/core/ngx_connection.c 2013-03-15 20:00:49 UTC (rev 5116) @@ -970,6 +970,10 @@ if (c->reusable) { ngx_queue_remove(&c->queue); + +#if (NGX_STAT_STUB) + (void) ngx_atomic_fetch_add(ngx_stat_waiting, -1); +#endif } c->reusable = reusable; @@ -979,6 +983,10 @@ ngx_queue_insert_head( (ngx_queue_t *) &ngx_cycle->reusable_connections_queue, &c->queue); + +#if (NGX_STAT_STUB) + (void) ngx_atomic_fetch_add(ngx_stat_waiting, 1); +#endif } } Modified: trunk/src/event/ngx_event.c =================================================================== --- trunk/src/event/ngx_event.c 2013-03-15 19:49:54 UTC (rev 5115) +++ trunk/src/event/ngx_event.c 2013-03-15 20:00:49 UTC (rev 5116) @@ -73,6 +73,8 @@ ngx_atomic_t *ngx_stat_reading = &ngx_stat_reading0; ngx_atomic_t ngx_stat_writing0; ngx_atomic_t *ngx_stat_writing = &ngx_stat_writing0; +ngx_atomic_t ngx_stat_waiting0; +ngx_atomic_t *ngx_stat_waiting = &ngx_stat_waiting0; #endif @@ -511,7 +513,8 @@ + cl /* ngx_stat_requests */ + cl /* ngx_stat_active */ + cl /* 
ngx_stat_reading */ - + cl; /* ngx_stat_writing */ + + cl /* ngx_stat_writing */ + + cl; /* ngx_stat_waiting */ #endif @@ -558,6 +561,7 @@ ngx_stat_active = (ngx_atomic_t *) (shared + 6 * cl); ngx_stat_reading = (ngx_atomic_t *) (shared + 7 * cl); ngx_stat_writing = (ngx_atomic_t *) (shared + 8 * cl); + ngx_stat_waiting = (ngx_atomic_t *) (shared + 9 * cl); #endif Modified: trunk/src/event/ngx_event.h =================================================================== --- trunk/src/event/ngx_event.h 2013-03-15 19:49:54 UTC (rev 5115) +++ trunk/src/event/ngx_event.h 2013-03-15 20:00:49 UTC (rev 5116) @@ -511,6 +511,7 @@ extern ngx_atomic_t *ngx_stat_active; extern ngx_atomic_t *ngx_stat_reading; extern ngx_atomic_t *ngx_stat_writing; +extern ngx_atomic_t *ngx_stat_waiting; #endif Modified: trunk/src/http/modules/ngx_http_stub_status_module.c =================================================================== --- trunk/src/http/modules/ngx_http_stub_status_module.c 2013-03-15 19:49:54 UTC (rev 5115) +++ trunk/src/http/modules/ngx_http_stub_status_module.c 2013-03-15 20:00:49 UTC (rev 5116) @@ -73,6 +73,9 @@ { ngx_string("connections_writing"), NULL, ngx_http_stub_status_variable, 2, NGX_HTTP_VAR_NOCACHEABLE, 0 }, + { ngx_string("connections_waiting"), NULL, ngx_http_stub_status_variable, + 3, NGX_HTTP_VAR_NOCACHEABLE, 0 }, + { ngx_null_string, NULL, NULL, 0, 0, 0 } }; @@ -83,7 +86,7 @@ ngx_int_t rc; ngx_buf_t *b; ngx_chain_t out; - ngx_atomic_int_t ap, hn, ac, rq, rd, wr; + ngx_atomic_int_t ap, hn, ac, rq, rd, wr, wa; if (r->method != NGX_HTTP_GET && r->method != NGX_HTTP_HEAD) { return NGX_HTTP_NOT_ALLOWED; @@ -126,6 +129,7 @@ rq = *ngx_stat_requests; rd = *ngx_stat_reading; wr = *ngx_stat_writing; + wa = *ngx_stat_waiting; b->last = ngx_sprintf(b->last, "Active connections: %uA \n", ac); @@ -135,7 +139,7 @@ b->last = ngx_sprintf(b->last, " %uA %uA %uA \n", ap, hn, rq); b->last = ngx_sprintf(b->last, "Reading: %uA Writing: %uA Waiting: %uA \n", - rd, wr, ac - (rd + 
wr)); + rd, wr, wa); r->headers_out.status = NGX_HTTP_OK; r->headers_out.content_length_n = b->last - b->pos; @@ -177,6 +181,10 @@ value = *ngx_stat_writing; break; + case 3: + value = *ngx_stat_waiting; + break; + /* suppress warning */ default: value = 0; From piotr at cloudflare.com Fri Mar 15 21:27:57 2013 From: piotr at cloudflare.com (Piotr Sikora) Date: Fri, 15 Mar 2013 14:27:57 -0700 Subject: [PATCH] Guard against failed allocation during binary upgrade Message-ID: Hey, pretty obvious patch attached ;) Best regards, Piotr Sikora diff -r a29c574d61fa src/core/nginx.c --- a/src/core/nginx.c Fri Mar 15 20:00:49 2013 +0000 +++ b/src/core/nginx.c Fri Mar 15 14:22:04 2013 -0700 @@ -594,6 +594,9 @@ var = ngx_alloc(sizeof(NGINX_VAR) + cycle->listening.nelts * (NGX_INT32_LEN + 1) + 2, cycle->log); + if (var == NULL) { + return NGX_INVALID_PID; + } p = ngx_cpymem(var, NGINX_VAR "=", sizeof(NGINX_VAR)); From ru at nginx.com Sat Mar 16 03:56:27 2013 From: ru at nginx.com (Ruslan Ermilov) Date: Sat, 16 Mar 2013 07:56:27 +0400 Subject: Support for progressive jpeg in image_filter module In-Reply-To: References: Message-ID: <20130316035627.GC75995@lo0.su> On Wed, Mar 13, 2013 at 10:04:35PM +0400, ivan babrou wrote: > This patch adds support for progressive jpeg and pgn encoding with > image_filter_interlace on/off setting. Google suggests to use progressive > jpegs so why not with nginx? Your MUA broke the text patch, and HTML with the patch is generally meaningless. Please find a way to properly send patches (experiment locally). 
> diff --git a/ngx_http_image_filter_module.c b/ngx_http_image_filter_module.c > index b086e3c..cc44d90 100644 > --- a/ngx_http_image_filter_module.c > +++ b/ngx_http_image_filter_module.c > @@ -52,6 +52,7 @@ typedef struct { > ngx_uint_t offset_y; > > ngx_flag_t transparency; > + ngx_flag_t interlace; > > ngx_http_complex_value_t *wcv; > ngx_http_complex_value_t *hcv; > @@ -142,6 +143,13 @@ static ngx_command_t ngx_http_image_filter_commands[] > = { > 0, > NULL }, > > + { ngx_string("image_filter_interlace"), > + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, > + ngx_conf_set_flag_slot, > + NGX_HTTP_LOC_CONF_OFFSET, > + offsetof(ngx_http_image_filter_conf_t, interlace), > + NULL }, > + Please keep the same order here as in ngx_http_image_filter_conf_t and ngx_http_image_filter_{create,merge}_conf(). That is (sharpen, transparency, interlace). > { ngx_string("image_filter_sharpen"), > > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > ngx_http_image_filter_sharpen, > @@ -1115,10 +1123,13 @@ ngx_http_image_out(ngx_http_request_t *r, > ngx_uint_t type, gdImagePtr img, > > out = NULL; > > + conf = ngx_http_get_module_loc_conf(r, ngx_http_image_filter_module); > + > + gdImageInterlace(img, conf->interlace); > + conf->interlace which is of type ngx_flag_t isn't the same as "int" that this function expects. This will cause warnings if compiled with -Wshorten-64-to-32 on 64-bit platforms. While nginx has several such issues at the moment, adding another one is not welcome. Cast it to "int" here. 
> switch (type) { > > case NGX_HTTP_IMAGE_JPEG: > - conf = ngx_http_get_module_loc_conf(r, > ngx_http_image_filter_module); > > jq = ngx_http_image_filter_get_value(r, conf->jqcv, > conf->jpeg_quality); > if (jq <= 0) { > @@ -1237,6 +1248,7 @@ ngx_http_image_filter_create_conf(ngx_conf_t *cf) > conf->sharpen = NGX_CONF_UNSET_UINT; > conf->angle = NGX_CONF_UNSET_UINT; > conf->transparency = NGX_CONF_UNSET; > + conf->interlace = NGX_CONF_UNSET; > conf->buffer_size = NGX_CONF_UNSET_SIZE; > conf->offset_x = NGX_CONF_UNSET_UINT; > conf->offset_y = NGX_CONF_UNSET_UINT; > @@ -1292,6 +1304,8 @@ ngx_http_image_filter_merge_conf(ngx_conf_t *cf, void > *parent, void *child) > > ngx_conf_merge_value(conf->transparency, prev->transparency, 1); > > + ngx_conf_merge_value(conf->interlace, prev->interlace, 0); > + > ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, > 1 * 1024 * 1024); > From ru at nginx.com Sat Mar 16 03:58:38 2013 From: ru at nginx.com (Ruslan Ermilov) Date: Sat, 16 Mar 2013 07:58:38 +0400 Subject: [PATCH] Guard against failed allocation during binary upgrade In-Reply-To: References: Message-ID: <20130316035838.GD75995@lo0.su> On Fri, Mar 15, 2013 at 02:27:57PM -0700, Piotr Sikora wrote: > Hey, > pretty obvious patch attached ;) Your patch obviously looks good. 
:) > diff -r a29c574d61fa src/core/nginx.c > --- a/src/core/nginx.c Fri Mar 15 20:00:49 2013 +0000 > +++ b/src/core/nginx.c Fri Mar 15 14:22:04 2013 -0700 > @@ -594,6 +594,9 @@ > var = ngx_alloc(sizeof(NGINX_VAR) > + cycle->listening.nelts * (NGX_INT32_LEN + 1) + 2, > cycle->log); > + if (var == NULL) { > + return NGX_INVALID_PID; > + } > > p = ngx_cpymem(var, NGINX_VAR "=", sizeof(NGINX_VAR)); > From ibobrik at gmail.com Sat Mar 16 08:07:26 2013 From: ibobrik at gmail.com (ivan babrou) Date: Sat, 16 Mar 2013 12:07:26 +0400 Subject: Support for progressive jpeg in image_filter module In-Reply-To: <20130316035627.GC75995@lo0.su> References: <20130316035627.GC75995@lo0.su> Message-ID: I added cast to int, fixed order and this is plain text version of the patch. diff --git a/ngx_http_image_filter_module.c b/ngx_http_image_filter_module.c index b086e3c..968e460 100644 --- a/ngx_http_image_filter_module.c +++ b/ngx_http_image_filter_module.c @@ -52,6 +52,7 @@ typedef struct { ngx_uint_t offset_y; ngx_flag_t transparency; + ngx_flag_t interlace; ngx_http_complex_value_t *wcv; ngx_http_complex_value_t *hcv; @@ -156,6 +157,13 @@ static ngx_command_t ngx_http_image_filter_commands[] = { offsetof(ngx_http_image_filter_conf_t, transparency), NULL }, + { ngx_string("image_filter_interlace"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_image_filter_conf_t, interlace), + NULL }, + { ngx_string("image_filter_buffer"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, ngx_conf_set_size_slot, @@ -1115,10 +1123,13 @@ ngx_http_image_out(ngx_http_request_t *r, ngx_uint_t type, gdImagePtr img, out = NULL; + conf = ngx_http_get_module_loc_conf(r, ngx_http_image_filter_module); + + gdImageInterlace(img, (int) conf->interlace); + switch (type) { case NGX_HTTP_IMAGE_JPEG: - conf = ngx_http_get_module_loc_conf(r, ngx_http_image_filter_module); jq = 
ngx_http_image_filter_get_value(r, conf->jqcv, conf->jpeg_quality); if (jq <= 0) { @@ -1237,6 +1248,7 @@ ngx_http_image_filter_create_conf(ngx_conf_t *cf) conf->sharpen = NGX_CONF_UNSET_UINT; conf->angle = NGX_CONF_UNSET_UINT; conf->transparency = NGX_CONF_UNSET; + conf->interlace = NGX_CONF_UNSET; conf->buffer_size = NGX_CONF_UNSET_SIZE; conf->offset_x = NGX_CONF_UNSET_UINT; conf->offset_y = NGX_CONF_UNSET_UINT; @@ -1292,6 +1304,8 @@ ngx_http_image_filter_merge_conf(ngx_conf_t *cf, void *parent, void *child) ngx_conf_merge_value(conf->transparency, prev->transparency, 1); + ngx_conf_merge_value(conf->interlace, prev->interlace, 0); + ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, 1 * 1024 * 1024); On 16 March 2013 07:56, Ruslan Ermilov wrote: > On Wed, Mar 13, 2013 at 10:04:35PM +0400, ivan babrou wrote: >> This patch adds support for progressive jpeg and pgn encoding with >> image_filter_interlace on/off setting. Google suggests to use progressive >> jpegs so why not with nginx? > > Your MUA broke the text patch, and HTML with the patch is generally > meaningless. Please find a way to properly send patches (experiment > locally). 
> >> diff --git a/ngx_http_image_filter_module.c b/ngx_http_image_filter_module.c >> index b086e3c..cc44d90 100644 >> --- a/ngx_http_image_filter_module.c >> +++ b/ngx_http_image_filter_module.c >> @@ -52,6 +52,7 @@ typedef struct { >> ngx_uint_t offset_y; >> >> ngx_flag_t transparency; >> + ngx_flag_t interlace; >> >> ngx_http_complex_value_t *wcv; >> ngx_http_complex_value_t *hcv; >> @@ -142,6 +143,13 @@ static ngx_command_t ngx_http_image_filter_commands[] >> = { >> 0, >> NULL }, >> >> + { ngx_string("image_filter_interlace"), >> + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, >> + ngx_conf_set_flag_slot, >> + NGX_HTTP_LOC_CONF_OFFSET, >> + offsetof(ngx_http_image_filter_conf_t, interlace), >> + NULL }, >> + > > Please keep the same order here as in ngx_http_image_filter_conf_t and > ngx_http_image_filter_{create,merge}_conf(). That is (sharpen, > transparency, interlace). > >> { ngx_string("image_filter_sharpen"), >> >> NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, >> ngx_http_image_filter_sharpen, >> @@ -1115,10 +1123,13 @@ ngx_http_image_out(ngx_http_request_t *r, >> ngx_uint_t type, gdImagePtr img, >> >> out = NULL; >> >> + conf = ngx_http_get_module_loc_conf(r, ngx_http_image_filter_module); >> + >> + gdImageInterlace(img, conf->interlace); >> + > > conf->interlace which is of type ngx_flag_t isn't the same > as "int" that this function expects. This will cause warnings > if compiled with -Wshorten-64-to-32 on 64-bit platforms. > While nginx has several such issues at the moment, adding > another one is not welcome. Cast it to "int" here. 
> >> switch (type) { >> >> case NGX_HTTP_IMAGE_JPEG: >> - conf = ngx_http_get_module_loc_conf(r, >> ngx_http_image_filter_module); >> >> jq = ngx_http_image_filter_get_value(r, conf->jqcv, >> conf->jpeg_quality); >> if (jq <= 0) { >> @@ -1237,6 +1248,7 @@ ngx_http_image_filter_create_conf(ngx_conf_t *cf) >> conf->sharpen = NGX_CONF_UNSET_UINT; >> conf->angle = NGX_CONF_UNSET_UINT; >> conf->transparency = NGX_CONF_UNSET; >> + conf->interlace = NGX_CONF_UNSET; >> conf->buffer_size = NGX_CONF_UNSET_SIZE; >> conf->offset_x = NGX_CONF_UNSET_UINT; >> conf->offset_y = NGX_CONF_UNSET_UINT; >> @@ -1292,6 +1304,8 @@ ngx_http_image_filter_merge_conf(ngx_conf_t *cf, void >> *parent, void *child) >> >> ngx_conf_merge_value(conf->transparency, prev->transparency, 1); >> >> + ngx_conf_merge_value(conf->interlace, prev->interlace, 0); >> + >> ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, >> 1 * 1024 * 1024); >> > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Regards, Ian Babrou http://bobrik.name http://twitter.com/ibobrik skype:i.babrou From ru at nginx.com Sat Mar 16 17:59:35 2013 From: ru at nginx.com (Ruslan Ermilov) Date: Sat, 16 Mar 2013 21:59:35 +0400 Subject: Support for progressive jpeg in image_filter module In-Reply-To: References: <20130316035627.GC75995@lo0.su> Message-ID: <20130316175935.GE75995@lo0.su> On Sat, Mar 16, 2013 at 12:07:26PM +0400, ivan babrou wrote: > I added cast to int, fixed order and this is plain text version of the patch. It's still broken [1], please find a way to send non-broken patches. $ patch < p patching file ngx_http_image_filter_module.c Hunk #1 succeeded at 45 with fuzz 1 (offset -7 lines). Hunk #2 succeeded at 144 (offset -13 lines). patch: **** malformed patch at line 30: ngx_uint_t type, gdImagePtr img, > diff --git a/ngx_http_image_filter_module.c b/ngx_http_image_filter_module.c [...] 
> @@ -1115,10 +1123,13 @@ ngx_http_image_out(ngx_http_request_t *r, > ngx_uint_t type, gdImagePtr img, [1] As you can see, your MUA split this line into two. > > out = NULL; > > + conf = ngx_http_get_module_loc_conf(r, ngx_http_image_filter_module); > + > + gdImageInterlace(img, (int) conf->interlace); > + I think ngx_http_image_out() is the wrong place for this. How's this instead? diff --git a/src/http/modules/ngx_http_image_filter_module.c b/src/http/modules/ngx_http_image_filter_module.c --- a/src/http/modules/ngx_http_image_filter_module.c +++ b/src/http/modules/ngx_http_image_filter_module.c @@ -45,6 +45,7 @@ typedef struct { ngx_uint_t sharpen; ngx_flag_t transparency; + ngx_flag_t interlace; ngx_http_complex_value_t *wcv; ngx_http_complex_value_t *hcv; @@ -143,6 +144,13 @@ static ngx_command_t ngx_http_image_fil offsetof(ngx_http_image_filter_conf_t, transparency), NULL }, + { ngx_string("image_filter_interlace"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_image_filter_conf_t, interlace), + NULL }, + { ngx_string("image_filter_buffer"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, ngx_conf_set_size_slot, @@ -963,6 +971,8 @@ transparent: gdImageSharpen(dst, sharpen); } + gdImageInterlace(dst, (int) conf->interlace); + out = ngx_http_image_out(r, ctx->type, dst, &size); ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, @@ -1186,6 +1196,7 @@ ngx_http_image_filter_create_conf(ngx_co conf->jpeg_quality = NGX_CONF_UNSET_UINT; conf->sharpen = NGX_CONF_UNSET_UINT; conf->transparency = NGX_CONF_UNSET; + conf->interlace = NGX_CONF_UNSET; conf->buffer_size = NGX_CONF_UNSET_SIZE; return conf; @@ -1234,6 +1245,8 @@ ngx_http_image_filter_merge_conf(ngx_con ngx_conf_merge_value(conf->transparency, prev->transparency, 1); + ngx_conf_merge_value(conf->interlace, prev->interlace, 0); + ngx_conf_merge_size_value(conf->buffer_size, 
prev->buffer_size, 1 * 1024 * 1024); From ibobrik at gmail.com Sat Mar 16 18:06:34 2013 From: ibobrik at gmail.com (ivan babrou) Date: Sat, 16 Mar 2013 22:06:34 +0400 Subject: Support for progressive jpeg in image_filter module In-Reply-To: <20130316175935.GE75995@lo0.su> References: <20130316035627.GC75995@lo0.su> <20130316175935.GE75995@lo0.su> Message-ID: Maybe you're right about moving gdImageInterlace from ngx_http_image_out. Should I fix something else or is it okay now? On 16 March 2013 21:59, Ruslan Ermilov wrote: > On Sat, Mar 16, 2013 at 12:07:26PM +0400, ivan babrou wrote: > > I added cast to int, fixed order and this is plain text version of the > patch. > > It's still broken [1], please find a way to send non-broken patches. > > $ patch < p > patching file ngx_http_image_filter_module.c > Hunk #1 succeeded at 45 with fuzz 1 (offset -7 lines). > Hunk #2 succeeded at 144 (offset -13 lines). > patch: **** malformed patch at line 30: ngx_uint_t type, gdImagePtr img, > > > diff --git a/ngx_http_image_filter_module.c > b/ngx_http_image_filter_module.c > [...] > > @@ -1115,10 +1123,13 @@ ngx_http_image_out(ngx_http_request_t *r, > > ngx_uint_t type, gdImagePtr img, > > [1] As you can see, your MUA split this line into two. > > > > > out = NULL; > > > > + conf = ngx_http_get_module_loc_conf(r, > ngx_http_image_filter_module); > > + > > + gdImageInterlace(img, (int) conf->interlace); > > + > > I think ngx_http_image_out() is the wrong place for this. > > How's this instead? 
> > diff --git a/src/http/modules/ngx_http_image_filter_module.c > b/src/http/modules/ngx_http_image_filter_module.c > --- a/src/http/modules/ngx_http_image_filter_module.c > +++ b/src/http/modules/ngx_http_image_filter_module.c > @@ -45,6 +45,7 @@ typedef struct { > ngx_uint_t sharpen; > > ngx_flag_t transparency; > + ngx_flag_t interlace; > > ngx_http_complex_value_t *wcv; > ngx_http_complex_value_t *hcv; > @@ -143,6 +144,13 @@ static ngx_command_t ngx_http_image_fil > offsetof(ngx_http_image_filter_conf_t, transparency), > NULL }, > > + { ngx_string("image_filter_interlace"), > + > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, > + ngx_conf_set_flag_slot, > + NGX_HTTP_LOC_CONF_OFFSET, > + offsetof(ngx_http_image_filter_conf_t, interlace), > + NULL }, > + > { ngx_string("image_filter_buffer"), > > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > ngx_conf_set_size_slot, > @@ -963,6 +971,8 @@ transparent: > gdImageSharpen(dst, sharpen); > } > > + gdImageInterlace(dst, (int) conf->interlace); > + > out = ngx_http_image_out(r, ctx->type, dst, &size); > > ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, > @@ -1186,6 +1196,7 @@ ngx_http_image_filter_create_conf(ngx_co > conf->jpeg_quality = NGX_CONF_UNSET_UINT; > conf->sharpen = NGX_CONF_UNSET_UINT; > conf->transparency = NGX_CONF_UNSET; > + conf->interlace = NGX_CONF_UNSET; > conf->buffer_size = NGX_CONF_UNSET_SIZE; > > return conf; > @@ -1234,6 +1245,8 @@ ngx_http_image_filter_merge_conf(ngx_con > > ngx_conf_merge_value(conf->transparency, prev->transparency, 1); > > + ngx_conf_merge_value(conf->interlace, prev->interlace, 0); > + > ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, > 1 * 1024 * 1024); > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- Regards, Ian Babrou http://bobrik.name http://twitter.com/ibobrik skype:i.babrou 
-------------- next part -------------- An HTML attachment was scrubbed... URL: From ru at nginx.com Sat Mar 16 18:18:31 2013 From: ru at nginx.com (Ruslan Ermilov) Date: Sat, 16 Mar 2013 22:18:31 +0400 Subject: Support for progressive jpeg in image_filter module In-Reply-To: References: <20130316035627.GC75995@lo0.su> <20130316175935.GE75995@lo0.su> Message-ID: <20130316181831.GF75995@lo0.su> On Sat, Mar 16, 2013 at 10:06:34PM +0400, ivan babrou wrote: > Maybe you're right about moving gdImageInterlace > from ngx_http_image_out. Should I fix something else or is it okay now? I like the patch in the form I sent it back. From mdounin at mdounin.ru Sun Mar 17 00:21:41 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sun, 17 Mar 2013 04:21:41 +0400 Subject: [PATCH] Guard against failed allocation during binary upgrade In-Reply-To: <20130316035838.GD75995@lo0.su> References: <20130316035838.GD75995@lo0.su> Message-ID: <20130317002141.GO15378@mdounin.ru> Hello! On Sat, Mar 16, 2013 at 07:58:38AM +0400, Ruslan Ermilov wrote: > On Fri, Mar 15, 2013 at 02:27:57PM -0700, Piotr Sikora wrote: > > Hey, > > pretty obvious patch attached ;) > > Your patch obviously looks good. :) Please commit then.
> > diff -r a29c574d61fa src/core/nginx.c > > --- a/src/core/nginx.c Fri Mar 15 20:00:49 2013 +0000 > > +++ b/src/core/nginx.c Fri Mar 15 14:22:04 2013 -0700 > > @@ -594,6 +594,9 @@ > > var = ngx_alloc(sizeof(NGINX_VAR) > > + cycle->listening.nelts * (NGX_INT32_LEN + 1) + 2, > > cycle->log); > > + if (var == NULL) { > > + return NGX_INVALID_PID; > > + } > > > > p = ngx_cpymem(var, NGINX_VAR "=", sizeof(NGINX_VAR)); > > -- Maxim Dounin http://nginx.org/en/donation.html From ru at nginx.com Mon Mar 18 07:13:58 2013 From: ru at nginx.com (ru at nginx.com) Date: Mon, 18 Mar 2013 07:13:58 +0000 Subject: [nginx] svn commit: r5117 - trunk/src/core Message-ID: <20130318071358.614703F9E7F@mail.nginx.com> Author: ru Date: 2013-03-18 07:13:57 +0000 (Mon, 18 Mar 2013) New Revision: 5117 URL: http://trac.nginx.org/nginx/changeset/5117/nginx Log: Core: guard against failed allocation during binary upgrade. Patch by Piotr Sikora. Modified: trunk/src/core/nginx.c Modified: trunk/src/core/nginx.c =================================================================== --- trunk/src/core/nginx.c 2013-03-15 20:00:49 UTC (rev 5116) +++ trunk/src/core/nginx.c 2013-03-18 07:13:57 UTC (rev 5117) @@ -594,6 +594,9 @@ var = ngx_alloc(sizeof(NGINX_VAR) + cycle->listening.nelts * (NGX_INT32_LEN + 1) + 2, cycle->log); + if (var == NULL) { + return NGX_INVALID_PID; + } p = ngx_cpymem(var, NGINX_VAR "=", sizeof(NGINX_VAR)); From ru at nginx.com Mon Mar 18 07:14:47 2013 From: ru at nginx.com (Ruslan Ermilov) Date: Mon, 18 Mar 2013 11:14:47 +0400 Subject: [PATCH] Guard against failed allocation during binary upgrade In-Reply-To: <20130317002141.GO15378@mdounin.ru> References: <20130316035838.GD75995@lo0.su> <20130317002141.GO15378@mdounin.ru> Message-ID: <20130318071447.GA77559@lo0.su> On Sun, Mar 17, 2013 at 04:21:41AM +0400, Maxim Dounin wrote: > On Sat, Mar 16, 2013 at 07:58:38AM +0400, Ruslan Ermilov wrote: > > > On Fri, Mar 15, 2013 at 02:27:57PM -0700, Piotr Sikora wrote: > > > Hey, > > > pretty 
obvious patch attached ;) > > > > Your patch obviously looks good. :) > > Please commit then. http://trac.nginx.org/nginx/changeset/5117/nginx Thanks, Piotr! From mdounin at mdounin.ru Mon Mar 18 14:50:29 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 18 Mar 2013 14:50:29 +0000 Subject: [nginx] svn commit: r5118 - trunk/src/http/modules Message-ID: <20130318145029.B65023F9F8D@mail.nginx.com> Author: mdounin Date: 2013-03-18 14:50:29 +0000 (Mon, 18 Mar 2013) New Revision: 5118 URL: http://trac.nginx.org/nginx/changeset/5118/nginx Log: The limit_req_status and limit_conn_status directives. Patch by Nick Marden, with minor changes. Modified: trunk/src/http/modules/ngx_http_limit_conn_module.c trunk/src/http/modules/ngx_http_limit_req_module.c Modified: trunk/src/http/modules/ngx_http_limit_conn_module.c =================================================================== --- trunk/src/http/modules/ngx_http_limit_conn_module.c 2013-03-18 07:13:57 UTC (rev 5117) +++ trunk/src/http/modules/ngx_http_limit_conn_module.c 2013-03-18 14:50:29 UTC (rev 5118) @@ -40,6 +40,7 @@ typedef struct { ngx_array_t limits; ngx_uint_t log_level; + ngx_uint_t status_code; } ngx_http_limit_conn_conf_t; @@ -74,6 +75,11 @@ }; +static ngx_conf_num_bounds_t ngx_http_limit_conn_status_bounds = { + ngx_conf_check_num_bounds, 400, 599 +}; + + static ngx_command_t ngx_http_limit_conn_commands[] = { { ngx_string("limit_conn_zone"), @@ -104,6 +110,13 @@ offsetof(ngx_http_limit_conn_conf_t, log_level), &ngx_http_limit_conn_log_levels }, + { ngx_string("limit_conn_status"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, + ngx_conf_set_num_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_limit_conn_conf_t, status_code), + &ngx_http_limit_conn_status_bounds }, + ngx_null_command }; @@ -206,7 +219,7 @@ if (node == NULL) { ngx_shmtx_unlock(&shpool->mutex); ngx_http_limit_conn_cleanup_all(r->pool); - return NGX_HTTP_SERVICE_UNAVAILABLE; + return 
lccf->status_code; } lc = (ngx_http_limit_conn_node_t *) &node->color; @@ -231,7 +244,7 @@ &limits[i].shm_zone->shm.name); ngx_http_limit_conn_cleanup_all(r->pool); - return NGX_HTTP_SERVICE_UNAVAILABLE; + return lccf->status_code; } lc->conn++; @@ -467,6 +480,7 @@ */ conf->log_level = NGX_CONF_UNSET_UINT; + conf->status_code = NGX_CONF_UNSET_UINT; return conf; } @@ -483,6 +497,8 @@ } ngx_conf_merge_uint_value(conf->log_level, prev->log_level, NGX_LOG_ERR); + ngx_conf_merge_uint_value(conf->status_code, prev->status_code, + NGX_HTTP_SERVICE_UNAVAILABLE); return NGX_CONF_OK; } Modified: trunk/src/http/modules/ngx_http_limit_req_module.c =================================================================== --- trunk/src/http/modules/ngx_http_limit_req_module.c 2013-03-18 07:13:57 UTC (rev 5117) +++ trunk/src/http/modules/ngx_http_limit_req_module.c 2013-03-18 14:50:29 UTC (rev 5118) @@ -53,6 +53,7 @@ ngx_array_t limits; ngx_uint_t limit_log_level; ngx_uint_t delay_log_level; + ngx_uint_t status_code; } ngx_http_limit_req_conf_t; @@ -84,6 +85,11 @@ }; +static ngx_conf_num_bounds_t ngx_http_limit_req_status_bounds = { + ngx_conf_check_num_bounds, 400, 599 +}; + + static ngx_command_t ngx_http_limit_req_commands[] = { { ngx_string("limit_req_zone"), @@ -107,6 +113,13 @@ offsetof(ngx_http_limit_req_conf_t, limit_log_level), &ngx_http_limit_req_log_levels }, + { ngx_string("limit_req_status"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, + ngx_conf_set_num_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_limit_req_conf_t, status_code), + &ngx_http_limit_req_status_bounds }, + ngx_null_command }; @@ -245,7 +258,7 @@ ctx->node = NULL; } - return NGX_HTTP_SERVICE_UNAVAILABLE; + return lrcf->status_code; } /* rc == NGX_AGAIN || rc == NGX_OK */ @@ -682,6 +695,7 @@ */ conf->limit_log_level = NGX_CONF_UNSET_UINT; + conf->status_code = NGX_CONF_UNSET_UINT; return conf; } @@ -703,6 +717,9 @@ conf->delay_log_level = (conf->limit_log_level == 
NGX_LOG_INFO) ? NGX_LOG_INFO : conf->limit_log_level + 1; + ngx_conf_merge_uint_value(conf->status_code, prev->status_code, + NGX_HTTP_SERVICE_UNAVAILABLE); + return NGX_CONF_OK; } From mdounin at mdounin.ru Mon Mar 18 14:51:48 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 18 Mar 2013 18:51:48 +0400 Subject: Patch proposal: allow alternatives to 503 status code in limit_req module In-Reply-To: References: Message-ID: <20130318145148.GC15378@mdounin.ru> Hello! On Tue, Mar 05, 2013 at 01:28:44PM -0500, Nick Marden wrote: [...] > I understand what you are saying and have made the corresponding changes to > my patch (attached). I've committed the patch (with minor style fixes), thanks! -- Maxim Dounin http://nginx.org/en/donation.html From crk_world at yahoo.com.cn Tue Mar 19 07:26:25 2013 From: crk_world at yahoo.com.cn (chen cw) Date: Tue, 19 Mar 2013 15:26:25 +0800 Subject: ssl_shutdown() may fail when used with http lingering close Message-ID: Hi when we turn lingering_close on, https may fail when it is going to send "Shutdown Notification" because nginx has already closed write peer. when we turn on 'lingering_close always', nginx will never send this notification again. -- Charles Chen Software Engineer Server Platforms Team at Taobao.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From crk_world at yahoo.com.cn Tue Mar 19 07:44:00 2013 From: crk_world at yahoo.com.cn (chen cw) Date: Tue, 19 Mar 2013 15:44:00 +0800 Subject: =?UTF-8?Q?https_=E2=80=98close_notify=E2=80=99_may_be_lost?= Message-ID: Hi, again i found sometimes https 'close notify' from nginx could be lost according to the network. when this packet was lost, nginx would not retransfer it because nginx had already reset the connection. in this case, some client, e.g. apache httpclient, would fail because of ssl timeout. 
this case did not occur on apache, because it shutdown ssl first, then turned on lingering close, by which it succeeded to avoid resetting connection. -- -- Charles Chen Software Engineer Server Platforms Team at Taobao.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From ru at nginx.com Tue Mar 19 08:13:49 2013 From: ru at nginx.com (ru at nginx.com) Date: Tue, 19 Mar 2013 08:13:49 +0000 Subject: [nginx] svn commit: r5119 - trunk/src/http/modules Message-ID: <20130319081349.64F893F9F9F@mail.nginx.com> Author: ru Date: 2013-03-19 08:13:48 +0000 (Tue, 19 Mar 2013) New Revision: 5119 URL: http://trac.nginx.org/nginx/changeset/5119/nginx Log: Image filter: the "image_filter_interlace" directive. Patch by Ian Babrou, with minor changes. Modified: trunk/src/http/modules/ngx_http_image_filter_module.c Modified: trunk/src/http/modules/ngx_http_image_filter_module.c =================================================================== --- trunk/src/http/modules/ngx_http_image_filter_module.c 2013-03-18 14:50:29 UTC (rev 5118) +++ trunk/src/http/modules/ngx_http_image_filter_module.c 2013-03-19 08:13:48 UTC (rev 5119) @@ -45,6 +45,7 @@ ngx_uint_t sharpen; ngx_flag_t transparency; + ngx_flag_t interlace; ngx_http_complex_value_t *wcv; ngx_http_complex_value_t *hcv; @@ -143,6 +144,13 @@ offsetof(ngx_http_image_filter_conf_t, transparency), NULL }, + { ngx_string("image_filter_interlace"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_image_filter_conf_t, interlace), + NULL }, + { ngx_string("image_filter_buffer"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, ngx_conf_set_size_slot, @@ -963,6 +971,8 @@ gdImageSharpen(dst, sharpen); } + gdImageInterlace(dst, (int) conf->interlace); + out = ngx_http_image_out(r, ctx->type, dst, &size); ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, @@ -1186,6 +1196,7 @@ 
conf->jpeg_quality = NGX_CONF_UNSET_UINT; conf->sharpen = NGX_CONF_UNSET_UINT; conf->transparency = NGX_CONF_UNSET; + conf->interlace = NGX_CONF_UNSET; conf->buffer_size = NGX_CONF_UNSET_SIZE; return conf; @@ -1234,6 +1245,8 @@ ngx_conf_merge_value(conf->transparency, prev->transparency, 1); + ngx_conf_merge_value(conf->interlace, prev->interlace, 0); + ngx_conf_merge_size_value(conf->buffer_size, prev->buffer_size, 1 * 1024 * 1024); From ru at nginx.com Tue Mar 19 08:15:08 2013 From: ru at nginx.com (Ruslan Ermilov) Date: Tue, 19 Mar 2013 12:15:08 +0400 Subject: Support for progressive jpeg in image_filter module In-Reply-To: <20130316181831.GF75995@lo0.su> References: <20130316035627.GC75995@lo0.su> <20130316175935.GE75995@lo0.su> <20130316181831.GF75995@lo0.su> Message-ID: <20130319081508.GC85764@lo0.su> On Sat, Mar 16, 2013 at 10:18:31PM +0400, Ruslan Ermilov wrote: > On Sat, Mar 16, 2013 at 10:06:34PM +0400, ivan babrou wrote: > > Maybe you're right about moving?gdImageInterlace > > from?ngx_http_image_out. Should I fix something else or is it okay now? > > I like the patch in the form I sent it back. https://trac.nginx.org/nginx/changeset/5119/nginx Documentation will be updated soon. From ibobrik at gmail.com Tue Mar 19 08:19:42 2013 From: ibobrik at gmail.com (ivan babrou) Date: Tue, 19 Mar 2013 12:19:42 +0400 Subject: Support for progressive jpeg in image_filter module In-Reply-To: <20130319081508.GC85764@lo0.su> References: <20130316035627.GC75995@lo0.su> <20130316175935.GE75995@lo0.su> <20130316181831.GF75995@lo0.su> <20130319081508.GC85764@lo0.su> Message-ID: Thank you! On 19 March 2013 12:15, Ruslan Ermilov wrote: > On Sat, Mar 16, 2013 at 10:18:31PM +0400, Ruslan Ermilov wrote: > > On Sat, Mar 16, 2013 at 10:06:34PM +0400, ivan babrou wrote: > > > Maybe you're right about moving gdImageInterlace > > > from ngx_http_image_out. Should I fix something else or is it okay > now? > > > > I like the patch in the form I sent it back. 
> > https://trac.nginx.org/nginx/changeset/5119/nginx > > Documentation will be updated soon. > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- Regards, Ian Babrou http://bobrik.name http://twitter.com/ibobrik skype:i.babrou -------------- next part -------------- An HTML attachment was scrubbed... URL: From dmitry.petroff at gmail.com Tue Mar 19 10:11:08 2013 From: dmitry.petroff at gmail.com (Dmitry Petrov) Date: Tue, 19 Mar 2013 14:11:08 +0400 Subject: add_header directive is not working when r->headers_out.status == NGX_HTTP_CREATED Message-ID: Hello I was trying to add some custom headers via add_header directive to webdav response and run into the problem: if response code was 201 (file created), then custom headers weren't added. The reason in that big if statement in ngx_http_headers_filter doesn't check for NGX_HTTP_CREATED. Is it intended behavior that custom headers aren't added to 201 responses? If we want to add headers to any 2xx response, then why just don't change if ((conf->expires == NGX_HTTP_EXPIRES_OFF && conf->headers == NULL) || r != r->main || (r->headers_out.status != NGX_HTTP_OK && r->headers_out.status != NGX_HTTP_CREATED && r->headers_out.status != NGX_HTTP_NO_CONTENT && r->headers_out.status != NGX_HTTP_PARTIAL_CONTENT && r->headers_out.status != NGX_HTTP_MOVED_PERMANENTLY && r->headers_out.status != NGX_HTTP_MOVED_TEMPORARILY && r->headers_out.status != NGX_HTTP_SEE_OTHER && r->headers_out.status != NGX_HTTP_NOT_MODIFIED && r->headers_out.status != NGX_HTTP_TEMPORARY_REDIRECT)) { return ngx_http_next_header_filter(r); } to if ((conf->expires == NGX_HTTP_EXPIRES_OFF && conf->headers == NULL) || r != r->main || r->headers_out.status / 100 != 2) { return ngx_http_next_header_filter(r); } -- Regards, Dmitry -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From dmitry.petroff at gmail.com Tue Mar 19 10:59:22 2013 From: dmitry.petroff at gmail.com (Dmitry Petrov) Date: Tue, 19 Mar 2013 14:59:22 +0400 Subject: add_header directive is not working when r->headers_out.status == NGX_HTTP_CREATED In-Reply-To: References: Message-ID: Oh, never mind. This is fixed in recent nginx version. On Tue, Mar 19, 2013 at 2:11 PM, Dmitry Petrov wrote: > Hello > > I was trying to add some custom headers via add_header directive to webdav > response and run into the problem: if response code was 201 (file created), > then custom headers weren't added. The reason in that big if statement in > ngx_http_headers_filter doesn't check for NGX_HTTP_CREATED. Is it intended > behavior that custom headers aren't added to 201 responses? > > If we want to add headers to any 2xx response, then why just don't change > if ((conf->expires == NGX_HTTP_EXPIRES_OFF && conf->headers == NULL) > || r != r->main > || (r->headers_out.status != NGX_HTTP_OK > && r->headers_out.status != NGX_HTTP_CREATED > && r->headers_out.status != NGX_HTTP_NO_CONTENT > && r->headers_out.status != NGX_HTTP_PARTIAL_CONTENT > && r->headers_out.status != NGX_HTTP_MOVED_PERMANENTLY > && r->headers_out.status != NGX_HTTP_MOVED_TEMPORARILY > && r->headers_out.status != NGX_HTTP_SEE_OTHER > && r->headers_out.status != NGX_HTTP_NOT_MODIFIED > && r->headers_out.status != NGX_HTTP_TEMPORARY_REDIRECT)) > { > return ngx_http_next_header_filter(r); > } > to > if ((conf->expires == NGX_HTTP_EXPIRES_OFF && conf->headers == NULL) > || r != r->main > || r->headers_out.status / 100 != 2) > { > return ngx_http_next_header_filter(r); > } > > -- > Regards, > Dmitry > -- Regards, Dmitry -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From nginx at lukaperkov.net Tue Mar 19 12:31:38 2013 From: nginx at lukaperkov.net (Luka Perkov) Date: Tue, 19 Mar 2013 13:31:38 +0100 Subject: 100-continue and 401 Message-ID: <20130319123138-8002@mutt-kz> Hi all, I'm using nginx as a frontend for my SCGI application and I want to handle authentication in my SCGI code. I have to deal with POST requests. Is it ok that nginx sends "401 Unauthorized" after sending "100 Continue"? Are both requests below correct? I'm asking because of this curl message: "HTTP error before end of send, stop sending". Luka $ curl -v -X POST -d "@/path/to/some/blob" http://127.0.0.1:8000/ * About to connect() to 127.0.0.1 port 8000 (#0) * Trying 127.0.0.1... * Connected to 127.0.0.1 (127.0.0.1) port 8000 (#0) > POST / HTTP/1.1 > User-Agent: curl/7.29.0 > Host: 127.0.0.1:8000 > Accept: */* > Content-Length: 3398 > Content-Type: application/x-www-form-urlencoded > Expect: 100-continue > < HTTP/1.1 100 Continue < HTTP/1.1 401 Unauthorized < Server: nginx/1.3.14 < Date: Tue, 19 Mar 2013 12:11:16 GMT < Transfer-Encoding: chunked < Connection: keep-alive < WWW-Authenticate: Basic realm="bla-bla-bla" * HTTP error before end of send, stop sending < * Closing connection 0 $ curl -v -X POST -d "@/dev/null" http://127.0.0.1:8000/ * About to connect() to 127.0.0.1 port 8000 (#0) * Trying 127.0.0.1... 
* Connected to 127.0.0.1 (127.0.0.1) port 8000 (#0) > POST / HTTP/1.1 > User-Agent: curl/7.29.0 > Host: 127.0.0.1:8000 > Accept: */* > Content-Length: 0 > Content-Type: application/x-www-form-urlencoded > < HTTP/1.1 401 Unauthorized < Server: nginx/1.3.14 < Date: Tue, 19 Mar 2013 12:23:17 GMT < Transfer-Encoding: chunked < Connection: keep-alive < WWW-Authenticate: Basic realm="bla-bla-bla" * HTTP error before end of send, stop sending < * Closing connection 0 From vshebordaev at mail.ru Tue Mar 19 15:36:07 2013 From: vshebordaev at mail.ru (Vladimir Shebordaev) Date: Tue, 19 Mar 2013 19:36:07 +0400 Subject: 100-continue and 401 In-Reply-To: <20130319123138-8002@mutt-kz> References: <20130319123138-8002@mutt-kz> Message-ID: <514885E7.8030403@mail.ru> Hi, On 19.03.2013 16:31, Luka Perkov wrote: > Hi all, > > I'm using nginx as a frontend for my SCGI application and I want to > handle authentication in my SCGI code. I have to deal with POST > requests. Is it ok that nginx sends "401 Unauthorized" after sending > "100 Continue"? > > Are both requests bellow correct? > Both requests do look basically correct just like the nginx's and curl's behavior. You didn't supply any credentials for authentication to continue. You can use -u curl option to explicitly specify on the command line either -n option to read 'em from your netrc. Curl will at least try to authenticate itself with the supplied credentials then. You could also find this document useful > I'm asking because of this curl message: "HTTP error before end of send, > stop sending". > Hope it helps. Regards, Vladimir > Luka > > $ curl -v -X POST -d "@/path/to/some/blob" http://127.0.0.1:8000/ > * About to connect() to 127.0.0.1 port 8000 (#0) > * Trying 127.0.0.1... 
> * Connected to 127.0.0.1 (127.0.0.1) port 8000 (#0) >> POST / HTTP/1.1 >> User-Agent: curl/7.29.0 >> Host: 127.0.0.1:8000 >> Accept: */* >> Content-Length: 3398 >> Content-Type: application/x-www-form-urlencoded >> Expect: 100-continue >> > < HTTP/1.1 100 Continue > < HTTP/1.1 401 Unauthorized > < Server: nginx/1.3.14 > < Date: Tue, 19 Mar 2013 12:11:16 GMT > < Transfer-Encoding: chunked > < Connection: keep-alive > < WWW-Authenticate: Basic realm="bla-bla-bla" > * HTTP error before end of send, stop sending > < > * Closing connection 0 > > > $ curl -v -X POST -d "@/dev/null" http://127.0.0.1:8000/ > * About to connect() to 127.0.0.1 port 8000 (#0) > * Trying 127.0.0.1... > * Connected to 127.0.0.1 (127.0.0.1) port 8000 (#0) >> POST / HTTP/1.1 >> User-Agent: curl/7.29.0 >> Host: 127.0.0.1:8000 >> Accept: */* >> Content-Length: 0 >> Content-Type: application/x-www-form-urlencoded >> > < HTTP/1.1 401 Unauthorized > < Server: nginx/1.3.14 > < Date: Tue, 19 Mar 2013 12:23:17 GMT > < Transfer-Encoding: chunked > < Connection: keep-alive > < WWW-Authenticate: Basic realm="bla-bla-bla" > * HTTP error before end of send, stop sending > < > * Closing connection 0 > From niq at apache.org Tue Mar 19 17:09:38 2013 From: niq at apache.org (Nick Kew) Date: Tue, 19 Mar 2013 17:09:38 +0000 Subject: 100-continue and 401 In-Reply-To: <20130319123138-8002@mutt-kz> References: <20130319123138-8002@mutt-kz> Message-ID: <8F282430-4484-4424-BE61-AB594EC3637C@apache.org> On 19 Mar 2013, at 12:31, Luka Perkov wrote: > Hi all, > > I'm using nginx as a frontend for my SCGI application and I want to > handle authentication in my SCGI code. I have to deal with POST > requests. Is it ok that nginx sends "401 Unauthorized" after sending > "100 Continue"? > > Are both requests bellow correct? > > I'm asking because of this curl message: "HTTP error before end of send, > stop sending". 
> > Luka > > $ curl -v -X POST -d "@/path/to/some/blob" http://127.0.0.1:8000/ > * About to connect() to 127.0.0.1 port 8000 (#0) > * Trying 127.0.0.1... > * Connected to 127.0.0.1 (127.0.0.1) port 8000 (#0) >> POST / HTTP/1.1 >> User-Agent: curl/7.29.0 >> Host: 127.0.0.1:8000 >> Accept: */* >> Content-Length: 3398 >> Content-Type: application/x-www-form-urlencoded >> Expect: 100-continue That's OK. >> > < HTTP/1.1 100 Continue > < HTTP/1.1 401 Unauthorized That's not OK. It would need at least a blank line between those two to make an intermediate and a final response (without that it's broken). The server should also wait for the payload it's just invited, but perhaps the client didn't wait before sending it? > < Server: nginx/1.3.14 > < Date: Tue, 19 Mar 2013 12:11:16 GMT > < Transfer-Encoding: chunked > < Connection: keep-alive > < WWW-Authenticate: Basic realm="bla-bla-bla" That's all in order in the final (401) response. >> POST / HTTP/1.1 >> User-Agent: curl/7.29.0 >> Host: 127.0.0.1:8000 >> Accept: */* >> Content-Length: 0 >> Content-Type: application/x-www-form-urlencoded That's also OK. > < HTTP/1.1 401 Unauthorized > < Server: nginx/1.3.14 > < Date: Tue, 19 Mar 2013 12:23:17 GMT > < Transfer-Encoding: chunked > < Connection: keep-alive > < WWW-Authenticate: Basic realm="bla-bla-bla" That's OK so long as it sends the (chunked) response promised. > * HTTP error before end of send, stop sending > < > * Closing connection 0 ? but the client thinks it didn't. Whoops! How much of those responses are being generated by your app? I'd expect the server to take care of protocol issues like the intermediate response and chunked encoding - unless your app disables it! 
-- Nick Kew From nginx at lukaperkov.net Wed Mar 20 08:44:18 2013 From: nginx at lukaperkov.net (Luka Perkov) Date: Wed, 20 Mar 2013 09:44:18 +0100 Subject: 100-continue and 401 In-Reply-To: <514885E7.8030403@mail.ru> References: <20130319123138-8002@mutt-kz> <514885E7.8030403@mail.ru> Message-ID: <20130320084418-6229@mutt-kz> Hi Vladimir, On Tue, Mar 19, 2013 at 07:36:07PM +0400, Vladimir Shebordaev wrote: > Both requests do look basically correct just like the nginx's and > curl's behavior. You didn't supply any credentials for > authentication to continue. I know I didn't. That is the example test, if my SCGI daemon gets request without username and password it will reply to nginx with this: Status: 401 Unauthorized WWW-Authenticate: Basic realm="bla-bla-bla" Then nginx will pass that to the client. The problem is that nginx sends "100 Continue" first. I think that my SCGI code is correct, I'm not sure if nginx should send "100 Continue" before "401 Unauthorized". My guess is that the same will happen using CGI, FastCGI, uWSGI or when using any upstream server. Somebody correct me if I'm wrong. Luka From nginx at lukaperkov.net Wed Mar 20 09:15:27 2013 From: nginx at lukaperkov.net (Luka Perkov) Date: Wed, 20 Mar 2013 10:15:27 +0100 Subject: 100-continue and 401 In-Reply-To: <8F282430-4484-4424-BE61-AB594EC3637C@apache.org> References: <20130319123138-8002@mutt-kz> <8F282430-4484-4424-BE61-AB594EC3637C@apache.org> Message-ID: <20130320091527-14773@mutt-kz> Hi Nick, On Tue, Mar 19, 2013 at 05:09:38PM +0000, Nick Kew wrote: > On 19 Mar 2013, at 12:31, Luka Perkov wrote: > > I'm using nginx as a frontend for my SCGI application and I want to > > $ curl -v -X POST -d "@/path/to/some/blob" http://127.0.0.1:8000/ > > * About to connect() to 127.0.0.1 port 8000 (#0) > > * Trying 127.0.0.1... 
> > * Connected to 127.0.0.1 (127.0.0.1) port 8000 (#0) > >> POST / HTTP/1.1 > >> User-Agent: curl/7.29.0 > >> Host: 127.0.0.1:8000 > >> Accept: */* > >> Content-Length: 3398 > >> Content-Type: application/x-www-form-urlencoded > >> Expect: 100-continue > > That's OK. > >> > > < HTTP/1.1 100 Continue > > < HTTP/1.1 401 Unauthorized > > That's not OK. It would need at least a blank line between > those two to make an intermediate and a final response > (without that it's broken). If I add blank line before sending "401 Unauthorized" I end up with this: $ curl -v -X POST -d "@/path/to/some/blob" http://127.0.0.1:8000/ * About to connect() to 127.0.0.1 port 8000 (#0) * Trying 127.0.0.1... * Connected to 127.0.0.1 (127.0.0.1) port 8000 (#0) > POST / HTTP/1.1 > User-Agent: curl/7.29.0 > Host: 127.0.0.1:8000 > Accept: */* > Cookie: queue=bla > Content-Length: 3398 > Content-Type: application/x-www-form-urlencoded > Expect: 100-continue > < HTTP/1.1 100 Continue < HTTP/1.1 200 OK < Server: nginx/1.3.14 < Date: Wed, 20 Mar 2013 09:05:18 GMT < Transfer-Encoding: chunked < Connection: keep-alive < Status: 401 Unauthorized WWW-Authenticate: Basic realm="freeacs-ng" And that is not what I want ;) > The server should also wait for the payload it's just invited, > but perhaps the client didn't wait before sending it? Well, as far as I understand nginx can send SCGI request only after it got the entire post body. And in order to get the post body it needs to send "100 Continue"... > > < WWW-Authenticate: Basic realm="bla-bla-bla" > > That's OK so long as it sends the (chunked) response promised. > > > * HTTP error before end of send, stop sending > > < > > * Closing connection 0 > > ? but the client thinks it didn't. Whoops! I can make tcpdump captures if that would help... > How much of those responses are being generated by your app? > I'd expect the server to take care of protocol issues like the > intermediate response and chunked encoding - unless your app > disables it! 
I would like that my SCGI daemon is in charge of user authorization which will be performed based on HTTP authorization header. Is that possible? And if yes what response should the SCGI daemon send? Luka From dmitry.petroff at gmail.com Wed Mar 20 09:12:11 2013 From: dmitry.petroff at gmail.com (Dmitry Petrov) Date: Wed, 20 Mar 2013 13:12:11 +0400 Subject: 100-continue and 401 In-Reply-To: <20130320084418-6229@mutt-kz> References: <20130319123138-8002@mutt-kz> <514885E7.8030403@mail.ru> <20130320084418-6229@mutt-kz> Message-ID: Hi Luka, There was a post about similar problem on nginx forum with reply from Igor: http://forum.nginx.org/read.php?2,212533,212549#msg-212549 On Wed, Mar 20, 2013 at 12:44 PM, Luka Perkov wrote: > Hi Vladimir, > > On Tue, Mar 19, 2013 at 07:36:07PM +0400, Vladimir Shebordaev wrote: > > Both requests do look basically correct just like the nginx's and > > curl's behavior. You didn't supply any credentials for > > authentication to continue. > > I know I didn't. That is the example test, if my SCGI daemon gets > request without username and password it will reply to nginx with this: > > Status: 401 Unauthorized > WWW-Authenticate: Basic realm="bla-bla-bla" > > Then nginx will pass that to the client. The problem is that nginx sends > "100 Continue" first. I think that my SCGI code is correct, I'm not sure > if nginx should send "100 Continue" before "401 Unauthorized". > > My guess is that the same will happen using CGI, FastCGI, uWSGI or when > using any upstream server. Somebody correct me if I'm wrong. > > Luka > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- Regards, Dmitry -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From vbart at nginx.com Wed Mar 20 09:36:27 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Wed, 20 Mar 2013 09:36:27 +0000 Subject: [nginx] svn commit: r5120 - trunk/src/http Message-ID: <20130320093627.E55D73F9F87@mail.nginx.com> Author: vbart Date: 2013-03-20 09:36:27 +0000 (Wed, 20 Mar 2013) New Revision: 5120 URL: http://trac.nginx.org/nginx/changeset/5120/nginx Log: URI processing code moved to a separate function. This allows to reuse it in the upcoming SPDY module. Modified: trunk/src/http/ngx_http_request.c Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-03-19 08:13:48 UTC (rev 5119) +++ trunk/src/http/ngx_http_request.c 2013-03-20 09:36:27 UTC (rev 5120) @@ -31,6 +31,7 @@ static ngx_int_t ngx_http_process_user_agent(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); +static ngx_int_t ngx_http_process_request_uri(ngx_http_request_t *r); static ngx_int_t ngx_http_process_request_header(ngx_http_request_t *r); static void ngx_http_process_request(ngx_http_request_t *r); static ngx_int_t ngx_http_validate_host(ngx_str_t *host, ngx_pool_t *pool, @@ -838,12 +839,11 @@ static void ngx_http_process_request_line(ngx_event_t *rev) { - ssize_t n; - ngx_int_t rc, rv; - ngx_str_t host; - ngx_connection_t *c; - ngx_http_request_t *r; - ngx_http_core_srv_conf_t *cscf; + ssize_t n; + ngx_int_t rc, rv; + ngx_str_t host; + ngx_connection_t *c; + ngx_http_request_t *r; c = rev->data; r = c->data; @@ -880,130 +880,20 @@ r->request_line.data = r->request_start; r->request_length = r->header_in->pos - r->request_start; + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, + "http request line: \"%V\"", &r->request_line); - if (r->args_start) { - r->uri.len = r->args_start - 1 - r->uri_start; - } else { - r->uri.len = r->uri_end - r->uri_start; - } - - - if (r->complex_uri || r->quoted_uri) { - - r->uri.data = ngx_pnalloc(r->pool, r->uri.len + 1); - if 
(r->uri.data == NULL) { - ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); - return; - } - - cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module); - - rc = ngx_http_parse_complex_uri(r, cscf->merge_slashes); - - if (rc == NGX_HTTP_PARSE_INVALID_REQUEST) { - ngx_log_error(NGX_LOG_INFO, c->log, 0, - "client sent invalid request"); - ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); - return; - } - - } else { - r->uri.data = r->uri_start; - } - - - r->unparsed_uri.len = r->uri_end - r->uri_start; - r->unparsed_uri.data = r->uri_start; - - r->valid_unparsed_uri = r->space_in_uri ? 0 : 1; - r->method_name.len = r->method_end - r->request_start + 1; r->method_name.data = r->request_line.data; - if (r->http_protocol.data) { r->http_protocol.len = r->request_end - r->http_protocol.data; } - - if (r->uri_ext) { - if (r->args_start) { - r->exten.len = r->args_start - 1 - r->uri_ext; - } else { - r->exten.len = r->uri_end - r->uri_ext; - } - - r->exten.data = r->uri_ext; + if (ngx_http_process_request_uri(r) != NGX_OK) { + return; } - - if (r->args_start && r->uri_end > r->args_start) { - r->args.len = r->uri_end - r->args_start; - r->args.data = r->args_start; - } - -#if (NGX_WIN32) - { - u_char *p, *last; - - p = r->uri.data; - last = r->uri.data + r->uri.len; - - while (p < last) { - - if (*p++ == ':') { - - /* - * this check covers "::$data", "::$index_allocation" and - * ":$i30:$index_allocation" - */ - - if (p < last && *p == '$') { - ngx_log_error(NGX_LOG_INFO, c->log, 0, - "client sent unsafe win32 URI"); - ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); - return; - } - } - } - - p = r->uri.data + r->uri.len - 1; - - while (p > r->uri.data) { - - if (*p == ' ') { - p--; - continue; - } - - if (*p == '.') { - p--; - continue; - } - - break; - } - - if (p != r->uri.data + r->uri.len - 1) { - r->uri.len = p + 1 - r->uri.data; - ngx_http_set_exten(r); - } - - } -#endif - - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, - "http request line: \"%V\"", 
&r->request_line); - - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, - "http uri: \"%V\"", &r->uri); - - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, - "http args: \"%V\"", &r->args); - - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, - "http exten: \"%V\"", &r->exten); - if (r->host_start && r->host_end) { host.len = r->host_end - r->host_start; @@ -1095,6 +985,121 @@ } +static ngx_int_t +ngx_http_process_request_uri(ngx_http_request_t *r) +{ + ngx_http_core_srv_conf_t *cscf; + + if (r->args_start) { + r->uri.len = r->args_start - 1 - r->uri_start; + } else { + r->uri.len = r->uri_end - r->uri_start; + } + + if (r->complex_uri || r->quoted_uri) { + + r->uri.data = ngx_pnalloc(r->pool, r->uri.len + 1); + if (r->uri.data == NULL) { + ngx_http_close_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); + return NGX_ERROR; + } + + cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module); + + if (ngx_http_parse_complex_uri(r, cscf->merge_slashes) != NGX_OK) { + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "client sent invalid request"); + ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); + return NGX_ERROR; + } + + } else { + r->uri.data = r->uri_start; + } + + r->unparsed_uri.len = r->uri_end - r->uri_start; + r->unparsed_uri.data = r->uri_start; + + r->valid_unparsed_uri = r->space_in_uri ? 
0 : 1; + + if (r->uri_ext) { + if (r->args_start) { + r->exten.len = r->args_start - 1 - r->uri_ext; + } else { + r->exten.len = r->uri_end - r->uri_ext; + } + + r->exten.data = r->uri_ext; + } + + if (r->args_start && r->uri_end > r->args_start) { + r->args.len = r->uri_end - r->args_start; + r->args.data = r->args_start; + } + +#if (NGX_WIN32) + { + u_char *p, *last; + + p = r->uri.data; + last = r->uri.data + r->uri.len; + + while (p < last) { + + if (*p++ == ':') { + + /* + * this check covers "::$data", "::$index_allocation" and + * ":$i30:$index_allocation" + */ + + if (p < last && *p == '$') { + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "client sent unsafe win32 URI"); + ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); + return NGX_ERROR; + } + } + } + + p = r->uri.data + r->uri.len - 1; + + while (p > r->uri.data) { + + if (*p == ' ') { + p--; + continue; + } + + if (*p == '.') { + p--; + continue; + } + + break; + } + + if (p != r->uri.data + r->uri.len - 1) { + r->uri.len = p + 1 - r->uri.data; + ngx_http_set_exten(r); + } + + } +#endif + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http uri: \"%V\"", &r->uri); + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http args: \"%V\"", &r->args); + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http exten: \"%V\"", &r->exten); + + return NGX_OK; +} + + static void ngx_http_process_request_headers(ngx_event_t *rev) { From vbart at nginx.com Wed Mar 20 10:18:27 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Wed, 20 Mar 2013 10:18:27 +0000 Subject: [nginx] svn commit: r5121 - trunk/src/os/win32 Message-ID: <20130320101827.56B163F9F80@mail.nginx.com> Author: vbart Date: 2013-03-20 10:18:26 +0000 (Wed, 20 Mar 2013) New Revision: 5121 URL: http://trac.nginx.org/nginx/changeset/5121/nginx Log: Win32: disabled MSVC warning about '\0' not fitting into array. We believe that this warning produces more inconvenience than real benefit. 
Here is an example to trigger: u_char a[4] = "test"; Modified: trunk/src/os/win32/ngx_win32_config.h Modified: trunk/src/os/win32/ngx_win32_config.h =================================================================== --- trunk/src/os/win32/ngx_win32_config.h 2013-03-20 09:36:27 UTC (rev 5120) +++ trunk/src/os/win32/ngx_win32_config.h 2013-03-20 10:18:26 UTC (rev 5121) @@ -70,6 +70,9 @@ /* FD_SET() and FD_CLR(): conditional expression is constant */ #pragma warning(disable:4127) +/* array is too small to include a terminating null character */ +#pragma warning(disable:4295) + #endif From vbart at nginx.com Wed Mar 20 10:36:57 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Wed, 20 Mar 2013 10:36:57 +0000 Subject: [nginx] svn commit: r5122 - in trunk: auto src/http src/http/modules Message-ID: <20130320103658.3A3B63FAC11@mail.nginx.com> Author: vbart Date: 2013-03-20 10:36:57 +0000 (Wed, 20 Mar 2013) New Revision: 5122 URL: http://trac.nginx.org/nginx/changeset/5122/nginx Log: Preliminary experimental support for SPDY draft 2. 
Added: trunk/src/http/ngx_http_spdy.c trunk/src/http/ngx_http_spdy.h trunk/src/http/ngx_http_spdy_filter_module.c trunk/src/http/ngx_http_spdy_module.c trunk/src/http/ngx_http_spdy_module.h Modified: trunk/auto/modules trunk/auto/options trunk/auto/sources trunk/src/http/modules/ngx_http_ssl_module.c trunk/src/http/ngx_http.c trunk/src/http/ngx_http.h trunk/src/http/ngx_http_core_module.c trunk/src/http/ngx_http_core_module.h trunk/src/http/ngx_http_parse.c trunk/src/http/ngx_http_request.c trunk/src/http/ngx_http_request.h trunk/src/http/ngx_http_request_body.c trunk/src/http/ngx_http_upstream.c Modified: trunk/auto/modules =================================================================== --- trunk/auto/modules 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/auto/modules 2013-03-20 10:36:57 UTC (rev 5122) @@ -100,6 +100,7 @@ # ngx_http_write_filter # ngx_http_header_filter # ngx_http_chunked_filter +# ngx_http_spdy_filter # ngx_http_range_header_filter # ngx_http_gzip_filter # ngx_http_postpone_filter @@ -118,9 +119,14 @@ HTTP_FILTER_MODULES="$HTTP_WRITE_FILTER_MODULE \ $HTTP_HEADER_FILTER_MODULE \ - $HTTP_CHUNKED_FILTER_MODULE \ - $HTTP_RANGE_HEADER_FILTER_MODULE" + $HTTP_CHUNKED_FILTER_MODULE" +if [ $HTTP_SPDY = YES ]; then + HTTP_FILTER_MODULES="$HTTP_FILTER_MODULES $HTTP_SPDY_FILTER_MODULE" +fi + +HTTP_FILTER_MODULES="$HTTP_FILTER_MODULES $HTTP_RANGE_HEADER_FILTER_MODULE" + if [ $HTTP_GZIP = YES ]; then have=NGX_HTTP_GZIP . auto/have USE_ZLIB=YES @@ -179,6 +185,15 @@ HTTP_SRCS="$HTTP_SRCS $HTTP_USERID_SRCS" fi + +if [ $HTTP_SPDY = YES ]; then + have=NGX_HTTP_SPDY . 
auto/have + USE_ZLIB=YES + HTTP_MODULES="$HTTP_MODULES $HTTP_SPDY_MODULE" + HTTP_DEPS="$HTTP_DEPS $HTTP_SPDY_DEPS" + HTTP_SRCS="$HTTP_SRCS $HTTP_SPDY_SRCS" +fi + HTTP_MODULES="$HTTP_MODULES $HTTP_STATIC_MODULE" if [ $HTTP_GZIP_STATIC = YES ]; then Modified: trunk/auto/options =================================================================== --- trunk/auto/options 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/auto/options 2013-03-20 10:36:57 UTC (rev 5122) @@ -60,6 +60,7 @@ HTTP_CHARSET=YES HTTP_GZIP=YES HTTP_SSL=NO +HTTP_SPDY=NO HTTP_SSI=YES HTTP_POSTPONE=NO HTTP_REALIP=NO @@ -202,6 +203,7 @@ --http-scgi-temp-path=*) NGX_HTTP_SCGI_TEMP_PATH="$value" ;; --with-http_ssl_module) HTTP_SSL=YES ;; + --with-http_spdy_module) HTTP_SPDY=YES ;; --with-http_realip_module) HTTP_REALIP=YES ;; --with-http_addition_module) HTTP_ADDITION=YES ;; --with-http_xslt_module) HTTP_XSLT=YES ;; @@ -349,6 +351,7 @@ --with-ipv6 enable IPv6 support --with-http_ssl_module enable ngx_http_ssl_module + --with-http_spdy_module enable ngx_http_spdy_module --with-http_realip_module enable ngx_http_realip_module --with-http_addition_module enable ngx_http_addition_module --with-http_xslt_module enable ngx_http_xslt_module Modified: trunk/auto/sources =================================================================== --- trunk/auto/sources 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/auto/sources 2013-03-20 10:36:57 UTC (rev 5122) @@ -324,6 +324,15 @@ HTTP_FILE_CACHE_SRCS=src/http/ngx_http_file_cache.c +HTTP_SPDY_MODULE=ngx_http_spdy_module +HTTP_SPDY_FILTER_MODULE=ngx_http_spdy_filter_module +HTTP_SPDY_DEPS="src/http/ngx_http_spdy.h \ + src/http/ngx_http_spdy_module.h" +HTTP_SPDY_SRCS="src/http/ngx_http_spdy.c \ + src/http/ngx_http_spdy_module.c \ + src/http/ngx_http_spdy_filter_module.c" + + HTTP_CHARSET_FILTER_MODULE=ngx_http_charset_filter_module HTTP_CHARSET_SRCS=src/http/modules/ngx_http_charset_filter_module.c Modified: trunk/src/http/modules/ngx_http_ssl_module.c 
=================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.c 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/modules/ngx_http_ssl_module.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -275,13 +275,28 @@ ngx_http_ssl_npn_advertised(ngx_ssl_conn_t *ssl_conn, const unsigned char **out, unsigned int *outlen, void *arg) { -#if (NGX_DEBUG) +#if (NGX_HTTP_SPDY || NGX_DEBUG) ngx_connection_t *c; c = ngx_ssl_get_connection(ssl_conn); ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "SSL NPN advertised"); #endif +#if (NGX_HTTP_SPDY) + { + ngx_http_connection_t *hc; + + hc = c->data; + + if (hc->addr_conf->spdy) { + *out = (unsigned char *) NGX_SPDY_NPN_ADVERTISE NGX_HTTP_NPN_ADVERTISE; + *outlen = sizeof(NGX_SPDY_NPN_ADVERTISE NGX_HTTP_NPN_ADVERTISE) - 1; + + return SSL_TLSEXT_ERR_OK; + } + } +#endif + *out = (unsigned char *) NGX_HTTP_NPN_ADVERTISE; *outlen = sizeof(NGX_HTTP_NPN_ADVERTISE) - 1; Modified: trunk/src/http/ngx_http.c =================================================================== --- trunk/src/http/ngx_http.c 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/ngx_http.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -1225,6 +1225,9 @@ #if (NGX_HTTP_SSL) ngx_uint_t ssl; #endif +#if (NGX_HTTP_SPDY) + ngx_uint_t spdy; +#endif /* * we cannot compare whole sockaddr struct's as kernel @@ -1277,6 +1280,9 @@ #if (NGX_HTTP_SSL) ssl = lsopt->ssl || addr[i].opt.ssl; #endif +#if (NGX_HTTP_SPDY) + spdy = lsopt->spdy || addr[i].opt.spdy; +#endif if (lsopt->set) { @@ -1307,6 +1313,9 @@ #if (NGX_HTTP_SSL) addr[i].opt.ssl = ssl; #endif +#if (NGX_HTTP_SPDY) + addr[i].opt.spdy = spdy; +#endif return NGX_OK; } @@ -1337,6 +1346,14 @@ } } +#if (NGX_HTTP_SPDY && NGX_HTTP_SSL && !defined TLSEXT_TYPE_next_proto_neg) + if (lsopt->spdy && lsopt->ssl) { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "nginx was built without OpenSSL NPN support, " + "SPDY is not enabled for %s", lsopt->addr); + } +#endif + addr = ngx_array_push(&port->addrs); 
if (addr == NULL) { return NGX_ERROR; @@ -1820,6 +1837,9 @@ #if (NGX_HTTP_SSL) addrs[i].conf.ssl = addr[i].opt.ssl; #endif +#if (NGX_HTTP_SPDY) + addrs[i].conf.spdy = addr[i].opt.spdy; +#endif if (addr[i].hash.buckets == NULL && (addr[i].wc_head == NULL @@ -1881,6 +1901,9 @@ #if (NGX_HTTP_SSL) addrs6[i].conf.ssl = addr[i].opt.ssl; #endif +#if (NGX_HTTP_SPDY) + addrs6[i].conf.spdy = addr[i].opt.spdy; +#endif if (addr[i].hash.buckets == NULL && (addr[i].wc_head == NULL Modified: trunk/src/http/ngx_http.h =================================================================== --- trunk/src/http/ngx_http.h 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/ngx_http.h 2013-03-20 10:36:57 UTC (rev 5122) @@ -20,6 +20,10 @@ typedef struct ngx_http_log_ctx_s ngx_http_log_ctx_t; typedef struct ngx_http_chunked_s ngx_http_chunked_t; +#if (NGX_HTTP_SPDY) +typedef struct ngx_http_spdy_stream_s ngx_http_spdy_stream_t; +#endif + typedef ngx_int_t (*ngx_http_header_handler_pt)(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); typedef u_char *(*ngx_http_log_handler_pt)(ngx_http_request_t *r, @@ -35,6 +39,9 @@ #include #include +#if (NGX_HTTP_SPDY) +#include +#endif #if (NGX_HTTP_CACHE) #include #endif @@ -80,12 +87,14 @@ void ngx_http_init_connection(ngx_connection_t *c); +void ngx_http_close_connection(ngx_connection_t *c); #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME int ngx_http_ssl_servername(ngx_ssl_conn_t *ssl_conn, int *ad, void *arg); #endif ngx_int_t ngx_http_parse_request_line(ngx_http_request_t *r, ngx_buf_t *b); +ngx_int_t ngx_http_parse_uri(ngx_http_request_t *r); ngx_int_t ngx_http_parse_complex_uri(ngx_http_request_t *r, ngx_uint_t merge_slashes); ngx_int_t ngx_http_parse_status_line(ngx_http_request_t *r, ngx_buf_t *b, @@ -104,12 +113,17 @@ ngx_http_chunked_t *ctx); +ngx_http_request_t *ngx_http_create_request(ngx_connection_t *c); +ngx_int_t ngx_http_process_request_uri(ngx_http_request_t *r); +ngx_int_t ngx_http_process_request_header(ngx_http_request_t *r); 
+void ngx_http_process_request(ngx_http_request_t *r); void ngx_http_update_location_config(ngx_http_request_t *r); void ngx_http_handler(ngx_http_request_t *r); void ngx_http_run_posted_requests(ngx_connection_t *c); ngx_int_t ngx_http_post_request(ngx_http_request_t *r, ngx_http_posted_request_t *pr); void ngx_http_finalize_request(ngx_http_request_t *r, ngx_int_t rc); +void ngx_http_free_request(ngx_http_request_t *r, ngx_int_t rc); void ngx_http_empty_handler(ngx_event_t *wev); void ngx_http_request_empty_handler(ngx_http_request_t *r); Modified: trunk/src/http/ngx_http_core_module.c =================================================================== --- trunk/src/http/ngx_http_core_module.c 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/ngx_http_core_module.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -2130,6 +2130,13 @@ return NGX_DECLINED; } +#if (NGX_HTTP_SPDY) + if (r->spdy_stream) { + r->gzip_ok = 1; + return NGX_OK; + } +#endif + ae = r->headers_in.accept_encoding; if (ae == NULL) { return NGX_DECLINED; @@ -2464,6 +2471,10 @@ sr->request_body = r->request_body; +#if (NGX_HTTP_SPDY) + sr->spdy_stream = r->spdy_stream; +#endif + sr->method = NGX_HTTP_GET; sr->http_version = r->http_version; @@ -4130,6 +4141,18 @@ #endif } + if (ngx_strcmp(value[n].data, "spdy") == 0) { +#if (NGX_HTTP_SPDY) + lsopt.spdy = 1; + continue; +#else + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "the \"spdy\" parameter requires " + "ngx_http_spdy_module"); + return NGX_CONF_ERROR; +#endif + } + if (ngx_strncmp(value[n].data, "so_keepalive=", 13) == 0) { if (ngx_strcmp(&value[n].data[13], "on") == 0) { Modified: trunk/src/http/ngx_http_core_module.h =================================================================== --- trunk/src/http/ngx_http_core_module.h 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/ngx_http_core_module.h 2013-03-20 10:36:57 UTC (rev 5122) @@ -75,6 +75,9 @@ #if (NGX_HTTP_SSL) unsigned ssl:1; #endif +#if (NGX_HTTP_SPDY) + unsigned spdy:1; +#endif #if 
(NGX_HAVE_INET6 && defined IPV6_V6ONLY) unsigned ipv6only:1; #endif @@ -232,8 +235,11 @@ ngx_http_virtual_names_t *virtual_names; #if (NGX_HTTP_SSL) - ngx_uint_t ssl; /* unsigned ssl:1; */ + unsigned ssl:1; #endif +#if (NGX_HTTP_SPDY) + unsigned spdy:1; +#endif }; Modified: trunk/src/http/ngx_http_parse.c =================================================================== --- trunk/src/http/ngx_http_parse.c 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/ngx_http_parse.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -1075,6 +1075,154 @@ ngx_int_t +ngx_http_parse_uri(ngx_http_request_t *r) +{ + u_char *p, ch; + enum { + sw_start = 0, + sw_after_slash_in_uri, + sw_check_uri, + sw_uri + } state; + + state = sw_start; + + for (p = r->uri_start; p != r->uri_end; p++) { + + ch = *p; + + switch (state) { + + case sw_start: + + if (ch != '/') { + return NGX_ERROR; + } + + state = sw_after_slash_in_uri; + break; + + /* check "/.", "//", "%", and "\" (Win32) in URI */ + case sw_after_slash_in_uri: + + if (usual[ch >> 5] & (1 << (ch & 0x1f))) { + state = sw_check_uri; + break; + } + + switch (ch) { + case ' ': + r->space_in_uri = 1; + state = sw_check_uri; + break; + case '.': + r->complex_uri = 1; + state = sw_uri; + break; + case '%': + r->quoted_uri = 1; + state = sw_uri; + break; + case '/': + r->complex_uri = 1; + state = sw_uri; + break; +#if (NGX_WIN32) + case '\\': + r->complex_uri = 1; + state = sw_uri; + break; +#endif + case '?': + r->args_start = p + 1; + state = sw_uri; + break; + case '#': + r->complex_uri = 1; + state = sw_uri; + break; + case '+': + r->plus_in_uri = 1; + break; + default: + state = sw_check_uri; + break; + } + break; + + /* check "/", "%" and "\" (Win32) in URI */ + case sw_check_uri: + + if (usual[ch >> 5] & (1 << (ch & 0x1f))) { + break; + } + + switch (ch) { + case '/': +#if (NGX_WIN32) + if (r->uri_ext == p) { + r->complex_uri = 1; + state = sw_uri; + break; + } +#endif + r->uri_ext = NULL; + state = sw_after_slash_in_uri; + break; + case 
'.': + r->uri_ext = p + 1; + break; + case ' ': + r->space_in_uri = 1; + break; +#if (NGX_WIN32) + case '\\': + r->complex_uri = 1; + state = sw_after_slash_in_uri; + break; +#endif + case '%': + r->quoted_uri = 1; + state = sw_uri; + break; + case '?': + r->args_start = p + 1; + state = sw_uri; + break; + case '#': + r->complex_uri = 1; + state = sw_uri; + break; + case '+': + r->plus_in_uri = 1; + break; + } + break; + + /* URI */ + case sw_uri: + + if (usual[ch >> 5] & (1 << (ch & 0x1f))) { + break; + } + + switch (ch) { + case ' ': + r->space_in_uri = 1; + break; + case '#': + r->complex_uri = 1; + break; + } + break; + } + } + + return NGX_OK; +} + + +ngx_int_t ngx_http_parse_complex_uri(ngx_http_request_t *r, ngx_uint_t merge_slashes) { u_char c, ch, decoded, *p, *u; Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/ngx_http_request.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -11,7 +11,6 @@ static void ngx_http_wait_request_handler(ngx_event_t *ev); -static ngx_http_request_t *ngx_http_create_request(ngx_connection_t *c); static void ngx_http_process_request_line(ngx_event_t *rev); static void ngx_http_process_request_headers(ngx_event_t *rev); static ssize_t ngx_http_read_request_header(ngx_http_request_t *r); @@ -31,9 +30,6 @@ static ngx_int_t ngx_http_process_user_agent(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); -static ngx_int_t ngx_http_process_request_uri(ngx_http_request_t *r); -static ngx_int_t ngx_http_process_request_header(ngx_http_request_t *r); -static void ngx_http_process_request(ngx_http_request_t *r); static ngx_int_t ngx_http_validate_host(ngx_str_t *host, ngx_pool_t *pool, ngx_uint_t alloc); static ngx_int_t ngx_http_set_virtual_server(ngx_http_request_t *r, @@ -56,9 +52,7 @@ static void ngx_http_lingering_close_handler(ngx_event_t *ev); static ngx_int_t 
ngx_http_post_action(ngx_http_request_t *r); static void ngx_http_close_request(ngx_http_request_t *r, ngx_int_t error); -static void ngx_http_free_request(ngx_http_request_t *r, ngx_int_t error); static void ngx_http_log_request(ngx_http_request_t *r); -static void ngx_http_close_connection(ngx_connection_t *c); static u_char *ngx_http_log_error(ngx_log_t *log, u_char *buf, size_t len); static u_char *ngx_http_log_error_handler(ngx_http_request_t *r, @@ -318,6 +312,12 @@ rev->handler = ngx_http_wait_request_handler; c->write->handler = ngx_http_empty_handler; +#if (NGX_HTTP_SPDY) + if (hc->addr_conf->spdy) { + rev->handler = ngx_http_spdy_init; + } +#endif + #if (NGX_HTTP_SSL) { ngx_http_ssl_srv_conf_t *sscf; @@ -487,7 +487,7 @@ } -static ngx_http_request_t * +ngx_http_request_t * ngx_http_create_request(ngx_connection_t *c) { ngx_pool_t *pool; @@ -727,6 +727,21 @@ c->ssl->no_wait_shutdown = 1; +#if (NGX_HTTP_SPDY && defined TLSEXT_TYPE_next_proto_neg) + { + unsigned int len; + const unsigned char *data; + static const ngx_str_t spdy = ngx_string(NGX_SPDY_NPN_NEGOTIATED); + + SSL_get0_next_proto_negotiated(c->ssl->connection, &data, &len); + + if (len == spdy.len && ngx_strncmp(data, spdy.data, spdy.len) == 0) { + ngx_http_spdy_init(c->read); + return; + } + } +#endif + c->log->action = "waiting for request"; c->read->handler = ngx_http_wait_request_handler; @@ -985,7 +1000,7 @@ } -static ngx_int_t +ngx_int_t ngx_http_process_request_uri(ngx_http_request_t *r) { ngx_http_core_srv_conf_t *cscf; @@ -1687,7 +1702,7 @@ } -static ngx_int_t +ngx_int_t ngx_http_process_request_header(ngx_http_request_t *r) { if (r->headers_in.server.len == 0 @@ -1757,7 +1772,7 @@ } -static void +void ngx_http_process_request(ngx_http_request_t *r) { ngx_connection_t *c; @@ -2434,6 +2449,13 @@ { ngx_http_core_loc_conf_t *clcf; +#if (NGX_HTTP_SPDY) + if (r->spdy_stream) { + ngx_http_close_request(r, 0); + return; + } +#endif + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); 
if (r->main->count != 1) { @@ -2488,6 +2510,12 @@ ngx_http_test_reading; r->write_event_handler = ngx_http_writer; +#if (NGX_HTTP_SPDY) + if (r->spdy_stream) { + return NGX_OK; + } +#endif + wev = r->connection->write; if (wev->ready && wev->delayed) { @@ -2635,6 +2663,19 @@ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http test reading"); +#if (NGX_HTTP_SPDY) + + if (r->spdy_stream) { + if (c->error) { + err = 0; + goto closed; + } + + return; + } + +#endif + #if (NGX_HAVE_KQUEUE) if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { @@ -3270,12 +3311,19 @@ return; } +#if (NGX_HTTP_SPDY) + if (r->spdy_stream) { + ngx_http_spdy_close_stream(r->spdy_stream, rc); + return; + } +#endif + ngx_http_free_request(r, rc); ngx_http_close_connection(c); } -static void +void ngx_http_free_request(ngx_http_request_t *r, ngx_int_t rc) { ngx_log_t *log; @@ -3376,7 +3424,7 @@ } -static void +void ngx_http_close_connection(ngx_connection_t *c) { ngx_pool_t *pool; Modified: trunk/src/http/ngx_http_request.h =================================================================== --- trunk/src/http/ngx_http_request.h 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/ngx_http_request.h 2013-03-20 10:36:57 UTC (rev 5122) @@ -429,6 +429,9 @@ ngx_uint_t err_status; ngx_http_connection_t *http_connection; +#if (NGX_HTTP_SPDY) + ngx_http_spdy_stream_t *spdy_stream; +#endif ngx_http_log_handler_pt log_handler; Modified: trunk/src/http/ngx_http_request_body.c =================================================================== --- trunk/src/http/ngx_http_request_body.c 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/ngx_http_request_body.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -42,6 +42,13 @@ r->main->count++; +#if (NGX_HTTP_SPDY) + if (r->spdy_stream) { + rc = ngx_http_spdy_read_request_body(r, post_handler); + goto done; + } +#endif + if (r->request_body || r->discard_body) { post_handler(r); return NGX_OK; @@ -475,6 +482,13 @@ ngx_int_t rc; ngx_event_t *rev; +#if (NGX_HTTP_SPDY) + if 
(r->spdy_stream && r == r->main) { + r->spdy_stream->skip_data = NGX_SPDY_DATA_DISCARD; + return NGX_OK; + } +#endif + if (r != r->main || r->discard_body || r->request_body) { return NGX_OK; } Added: trunk/src/http/ngx_http_spdy.c =================================================================== --- trunk/src/http/ngx_http_spdy.c (rev 0) +++ trunk/src/http/ngx_http_spdy.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -0,0 +1,2881 @@ + +/* + * Copyright (C) Nginx, Inc. + * Copyright (C) Valentin V. Bartenev + */ + + +#include +#include +#include +#include + +#include + + +#if (NGX_HAVE_LITTLE_ENDIAN && NGX_HAVE_NONALIGNED) + +#define ngx_str5cmp(m, c0, c1, c2, c3, c4) \ + *(uint32_t *) m == (c3 << 24 | c2 << 16 | c1 << 8 | c0) \ + && m[4] == c4 + +#else + +#define ngx_str5cmp(m, c0, c1, c2, c3, c4) \ + m[0] == c0 && m[1] == c1 && m[2] == c2 && m[3] == c3 && m[4] == c4 + +#endif + + +#if (NGX_HAVE_NONALIGNED) + +#define ngx_spdy_frame_parse_uint16(p) ntohs(*(uint16_t *) (p)) +#define ngx_spdy_frame_parse_uint32(p) ntohl(*(uint32_t *) (p)) + +#else + +#define ngx_spdy_frame_parse_uint16(p) ((p)[0] << 8 | (p)[1]) +#define ngx_spdy_frame_parse_uint32(p) \ + ((p)[0] << 24 | (p)[1] << 16 | (p)[2] << 8 | (p)[3]) + +#endif + +#define ngx_spdy_frame_parse_sid(p) \ + (ngx_spdy_frame_parse_uint32(p) & 0x7fffffff) + + +#define ngx_spdy_ctl_frame_check(h) \ + (((h) & 0xffffff00) == ngx_spdy_ctl_frame_head(0)) +#define ngx_spdy_data_frame_check(h) \ + (!((h) & (uint32_t) NGX_SPDY_CTL_BIT << 31)) + +#define ngx_spdy_ctl_frame_type(h) ((h) & 0x000000ff) +#define ngx_spdy_frame_flags(p) ((p) >> 24) +#define ngx_spdy_frame_length(p) ((p) & 0x00ffffff) + + +#define NGX_SPDY_SKIP_HEADERS_BUFFER_SIZE 4096 +#define NGX_SPDY_CTL_FRAME_BUFFER_SIZE 16 + +#define NGX_SPDY_PROTOCOL_ERROR 1 +#define NGX_SPDY_INVALID_STREAM 2 +#define NGX_SPDY_REFUSED_STREAM 3 +#define NGX_SPDY_UNSUPPORTED_VERSION 4 +#define NGX_SPDY_CANCEL 5 +#define NGX_SPDY_INTERNAL_ERROR 6 +#define NGX_SPDY_FLOW_CONTROL_ERROR 7 
+ +#define NGX_SPDY_SETTINGS_MAX_STREAMS 4 + +#define NGX_SPDY_SETTINGS_FLAG_PERSIST 0x01 + +typedef struct { + ngx_uint_t hash; + u_char len; + u_char header[7]; + ngx_int_t (*handler)(ngx_http_request_t *r); +} ngx_http_spdy_request_header_t; + + +static void ngx_http_spdy_read_handler(ngx_event_t *rev); +static void ngx_http_spdy_write_handler(ngx_event_t *wev); +static void ngx_http_spdy_handle_connection(ngx_http_spdy_connection_t *sc); + +static u_char *ngx_http_spdy_state_detect_settings( + ngx_http_spdy_connection_t *sc, u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_head(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_syn_stream(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_headers(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_headers_error(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_headers_skip(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_data(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_rst_stream(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_ping(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_skip(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_settings(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_noop(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_complete(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); +static u_char *ngx_http_spdy_state_save(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end, ngx_http_spdy_handler_pt handler); +static u_char 
*ngx_http_spdy_state_protocol_error( + ngx_http_spdy_connection_t *sc); +static u_char *ngx_http_spdy_state_internal_error( + ngx_http_spdy_connection_t *sc); + +static ngx_int_t ngx_http_spdy_send_rst_stream(ngx_http_spdy_connection_t *sc, + ngx_uint_t sid, ngx_uint_t status, ngx_uint_t priority); +static ngx_int_t ngx_http_spdy_send_settings(ngx_http_spdy_connection_t *sc); +static ngx_int_t ngx_http_spdy_settings_frame_handler( + ngx_http_spdy_connection_t *sc, ngx_http_spdy_out_frame_t *frame); +static ngx_http_spdy_out_frame_t *ngx_http_spdy_get_ctl_frame( + ngx_http_spdy_connection_t *sc, size_t size, ngx_uint_t priority); +static ngx_int_t ngx_http_spdy_ctl_frame_handler( + ngx_http_spdy_connection_t *sc, ngx_http_spdy_out_frame_t *frame); + +static ngx_http_spdy_stream_t *ngx_http_spdy_create_stream( + ngx_http_spdy_connection_t *sc, ngx_uint_t id, ngx_uint_t priority); +static ngx_http_spdy_stream_t *ngx_http_spdy_get_stream_by_id( + ngx_http_spdy_connection_t *sc, ngx_uint_t sid); +#define ngx_http_spdy_streams_index_size(sscf) (sscf->streams_index_mask + 1) +#define ngx_http_spdy_stream_index(sscf, sid) \ + ((sid >> 1) & sscf->streams_index_mask) + +static ngx_int_t ngx_http_spdy_parse_header(ngx_http_request_t *r); +static ngx_int_t ngx_http_spdy_alloc_large_header_buffer(ngx_http_request_t *r); + +static ngx_int_t ngx_http_spdy_handle_request_header(ngx_http_request_t *r); +static ngx_int_t ngx_http_spdy_parse_method(ngx_http_request_t *r); +static ngx_int_t ngx_http_spdy_parse_scheme(ngx_http_request_t *r); +static ngx_int_t ngx_http_spdy_parse_url(ngx_http_request_t *r); +static ngx_int_t ngx_http_spdy_parse_version(ngx_http_request_t *r); + +static ngx_int_t ngx_http_spdy_construct_request_line(ngx_http_request_t *r); +static void ngx_http_spdy_run_request(ngx_http_request_t *r); +static ngx_int_t ngx_http_spdy_init_request_body(ngx_http_request_t *r); + +static void ngx_http_spdy_handle_connection_handler(ngx_event_t *rev); +static void 
ngx_http_spdy_keepalive_handler(ngx_event_t *rev); +static void ngx_http_spdy_finalize_connection(ngx_http_spdy_connection_t *sc, + ngx_int_t rc); + +static void ngx_http_spdy_pool_cleanup(void *data); + +static void *ngx_http_spdy_zalloc(void *opaque, u_int items, u_int size); +static void ngx_http_spdy_zfree(void *opaque, void *address); + + +static const u_char ngx_http_spdy_dict[] = + "options" "get" "head" "post" "put" "delete" "trace" + "accept" "accept-charset" "accept-encoding" "accept-language" + "authorization" "expect" "from" "host" + "if-modified-since" "if-match" "if-none-match" "if-range" + "if-unmodifiedsince" "max-forwards" "proxy-authorization" + "range" "referer" "te" "user-agent" + "100" "101" "200" "201" "202" "203" "204" "205" "206" + "300" "301" "302" "303" "304" "305" "306" "307" + "400" "401" "402" "403" "404" "405" "406" "407" "408" "409" "410" + "411" "412" "413" "414" "415" "416" "417" + "500" "501" "502" "503" "504" "505" + "accept-ranges" "age" "etag" "location" "proxy-authenticate" "public" + "retry-after" "server" "vary" "warning" "www-authenticate" "allow" + "content-base" "content-encoding" "cache-control" "connection" "date" + "trailer" "transfer-encoding" "upgrade" "via" "warning" + "content-language" "content-length" "content-location" + "content-md5" "content-range" "content-type" "etag" "expires" + "last-modified" "set-cookie" + "Monday" "Tuesday" "Wednesday" "Thursday" "Friday" "Saturday" "Sunday" + "Jan" "Feb" "Mar" "Apr" "May" "Jun" "Jul" "Aug" "Sep" "Oct" "Nov" "Dec" + "chunked" "text/html" "image/png" "image/jpg" "image/gif" + "application/xml" "application/xhtml" "text/plain" "public" "max-age" + "charset=iso-8859-1" "utf-8" "gzip" "deflate" "HTTP/1.1" "status" + "version" "url"; + + +static ngx_http_spdy_request_header_t ngx_http_spdy_request_headers[] = { + { 0, 6, "method", ngx_http_spdy_parse_method }, + { 0, 6, "scheme", ngx_http_spdy_parse_scheme }, + { 0, 3, "url", ngx_http_spdy_parse_url }, + { 0, 7, "version", 
ngx_http_spdy_parse_version }, +}; + +#define NGX_SPDY_REQUEST_HEADERS \ + (sizeof(ngx_http_spdy_request_headers) \ + / sizeof(ngx_http_spdy_request_header_t)) + + +void +ngx_http_spdy_init(ngx_event_t *rev) +{ + int rc; + ngx_connection_t *c; + ngx_pool_cleanup_t *cln; + ngx_http_connection_t *hc; + ngx_http_spdy_srv_conf_t *sscf; + ngx_http_spdy_main_conf_t *smcf; + ngx_http_spdy_connection_t *sc; + + c = rev->data; + hc = c->data; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, + "init spdy request"); + + c->log->action = "processing SPDY"; + + smcf = ngx_http_get_module_main_conf(hc->conf_ctx, ngx_http_spdy_module); + + if (smcf->recv_buffer == NULL) { + smcf->recv_buffer = ngx_palloc(ngx_cycle->pool, smcf->recv_buffer_size); + if (smcf->recv_buffer == NULL) { + ngx_http_close_connection(c); + return; + } + } + + sc = ngx_pcalloc(c->pool, sizeof(ngx_http_spdy_connection_t)); + if (sc == NULL) { + ngx_http_close_connection(c); + return; + } + + sc->connection = c; + sc->http_connection = hc; + + sc->handler = ngx_http_spdy_state_detect_settings; + + sc->zstream_in.zalloc = ngx_http_spdy_zalloc; + sc->zstream_in.zfree = ngx_http_spdy_zfree; + sc->zstream_in.opaque = sc; + + rc = inflateInit(&sc->zstream_in); + if (rc != Z_OK) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "inflateInit() failed: %d", rc); + ngx_http_close_connection(c); + return; + } + + sc->zstream_out.zalloc = ngx_http_spdy_zalloc; + sc->zstream_out.zfree = ngx_http_spdy_zfree; + sc->zstream_out.opaque = sc; + + sscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_spdy_module); + + rc = deflateInit2(&sc->zstream_out, (int) sscf->headers_comp, + Z_DEFLATED, 11, 4, Z_DEFAULT_STRATEGY); + + if (rc != Z_OK) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "deflateInit2() failed: %d", rc); + ngx_http_close_connection(c); + return; + } + + rc = deflateSetDictionary(&sc->zstream_out, ngx_http_spdy_dict, + sizeof(ngx_http_spdy_dict)); + if (rc != Z_OK) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + 
"deflateSetDictionary() failed: %d", rc); + ngx_http_close_connection(c); + return; + } + + sc->pool = ngx_create_pool(sscf->pool_size, sc->connection->log); + if (sc->pool == NULL) { + ngx_http_close_connection(c); + return; + } + + cln = ngx_pool_cleanup_add(c->pool, sizeof(ngx_pool_cleanup_file_t)); + if (cln == NULL) { + ngx_http_close_connection(c); + return; + } + + cln->handler = ngx_http_spdy_pool_cleanup; + cln->data = sc; + + sc->streams_index = ngx_pcalloc(sc->pool, + ngx_http_spdy_streams_index_size(sscf) + * sizeof(ngx_http_spdy_stream_t *)); + if (sc->streams_index == NULL) { + ngx_http_close_connection(c); + return; + } + + c->data = sc; + + rev->handler = ngx_http_spdy_read_handler; + c->write->handler = ngx_http_spdy_write_handler; + + ngx_http_spdy_read_handler(rev); +} + + +static void +ngx_http_spdy_read_handler(ngx_event_t *rev) +{ + u_char *p, *end; + size_t available; + ssize_t n; + ngx_connection_t *c; + ngx_http_spdy_main_conf_t *smcf; + ngx_http_spdy_connection_t *sc; + + c = rev->data; + sc = c->data; + + if (rev->timedout) { + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out"); + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_REQUEST_TIME_OUT); + return; + } + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "spdy read handler"); + + sc->blocked = 1; + + smcf = ngx_http_get_module_main_conf(sc->http_connection->conf_ctx, + ngx_http_spdy_module); + + available = smcf->recv_buffer_size - 2 * NGX_SPDY_STATE_BUFFER_SIZE; + + do { + p = smcf->recv_buffer; + + ngx_memcpy(p, sc->buffer, NGX_SPDY_STATE_BUFFER_SIZE); + end = p + sc->buffer_used; + + n = c->recv(c, end, available); + + if (n == NGX_AGAIN) { + break; + } + + if (n == 0 && (sc->waiting || sc->processing)) { + ngx_log_error(NGX_LOG_INFO, c->log, 0, + "client closed prematurely connection"); + } + + if (n == 0 || n == NGX_ERROR) { + ngx_http_spdy_finalize_connection(sc, + NGX_HTTP_CLIENT_CLOSED_REQUEST); + return; + } + + end += n; + + sc->buffer_used = 0; + 
sc->waiting = 0; + + do { + p = sc->handler(sc, p, end); + + if (p == NULL) { + return; + } + + } while (p != end); + + } while (rev->ready); + + if (ngx_handle_read_event(rev, 0) != NGX_OK) { + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + + sc->blocked = 0; + + if (sc->processing) { + if (rev->timer_set) { + ngx_del_timer(rev); + } + return; + } + + ngx_http_spdy_handle_connection(sc); +} + + +static void +ngx_http_spdy_write_handler(ngx_event_t *wev) +{ + ngx_int_t rc; + ngx_connection_t *c; + ngx_http_spdy_stream_t *stream, *s, *sn; + ngx_http_spdy_connection_t *sc; + + c = wev->data; + sc = c->data; + + if (wev->timedout) { + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, + "spdy write event timed out"); + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_CLIENT_CLOSED_REQUEST); + return; + } + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "spdy write handler"); + + sc->blocked = 2; + + rc = ngx_http_spdy_send_output_queue(sc); + + if (rc == NGX_ERROR) { + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_CLIENT_CLOSED_REQUEST); + return; + } + + stream = NULL; + + for (s = sc->last_stream; s; s = sn) { + sn = s->next; + s->next = stream; + stream = s; + } + + sc->last_stream = NULL; + + sc->blocked = 1; + + for ( /* void */ ; stream; stream = sn) { + sn = stream->next; + stream->handled = 0; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, + "spdy run stream %ui", stream->id); + + wev = stream->request->connection->write; + wev->handler(wev); + } + + sc->blocked = 0; + + if (rc == NGX_AGAIN) { + return; + } + + ngx_http_spdy_handle_connection(sc); +} + + +ngx_int_t +ngx_http_spdy_send_output_queue(ngx_http_spdy_connection_t *sc) +{ + ngx_chain_t *cl; + ngx_event_t *wev; + ngx_connection_t *c; + ngx_http_core_loc_conf_t *clcf; + ngx_http_spdy_out_frame_t *out, *frame, *fn; + + c = sc->connection; + + if (c->error) { + return NGX_ERROR; + } + + wev = c->write; + + if (!wev->ready) { + return NGX_OK; + } + + cl = NULL; + out 
= NULL; + + for (frame = sc->last_out; frame; frame = fn) { + frame->last->next = cl; + cl = frame->first; + + fn = frame->next; + frame->next = out; + out = frame; + + ngx_log_debug5(NGX_LOG_DEBUG_HTTP, c->log, 0, + "spdy frame out: %p sid:%ui prio:%ui bl:%ui size:%uz", + out, out->stream ? out->stream->id : 0, out->priority, + out->blocked, out->size); + } + + cl = c->send_chain(c, cl, 0); + + if (cl == NGX_CHAIN_ERROR) { + c->error = 1; + + if (!sc->blocked) { + ngx_post_event(wev, &ngx_posted_events); + } + + return NGX_ERROR; + } + + clcf = ngx_http_get_module_loc_conf(sc->http_connection->conf_ctx, + ngx_http_core_module); + + if (ngx_handle_write_event(wev, clcf->send_lowat) != NGX_OK) { + return NGX_ERROR; /* FIXME */ + } + + if (cl) { + ngx_add_timer(wev, clcf->send_timeout); + + } else { + if (wev->timer_set) { + ngx_del_timer(wev); + } + } + + for ( /* void */ ; out; out = out->next) { + if (out->handler(sc, out) != NGX_OK) { + out->blocked = 1; + out->priority = NGX_SPDY_HIGHEST_PRIORITY; + break; + } + + ngx_log_debug4(NGX_LOG_DEBUG_HTTP, c->log, 0, + "spdy frame sent: %p sid:%ui bl:%ui size:%uz", + out, out->stream ? 
out->stream->id : 0, + out->blocked, out->size); + } + + frame = NULL; + + for ( /* void */ ; out; out = fn) { + fn = out->next; + out->next = frame; + frame = out; + } + + sc->last_out = frame; + + return NGX_OK; +} + + +static void +ngx_http_spdy_handle_connection(ngx_http_spdy_connection_t *sc) +{ + ngx_connection_t *c; + ngx_http_spdy_srv_conf_t *sscf; + + if (sc->last_out || sc->processing) { + return; + } + + c = sc->connection; + + if (c->error) { + ngx_http_close_connection(c); + return; + } + + if (c->buffered) { + return; + } + + sscf = ngx_http_get_module_srv_conf(sc->http_connection->conf_ctx, + ngx_http_spdy_module); + if (sc->waiting) { + ngx_add_timer(c->read, sscf->recv_timeout); + return; + } + + if (ngx_terminate || ngx_exiting) { + ngx_http_close_connection(c); + return; + } + + ngx_destroy_pool(sc->pool); + + sc->pool = NULL; + sc->free_ctl_frames = NULL; + sc->free_fake_connections = NULL; + +#if (NGX_HTTP_SSL) + if (c->ssl) { + ngx_ssl_free_buffer(c); + } +#endif + + c->destroyed = 1; + c->idle = 1; + ngx_reusable_connection(c, 1); + + c->write->handler = ngx_http_empty_handler; + c->read->handler = ngx_http_spdy_keepalive_handler; + + if (c->write->timer_set) { + ngx_del_timer(c->write); + } + + ngx_add_timer(c->read, sscf->keepalive_timeout); +} + + +static u_char * +ngx_http_spdy_state_detect_settings(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end) +{ + if (end - pos < NGX_SPDY_FRAME_HEADER_SIZE) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_detect_settings); + } + + /* + * Since this is the first frame in a buffer, + * then it is properly aligned + */ + + if (*(uint32_t *) pos == htonl(ngx_spdy_ctl_frame_head(NGX_SPDY_SETTINGS))) + { + sc->length = ngx_spdy_frame_length(htonl(((uint32_t *) pos)[1])); + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy SETTINGS frame received, size: %uz", sc->length); + + pos += NGX_SPDY_FRAME_HEADER_SIZE; + + return ngx_http_spdy_state_settings(sc, 
pos, end); + } + + ngx_http_spdy_send_settings(sc); + + return ngx_http_spdy_state_head(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_head(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + uint32_t head, flen; + + if (end - pos < NGX_SPDY_FRAME_HEADER_SIZE) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_head); + } + + head = ngx_spdy_frame_parse_uint32(pos); + + pos += sizeof(uint32_t); + + flen = ngx_spdy_frame_parse_uint32(pos); + + sc->flags = ngx_spdy_frame_flags(flen); + sc->length = ngx_spdy_frame_length(flen); + + pos += sizeof(uint32_t); + + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy process frame head:%08Xd f:%ui l:%ui", + head, sc->flags, sc->length); + + if (ngx_spdy_ctl_frame_check(head)) { + switch (ngx_spdy_ctl_frame_type(head)) { + + case NGX_SPDY_SYN_STREAM: + return ngx_http_spdy_state_syn_stream(sc, pos, end); + + case NGX_SPDY_SYN_REPLY: + return ngx_http_spdy_state_protocol_error(sc); + + case NGX_SPDY_RST_STREAM: + return ngx_http_spdy_state_rst_stream(sc, pos, end); + + case NGX_SPDY_SETTINGS: + return ngx_http_spdy_state_skip(sc, pos, end); + + case NGX_SPDY_NOOP: + return ngx_http_spdy_state_noop(sc, pos, end); + + case NGX_SPDY_PING: + return ngx_http_spdy_state_ping(sc, pos, end); + + case NGX_SPDY_GOAWAY: + return ngx_http_spdy_state_skip(sc, pos, end); /* TODO */ + + case NGX_SPDY_HEADERS: + return ngx_http_spdy_state_protocol_error(sc); + + default: /* TODO logging */ + return ngx_http_spdy_state_skip(sc, pos, end); + } + } + + if (ngx_spdy_data_frame_check(head)) { + sc->stream = ngx_http_spdy_get_stream_by_id(sc, head); + return ngx_http_spdy_state_data(sc, pos, end); + } + + + /* TODO version & type check */ + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy unknown frame"); + + return ngx_http_spdy_state_protocol_error(sc); +} + + +static u_char * +ngx_http_spdy_state_syn_stream(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) 
+{ + ngx_uint_t sid, prio; + ngx_http_spdy_stream_t *stream; + ngx_http_spdy_srv_conf_t *sscf; + + if (end - pos < NGX_SPDY_SYN_STREAM_SIZE) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_syn_stream); + } + + if (sc->length <= NGX_SPDY_SYN_STREAM_SIZE) { + /* TODO logging */ + return ngx_http_spdy_state_protocol_error(sc); + } + + sc->length -= NGX_SPDY_SYN_STREAM_SIZE; + + sid = ngx_spdy_frame_parse_sid(pos); + prio = pos[8] >> 6; + + pos += NGX_SPDY_SYN_STREAM_SIZE; + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy SYN_STREAM frame sid:%ui prio:%ui", sid, prio); + + sscf = ngx_http_get_module_srv_conf(sc->http_connection->conf_ctx, + ngx_http_spdy_module); + + if (sc->processing >= sscf->concurrent_streams) { + + ngx_log_error(NGX_LOG_INFO, sc->connection->log, 0, + "spdy concurrent streams excessed %ui", sc->processing); + + if (ngx_http_spdy_send_rst_stream(sc, sid, NGX_SPDY_REFUSED_STREAM, + prio) + != NGX_OK) + { + return ngx_http_spdy_state_internal_error(sc); + } + + return ngx_http_spdy_state_headers_skip(sc, pos, end); + } + + stream = ngx_http_spdy_create_stream(sc, sid, prio); + if (stream == NULL) { + return ngx_http_spdy_state_internal_error(sc); + } + + stream->in_closed = (sc->flags & NGX_SPDY_FLAG_FIN) ? 
1 : 0; + + stream->request->request_length = NGX_SPDY_FRAME_HEADER_SIZE + + NGX_SPDY_SYN_STREAM_SIZE + + sc->length; + + sc->stream = stream; + + sc->last_sid = sid; + + return ngx_http_spdy_state_headers(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_headers(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + int z; + size_t size; + ngx_buf_t *buf; + ngx_int_t rc; + ngx_uint_t complete; + ngx_http_request_t *r; + + size = end - pos; + + if (size == 0) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_headers); + } + + if (size >= sc->length) { + size = sc->length; + complete = 1; + + } else { + complete = 0; + } + + r = sc->stream->request; + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy process HEADERS %uz of %uz", size, sc->length); + + buf = r->header_in; + + sc->zstream_in.next_in = pos; + sc->zstream_in.avail_in = size; + sc->zstream_in.next_out = buf->last; + sc->zstream_in.avail_out = buf->end - buf->last - 1; + + z = inflate(&sc->zstream_in, Z_NO_FLUSH); + + if (z == Z_NEED_DICT) { + z = inflateSetDictionary(&sc->zstream_in, ngx_http_spdy_dict, + sizeof(ngx_http_spdy_dict)); + if (z != Z_OK) { + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "spdy inflateSetDictionary() failed: %d", z); + ngx_http_spdy_close_stream(sc->stream, 0); + return ngx_http_spdy_state_protocol_error(sc); + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy inflateSetDictionary(): %d", z); + + z = sc->zstream_in.avail_in ? 
inflate(&sc->zstream_in, Z_NO_FLUSH) + : Z_OK; + } + + if (z != Z_OK) { + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "spdy inflate() failed: %d", z); + ngx_http_spdy_close_stream(sc->stream, 0); + return ngx_http_spdy_state_protocol_error(sc); + } + + ngx_log_debug5(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy inflate out: ni:%p no:%p ai:%ud ao:%ud rc:%d", + sc->zstream_in.next_in, sc->zstream_in.next_out, + sc->zstream_in.avail_in, sc->zstream_in.avail_out, + z); + + sc->length -= sc->zstream_in.next_in - pos; + pos = sc->zstream_in.next_in; + + buf->last = sc->zstream_in.next_out; + + if (r->headers_in.headers.part.elts == NULL) { + + if (buf->last - buf->pos < NGX_SPDY_NV_NUM_SIZE) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_headers); + } + + sc->headers = ngx_spdy_frame_parse_uint16(buf->pos); + + buf->pos += NGX_SPDY_NV_NUM_SIZE; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy headers count: %ui", sc->headers); + + if (ngx_list_init(&r->headers_in.headers, r->pool, sc->headers + 3, + sizeof(ngx_table_elt_t)) + != NGX_OK) + { + ngx_http_spdy_close_stream(sc->stream, + NGX_HTTP_INTERNAL_SERVER_ERROR); + return ngx_http_spdy_state_headers_error(sc, pos, end); + } + + if (ngx_array_init(&r->headers_in.cookies, r->pool, 2, + sizeof(ngx_table_elt_t *)) + != NGX_OK) + { + ngx_http_spdy_close_stream(sc->stream, + NGX_HTTP_INTERNAL_SERVER_ERROR); + return ngx_http_spdy_state_headers_error(sc, pos, end); + } + } + + while (sc->headers) { + + rc = ngx_http_spdy_parse_header(r); + + switch (rc) { + + case NGX_DONE: + sc->headers--; + + case NGX_OK: + break; + + case NGX_AGAIN: + + if (sc->zstream_in.avail_in) { + + rc = ngx_http_spdy_alloc_large_header_buffer(r); + + if (rc == NGX_DECLINED) { + /* TODO logging */ + ngx_http_finalize_request(r, + NGX_HTTP_REQUEST_HEADER_TOO_LARGE); + return ngx_http_spdy_state_headers_error(sc, pos, end); + } + + if (rc != NGX_OK) { + ngx_http_spdy_close_stream(sc->stream, + 
NGX_HTTP_INTERNAL_SERVER_ERROR); + return ngx_http_spdy_state_headers_error(sc, pos, end); + } + + buf = r->header_in; + + sc->zstream_in.next_out = buf->last; + sc->zstream_in.avail_out = buf->end - buf->last - 1; + + z = inflate(&sc->zstream_in, Z_NO_FLUSH); + + if (z != Z_OK) { + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "spdy inflate() failed: %d", z); + ngx_http_spdy_close_stream(sc->stream, 0); + return ngx_http_spdy_state_protocol_error(sc); + } + + sc->length -= sc->zstream_in.next_in - pos; + pos = sc->zstream_in.next_in; + + buf->last = sc->zstream_in.next_out; + + continue; + } + + if (complete) { + /* TODO: improve error message */ + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy again while last chunk"); + ngx_http_spdy_close_stream(sc->stream, 0); + return ngx_http_spdy_state_protocol_error(sc); + } + + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_headers); + + case NGX_HTTP_PARSE_INVALID_REQUEST: + + /* TODO: improve error message */ + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "client sent invalid header line"); + + ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); + + return ngx_http_spdy_state_headers_error(sc, pos, end); + + default: /* NGX_HTTP_PARSE_INVALID_HEADER */ + + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "client sent invalid HEADERS spdy frame"); + ngx_http_spdy_close_stream(sc->stream, NGX_HTTP_BAD_REQUEST); + return ngx_http_spdy_state_protocol_error(sc); + } + + /* a header line has been parsed successfully */ + + rc = ngx_http_spdy_handle_request_header(r); + + if (rc != NGX_OK) { + if (rc == NGX_HTTP_PARSE_INVALID_HEADER) { + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "client sent invalid HEADERS spdy frame"); + ngx_http_spdy_close_stream(sc->stream, NGX_HTTP_BAD_REQUEST); + return ngx_http_spdy_state_protocol_error(sc); + } + + if (rc == NGX_HTTP_PARSE_INVALID_REQUEST) { + ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); + } + + return 
ngx_http_spdy_state_headers_error(sc, pos, end); + } + } + + if (buf->pos != buf->last) { + /* TODO: improve error message */ + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "end %ui %p %p", complete, buf->pos, buf->last); + ngx_http_spdy_close_stream(sc->stream, NGX_HTTP_BAD_REQUEST); + return ngx_http_spdy_state_protocol_error(sc); + } + + if (!complete) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_headers); + } + + ngx_http_spdy_run_request(r); + + return ngx_http_spdy_state_complete(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_headers_error(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + if (sc->connection->error) { + return ngx_http_spdy_state_internal_error(sc); + } + + return ngx_http_spdy_state_headers_skip(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_headers_skip(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + int n; + size_t size; + u_char buffer[NGX_SPDY_SKIP_HEADERS_BUFFER_SIZE]; + + if (sc->length == 0) { + return ngx_http_spdy_state_complete(sc, pos, end); + } + + size = end - pos; + + if (size == 0) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_headers_skip); + } + + sc->zstream_in.next_in = pos; + sc->zstream_in.avail_in = (size < sc->length) ? 
size : sc->length; + + while (sc->zstream_in.avail_in) { + sc->zstream_in.next_out = buffer; + sc->zstream_in.avail_out = NGX_SPDY_SKIP_HEADERS_BUFFER_SIZE; + + n = inflate(&sc->zstream_in, Z_NO_FLUSH); + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy inflate(): %d", n); + + if (n != Z_OK) { + /* TODO: logging */ + return ngx_http_spdy_state_protocol_error(sc); + } + } + + pos = sc->zstream_in.next_in; + + if (size < sc->length) { + sc->length -= size; + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_headers_skip); + } + + return ngx_http_spdy_state_complete(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_data(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + size_t size; + ssize_t n; + ngx_buf_t *buf; + ngx_int_t rc; + ngx_uint_t complete; + ngx_temp_file_t *tf; + ngx_http_request_t *r; + ngx_http_spdy_stream_t *stream; + ngx_http_request_body_t *rb; + ngx_http_core_loc_conf_t *clcf; + + stream = sc->stream; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy DATA frame"); + + if (stream == NULL) { + return ngx_http_spdy_state_skip(sc, pos, end); + } + + if (stream->in_closed) { + /* TODO log */ + return ngx_http_spdy_state_protocol_error(sc); + } + + if (stream->skip_data) { + + if (sc->flags & NGX_SPDY_FLAG_FIN) { + stream->in_closed = 1; + } + + /* TODO log and accounting */ + return ngx_http_spdy_state_skip(sc, pos, end); + } + + size = end - pos; + + if (size >= sc->length) { + size = sc->length; + complete = 1; + + } else { + sc->length -= size; + complete = 0; + } + + r = stream->request; + + if (r->request_body == NULL + && ngx_http_spdy_init_request_body(r) != NGX_OK) + { + stream->skip_data = NGX_SPDY_DATA_INTERNAL_ERROR; + return ngx_http_spdy_state_skip(sc, pos, end); + } + + rb = r->request_body; + tf = rb->temp_file; + buf = rb->buf; + + if (size) { + rb->rest += size; + + if (r->headers_in.content_length_n != -1 + && r->headers_in.content_length_n < rb->rest) + 
{ + /* TODO logging */ + stream->skip_data = NGX_SPDY_DATA_ERROR; + goto error; + + } else { + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + + if (clcf->client_max_body_size + && clcf->client_max_body_size < rb->rest) + { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "client intended to send too large chunked " + "body: %O bytes", + rb->rest); + + stream->skip_data = NGX_SPDY_DATA_ERROR; + goto error; + } + } + + if (tf) { + buf->start = pos; + buf->pos = pos; + + pos += size; + + buf->end = pos; + buf->last = pos; + + n = ngx_write_chain_to_temp_file(tf, rb->bufs); + + /* TODO: n == 0 or not complete and level event */ + + if (n == NGX_ERROR) { + stream->skip_data = NGX_SPDY_DATA_INTERNAL_ERROR; + goto error; + } + + tf->offset += n; + + } else { + buf->last = ngx_cpymem(buf->last, pos, size); + pos += size; + } + + r->request_length += size; + } + + if (!complete) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_data); + } + + if (sc->flags & NGX_SPDY_FLAG_FIN) { + + stream->in_closed = 1; + + if (tf) { + ngx_memzero(buf, sizeof(ngx_buf_t)); + + buf->in_file = 1; + buf->file_last = tf->file.offset; + buf->file = &tf->file; + + rb->buf = NULL; + } + + if (r->headers_in.content_length_n < 0) { + r->headers_in.content_length_n = rb->rest; + } + + if (rb->post_handler) { + rb->post_handler(r); + } + } + + return ngx_http_spdy_state_complete(sc, pos, end); + +error: + + if (rb->post_handler) { + + if (stream->skip_data == NGX_SPDY_DATA_ERROR) { + rc = (r->headers_in.content_length_n == -1) + ? 
NGX_HTTP_REQUEST_ENTITY_TOO_LARGE + : NGX_HTTP_BAD_REQUEST; + + } else { + rc = NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + ngx_http_finalize_request(r, rc); + } + + return ngx_http_spdy_state_skip(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_rst_stream(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + ngx_uint_t sid, status; + ngx_event_t *ev; + ngx_connection_t *fc; + ngx_http_request_t *r; + ngx_http_spdy_stream_t *stream; + + if (end - pos < NGX_SPDY_RST_STREAM_SIZE) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_rst_stream); + } + + if (sc->length != NGX_SPDY_RST_STREAM_SIZE) { + /* TODO logging */ + return ngx_http_spdy_state_protocol_error(sc); + } + + sid = ngx_spdy_frame_parse_sid(pos); + + pos += NGX_SPDY_SID_SIZE; + + status = ngx_spdy_frame_parse_uint32(pos); + + pos += sizeof(uint32_t); + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy RST_STREAM sid:%ui st:%ui", sid, status); + + + switch (status) { + + case NGX_SPDY_PROTOCOL_ERROR: + /* TODO logging */ + return ngx_http_spdy_state_protocol_error(sc); + + case NGX_SPDY_INVALID_STREAM: + /* TODO */ + break; + + case NGX_SPDY_REFUSED_STREAM: + /* TODO */ + break; + + case NGX_SPDY_UNSUPPORTED_VERSION: + /* TODO logging */ + return ngx_http_spdy_state_protocol_error(sc); + + case NGX_SPDY_CANCEL: + case NGX_SPDY_INTERNAL_ERROR: + stream = ngx_http_spdy_get_stream_by_id(sc, sid); + if (stream == NULL) { + /* TODO false cancel */ + break; + } + + stream->in_closed = 1; + stream->out_closed = 1; + + r = stream->request; + + fc = r->connection; + fc->error = 1; + + ev = fc->read; + ev->handler(ev); + + break; + + case NGX_SPDY_FLOW_CONTROL_ERROR: + /* TODO logging */ + return ngx_http_spdy_state_protocol_error(sc); + + default: + /* TODO */ + return ngx_http_spdy_state_protocol_error(sc); + } + + return ngx_http_spdy_state_complete(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_ping(ngx_http_spdy_connection_t *sc, u_char 
*pos, + u_char *end) +{ + u_char *p; + ngx_buf_t *buf; + ngx_http_spdy_out_frame_t *frame; + + if (end - pos < NGX_SPDY_PING_SIZE) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_ping); + } + + if (sc->length != NGX_SPDY_PING_SIZE) { + /* TODO logging */ + return ngx_http_spdy_state_protocol_error(sc); + } + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy PING frame"); + + frame = ngx_http_spdy_get_ctl_frame(sc, NGX_SPDY_PING_SIZE, + NGX_SPDY_HIGHEST_PRIORITY); + if (frame == NULL) { + return ngx_http_spdy_state_internal_error(sc); + } + + buf = frame->first->buf; + + p = buf->pos; + + p = ngx_spdy_frame_write_head(p, NGX_SPDY_PING); + p = ngx_spdy_frame_write_flags_and_len(p, 0, NGX_SPDY_PING_SIZE); + + p = ngx_cpymem(p, pos, NGX_SPDY_PING_SIZE); + + buf->last = p; + + ngx_http_spdy_queue_frame(sc, frame); + + pos += NGX_SPDY_PING_SIZE; + + return ngx_http_spdy_state_complete(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_skip(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + size_t size; + + size = end - pos; + + if (size < sc->length) { + sc->length -= size; + return ngx_http_spdy_state_save(sc, end, end, + ngx_http_spdy_state_skip); + } + + return ngx_http_spdy_state_complete(sc, pos + sc->length, end); +} + + +static u_char * +ngx_http_spdy_state_settings(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + ngx_uint_t v; + ngx_http_spdy_srv_conf_t *sscf; + + if (sc->headers == 0) { + + if (end - pos < NGX_SPDY_SETTINGS_NUM_SIZE) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_settings); + } + + sc->headers = ngx_spdy_frame_parse_uint32(pos); + + pos += NGX_SPDY_SETTINGS_NUM_SIZE; + sc->length -= NGX_SPDY_SETTINGS_NUM_SIZE; + + if (sc->length < sc->headers * NGX_SPDY_SETTINGS_PAIR_SIZE) { + /* TODO logging */ + return ngx_http_spdy_state_protocol_error(sc); + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy SETTINGS frame consists 
of %ui entries", + sc->headers); + } + + while (sc->headers) { + if (end - pos < NGX_SPDY_SETTINGS_PAIR_SIZE) { + return ngx_http_spdy_state_save(sc, pos, end, + ngx_http_spdy_state_settings); + } + + sc->headers--; + + if (pos[0] != NGX_SPDY_SETTINGS_MAX_STREAMS) { + pos += NGX_SPDY_SETTINGS_PAIR_SIZE; + sc->length -= NGX_SPDY_SETTINGS_PAIR_SIZE; + continue; + } + + v = ngx_spdy_frame_parse_uint32(pos + NGX_SPDY_SETTINGS_IDF_SIZE); + + sscf = ngx_http_get_module_srv_conf(sc->http_connection->conf_ctx, + ngx_http_spdy_module); + + if (v != sscf->concurrent_streams) { + ngx_http_spdy_send_settings(sc); + } + + return ngx_http_spdy_state_skip(sc, pos, end); + } + + ngx_http_spdy_send_settings(sc); + + return ngx_http_spdy_state_complete(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_noop(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + if (sc->length) { + /* TODO logging */ + return ngx_http_spdy_state_protocol_error(sc); + } + + return ngx_http_spdy_state_complete(sc, pos, end); +} + + +static u_char * +ngx_http_spdy_state_complete(ngx_http_spdy_connection_t *sc, u_char *pos, + u_char *end) +{ + sc->handler = ngx_http_spdy_state_head; + return pos; +} + + +static u_char * +ngx_http_spdy_state_save(ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end, ngx_http_spdy_handler_pt handler) +{ +#if (NGX_DEBUG) + if (end - pos > NGX_SPDY_STATE_BUFFER_SIZE) { + ngx_log_error(NGX_LOG_ALERT, sc->connection->log, 0, + "spdy state buffer overflow: " + "%i bytes required", end - pos); + return ngx_http_spdy_state_internal_error(sc); + } +#endif + + ngx_memcpy(sc->buffer, pos, NGX_SPDY_STATE_BUFFER_SIZE); + + sc->buffer_used = end - pos; + sc->handler = handler; + sc->waiting = 1; + + return end; +} + + +static u_char * +ngx_http_spdy_state_protocol_error(ngx_http_spdy_connection_t *sc) +{ + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy state protocol error"); + + /* TODO */ + ngx_http_spdy_finalize_connection(sc, 
NGX_HTTP_CLIENT_CLOSED_REQUEST); + return NULL; +} + + +static u_char * +ngx_http_spdy_state_internal_error(ngx_http_spdy_connection_t *sc) +{ + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy state internal error"); + + /* TODO */ + ngx_http_spdy_finalize_connection(sc, NGX_HTTP_INTERNAL_SERVER_ERROR); + return NULL; +} + + +static ngx_int_t +ngx_http_spdy_send_rst_stream(ngx_http_spdy_connection_t *sc, ngx_uint_t sid, + ngx_uint_t status, ngx_uint_t priority) +{ + u_char *p; + ngx_buf_t *buf; + ngx_http_spdy_out_frame_t *frame; + + if (sc->connection->error) { + return NGX_OK; + } + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy write RST_STREAM sid:%ui st:%ui", sid, status); + + frame = ngx_http_spdy_get_ctl_frame(sc, NGX_SPDY_RST_STREAM_SIZE, + priority); + if (frame == NULL) { + return NGX_ERROR; + } + + buf = frame->first->buf; + + p = buf->pos; + + p = ngx_spdy_frame_write_head(p, NGX_SPDY_RST_STREAM); + p = ngx_spdy_frame_write_flags_and_len(p, 0, NGX_SPDY_RST_STREAM_SIZE); + + p = ngx_spdy_frame_write_sid(p, sid); + p = ngx_spdy_frame_aligned_write_uint32(p, status); + + buf->last = p; + + ngx_http_spdy_queue_frame(sc, frame); + + return NGX_OK; +} + + +#if 0 +static ngx_int_t +ngx_http_spdy_send_goaway(ngx_http_spdy_connection_t *sc) +{ + u_char *p; + ngx_buf_t *buf; + ngx_http_spdy_out_frame_t *frame; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy create GOAWAY sid:%ui", sc->last_sid); + + frame = ngx_http_spdy_get_ctl_frame(sc, NGX_SPDY_GOAWAY_SIZE, + NGX_SPDY_HIGHEST_PRIORITY); + if (frame == NULL) { + return NGX_ERROR; + } + + buf = frame->first->buf; + + p = buf->pos; + + p = ngx_spdy_frame_write_head(p, NGX_SPDY_GOAWAY); + p = ngx_spdy_frame_write_flags_and_len(p, 0, NGX_SPDY_GOAWAY_SIZE); + + p = ngx_spdy_frame_write_sid(p, sc->last_sid); + + buf->last = p; + + ngx_http_spdy_queue_frame(sc, frame); + + return NGX_OK; +} +#endif + + +static ngx_int_t 
+ngx_http_spdy_send_settings(ngx_http_spdy_connection_t *sc) +{ + u_char *p; + ngx_buf_t *buf; + ngx_pool_t *pool; + ngx_chain_t *cl; + ngx_http_spdy_srv_conf_t *sscf; + ngx_http_spdy_out_frame_t *frame; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy create SETTINGS frame"); + + pool = sc->connection->pool; + + frame = ngx_palloc(pool, sizeof(ngx_http_spdy_out_frame_t)); + if (frame == NULL) { + return NGX_ERROR; + } + + cl = ngx_alloc_chain_link(pool); + if (cl == NULL) { + return NGX_ERROR; + } + + buf = ngx_create_temp_buf(pool, NGX_SPDY_FRAME_HEADER_SIZE + + NGX_SPDY_SETTINGS_NUM_SIZE + + NGX_SPDY_SETTINGS_PAIR_SIZE); + if (buf == NULL) { + return NGX_ERROR; + } + + buf->last_buf = 1; + + cl->buf = buf; + cl->next = NULL; + + frame->first = cl; + frame->last = cl; + frame->handler = ngx_http_spdy_settings_frame_handler; +#if (NGX_DEBUG) + frame->stream = NULL; + frame->size = NGX_SPDY_FRAME_HEADER_SIZE + + NGX_SPDY_SETTINGS_NUM_SIZE + + NGX_SPDY_SETTINGS_PAIR_SIZE; +#endif + frame->priority = NGX_SPDY_HIGHEST_PRIORITY; + frame->blocked = 0; + + p = buf->pos; + + p = ngx_spdy_frame_write_head(p, NGX_SPDY_SETTINGS); + p = ngx_spdy_frame_write_flags_and_len(p, NGX_SPDY_FLAG_CLEAR_SETTINGS, + NGX_SPDY_SETTINGS_NUM_SIZE + + NGX_SPDY_SETTINGS_PAIR_SIZE); + + p = ngx_spdy_frame_aligned_write_uint32(p, 1); + p = ngx_spdy_frame_aligned_write_uint32(p, + NGX_SPDY_SETTINGS_MAX_STREAMS << 24 + | NGX_SPDY_SETTINGS_FLAG_PERSIST); + + sscf = ngx_http_get_module_srv_conf(sc->http_connection->conf_ctx, + ngx_http_spdy_module); + + p = ngx_spdy_frame_aligned_write_uint32(p, sscf->concurrent_streams); + + buf->last = p; + + ngx_http_spdy_queue_frame(sc, frame); + + return NGX_OK; +} + + +ngx_int_t +ngx_http_spdy_settings_frame_handler(ngx_http_spdy_connection_t *sc, + ngx_http_spdy_out_frame_t *frame) +{ + ngx_buf_t *buf; + + buf = frame->first->buf; + + if (buf->pos != buf->last) { + return NGX_AGAIN; + } + + ngx_free_chain(sc->pool, frame->first); + + 
return NGX_OK; +} + + +static ngx_http_spdy_out_frame_t * +ngx_http_spdy_get_ctl_frame(ngx_http_spdy_connection_t *sc, size_t size, + ngx_uint_t priority) +{ + ngx_chain_t *cl; + ngx_http_spdy_out_frame_t *frame; + + frame = sc->free_ctl_frames; + + if (frame) { + sc->free_ctl_frames = frame->free; + + cl = frame->first; + cl->buf->pos = cl->buf->start; + + } else { + frame = ngx_palloc(sc->pool, sizeof(ngx_http_spdy_out_frame_t)); + if (frame == NULL) { + return NULL; + } + + cl = ngx_alloc_chain_link(sc->pool); + if (cl == NULL) { + return NULL; + } + + cl->buf = ngx_create_temp_buf(sc->pool, + NGX_SPDY_CTL_FRAME_BUFFER_SIZE); + if (cl->buf == NULL) { + return NULL; + } + + cl->buf->last_buf = 1; + + frame->first = cl; + frame->last = cl; + frame->handler = ngx_http_spdy_ctl_frame_handler; + } + + frame->free = NULL; + +#if (NGX_DEBUG) + if (size > NGX_SPDY_CTL_FRAME_BUFFER_SIZE - NGX_SPDY_FRAME_HEADER_SIZE) { + ngx_log_error(NGX_LOG_ALERT, sc->pool->log, 0, + "requested control frame is too big: %z", size); + return NULL; + } + + frame->stream = NULL; + frame->size = size; +#endif + + frame->priority = priority; + frame->blocked = 0; + + return frame; +} + + +static ngx_int_t +ngx_http_spdy_ctl_frame_handler(ngx_http_spdy_connection_t *sc, + ngx_http_spdy_out_frame_t *frame) +{ + ngx_buf_t *buf; + + buf = frame->first->buf; + + if (buf->pos != buf->last) { + return NGX_AGAIN; + } + + frame->free = sc->free_ctl_frames; + sc->free_ctl_frames = frame; + + return NGX_OK; +} + + +static ngx_http_spdy_stream_t * +ngx_http_spdy_create_stream(ngx_http_spdy_connection_t *sc, ngx_uint_t id, + ngx_uint_t priority) +{ + ngx_log_t *log; + ngx_uint_t index; + ngx_event_t *rev, *wev; + ngx_connection_t *fc; + ngx_http_log_ctx_t *ctx; + ngx_http_request_t *r; + ngx_http_spdy_stream_t *stream; + ngx_http_core_srv_conf_t *cscf; + ngx_http_spdy_srv_conf_t *sscf; + + fc = sc->free_fake_connections; + + if (fc) { + sc->free_fake_connections = fc->data; + + rev = fc->read; + wev = 
fc->write; + log = fc->log; + ctx = log->data; + + } else { + fc = ngx_palloc(sc->pool, sizeof(ngx_connection_t)); + if (fc == NULL) { + return NULL; + } + + rev = ngx_palloc(sc->pool, sizeof(ngx_event_t)); + if (rev == NULL) { + return NULL; + } + + wev = ngx_palloc(sc->pool, sizeof(ngx_event_t)); + if (wev == NULL) { + return NULL; + } + + log = ngx_palloc(sc->pool, sizeof(ngx_log_t)); + if (log == NULL) { + return NULL; + } + + ctx = ngx_palloc(sc->pool, sizeof(ngx_http_log_ctx_t)); + if (ctx == NULL) { + return NULL; + } + + ctx->connection = fc; + ctx->request = NULL; + } + + ngx_memcpy(log, sc->connection->log, sizeof(ngx_log_t)); + + log->data = ctx; + + ngx_memzero(rev, sizeof(ngx_event_t)); + + rev->data = fc; + rev->ready = 1; + rev->handler = ngx_http_empty_handler; + rev->log = log; + + ngx_memcpy(wev, rev, sizeof(ngx_event_t)); + + wev->write = 1; + + ngx_memcpy(fc, sc->connection, sizeof(ngx_connection_t)); + + fc->data = sc->http_connection; + fc->read = rev; + fc->write = wev; + fc->sent = 0; + fc->log = log; + fc->buffered = 0; + fc->sndlowat = 1; + + r = ngx_http_create_request(fc); + if (r == NULL) { + return NULL; + } + + r->valid_location = 1; + + fc->data = r; + sc->connection->requests++; + + cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module); + + r->header_in = ngx_create_temp_buf(r->pool, + cscf->client_header_buffer_size); + if (r->header_in == NULL) { + ngx_http_free_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); + return NULL; + } + + r->headers_in.connection_type = NGX_HTTP_CONNECTION_CLOSE; + + stream = ngx_pcalloc(r->pool, sizeof(ngx_http_spdy_stream_t)); + if (stream == NULL) { + ngx_http_free_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); + return NULL; + } + + r->spdy_stream = stream; + + stream->id = id; + stream->request = r; + stream->connection = sc; + stream->priority = priority; + + sscf = ngx_http_get_module_srv_conf(r, ngx_http_spdy_module); + + index = ngx_http_spdy_stream_index(sscf, id); + + stream->index = 
sc->streams_index[index]; + sc->streams_index[index] = stream; + + sc->processing++; + + return stream; +} + + +static ngx_http_spdy_stream_t * +ngx_http_spdy_get_stream_by_id(ngx_http_spdy_connection_t *sc, + ngx_uint_t sid) +{ + ngx_http_spdy_stream_t *stream; + ngx_http_spdy_srv_conf_t *sscf; + + sscf = ngx_http_get_module_srv_conf(sc->http_connection->conf_ctx, + ngx_http_spdy_module); + + stream = sc->streams_index[ngx_http_spdy_stream_index(sscf, sid)]; + + while (stream) { + if (stream->id == sid) { + return stream; + } + + stream = stream->index; + } + + return NULL; +} + + +static ngx_int_t +ngx_http_spdy_parse_header(ngx_http_request_t *r) +{ + u_char *p, *end, ch; + ngx_uint_t len, hash; + ngx_http_core_srv_conf_t *cscf; + + enum { + sw_name_len = 0, + sw_name, + sw_value_len, + sw_value + } state; + + state = r->state; + + p = r->header_in->pos; + end = r->header_in->last; + + switch (state) { + + case sw_name_len: + + if (end - p < NGX_SPDY_NV_NLEN_SIZE) { + return NGX_AGAIN; + } + + len = ngx_spdy_frame_parse_uint16(p); + + if (!len) { + return NGX_HTTP_PARSE_INVALID_HEADER; + } + + p += NGX_SPDY_NV_NLEN_SIZE; + + r->header_name_end = p + len; + r->lowcase_index = len; + r->invalid_header = 0; + + state = sw_name; + + /* fall through */ + + case sw_name: + + if (r->header_name_end > end) { + break; + } + + cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module); + + r->header_name_start = p; + + hash = 0; + + for ( /* void */ ; p != r->header_name_end; p++) { + + ch = *p; + + hash = ngx_hash(hash, ch); + + if ((ch >= 'a' && ch <= 'z') + || (ch == '-') + || (ch >= '0' && ch <= '9') + || (ch == '_' && cscf->underscores_in_headers)) + { + continue; + } + + switch (ch) { + case '\0': + case LF: + case CR: + case ':': + return NGX_HTTP_PARSE_INVALID_REQUEST; + } + + if (ch >= 'A' && ch <= 'Z') { + return NGX_HTTP_PARSE_INVALID_HEADER; + } + + r->invalid_header = 1; + } + + r->header_hash = hash; + + state = sw_value_len; + + /* fall through */ + + 
case sw_value_len: + + if (end - p < NGX_SPDY_NV_VLEN_SIZE) { + break; + } + + len = ngx_spdy_frame_parse_uint16(p); + + if (!len) { + return NGX_ERROR; + } + + p += NGX_SPDY_NV_VLEN_SIZE; + + r->header_end = p + len; + + state = sw_value; + + /* fall through */ + + case sw_value: + + if (r->header_end > end) { + break; + } + + r->header_start = p; + + for ( /* void */ ; p != r->header_end; p++) { + + ch = *p; + + if (ch == '\0') { + + if (p == r->header_start) { + return NGX_ERROR; + } + + r->header_size = p - r->header_start; + r->header_in->pos = p + 1; + + return NGX_OK; + } + + if (ch == CR || ch == LF) { + return NGX_HTTP_PARSE_INVALID_HEADER; + } + } + + r->header_size = p - r->header_start; + r->header_in->pos = p; + + r->state = 0; + + return NGX_DONE; + } + + r->header_in->pos = p; + r->state = state; + + return NGX_AGAIN; +} + + +static ngx_int_t +ngx_http_spdy_alloc_large_header_buffer(ngx_http_request_t *r) +{ + u_char *old, *new; + size_t rest; + ngx_buf_t *buf; + ngx_http_spdy_stream_t *stream; + ngx_http_core_srv_conf_t *cscf; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy alloc large header buffer"); + + stream = r->spdy_stream; + + cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module); + + if (stream->header_buffers + == (ngx_uint_t) cscf->large_client_header_buffers.num) + { + return NGX_DECLINED; + } + + rest = r->header_in->last - r->header_in->pos; + + if (rest >= cscf->large_client_header_buffers.size) { + return NGX_DECLINED; + } + + buf = ngx_create_temp_buf(r->pool, cscf->large_client_header_buffers.size); + if (buf == NULL) { + return NGX_ERROR; + } + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy large header alloc: %p %uz", + buf->pos, buf->end - buf->last); + + old = r->header_in->pos; + new = buf->pos; + + if (rest) { + buf->last = ngx_cpymem(new, old, rest); + } + + if (r->header_name_end > old) { + r->header_name_end = new + (r->header_name_end - old); + + } else if (r->header_end > 
old) { + r->header_end = new + (r->header_end - old); + } + + r->header_in = buf; + + stream->header_buffers++; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_handle_request_header(ngx_http_request_t *r) +{ + ngx_uint_t i; + ngx_table_elt_t *h; + ngx_http_core_srv_conf_t *cscf; + ngx_http_spdy_request_header_t *sh; + + if (r->invalid_header) { + cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module); + + if (cscf->ignore_invalid_headers) { + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "client sent invalid header: \"%*s\"", + r->header_end - r->header_name_start, + r->header_name_start); + return NGX_OK; + } + + } else { + for (i = 0; i < NGX_SPDY_REQUEST_HEADERS; i++) { + sh = &ngx_http_spdy_request_headers[i]; + + if (sh->hash != r->header_hash + || sh->len != r->lowcase_index + || ngx_strncmp(sh->header, r->header_name_start, + r->lowcase_index) + != 0) + { + continue; + } + + return sh->handler(r); + } + } + + h = ngx_list_push(&r->headers_in.headers); + if (h == NULL) { + ngx_http_spdy_close_stream(r->spdy_stream, + NGX_HTTP_INTERNAL_SERVER_ERROR); + return NGX_ERROR; + } + + h->hash = r->header_hash; + + h->key.len = r->lowcase_index; + h->key.data = r->header_name_start; + h->key.data[h->key.len] = '\0'; + + h->value.len = r->header_size; + h->value.data = r->header_start; + h->value.data[h->value.len] = '\0'; + + h->lowcase_key = h->key.data; + + return NGX_OK; +} + + +void +ngx_http_spdy_request_headers_init() +{ + ngx_uint_t i; + ngx_http_spdy_request_header_t *h; + + for (i = 0; i < NGX_SPDY_REQUEST_HEADERS; i++) { + h = &ngx_http_spdy_request_headers[i]; + h->hash = ngx_hash_key(h->header, h->len); + } +} + + +static ngx_int_t +ngx_http_spdy_parse_method(ngx_http_request_t *r) +{ + size_t k, len; + ngx_uint_t n; + const u_char *p, *m; + + /* + * This array takes less than 256 sequential bytes, + * and if typical CPU cache line size is 64 bytes, + * it is prefetched for 4 load operations. 
+ */ + static const struct { + u_char len; + const u_char method[11]; + uint32_t value; + } tests[] = { + { 3, "GET", NGX_HTTP_GET }, + { 4, "POST", NGX_HTTP_POST }, + { 4, "HEAD", NGX_HTTP_HEAD }, + { 7, "OPTIONS", NGX_HTTP_OPTIONS }, + { 8, "PROPFIND", NGX_HTTP_PROPFIND }, + { 3, "PUT", NGX_HTTP_PUT }, + { 5, "MKCOL", NGX_HTTP_MKCOL }, + { 6, "DELETE", NGX_HTTP_DELETE }, + { 4, "COPY", NGX_HTTP_COPY }, + { 4, "MOVE", NGX_HTTP_MOVE }, + { 9, "PROPPATCH", NGX_HTTP_PROPPATCH }, + { 4, "LOCK", NGX_HTTP_LOCK }, + { 6, "UNLOCK", NGX_HTTP_UNLOCK }, + { 5, "PATCH", NGX_HTTP_PATCH }, + { 5, "TRACE", NGX_HTTP_TRACE } + }, *test; + + if (r->method_name.len) { + return NGX_HTTP_PARSE_INVALID_HEADER; + } + + len = r->header_size; + + r->method_name.len = len; + r->method_name.data = r->header_start; + + test = tests; + n = sizeof(tests) / sizeof(tests[0]); + + do { + if (len == test->len) { + p = r->method_name.data; + m = test->method; + k = len; + + do { + if (*p++ != *m++) { + goto next; + } + } while (--k); + + r->method = test->value; + return NGX_OK; + } + + next: + test++; + + } while (--n); + + p = r->method_name.data; + + do { + if ((*p < 'A' || *p > 'Z') && *p != '_') { + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "client sent invalid method"); + return NGX_HTTP_PARSE_INVALID_REQUEST; + } + + p++; + + } while (--len); + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_parse_scheme(ngx_http_request_t *r) +{ + if (r->schema_start) { + return NGX_HTTP_PARSE_INVALID_HEADER; + } + + r->schema_start = r->header_start; + r->schema_end = r->header_end; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_parse_url(ngx_http_request_t *r) +{ + if (r->unparsed_uri.len) { + return NGX_HTTP_PARSE_INVALID_HEADER; + } + + r->uri_start = r->header_start; + r->uri_end = r->header_end; + + if (ngx_http_parse_uri(r) != NGX_OK) { + return NGX_HTTP_PARSE_INVALID_REQUEST; + } + + if (ngx_http_process_request_uri(r) != NGX_OK) { + return NGX_ERROR; + } + + return 
NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_parse_version(ngx_http_request_t *r) +{ + u_char *p, ch; + + if (r->http_protocol.len) { + return NGX_HTTP_PARSE_INVALID_HEADER; + } + + p = r->header_start; + + if (r->header_size < 8 || !(ngx_str5cmp(p, 'H', 'T', 'T', 'P', '/'))) { + return NGX_HTTP_PARSE_INVALID_REQUEST; + } + + ch = *(p + 5); + + if (ch < '1' || ch > '9') { + return NGX_HTTP_PARSE_INVALID_REQUEST; + } + + r->http_major = ch - '0'; + + for (p += 6; p != r->header_end - 2; p++) { + + ch = *p; + + if (ch < '0' || ch > '9') { + return NGX_HTTP_PARSE_INVALID_REQUEST; + } + + r->http_major = r->http_major * 10 + ch - '0'; + } + + if (*p != '.') { + return NGX_HTTP_PARSE_INVALID_REQUEST; + } + + ch = *(p + 1); + + if (ch < '0' || ch > '9') { + return NGX_HTTP_PARSE_INVALID_REQUEST; + } + + r->http_minor = ch - '0'; + + for (p += 2; p != r->header_end; p++) { + + ch = *p; + + if (ch < '0' || ch > '9') { + return NGX_HTTP_PARSE_INVALID_REQUEST; + } + + r->http_minor = r->http_minor * 10 + ch - '0'; + } + + r->http_protocol.len = r->header_size; + r->http_protocol.data = r->header_start; + r->http_version = r->http_major * 1000 + r->http_minor; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_construct_request_line(ngx_http_request_t *r) +{ + u_char *p; + + if (r->method_name.len == 0 + || r->unparsed_uri.len == 0 + || r->http_protocol.len == 0) + { + ngx_http_finalize_request(r, NGX_HTTP_BAD_REQUEST); + return NGX_ERROR; + } + + r->request_line.len = r->method_name.len + 1 + + r->unparsed_uri.len + 1 + + r->http_protocol.len; + + p = ngx_pnalloc(r->pool, r->request_line.len + 1); + if (p == NULL) { + ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR); + return NGX_ERROR; + } + + r->request_line.data = p; + + p = ngx_cpymem(p, r->method_name.data, r->method_name.len); + + *p++ = ' '; + + p = ngx_cpymem(p, r->unparsed_uri.data, r->unparsed_uri.len); + + *p++ = ' '; + + ngx_memcpy(p, r->http_protocol.data, r->http_protocol.len + 1); + + /* 
some modules expect the space character after method name */ + r->method_name.data = r->request_line.data; + + return NGX_OK; +} + + +static void +ngx_http_spdy_run_request(ngx_http_request_t *r) +{ + ngx_uint_t i; + ngx_list_part_t *part; + ngx_table_elt_t *h; + ngx_http_header_t *hh; + ngx_http_core_main_conf_t *cmcf; + + if (ngx_http_spdy_construct_request_line(r) != NGX_OK) { + return; + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy http request line: \"%V\"", &r->request_line); + + cmcf = ngx_http_get_module_main_conf(r, ngx_http_core_module); + + part = &r->headers_in.headers.part; + h = part->elts; + + for (i = 0 ;; i++) { + + if (i >= part->nelts) { + if (part->next == NULL) { + break; + } + + part = part->next; + h = part->elts; + i = 0; + } + + hh = ngx_hash_find(&cmcf->headers_in_hash, h[i].hash, + h[i].lowcase_key, h[i].key.len); + + if (hh && hh->handler(r, &h[i], hh->offset) != NGX_OK) { + return; + } + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http header: \"%V: %V\"", &h[i].key, &h[i].value); + } + + r->http_state = NGX_HTTP_PROCESS_REQUEST_STATE; + + if (ngx_http_process_request_header(r) != NGX_OK) { + return; + } + + ngx_http_process_request(r); +} + + +static ngx_int_t +ngx_http_spdy_init_request_body(ngx_http_request_t *r) +{ + ngx_buf_t *buf; + ngx_temp_file_t *tf; + ngx_http_request_body_t *rb; + ngx_http_core_loc_conf_t *clcf; + + rb = ngx_pcalloc(r->pool, sizeof(ngx_http_request_body_t)); + if (rb == NULL) { + return NGX_ERROR; + } + + r->request_body = rb; + + if (r->spdy_stream->in_closed) { + return NGX_OK; + } + + rb->rest = r->headers_in.content_length_n; + + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + + if (r->request_body_in_file_only + || rb->rest > (off_t) clcf->client_body_buffer_size + || rb->rest < 0) + { + tf = ngx_pcalloc(r->pool, sizeof(ngx_temp_file_t)); + if (tf == NULL) { + return NGX_ERROR; + } + + tf->file.fd = NGX_INVALID_FILE; + tf->file.log = 
r->connection->log; + tf->path = clcf->client_body_temp_path; + tf->pool = r->pool; + tf->warn = "a client request body is buffered to a temporary file"; + tf->log_level = r->request_body_file_log_level; + tf->persistent = r->request_body_in_persistent_file; + tf->clean = r->request_body_in_clean_file; + + if (r->request_body_file_group_access) { + tf->access = 0660; + } + + rb->temp_file = tf; + + if (r->spdy_stream->in_closed + && ngx_create_temp_file(&tf->file, tf->path, tf->pool, + tf->persistent, tf->clean, tf->access) + != NGX_OK) + { + return NGX_ERROR; + } + + buf = ngx_calloc_buf(r->pool); + if (buf == NULL) { + return NGX_ERROR; + } + + if (rb->rest == 0) { + buf->in_file = 1; + buf->file = &tf->file; + } else { + rb->buf = buf; + } + + } else { + + if (rb->rest == 0) { + return NGX_OK; + } + + buf = ngx_create_temp_buf(r->pool, (size_t) rb->rest); + if (buf == NULL) { + return NGX_ERROR; + } + + rb->buf = buf; + } + + rb->bufs = ngx_alloc_chain_link(r->pool); + if (rb->bufs == NULL) { + return NGX_ERROR; + } + + rb->bufs->buf = buf; + rb->bufs->next = NULL; + + rb->rest = 0; + + return NGX_OK; +} + + +ngx_int_t +ngx_http_spdy_read_request_body(ngx_http_request_t *r, + ngx_http_client_body_handler_pt post_handler) +{ + ngx_http_spdy_stream_t *stream; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy read request body"); + + stream = r->spdy_stream; + + switch (stream->skip_data) { + + case NGX_SPDY_DATA_DISCARD: + post_handler(r); + return NGX_OK; + + case NGX_SPDY_DATA_ERROR: + if (r->headers_in.content_length_n == -1) { + return NGX_HTTP_REQUEST_ENTITY_TOO_LARGE; + } else { + return NGX_HTTP_BAD_REQUEST; + } + + case NGX_SPDY_DATA_INTERNAL_ERROR: + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + if (!r->request_body && ngx_http_spdy_init_request_body(r) != NGX_OK) { + stream->skip_data = NGX_SPDY_DATA_INTERNAL_ERROR; + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + if (stream->in_closed) { + post_handler(r); + return NGX_OK; + } + + 
r->request_body->post_handler = post_handler; + + return NGX_AGAIN; +} + + +void +ngx_http_spdy_close_stream(ngx_http_spdy_stream_t *stream, ngx_int_t rc) +{ + ngx_event_t *ev; + ngx_connection_t *fc; + ngx_http_spdy_stream_t **index, *s; + ngx_http_spdy_srv_conf_t *sscf; + ngx_http_spdy_connection_t *sc; + + sc = stream->connection; + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy close stream %ui, processing %ui", + stream->id, sc->processing); + + if (!stream->out_closed) { + if (ngx_http_spdy_send_rst_stream(sc, stream->id, + NGX_SPDY_INTERNAL_ERROR, + stream->priority) + != NGX_OK) + { + sc->connection->error = 1; + } + } + + sscf = ngx_http_get_module_srv_conf(sc->http_connection->conf_ctx, + ngx_http_spdy_module); + + index = sc->streams_index + ngx_http_spdy_stream_index(sscf, stream->id); + + for ( ;; ) { + s = *index; + + if (s == NULL) { + break; + } + + if (s == stream) { + *index = s->index; + break; + } + + index = &s->index; + } + + fc = stream->request->connection; + + ngx_http_free_request(stream->request, rc); + + ev = fc->read; + + if (ev->active || ev->disabled) { + ngx_del_event(ev, NGX_READ_EVENT, 0); + } + + if (ev->timer_set) { + ngx_del_timer(ev); + } + + if (ev->prev) { + ngx_delete_posted_event(ev); + } + + ev = fc->write; + + if (ev->active || ev->disabled) { + ngx_del_event(ev, NGX_WRITE_EVENT, 0); + } + + if (ev->timer_set) { + ngx_del_timer(ev); + } + + if (ev->prev) { + ngx_delete_posted_event(ev); + } + + fc->data = sc->free_fake_connections; + sc->free_fake_connections = fc; + + sc->processing--; + + if (sc->processing || sc->blocked) { + return; + } + + ev = sc->connection->read; + + ev->handler = ngx_http_spdy_handle_connection_handler; + ngx_post_event(ev, &ngx_posted_events); +} + + +static void +ngx_http_spdy_handle_connection_handler(ngx_event_t *rev) +{ + ngx_connection_t *c; + + rev->handler = ngx_http_spdy_read_handler; + + if (rev->ready) { + ngx_http_spdy_read_handler(rev); + return; + } + + c = 
rev->data; + + ngx_http_spdy_handle_connection(c->data); +} + + +static void +ngx_http_spdy_keepalive_handler(ngx_event_t *rev) +{ + ngx_connection_t *c; + ngx_http_spdy_srv_conf_t *sscf; + ngx_http_spdy_connection_t *sc; + + c = rev->data; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "spdy keepalive handler"); + + if (rev->timedout || c->close) { + ngx_http_close_connection(c); + return; + } + +#if (NGX_HAVE_KQUEUE) + + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { + if (rev->pending_eof) { + c->log->handler = NULL; + ngx_log_error(NGX_LOG_INFO, c->log, rev->kq_errno, + "kevent() reported that client %V closed " + "keepalive connection", &c->addr_text); +#if (NGX_HTTP_SSL) + if (c->ssl) { + c->ssl->no_send_shutdown = 1; + } +#endif + ngx_http_close_connection(c); + return; + } + } + +#endif + + c->destroyed = 0; + c->idle = 0; + ngx_reusable_connection(c, 0); + + sc = c->data; + + sscf = ngx_http_get_module_srv_conf(sc->http_connection->conf_ctx, + ngx_http_spdy_module); + + sc->pool = ngx_create_pool(sscf->pool_size, sc->connection->log); + if (sc->pool == NULL) { + ngx_http_close_connection(c); + return; + } + + sc->streams_index = ngx_pcalloc(sc->pool, + ngx_http_spdy_streams_index_size(sscf) + * sizeof(ngx_http_spdy_stream_t *)); + if (sc->streams_index == NULL) { + ngx_http_close_connection(c); + return; + } + + c->write->handler = ngx_http_spdy_write_handler; + + rev->handler = ngx_http_spdy_read_handler; + ngx_http_spdy_read_handler(rev); +} + + +static void +ngx_http_spdy_finalize_connection(ngx_http_spdy_connection_t *sc, + ngx_int_t rc) +{ + ngx_uint_t i, size; + ngx_event_t *ev; + ngx_connection_t *c, *fc; + ngx_http_request_t *r; + ngx_http_spdy_stream_t *stream; + ngx_http_spdy_srv_conf_t *sscf; + + c = sc->connection; + + if (!sc->processing) { + ngx_http_close_connection(c); + return; + } + + c->error = 1; + c->read->handler = ngx_http_empty_handler; + + sc->last_out = NULL; + + sc->blocked = 1; + + sscf = 
ngx_http_get_module_srv_conf(sc->http_connection->conf_ctx, + ngx_http_spdy_module); + + size = ngx_http_spdy_streams_index_size(sscf); + + for (i = 0; i < size; i++) { + stream = sc->streams_index[i]; + + while (stream) { + r = stream->request; + + fc = r->connection; + fc->error = 1; + + if (stream->waiting) { + r->blocked -= stream->waiting; + stream->waiting = 0; + ev = fc->write; + + } else { + ev = fc->read; + } + + stream = stream->index; + + ev->eof = 1; + ev->handler(ev); + } + } + + sc->blocked = 0; + + if (sc->processing) { + return; + } + + ngx_http_close_connection(c); +} + + +static void +ngx_http_spdy_pool_cleanup(void *data) +{ + ngx_http_spdy_connection_t *sc = data; + + if (sc->pool) { + ngx_destroy_pool(sc->pool); + } +} + + +static void * +ngx_http_spdy_zalloc(void *opaque, u_int items, u_int size) +{ + ngx_http_spdy_connection_t *sc = opaque; + + return ngx_palloc(sc->connection->pool, items * size); +} + + +static void +ngx_http_spdy_zfree(void *opaque, void *address) +{ +#if 0 + ngx_http_spdy_connection_t *sc = opaque; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy zfree: %p", address); +#endif +} Added: trunk/src/http/ngx_http_spdy.h =================================================================== --- trunk/src/http/ngx_http_spdy.h (rev 0) +++ trunk/src/http/ngx_http_spdy.h 2013-03-20 10:36:57 UTC (rev 5122) @@ -0,0 +1,235 @@ +/* + * Copyright (C) Nginx, Inc. + * Copyright (C) Valentin V. 
Bartenev + */ + + +#ifndef _NGX_HTTP_SPDY_H_INCLUDED_ +#define _NGX_HTTP_SPDY_H_INCLUDED_ + + +#include +#include +#include + +#include + + +#define NGX_SPDY_VERSION 2 + +#ifdef TLSEXT_TYPE_next_proto_neg +#define NGX_SPDY_NPN_ADVERTISE "\x06spdy/2" +#define NGX_SPDY_NPN_NEGOTIATED "spdy/2" +#endif + +#define NGX_SPDY_STATE_BUFFER_SIZE 16 + +#define NGX_SPDY_CTL_BIT 1 + +#define NGX_SPDY_SYN_STREAM 1 +#define NGX_SPDY_SYN_REPLY 2 +#define NGX_SPDY_RST_STREAM 3 +#define NGX_SPDY_SETTINGS 4 +#define NGX_SPDY_NOOP 5 +#define NGX_SPDY_PING 6 +#define NGX_SPDY_GOAWAY 7 +#define NGX_SPDY_HEADERS 8 + +#define NGX_SPDY_FRAME_HEADER_SIZE 8 + +#define NGX_SPDY_SID_SIZE 4 + +#define NGX_SPDY_SYN_STREAM_SIZE 10 +#define NGX_SPDY_SYN_REPLY_SIZE 6 +#define NGX_SPDY_RST_STREAM_SIZE 8 +#define NGX_SPDY_PING_SIZE 4 +#define NGX_SPDY_GOAWAY_SIZE 4 +#define NGX_SPDY_NV_NUM_SIZE 2 +#define NGX_SPDY_NV_NLEN_SIZE 2 +#define NGX_SPDY_NV_VLEN_SIZE 2 +#define NGX_SPDY_SETTINGS_NUM_SIZE 4 +#define NGX_SPDY_SETTINGS_IDF_SIZE 4 +#define NGX_SPDY_SETTINGS_VAL_SIZE 4 + +#define NGX_SPDY_SETTINGS_PAIR_SIZE \ + (NGX_SPDY_SETTINGS_IDF_SIZE + NGX_SPDY_SETTINGS_VAL_SIZE) + +#define NGX_SPDY_HIGHEST_PRIORITY 0 +#define NGX_SPDY_LOWEST_PRIORITY 3 + +#define NGX_SPDY_FLAG_FIN 0x01 +#define NGX_SPDY_FLAG_UNIDIRECTIONAL 0x02 +#define NGX_SPDY_FLAG_CLEAR_SETTINGS 0x01 + +#define NGX_SPDY_MAX_FRAME_SIZE ((1 << 24) - 1) + +#define NGX_SPDY_DATA_DISCARD 1 +#define NGX_SPDY_DATA_ERROR 2 +#define NGX_SPDY_DATA_INTERNAL_ERROR 3 + + +typedef struct ngx_http_spdy_connection_s ngx_http_spdy_connection_t; +typedef struct ngx_http_spdy_out_frame_s ngx_http_spdy_out_frame_t; + + +typedef u_char *(*ngx_http_spdy_handler_pt) (ngx_http_spdy_connection_t *sc, + u_char *pos, u_char *end); + +struct ngx_http_spdy_connection_s { + ngx_connection_t *connection; + ngx_http_connection_t *http_connection; + + ngx_uint_t processing; + + u_char buffer[NGX_SPDY_STATE_BUFFER_SIZE]; + size_t buffer_used; + ngx_http_spdy_handler_pt 
handler; + + z_stream zstream_in; + z_stream zstream_out; + + ngx_pool_t *pool; + + ngx_http_spdy_out_frame_t *free_ctl_frames; + ngx_connection_t *free_fake_connections; + + ngx_http_spdy_stream_t **streams_index; + + ngx_http_spdy_out_frame_t *last_out; + ngx_http_spdy_stream_t *last_stream; + + ngx_http_spdy_stream_t *stream; + + ngx_uint_t headers; + size_t length; + u_char flags; + + ngx_uint_t last_sid; + + unsigned blocked:2; + unsigned waiting:1; /* FIXME better name */ +}; + + +struct ngx_http_spdy_stream_s { + ngx_uint_t id; + ngx_http_request_t *request; + ngx_http_spdy_connection_t *connection; + ngx_http_spdy_stream_t *index; + ngx_http_spdy_stream_t *next; + + ngx_uint_t header_buffers; + ngx_uint_t waiting; + ngx_http_spdy_out_frame_t *free_frames; + ngx_chain_t *free_data_headers; + + unsigned priority:2; + unsigned handled:1; + unsigned in_closed:1; + unsigned out_closed:1; + unsigned skip_data:2; +}; + + +struct ngx_http_spdy_out_frame_s { + ngx_http_spdy_out_frame_t *next; + ngx_chain_t *first; + ngx_chain_t *last; + ngx_int_t (*handler)(ngx_http_spdy_connection_t *sc, + ngx_http_spdy_out_frame_t *frame); + + ngx_http_spdy_out_frame_t *free; + + ngx_http_spdy_stream_t *stream; + size_t size; + + ngx_uint_t priority; + unsigned blocked:1; + unsigned fin:1; +}; + + +static ngx_inline void +ngx_http_spdy_queue_frame(ngx_http_spdy_connection_t *sc, + ngx_http_spdy_out_frame_t *frame) +{ + ngx_http_spdy_out_frame_t **out; + + for (out = &sc->last_out; *out; out = &(*out)->next) + { + if (frame->priority >= (*out)->priority) { + break; + } + } + + frame->next = *out; + *out = frame; +} + + +static ngx_inline void +ngx_http_spdy_queue_blocked_frame(ngx_http_spdy_connection_t *sc, + ngx_http_spdy_out_frame_t *frame) +{ + ngx_http_spdy_out_frame_t **out; + + for (out = &sc->last_out; *out && !(*out)->blocked; out = &(*out)->next) + { + if (frame->priority >= (*out)->priority) { + break; + } + } + + frame->next = *out; + *out = frame; +} + + +void 
ngx_http_spdy_init(ngx_event_t *rev); +void ngx_http_spdy_request_headers_init(); + +ngx_int_t ngx_http_spdy_read_request_body(ngx_http_request_t *r, + ngx_http_client_body_handler_pt post_handler); + +void ngx_http_spdy_close_stream(ngx_http_spdy_stream_t *stream, ngx_int_t rc); + +ngx_int_t ngx_http_spdy_send_output_queue(ngx_http_spdy_connection_t *sc); + + +#define ngx_spdy_frame_aligned_write_uint16(p, s) \ + (*(uint16_t *) (p) = htons((uint16_t) (s)), (p) + sizeof(uint16_t)) + +#define ngx_spdy_frame_aligned_write_uint32(p, s) \ + (*(uint32_t *) (p) = htonl((uint32_t) (s)), (p) + sizeof(uint32_t)) + +#if (NGX_HAVE_NONALIGNED) + +#define ngx_spdy_frame_write_uint16 ngx_spdy_frame_aligned_write_uint16 +#define ngx_spdy_frame_write_uint32 ngx_spdy_frame_aligned_write_uint32 + +#else + +#define ngx_spdy_frame_write_uint16(p, s) \ + ((p)[0] = (u_char) (s) >> 8, (p)[1] = (u_char) (s), (p) + sizeof(uint16_t)) + +#define ngx_spdy_frame_write_uint32(p, s) \ + ((p)[0] = (u_char) (s) >> 24, \ + (p)[1] = (u_char) (s) >> 16, \ + (p)[2] = (u_char) (s) >> 8, \ + (p)[3] = (u_char) (s), (p) + sizeof(uint32_t)) + +#endif + + +#define ngx_spdy_ctl_frame_head(t) \ + ((uint32_t) NGX_SPDY_CTL_BIT << 31 | NGX_SPDY_VERSION << 16 | (t)) + +#define ngx_spdy_frame_write_head(p, t) \ + ngx_spdy_frame_aligned_write_uint32(p, ngx_spdy_ctl_frame_head(t)) + +#define ngx_spdy_frame_write_flags_and_len(p, f, l) \ + ngx_spdy_frame_aligned_write_uint32(p, (f) << 24 | (l)) + +#define ngx_spdy_frame_write_sid ngx_spdy_frame_aligned_write_uint32 + +#endif /* _NGX_HTTP_SPDY_H_INCLUDED_ */ Added: trunk/src/http/ngx_http_spdy_filter_module.c =================================================================== --- trunk/src/http/ngx_http_spdy_filter_module.c (rev 0) +++ trunk/src/http/ngx_http_spdy_filter_module.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -0,0 +1,999 @@ + +/* + * Copyright (C) Nginx, Inc. + * Copyright (C) Valentin V. 
Bartenev + */ + + +#include +#include +#include +#include +#include + +#include + + +#define NGX_SPDY_WRITE_BUFFERED NGX_HTTP_WRITE_BUFFERED + +#define ngx_http_spdy_nv_nsize(h) (NGX_SPDY_NV_NLEN_SIZE + sizeof(h) - 1) +#define ngx_http_spdy_nv_vsize(h) (NGX_SPDY_NV_VLEN_SIZE + sizeof(h) - 1) + +#define ngx_http_spdy_nv_write_num ngx_spdy_frame_write_uint16 +#define ngx_http_spdy_nv_write_nlen ngx_spdy_frame_write_uint16 +#define ngx_http_spdy_nv_write_vlen ngx_spdy_frame_write_uint16 + +#define ngx_http_spdy_nv_write_name(p, h) \ + ngx_cpymem(ngx_http_spdy_nv_write_nlen(p, sizeof(h) - 1), h, sizeof(h) - 1) + +#define ngx_http_spdy_nv_write_val(p, h) \ + ngx_cpymem(ngx_http_spdy_nv_write_vlen(p, sizeof(h) - 1), h, sizeof(h) - 1) + +static ngx_inline ngx_int_t ngx_http_spdy_filter_send( + ngx_connection_t *fc, ngx_http_spdy_stream_t *stream); + +static ngx_http_spdy_out_frame_t *ngx_http_spdy_filter_get_data_frame( + ngx_http_spdy_stream_t *stream, size_t len, ngx_uint_t flags, + ngx_chain_t *first, ngx_chain_t *last); + +static ngx_int_t ngx_http_spdy_syn_frame_handler( + ngx_http_spdy_connection_t *sc, ngx_http_spdy_out_frame_t *frame); +static ngx_int_t ngx_http_spdy_data_frame_handler( + ngx_http_spdy_connection_t *sc, ngx_http_spdy_out_frame_t *frame); +static ngx_inline void ngx_http_spdy_handle_frame( + ngx_http_spdy_stream_t *stream, ngx_http_spdy_out_frame_t *frame); +static ngx_inline void ngx_http_spdy_handle_stream( + ngx_http_spdy_connection_t *sc, ngx_http_spdy_stream_t *stream); + +static void ngx_http_spdy_filter_cleanup(void *data); + +static ngx_int_t ngx_http_spdy_filter_init(ngx_conf_t *cf); + + +static ngx_http_module_t ngx_http_spdy_filter_module_ctx = { + NULL, /* preconfiguration */ + ngx_http_spdy_filter_init, /* postconfiguration */ + + NULL, /* create main configuration */ + NULL, /* init main configuration */ + + NULL, /* create server configuration */ + NULL, /* merge server configuration */ + + NULL, /* create location configuration */ + 
NULL /* merge location configuration */ +}; + + +ngx_module_t ngx_http_spdy_filter_module = { + NGX_MODULE_V1, + &ngx_http_spdy_filter_module_ctx, /* module context */ + NULL, /* module directives */ + NGX_HTTP_MODULE, /* module type */ + NULL, /* init master */ + NULL, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + + +static ngx_http_output_header_filter_pt ngx_http_next_header_filter; +static ngx_http_output_body_filter_pt ngx_http_next_body_filter; + + +static ngx_int_t +ngx_http_spdy_header_filter(ngx_http_request_t *r) +{ + int rc; + size_t len; + u_char *p, *buf, *last; + ngx_buf_t *b; + ngx_str_t host; + ngx_uint_t i, j, count, port; + ngx_chain_t *cl; + ngx_list_part_t *part, *pt; + ngx_table_elt_t *header, *h; + ngx_connection_t *c; + ngx_http_cleanup_t *cln; + ngx_http_core_loc_conf_t *clcf; + ngx_http_core_srv_conf_t *cscf; + ngx_http_spdy_stream_t *stream; + ngx_http_spdy_out_frame_t *frame; + ngx_http_spdy_connection_t *sc; + struct sockaddr_in *sin; +#if (NGX_HAVE_INET6) + struct sockaddr_in6 *sin6; +#endif + u_char addr[NGX_SOCKADDR_STRLEN]; + + if (!r->spdy_stream) { + return ngx_http_next_header_filter(r); + } + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy header filter"); + + if (r->header_sent) { + return NGX_OK; + } + + r->header_sent = 1; + + if (r != r->main) { + return NGX_OK; + } + + c = r->connection; + + if (r->method == NGX_HTTP_HEAD) { + r->header_only = 1; + } + + switch (r->headers_out.status) { + + case NGX_HTTP_OK: + case NGX_HTTP_PARTIAL_CONTENT: + break; + + case NGX_HTTP_NOT_MODIFIED: + r->header_only = 1; + break; + + case NGX_HTTP_NO_CONTENT: + r->header_only = 1; + + ngx_str_null(&r->headers_out.content_type); + + r->headers_out.content_length = NULL; + r->headers_out.content_length_n = -1; + + /* fall through */ + + default: + r->headers_out.last_modified_time = -1; + 
r->headers_out.last_modified = NULL; + } + + len = NGX_SPDY_NV_NUM_SIZE + + ngx_http_spdy_nv_nsize("version") + + ngx_http_spdy_nv_vsize("HTTP/1.1") + + ngx_http_spdy_nv_nsize("status") + + ngx_http_spdy_nv_vsize("418"); + + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + + if (r->headers_out.server == NULL) { + len += ngx_http_spdy_nv_nsize("server"); + len += clcf->server_tokens ? ngx_http_spdy_nv_vsize(NGINX_VER) + : ngx_http_spdy_nv_vsize("nginx"); + } + + if (r->headers_out.date == NULL) { + len += ngx_http_spdy_nv_nsize("date") + + ngx_http_spdy_nv_vsize("Wed, 31 Dec 1986 10:00:00 GMT"); + } + + if (r->headers_out.content_type.len) { + len += ngx_http_spdy_nv_nsize("content-type") + + NGX_SPDY_NV_VLEN_SIZE + r->headers_out.content_type.len; + + if (r->headers_out.content_type_len == r->headers_out.content_type.len + && r->headers_out.charset.len) + { + len += sizeof("; charset=") - 1 + r->headers_out.charset.len; + } + } + + if (r->headers_out.content_length == NULL + && r->headers_out.content_length_n >= 0) + { + len += ngx_http_spdy_nv_nsize("content-length") + + NGX_SPDY_NV_VLEN_SIZE + NGX_OFF_T_LEN; + } + + if (r->headers_out.last_modified == NULL + && r->headers_out.last_modified_time != -1) + { + len += ngx_http_spdy_nv_nsize("last-modified") + + ngx_http_spdy_nv_vsize("Wed, 31 Dec 1986 10:00:00 GMT"); + } + + if (r->headers_out.location + && r->headers_out.location->value.len + && r->headers_out.location->value.data[0] == '/') + { + r->headers_out.location->hash = 0; + + if (clcf->server_name_in_redirect) { + cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module); + host = cscf->server_name; + + } else if (r->headers_in.server.len) { + host = r->headers_in.server; + + } else { + host.len = NGX_SOCKADDR_STRLEN; + host.data = addr; + + if (ngx_connection_local_sockaddr(c, &host, 0) != NGX_OK) { + return NGX_ERROR; + } + } + + switch (c->local_sockaddr->sa_family) { + +#if (NGX_HAVE_INET6) + case AF_INET6: + sin6 = (struct 
sockaddr_in6 *) c->local_sockaddr; + port = ntohs(sin6->sin6_port); + break; +#endif +#if (NGX_HAVE_UNIX_DOMAIN) + case AF_UNIX: + port = 0; + break; +#endif + default: /* AF_INET */ + sin = (struct sockaddr_in *) c->local_sockaddr; + port = ntohs(sin->sin_port); + break; + } + + len += ngx_http_spdy_nv_nsize("location") + + ngx_http_spdy_nv_vsize("https://") + + host.len + + r->headers_out.location->value.len; + + if (clcf->port_in_redirect) { + +#if (NGX_HTTP_SSL) + if (c->ssl) + port = (port == 443) ? 0 : port; + else +#endif + port = (port == 80) ? 0 : port; + + } else { + port = 0; + } + + if (port) { + len += sizeof(":65535") - 1; + } + + } else { + ngx_str_null(&host); + port = 0; + } + + part = &r->headers_out.headers.part; + header = part->elts; + + for (i = 0; /* void */; i++) { + + if (i >= part->nelts) { + if (part->next == NULL) { + break; + } + + part = part->next; + header = part->elts; + i = 0; + } + + if (header[i].hash == 0) { + continue; + } + + len += NGX_SPDY_NV_NLEN_SIZE + header[i].key.len + + NGX_SPDY_NV_VLEN_SIZE + header[i].value.len; + } + + buf = ngx_alloc(len, r->pool->log); + if (buf == NULL) { + return NGX_ERROR; + } + + last = buf + NGX_SPDY_NV_NUM_SIZE; + + last = ngx_http_spdy_nv_write_name(last, "version"); + last = ngx_http_spdy_nv_write_val(last, "HTTP/1.1"); + + last = ngx_http_spdy_nv_write_name(last, "status"); + last = ngx_spdy_frame_write_uint16(last, 3); + last = ngx_sprintf(last, "%03ui", r->headers_out.status); + + count = 2; + + if (r->headers_out.server == NULL) { + last = ngx_http_spdy_nv_write_name(last, "server"); + last = clcf->server_tokens + ? 
ngx_http_spdy_nv_write_val(last, NGINX_VER) + : ngx_http_spdy_nv_write_val(last, "nginx"); + + count++; + } + + if (r->headers_out.date == NULL) { + last = ngx_http_spdy_nv_write_name(last, "date"); + + last = ngx_http_spdy_nv_write_vlen(last, ngx_cached_http_time.len); + + last = ngx_cpymem(last, ngx_cached_http_time.data, + ngx_cached_http_time.len); + + count++; + } + + if (r->headers_out.content_type.len) { + + last = ngx_http_spdy_nv_write_name(last, "content-type"); + + p = last + NGX_SPDY_NV_VLEN_SIZE; + + last = ngx_cpymem(p, r->headers_out.content_type.data, + r->headers_out.content_type.len); + + if (r->headers_out.content_type_len == r->headers_out.content_type.len + && r->headers_out.charset.len) + { + last = ngx_cpymem(last, "; charset=", sizeof("; charset=") - 1); + + last = ngx_cpymem(last, r->headers_out.charset.data, + r->headers_out.charset.len); + + /* update r->headers_out.content_type for possible logging */ + + r->headers_out.content_type.len = last - p; + r->headers_out.content_type.data = p; + } + + (void) ngx_http_spdy_nv_write_vlen(p - NGX_SPDY_NV_VLEN_SIZE, + r->headers_out.content_type.len); + + count++; + } + + if (r->headers_out.content_length == NULL + && r->headers_out.content_length_n >= 0) + { + last = ngx_http_spdy_nv_write_name(last, "content-length"); + + p = last + NGX_SPDY_NV_VLEN_SIZE; + + last = ngx_sprintf(p, "%O", r->headers_out.content_length_n); + + (void) ngx_http_spdy_nv_write_vlen(p - NGX_SPDY_NV_VLEN_SIZE, + last - p); + + count++; + } + + if (r->headers_out.last_modified == NULL + && r->headers_out.last_modified_time != -1) + { + last = ngx_http_spdy_nv_write_name(last, "last-modified"); + + p = last + NGX_SPDY_NV_VLEN_SIZE; + + last = ngx_http_time(p, r->headers_out.last_modified_time); + + (void) ngx_http_spdy_nv_write_vlen(p - NGX_SPDY_NV_VLEN_SIZE, + last - p); + + count++; + } + + if (host.data) { + + last = ngx_http_spdy_nv_write_name(last, "location"); + + p = last + NGX_SPDY_NV_VLEN_SIZE; + + last = 
ngx_cpymem(p, "http", sizeof("http") - 1); + +#if (NGX_HTTP_SSL) + if (c->ssl) { + *last++ ='s'; + } +#endif + + *last++ = ':'; *last++ = '/'; *last++ = '/'; + + last = ngx_cpymem(last, host.data, host.len); + + if (port) { + last = ngx_sprintf(last, ":%ui", port); + } + + last = ngx_cpymem(last, r->headers_out.location->value.data, + r->headers_out.location->value.len); + + /* update r->headers_out.location->value for possible logging */ + + r->headers_out.location->value.len = last - p; + r->headers_out.location->value.data = p; + ngx_str_set(&r->headers_out.location->key, "location"); + + (void) ngx_http_spdy_nv_write_vlen(p - NGX_SPDY_NV_VLEN_SIZE, + r->headers_out.location->value.len); + + count++; + } + + part = &r->headers_out.headers.part; + header = part->elts; + + for (i = 0; /* void */; i++) { + + if (i >= part->nelts) { + if (part->next == NULL) { + break; + } + + part = part->next; + header = part->elts; + i = 0; + } + + if (header[i].hash == 0 || header[i].hash == 2) { + continue; + } + + if ((header[i].key.len == 6 + && ngx_strncasecmp(header[i].key.data, + (u_char *) "status", 6) == 0) + || (header[i].key.len == 7 + && ngx_strncasecmp(header[i].key.data, + (u_char *) "version", 7) == 0)) + { + header[i].hash = 0; + continue; + } + + last = ngx_http_spdy_nv_write_nlen(last, header[i].key.len); + + ngx_strlow(last, header[i].key.data, header[i].key.len); + last += header[i].key.len; + + p = last + NGX_SPDY_NV_VLEN_SIZE; + + last = ngx_cpymem(p, header[i].value.data, header[i].value.len); + + pt = part; + h = header; + + for (j = i + 1; /* void */; j++) { + + if (j >= pt->nelts) { + if (pt->next == NULL) { + break; + } + + pt = pt->next; + h = pt->elts; + j = 0; + } + + if (h[j].hash == 0 || h[j].hash == 2 + || h[j].key.len != header[i].key.len + || ngx_strncasecmp(header[i].key.data, h[j].key.data, + header[i].key.len)) + { + continue; + } + + *last++ = '\0'; + + last = ngx_cpymem(last, h[j].value.data, h[j].value.len); + + h[j].hash = 2; + } + + 
(void) ngx_http_spdy_nv_write_vlen(p - NGX_SPDY_NV_VLEN_SIZE, + last - p); + + count++; + } + + (void) ngx_spdy_frame_write_uint16(buf, count); + + stream = r->spdy_stream; + sc = stream->connection; + + len = last - buf; + + b = ngx_create_temp_buf(r->pool, NGX_SPDY_FRAME_HEADER_SIZE + + NGX_SPDY_SYN_REPLY_SIZE + + deflateBound(&sc->zstream_out, len)); + if (b == NULL) { + ngx_free(buf); + return NGX_ERROR; + } + + b->last += NGX_SPDY_FRAME_HEADER_SIZE + NGX_SPDY_SYN_REPLY_SIZE; + + sc->zstream_out.next_in = buf; + sc->zstream_out.avail_in = len; + sc->zstream_out.next_out = b->last; + sc->zstream_out.avail_out = b->end - b->last; + + rc = deflate(&sc->zstream_out, Z_SYNC_FLUSH); + + ngx_free(buf); + + if (rc != Z_OK) { + ngx_log_error(NGX_LOG_ALERT, c->log, 0, + "spdy deflate() failed: %d", rc); + return NGX_ERROR; + } + + ngx_log_debug5(NGX_LOG_DEBUG_HTTP, c->log, 0, + "spdy deflate out: ni:%p no:%p ai:%ud ao:%ud rc:%d", + sc->zstream_out.next_in, sc->zstream_out.next_out, + sc->zstream_out.avail_in, sc->zstream_out.avail_out, + rc); + + b->last = sc->zstream_out.next_out; + + p = b->pos; + p = ngx_spdy_frame_write_head(p, NGX_SPDY_SYN_REPLY); + + len = b->last - b->pos; + + r->header_size = len; + + if (r->header_only) { + b->last_buf = 1; + p = ngx_spdy_frame_write_flags_and_len(p, NGX_SPDY_FLAG_FIN, + len - NGX_SPDY_FRAME_HEADER_SIZE); + } else { + p = ngx_spdy_frame_write_flags_and_len(p, 0, + len - NGX_SPDY_FRAME_HEADER_SIZE); + } + + (void) ngx_spdy_frame_write_sid(p, stream->id); + + cl = ngx_alloc_chain_link(r->pool); + if (cl == NULL) { + return NGX_ERROR; + } + + cl->buf = b; + cl->next = NULL; + + frame = ngx_palloc(r->pool, sizeof(ngx_http_spdy_out_frame_t)); + if (frame == NULL) { + return NGX_ERROR; + } + + frame->first = cl; + frame->last = cl; + frame->handler = ngx_http_spdy_syn_frame_handler; + frame->free = NULL; + frame->stream = stream; + frame->size = len; + frame->priority = stream->priority; + frame->blocked = 1; + frame->fin = 
r->header_only; + + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, stream->request->connection->log, 0, + "spdy:%ui create SYN_REPLY frame %p: size:%uz", + stream->id, frame, frame->size); + + ngx_http_spdy_queue_blocked_frame(sc, frame); + + r->blocked++; + + cln = ngx_http_cleanup_add(r, 0); + if (cln == NULL) { + return NGX_ERROR; + } + + cln->handler = ngx_http_spdy_filter_cleanup; + cln->data = stream; + + stream->waiting = 1; + + return ngx_http_spdy_filter_send(c, stream); +} + + +static ngx_int_t +ngx_http_spdy_body_filter(ngx_http_request_t *r, ngx_chain_t *in) +{ + off_t size; + ngx_buf_t *b; + ngx_chain_t *cl, *ll, *out, **ln; + ngx_http_spdy_stream_t *stream; + ngx_http_spdy_out_frame_t *frame; + + stream = r->spdy_stream; + + if (stream == NULL) { + return ngx_http_next_body_filter(r, in); + } + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "spdy body filter \"%V?%V\"", &r->uri, &r->args); + + if (in == NULL || r->header_only) { + + if (stream->waiting) { + return NGX_AGAIN; + } + + r->connection->buffered &= ~NGX_SPDY_WRITE_BUFFERED; + + return NGX_OK; + } + + size = 0; + ln = &out; + ll = in; + + for ( ;; ) { + b = ll->buf; +#if 1 + if (ngx_buf_size(b) == 0 && !ngx_buf_special(b)) { + ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, + "zero size buf in spdy body filter " + "t:%d r:%d f:%d %p %p-%p %p %O-%O", + b->temporary, + b->recycled, + b->in_file, + b->start, + b->pos, + b->last, + b->file, + b->file_pos, + b->file_last); + + ngx_debug_point(); + return NGX_ERROR; + } +#endif + cl = ngx_alloc_chain_link(r->pool); + if (cl == NULL) { + return NGX_ERROR; + } + + size += ngx_buf_size(b); + cl->buf = b; + + *ln = cl; + ln = &cl->next; + + if (ll->next == NULL) { + break; + } + + ll = ll->next; + } + + if (size > NGX_SPDY_MAX_FRAME_SIZE) { + ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, + "FIXME: chain too big in spdy filter: %O", size); + return NGX_ERROR; + } + + frame = ngx_http_spdy_filter_get_data_frame(stream, (size_t) size, + 
b->last_buf, out, cl); + if (frame == NULL) { + return NGX_ERROR; + } + + ngx_http_spdy_queue_frame(stream->connection, frame); + + stream->waiting++; + + r->main->blocked++; + + return ngx_http_spdy_filter_send(r->connection, stream); +} + + +static ngx_http_spdy_out_frame_t * +ngx_http_spdy_filter_get_data_frame(ngx_http_spdy_stream_t *stream, + size_t len, ngx_uint_t fin, ngx_chain_t *first, ngx_chain_t *last) +{ + u_char *p; + ngx_buf_t *buf; + ngx_uint_t flags; + ngx_chain_t *cl; + ngx_http_spdy_out_frame_t *frame; + + + frame = stream->free_frames; + + if (frame) { + stream->free_frames = frame->free; + + } else { + frame = ngx_palloc(stream->request->pool, + sizeof(ngx_http_spdy_out_frame_t)); + if (frame == NULL) { + return NULL; + } + } + + ngx_log_debug4(NGX_LOG_DEBUG_HTTP, stream->request->connection->log, 0, + "spdy:%ui create DATA frame %p: len:%uz fin:%ui", + stream->id, frame, len, fin); + + if (len || fin) { + + flags = fin ? NGX_SPDY_FLAG_FIN : 0; + + cl = ngx_chain_get_free_buf(stream->request->pool, + &stream->free_data_headers); + if (cl == NULL) { + return NULL; + } + + buf = cl->buf; + + if (buf->start) { + p = buf->start; + buf->pos = p; + + p += sizeof(uint32_t); + + (void) ngx_spdy_frame_write_flags_and_len(p, flags, len); + + } else { + p = ngx_palloc(stream->request->pool, NGX_SPDY_FRAME_HEADER_SIZE); + if (p == NULL) { + return NULL; + } + + buf->pos = p; + buf->start = p; + + p = ngx_spdy_frame_write_sid(p, stream->id); + p = ngx_spdy_frame_write_flags_and_len(p, flags, len); + + buf->last = p; + buf->end = p; + + buf->tag = (ngx_buf_tag_t) &ngx_http_spdy_filter_module; + buf->memory = 1; + } + + cl->next = first; + first = cl; + } + + frame->first = first; + frame->last = last; + frame->handler = ngx_http_spdy_data_frame_handler; + frame->free = NULL; + frame->stream = stream; + frame->size = NGX_SPDY_FRAME_HEADER_SIZE + len; + frame->priority = stream->priority; + frame->blocked = 0; + frame->fin = fin; + + return frame; +} + + 
+static ngx_inline ngx_int_t +ngx_http_spdy_filter_send(ngx_connection_t *fc, ngx_http_spdy_stream_t *stream) +{ + if (ngx_http_spdy_send_output_queue(stream->connection) == NGX_ERROR) { + fc->error = 1; + return NGX_ERROR; + } + + if (stream->waiting) { + fc->buffered |= NGX_SPDY_WRITE_BUFFERED; + fc->write->delayed = 1; + return NGX_AGAIN; + } + + fc->buffered &= ~NGX_SPDY_WRITE_BUFFERED; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_syn_frame_handler(ngx_http_spdy_connection_t *sc, + ngx_http_spdy_out_frame_t *frame) +{ + ngx_buf_t *buf; + ngx_http_spdy_stream_t *stream; + + buf = frame->first->buf; + + if (buf->pos != buf->last) { + return NGX_AGAIN; + } + + stream = frame->stream; + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy:%ui SYN_REPLY frame %p was sent", stream->id, frame); + + ngx_free_chain(stream->request->pool, frame->first); + + ngx_http_spdy_handle_frame(stream, frame); + + ngx_http_spdy_handle_stream(sc, stream); + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_data_frame_handler(ngx_http_spdy_connection_t *sc, + ngx_http_spdy_out_frame_t *frame) +{ + ngx_chain_t *cl, *ln; + ngx_http_spdy_stream_t *stream; + + stream = frame->stream; + + cl = frame->first; + + if (cl->buf->tag == (ngx_buf_tag_t) &ngx_http_spdy_filter_module) { + + if (cl->buf->pos != cl->buf->last) { + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy:%ui DATA frame %p was sent partially", + stream->id, frame); + + return NGX_AGAIN; + } + + ln = cl->next; + + cl->next = stream->free_data_headers; + stream->free_data_headers = cl; + + if (cl == frame->last) { + goto done; + } + + cl = ln; + } + + for ( ;; ) { + if (ngx_buf_size(cl->buf) != 0) { + + if (cl != frame->first) { + frame->first = cl; + ngx_http_spdy_handle_stream(sc, stream); + } + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy:%ui DATA frame %p was sent partially", + stream->id, frame); + + return NGX_AGAIN; + } + + ln = cl->next; + + 
ngx_free_chain(stream->request->pool, cl); + + if (cl == frame->last) { + goto done; + } + + cl = ln; + } + +done: + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, sc->connection->log, 0, + "spdy:%ui DATA frame %p was sent", stream->id, frame); + + stream->request->header_size += NGX_SPDY_FRAME_HEADER_SIZE; + + ngx_http_spdy_handle_frame(stream, frame); + + ngx_http_spdy_handle_stream(sc, stream); + + return NGX_OK; +} + + +static ngx_inline void +ngx_http_spdy_handle_frame(ngx_http_spdy_stream_t *stream, + ngx_http_spdy_out_frame_t *frame) +{ + ngx_http_request_t *r; + + r = stream->request; + + r->connection->sent += frame->size; + r->blocked--; + + if (frame->fin) { + stream->out_closed = 1; + } + + frame->free = stream->free_frames; + stream->free_frames = frame; + + stream->waiting--; +} + + +static ngx_inline void +ngx_http_spdy_handle_stream(ngx_http_spdy_connection_t *sc, + ngx_http_spdy_stream_t *stream) +{ + ngx_connection_t *fc; + + fc = stream->request->connection; + + fc->write->delayed = 0; + + if (stream->handled) { + return; + } + + if (sc->blocked == 2) { + stream->handled = 1; + + stream->next = sc->last_stream; + sc->last_stream = stream; + } +} + + +static void +ngx_http_spdy_filter_cleanup(void *data) +{ + ngx_http_spdy_stream_t *stream = data; + + ngx_http_request_t *r; + ngx_http_spdy_out_frame_t *frame, **fn; + + if (stream->waiting == 0) { + return; + } + + r = stream->request; + + fn = &stream->connection->last_out; + + for ( ;; ) { + frame = *fn; + + if (frame == NULL) { + break; + } + + if (frame->stream == stream && !frame->blocked) { + + stream->waiting--; + r->blocked--; + + *fn = frame->next; + continue; + } + + fn = &frame->next; + } +} + + +static ngx_int_t +ngx_http_spdy_filter_init(ngx_conf_t *cf) +{ + ngx_http_next_header_filter = ngx_http_top_header_filter; + ngx_http_top_header_filter = ngx_http_spdy_header_filter; + + ngx_http_next_body_filter = ngx_http_top_body_filter; + ngx_http_top_body_filter = ngx_http_spdy_body_filter; + + 
return NGX_OK; +} Added: trunk/src/http/ngx_http_spdy_module.c =================================================================== --- trunk/src/http/ngx_http_spdy_module.c (rev 0) +++ trunk/src/http/ngx_http_spdy_module.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -0,0 +1,351 @@ + +/* + * Copyright (C) Nginx, Inc. + * Copyright (C) Valentin V. Bartenev + */ + + +#include +#include +#include +#include + + +static ngx_int_t ngx_http_spdy_add_variables(ngx_conf_t *cf); + +static ngx_int_t ngx_http_spdy_variable(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_http_spdy_request_priority_variable(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data); + +static ngx_int_t ngx_http_spdy_module_init(ngx_cycle_t *cycle); + +static void *ngx_http_spdy_create_main_conf(ngx_conf_t *cf); +static char *ngx_http_spdy_init_main_conf(ngx_conf_t *cf, void *conf); + +static void *ngx_http_spdy_create_srv_conf(ngx_conf_t *cf); +static char *ngx_http_spdy_merge_srv_conf(ngx_conf_t *cf, void *parent, + void *child); + +static char *ngx_http_spdy_recv_buffer_size(ngx_conf_t *cf, void *post, + void *data); +static char *ngx_http_spdy_pool_size(ngx_conf_t *cf, void *post, void *data); +static char *ngx_http_spdy_streams_index_mask(ngx_conf_t *cf, void *post, + void *data); + + +static ngx_conf_num_bounds_t ngx_http_spdy_headers_comp_bounds = { + ngx_conf_check_num_bounds, 0, 9 +}; + +static ngx_conf_post_t ngx_http_spdy_recv_buffer_size_post = + { ngx_http_spdy_recv_buffer_size }; +static ngx_conf_post_t ngx_http_spdy_pool_size_post = + { ngx_http_spdy_pool_size }; +static ngx_conf_post_t ngx_http_spdy_streams_index_mask_post = + { ngx_http_spdy_streams_index_mask }; + + +static ngx_command_t ngx_http_spdy_commands[] = { + + { ngx_string("spdy_recv_buffer_size"), + NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1, + ngx_conf_set_size_slot, + NGX_HTTP_MAIN_CONF_OFFSET, + offsetof(ngx_http_spdy_main_conf_t, recv_buffer_size), + 
&ngx_http_spdy_recv_buffer_size_post }, + + { ngx_string("spdy_pool_size"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_size_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_spdy_srv_conf_t, pool_size), + &ngx_http_spdy_pool_size_post }, + + { ngx_string("spdy_max_concurrent_streams"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_num_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_spdy_srv_conf_t, concurrent_streams), + NULL }, + + { ngx_string("spdy_streams_index_size"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_num_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_spdy_srv_conf_t, streams_index_mask), + &ngx_http_spdy_streams_index_mask_post }, + + { ngx_string("spdy_recv_timeout"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_msec_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_spdy_srv_conf_t, recv_timeout), + NULL }, + + { ngx_string("spdy_keepalive_timeout"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_msec_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_spdy_srv_conf_t, keepalive_timeout), + NULL }, + + { ngx_string("spdy_headers_comp"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_num_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_spdy_srv_conf_t, headers_comp), + &ngx_http_spdy_headers_comp_bounds }, + + ngx_null_command +}; + + +static ngx_http_module_t ngx_http_spdy_module_ctx = { + ngx_http_spdy_add_variables, /* preconfiguration */ + NULL, /* postconfiguration */ + + ngx_http_spdy_create_main_conf, /* create main configuration */ + ngx_http_spdy_init_main_conf, /* init main configuration */ + + ngx_http_spdy_create_srv_conf, /* create server configuration */ + ngx_http_spdy_merge_srv_conf, /* merge server configuration */ + + NULL, /* create location configuration */ + NULL /* merge location configuration */ +}; + + +ngx_module_t ngx_http_spdy_module = { + 
NGX_MODULE_V1, + &ngx_http_spdy_module_ctx, /* module context */ + ngx_http_spdy_commands, /* module directives */ + NGX_HTTP_MODULE, /* module type */ + NULL, /* init master */ + ngx_http_spdy_module_init, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + + +static ngx_http_variable_t ngx_http_spdy_vars[] = { + + { ngx_string("spdy"), NULL, + ngx_http_spdy_variable, 0, 0, 0 }, + + { ngx_string("spdy_request_priority"), NULL, + ngx_http_spdy_request_priority_variable, 0, 0, 0 }, + + { ngx_null_string, NULL, NULL, 0, 0, 0 } +}; + + +static ngx_int_t +ngx_http_spdy_add_variables(ngx_conf_t *cf) +{ + ngx_http_variable_t *var, *v; + + for (v = ngx_http_spdy_vars; v->name.len; v++) { + var = ngx_http_add_variable(cf, &v->name, v->flags); + if (var == NULL) { + return NGX_ERROR; + } + + var->get_handler = v->get_handler; + var->data = v->data; + } + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_variable(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data) +{ + if (r->spdy_stream) { + v->len = 1; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = (u_char *) "2"; + + return NGX_OK; + } + + *v = ngx_http_variable_null_value; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_request_priority_variable(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data) +{ + if (r->spdy_stream) { + v->len = 1; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + + v->data = ngx_pnalloc(r->pool, 1); + if (v->data == NULL) { + return NGX_ERROR; + } + + v->data[0] = '0' + (u_char) r->spdy_stream->priority; + + return NGX_OK; + } + + *v = ngx_http_variable_null_value; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_spdy_module_init(ngx_cycle_t *cycle) +{ + ngx_http_spdy_request_headers_init(); + + return NGX_OK; +} + + +static void * +ngx_http_spdy_create_main_conf(ngx_conf_t *cf) 
+{ + ngx_http_spdy_main_conf_t *smcf; + + smcf = ngx_pcalloc(cf->pool, sizeof(ngx_http_spdy_main_conf_t)); + if (smcf == NULL) { + return NULL; + } + + smcf->recv_buffer_size = NGX_CONF_UNSET_SIZE; + + return smcf; +} + + +static char * +ngx_http_spdy_init_main_conf(ngx_conf_t *cf, void *conf) +{ + ngx_http_spdy_main_conf_t *smcf = conf; + + if (smcf->recv_buffer_size == NGX_CONF_UNSET_SIZE) { + smcf->recv_buffer_size = 256 * 1024; + } + + return NGX_CONF_OK; +} + + +static void * +ngx_http_spdy_create_srv_conf(ngx_conf_t *cf) +{ + ngx_http_spdy_srv_conf_t *sscf; + + sscf = ngx_pcalloc(cf->pool, sizeof(ngx_http_spdy_srv_conf_t)); + if (sscf == NULL) { + return NULL; + } + + sscf->pool_size = NGX_CONF_UNSET_SIZE; + + sscf->concurrent_streams = NGX_CONF_UNSET_UINT; + sscf->streams_index_mask = NGX_CONF_UNSET_UINT; + + sscf->recv_timeout = NGX_CONF_UNSET_MSEC; + sscf->keepalive_timeout = NGX_CONF_UNSET_MSEC; + + sscf->headers_comp = NGX_CONF_UNSET; + + return sscf; +} + + +static char * +ngx_http_spdy_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child) +{ + ngx_http_spdy_srv_conf_t *prev = parent; + ngx_http_spdy_srv_conf_t *conf = child; + + ngx_conf_merge_size_value(conf->pool_size, prev->pool_size, 4096); + + ngx_conf_merge_uint_value(conf->concurrent_streams, + prev->concurrent_streams, 100); + + ngx_conf_merge_uint_value(conf->streams_index_mask, + prev->streams_index_mask, 32 - 1); + + ngx_conf_merge_msec_value(conf->recv_timeout, + prev->recv_timeout, 30000); + ngx_conf_merge_msec_value(conf->keepalive_timeout, + prev->keepalive_timeout, 180000); + + ngx_conf_merge_value(conf->headers_comp, prev->headers_comp, 0); + + return NGX_CONF_OK; +} + + +static char * +ngx_http_spdy_recv_buffer_size(ngx_conf_t *cf, void *post, void *data) +{ + size_t *sp = data; + + if (*sp <= 2 * NGX_SPDY_STATE_BUFFER_SIZE) { + return "value is too small"; + } + + return NGX_CONF_OK; +} + + +static char * +ngx_http_spdy_pool_size(ngx_conf_t *cf, void *post, void *data) +{ + 
size_t *sp = data; + + if (*sp < NGX_MIN_POOL_SIZE) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "the pool size must be no less than %uz", + NGX_MIN_POOL_SIZE); + return NGX_CONF_ERROR; + } + + if (*sp % NGX_POOL_ALIGNMENT) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "the pool size must be a multiple of %uz", + NGX_POOL_ALIGNMENT); + return NGX_CONF_ERROR; + } + + return NGX_CONF_OK; +} + + +static char * +ngx_http_spdy_streams_index_mask(ngx_conf_t *cf, void *post, void *data) +{ + ngx_uint_t *np = data; + + ngx_uint_t mask; + + mask = *np - 1; + + if (*np == 0 || (*np & mask)) { + return "must be a power of two"; + } + + *np = mask; + + return NGX_CONF_OK; +} Added: trunk/src/http/ngx_http_spdy_module.h =================================================================== --- trunk/src/http/ngx_http_spdy_module.h (rev 0) +++ trunk/src/http/ngx_http_spdy_module.h 2013-03-20 10:36:57 UTC (rev 5122) @@ -0,0 +1,36 @@ + +/* + * Copyright (C) Nginx, Inc. + * Copyright (C) Valentin V. Bartenev + */ + + +#ifndef _NGX_HTTP_SPDY_MODULE_H_INCLUDED_ +#define _NGX_HTTP_SPDY_MODULE_H_INCLUDED_ + + +#include +#include +#include + + +typedef struct { + size_t recv_buffer_size; + u_char *recv_buffer; +} ngx_http_spdy_main_conf_t; + + +typedef struct { + size_t pool_size; + ngx_uint_t concurrent_streams; + ngx_uint_t streams_index_mask; + ngx_msec_t recv_timeout; + ngx_msec_t keepalive_timeout; + ngx_int_t headers_comp; +} ngx_http_spdy_srv_conf_t; + + +extern ngx_module_t ngx_http_spdy_module; + + +#endif /* _NGX_HTTP_SPDY_MODULE_H_INCLUDED_ */ Modified: trunk/src/http/ngx_http_upstream.c =================================================================== --- trunk/src/http/ngx_http_upstream.c 2013-03-20 10:18:26 UTC (rev 5121) +++ trunk/src/http/ngx_http_upstream.c 2013-03-20 10:36:57 UTC (rev 5122) @@ -440,6 +440,13 @@ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, "http init upstream, client timer: %d", c->read->timer_set); +#if (NGX_HTTP_SPDY) + if (r->spdy_stream) { + 
ngx_http_upstream_init_request(r); + return; + } +#endif + if (c->read->timer_set) { ngx_del_timer(c->read); } @@ -1018,6 +1025,12 @@ return; } +#if (NGX_HTTP_SPDY) + if (r->spdy_stream) { + return; + } +#endif + #if (NGX_HAVE_KQUEUE) if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { From ru at nginx.com Wed Mar 20 18:07:26 2013 From: ru at nginx.com (ru at nginx.com) Date: Wed, 20 Mar 2013 18:07:26 +0000 Subject: [nginx] svn commit: r5123 - trunk/src/core Message-ID: <20130320180727.2693E3F9F0F@mail.nginx.com> Author: ru Date: 2013-03-20 18:07:25 +0000 (Wed, 20 Mar 2013) New Revision: 5123 URL: http://trac.nginx.org/nginx/changeset/5123/nginx Log: Core: fixed resource leak if binary upgrade fails due to no memory. Found by Coverity (CID 992320). Modified: trunk/src/core/nginx.c Modified: trunk/src/core/nginx.c =================================================================== --- trunk/src/core/nginx.c 2013-03-20 10:36:57 UTC (rev 5122) +++ trunk/src/core/nginx.c 2013-03-20 18:07:25 UTC (rev 5123) @@ -595,6 +595,7 @@ + cycle->listening.nelts * (NGX_INT32_LEN + 1) + 2, cycle->log); if (var == NULL) { + ngx_free(env); return NGX_INVALID_PID; } From piotr at cloudflare.com Thu Mar 21 04:04:24 2013 From: piotr at cloudflare.com (Piotr Sikora) Date: Wed, 20 Mar 2013 21:04:24 -0700 Subject: [PATCH] Core: use NGX_FILE_ERROR when appropriate Message-ID: Hey guys, attached patch changes "if" tests to use "== NGX_FILE_ERROR" instead of "== -1" or "!= NGX_OK", because NGX_FILE_ERROR is defined as -1 on UNIX, but as 0 on Win32. This isn't much of an issue in patched code (only "ngx_fd_info()" test is actually reachable on Win32 and in worst case it might, but probably doesn't, result in bogus error log entry), so you can treat this as style fixes. 
Best regards, Piotr Sikora diff -r 3450eee1ee8d src/core/nginx.c --- a/src/core/nginx.c Wed Mar 20 18:07:25 2013 +0000 +++ b/src/core/nginx.c Wed Mar 20 20:56:23 2013 -0700 @@ -637,7 +637,7 @@ ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module); - if (ngx_rename_file(ccf->pid.data, ccf->oldpid.data) != NGX_OK) { + if (ngx_rename_file(ccf->pid.data, ccf->oldpid.data) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_rename_file_n " %s to %s failed " "before executing new binary process \"%s\"", @@ -652,7 +652,9 @@ pid = ngx_execute(cycle, &ctx); if (pid == NGX_INVALID_PID) { - if (ngx_rename_file(ccf->oldpid.data, ccf->pid.data) != NGX_OK) { + if (ngx_rename_file(ccf->oldpid.data, ccf->pid.data) + == NGX_FILE_ERROR) + { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_rename_file_n " %s back to %s failed after " "an attempt to execute new binary process \"%s\"", diff -r 3450eee1ee8d src/core/ngx_conf_file.c --- a/src/core/ngx_conf_file.c Wed Mar 20 18:07:25 2013 +0000 +++ b/src/core/ngx_conf_file.c Wed Mar 20 20:56:23 2013 -0700 @@ -133,7 +133,7 @@ cf->conf_file = &conf_file; - if (ngx_fd_info(fd, &cf->conf_file->file.info) == -1) { + if (ngx_fd_info(fd, &cf->conf_file->file.info) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cf->log, ngx_errno, ngx_fd_info_n " \"%s\" failed", filename->data); } diff -r 3450eee1ee8d src/core/ngx_connection.c --- a/src/core/ngx_connection.c Wed Mar 20 18:07:25 2013 +0000 +++ b/src/core/ngx_connection.c Wed Mar 20 20:56:23 2013 -0700 @@ -412,7 +412,7 @@ } if (ngx_test_config) { - if (ngx_delete_file(name) == -1) { + if (ngx_delete_file(name) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno, ngx_delete_file_n " %s failed", name); } @@ -739,7 +739,7 @@ { u_char *name = ls[i].addr_text.data + sizeof("unix:") - 1; - if (ngx_delete_file(name) == -1) { + if (ngx_delete_file(name) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno, 
ngx_delete_file_n " %s failed", name); } diff -r 3450eee1ee8d src/core/ngx_cycle.c --- a/src/core/ngx_cycle.c Wed Mar 20 18:07:25 2013 +0000 +++ b/src/core/ngx_cycle.c Wed Mar 20 20:56:23 2013 -0700 @@ -679,7 +679,7 @@ ngx_log_error(NGX_LOG_WARN, cycle->log, 0, "deleting socket %s", name); - if (ngx_delete_file(name) == -1) { + if (ngx_delete_file(name) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno, ngx_delete_file_n " %s failed", name); } diff -r 3450eee1ee8d src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c Wed Mar 20 18:07:25 2013 +0000 +++ b/src/os/unix/ngx_process_cycle.c Wed Mar 20 20:56:23 2013 -0700 @@ -647,7 +647,7 @@ if (ngx_rename_file((char *) ccf->oldpid.data, (char *) ccf->pid.data) - != NGX_OK) + == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_rename_file_n " %s back to %s failed " From aviram at adallom.com Thu Mar 21 14:18:58 2013 From: aviram at adallom.com (Aviram Cohen) Date: Thu, 21 Mar 2013 16:18:58 +0200 Subject: Subrequest questions Message-ID: Hi, I have a few questions about subrequests - - When a subrequest is completed - where is the response body stored? Is it entirely in the memory? How does the main request retrieve it? - Can I invoke the main request from within the context of a body filter of a subrequest? Can I re-invoke the subrequest from the main request afterwards? The problem I'm facing is the following - I perform a subrequest, and I'm afraid that the response body would be too big. If it is too big, I'd like to send the entire response of the subrequest back to the client (meaning, not buffering it in the memory for the main request). Is that possible? Thanks, Aviram -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From ywu at about.com Thu Mar 21 15:35:32 2013 From: ywu at about.com (YongFeng Wu) Date: Thu, 21 Mar 2013 11:35:32 -0400 Subject: a weird crash in ngx_http_core_run_phases() Message-ID: <000f01ce2649$b984a870$2c8df950$@com> Hi, Sometimes we got weird segmentation fault crashes in ngx_http_core_rewrite_phase(): (gdb) bt #0 0x0000000801c17a00 in ?? () #1 0x0000000814c7ca10 in ?? () #2 0x0000000809453600 in ?? () #3 0x000000000044715e in ngx_http_core_rewrite_phase (r=0x80fbf6150, ph=0xfffffffffffffffb) at src/http/ngx_http_core_module.c:931 #4 0x0000000000440c65 in ngx_http_core_run_phases (r=0x801c17a00) at src/http/ngx_http_core_module.c:877 #5 0x0000000000440e1c in ngx_http_handler (r=0x801c17a00) at src/http/ngx_http_core_module.c:860 #6 0x0000000000459620 in ngx_http_process_request (r=0x801c17a00) at src/http/ngx_http_request.c:1687 #7 0x000000000045a97e in ngx_http_process_request_headers (rev=Variable "rev" is not available. ) at src/http/ngx_http_request.c:1135 #8 0x000000000045b2e7 in ngx_http_process_request_line (rev=0x801d4d380) at src/http/ngx_http_request.c:933 #9 0x0000000000454a69 in ngx_http_init_request (rev=0x801d4d380) at src/http/ngx_http_request.c:519 #10 0x000000000042c249 in ngx_event_process_posted (cycle=0x801c6e050, posted=0x818488) at src/event/ngx_event_posted.c:41 #11 0x000000000042b84d in ngx_process_events_and_timers (cycle=0x801c6e050) at src/event/ngx_event.c:1376 #12 0x0000000000436429 in ngx_worker_process_cycle (cycle=0x801c6e050, data=Variable "data" is not available. 
) at src/os/unix/ngx_process_cycle.c:963 #13 0x0000000000434bb7 in ngx_spawn_process (cycle=0x801c6e050, proc=0x436330 , data=0x18, name=0x4d9987 "worker process", respawn=-3) at src/os/unix/ngx_process.c:209 #14 0x00000000004358e8 in ngx_start_worker_processes (cycle=0x801c6e050, n=32, type=-3) at src/os/unix/ngx_process_cycle.c:409 #15 0x00000000004371c3 in ngx_master_process_cycle (cycle=0x801c6e050) at src/os/unix/ngx_process_cycle.c:150 #16 0x00000000004078ff in main (argc=350832656, argv=Variable "argv" is not available. ) at src/core/nginx.c:504 (gdb) It's weird because the address of "r" was mysteriously changed from <0x801c17a00> in ngx_http_core_run_phases to <0x80fbf6150> in ngx_http_core_rewrite_phase. This new address is, of course, an invalid request struct, hence the invalid r->phase_handler and &ph[r->phase_handler]. The value of r->phase_handler in ngx_http_core_run_phases() is 0, so ngx_http_core_rewrite_phase is the first phase hander to be called. Try to check the address of "*r" get the following: (gdb) f 3 #3 0x000000000044715e in ngx_http_core_rewrite_phase (r=0x80fbf6150, ph=0xfffffffffffffffb) at src/http/ngx_http_core_module.c:931 956 in src/http/ngx_http_core_module.c (gdb) p &r Address requested for identifier "r" which is in register $rbx (gdb) p $rbx $9 = 34623938896 Anybody have any ideas? We use FreeBSD 9.1, nginx_1.2.6 Thanks a lot, Yong -------------- next part -------------- An HTML attachment was scrubbed... URL: From ru at nginx.com Thu Mar 21 15:52:53 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 21 Mar 2013 15:52:53 +0000 Subject: [nginx] svn commit: r5124 - trunk/src/core Message-ID: <20130321155253.514163F9E74@mail.nginx.com> Author: ru Date: 2013-03-21 15:52:52 +0000 (Thu, 21 Mar 2013) New Revision: 5124 URL: http://trac.nginx.org/nginx/changeset/5124/nginx Log: Simplified ngx_array_create(). 
Modified: trunk/src/core/ngx_array.c Modified: trunk/src/core/ngx_array.c =================================================================== --- trunk/src/core/ngx_array.c 2013-03-20 18:07:25 UTC (rev 5123) +++ trunk/src/core/ngx_array.c 2013-03-21 15:52:52 UTC (rev 5124) @@ -19,16 +19,10 @@ return NULL; } - a->elts = ngx_palloc(p, n * size); - if (a->elts == NULL) { + if (ngx_array_init(a, p, n, size) != NGX_OK) { return NULL; } - a->nelts = 0; - a->size = size; - a->nalloc = n; - a->pool = p; - return a; } From ru at nginx.com Thu Mar 21 16:03:24 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 21 Mar 2013 16:03:24 +0000 Subject: [nginx] svn commit: r5125 - in trunk/src: core http http/modules Message-ID: <20130321160326.3AA393F9C4B@mail.nginx.com> Author: ru Date: 2013-03-21 16:03:24 +0000 (Thu, 21 Mar 2013) New Revision: 5125 URL: http://trac.nginx.org/nginx/changeset/5125/nginx Log: Use NGX_DEFAULT_POOL_SIZE macro where appropriate. Modified: trunk/src/core/ngx_cycle.h trunk/src/http/modules/ngx_http_geo_module.c trunk/src/http/modules/ngx_http_map_module.c trunk/src/http/ngx_http.c Modified: trunk/src/core/ngx_cycle.h =================================================================== --- trunk/src/core/ngx_cycle.h 2013-03-21 15:52:52 UTC (rev 5124) +++ trunk/src/core/ngx_cycle.h 2013-03-21 16:03:24 UTC (rev 5125) @@ -14,7 +14,7 @@ #ifndef NGX_CYCLE_POOL_SIZE -#define NGX_CYCLE_POOL_SIZE 16384 +#define NGX_CYCLE_POOL_SIZE NGX_DEFAULT_POOL_SIZE #endif Modified: trunk/src/http/modules/ngx_http_geo_module.c =================================================================== --- trunk/src/http/modules/ngx_http_geo_module.c 2013-03-21 15:52:52 UTC (rev 5124) +++ trunk/src/http/modules/ngx_http_geo_module.c 2013-03-21 16:03:24 UTC (rev 5125) @@ -430,14 +430,14 @@ return NGX_CONF_ERROR; } - pool = ngx_create_pool(16384, cf->log); + pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, cf->log); if (pool == NULL) { return NGX_CONF_ERROR; } ngx_memzero(&ctx, 
sizeof(ngx_http_geo_conf_ctx_t)); - ctx.temp_pool = ngx_create_pool(16384, cf->log); + ctx.temp_pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, cf->log); if (ctx.temp_pool == NULL) { return NGX_CONF_ERROR; } Modified: trunk/src/http/modules/ngx_http_map_module.c =================================================================== --- trunk/src/http/modules/ngx_http_map_module.c 2013-03-21 15:52:52 UTC (rev 5124) +++ trunk/src/http/modules/ngx_http_map_module.c 2013-03-21 16:03:24 UTC (rev 5125) @@ -227,7 +227,7 @@ var->get_handler = ngx_http_map_variable; var->data = (uintptr_t) map; - pool = ngx_create_pool(16384, cf->log); + pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, cf->log); if (pool == NULL) { return NGX_CONF_ERROR; } Modified: trunk/src/http/ngx_http.c =================================================================== --- trunk/src/http/ngx_http.c 2013-03-21 15:52:52 UTC (rev 5124) +++ trunk/src/http/ngx_http.c 2013-03-21 16:03:24 UTC (rev 5125) @@ -1479,7 +1479,7 @@ ngx_memzero(&ha, sizeof(ngx_hash_keys_arrays_t)); - ha.temp_pool = ngx_create_pool(16384, cf->log); + ha.temp_pool = ngx_create_pool(NGX_DEFAULT_POOL_SIZE, cf->log); if (ha.temp_pool == NULL) { return NGX_ERROR; } From ru at nginx.com Thu Mar 21 16:04:10 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 21 Mar 2013 16:04:10 +0000 Subject: [nginx] svn commit: r5126 - trunk/src/core Message-ID: <20130321160410.9367A3FAAA9@mail.nginx.com> Author: ru Date: 2013-03-21 16:04:09 +0000 (Thu, 21 Mar 2013) New Revision: 5126 URL: http://trac.nginx.org/nginx/changeset/5126/nginx Log: Moved ngx_array_t definition from ngx_core.h to ngx_array.h. 
Modified: trunk/src/core/ngx_array.h trunk/src/core/ngx_core.h Modified: trunk/src/core/ngx_array.h =================================================================== --- trunk/src/core/ngx_array.h 2013-03-21 16:03:24 UTC (rev 5125) +++ trunk/src/core/ngx_array.h 2013-03-21 16:04:09 UTC (rev 5126) @@ -13,13 +13,13 @@ #include -struct ngx_array_s { +typedef struct { void *elts; ngx_uint_t nelts; size_t size; ngx_uint_t nalloc; ngx_pool_t *pool; -}; +} ngx_array_t; ngx_array_t *ngx_array_create(ngx_pool_t *p, ngx_uint_t n, size_t size); Modified: trunk/src/core/ngx_core.h =================================================================== --- trunk/src/core/ngx_core.h 2013-03-21 16:03:24 UTC (rev 5125) +++ trunk/src/core/ngx_core.h 2013-03-21 16:04:09 UTC (rev 5126) @@ -15,7 +15,6 @@ typedef struct ngx_pool_s ngx_pool_t; typedef struct ngx_chain_s ngx_chain_t; typedef struct ngx_log_s ngx_log_t; -typedef struct ngx_array_s ngx_array_t; typedef struct ngx_open_file_s ngx_open_file_t; typedef struct ngx_command_s ngx_command_t; typedef struct ngx_file_s ngx_file_t; From ru at nginx.com Thu Mar 21 16:05:35 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 21 Mar 2013 16:05:35 +0000 Subject: [nginx] svn commit: r5127 - trunk/src/http/modules Message-ID: <20130321160535.E5DFF3F9C4B@mail.nginx.com> Author: ru Date: 2013-03-21 16:05:35 +0000 (Thu, 21 Mar 2013) New Revision: 5127 URL: http://trac.nginx.org/nginx/changeset/5127/nginx Log: Fixed language in a comment preceding ngx_http_index_handler(). 
Modified: trunk/src/http/modules/ngx_http_index_module.c Modified: trunk/src/http/modules/ngx_http_index_module.c =================================================================== --- trunk/src/http/modules/ngx_http_index_module.c 2013-03-21 16:04:09 UTC (rev 5126) +++ trunk/src/http/modules/ngx_http_index_module.c 2013-03-21 16:05:35 UTC (rev 5127) @@ -85,12 +85,12 @@ /* * Try to open/test the first index file before the test of directory - * existence because valid requests should be much more than invalid ones. - * If the file open()/stat() would fail, then the directory stat() should - * be more quickly because some data is already cached in the kernel. + * existence because valid requests should prevail over invalid ones. + * If open()/stat() of a file will fail then stat() of a directory + * should be faster because kernel may have already cached some data. * Besides, Win32 may return ERROR_PATH_NOT_FOUND (NGX_ENOTDIR) at once. - * Unix has ENOTDIR error, however, it's less helpful than Win32's one: - * it only indicates that path contains an usual file in place of directory. + * Unix has ENOTDIR error; however, it's less helpful than Win32's one: + * it only indicates that path points to a regular file, not a directory. */ static ngx_int_t From ru at nginx.com Thu Mar 21 16:06:14 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 21 Mar 2013 16:06:14 +0000 Subject: [nginx] svn commit: r5128 - trunk/src/http Message-ID: <20130321160615.0D2743FAA52@mail.nginx.com> Author: ru Date: 2013-03-21 16:06:13 +0000 (Thu, 21 Mar 2013) New Revision: 5128 URL: http://trac.nginx.org/nginx/changeset/5128/nginx Log: Removed unused ngx_http_clear_variable() macro. 
Modified: trunk/src/http/ngx_http_variables.h Modified: trunk/src/http/ngx_http_variables.h =================================================================== --- trunk/src/http/ngx_http_variables.h 2013-03-21 16:05:35 UTC (rev 5127) +++ trunk/src/http/ngx_http_variables.h 2013-03-21 16:06:13 UTC (rev 5128) @@ -57,9 +57,6 @@ ngx_str_t *var, ngx_list_part_t *part, size_t prefix); -#define ngx_http_clear_variable(r, index) r->variables0[index].text.data = NULL; - - #if (NGX_PCRE) typedef struct { From ru at nginx.com Thu Mar 21 16:06:53 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 21 Mar 2013 16:06:53 +0000 Subject: [nginx] svn commit: r5129 - trunk/src/http/modules Message-ID: <20130321160653.CC7463FAA5F@mail.nginx.com> Author: ru Date: 2013-03-21 16:06:53 +0000 (Thu, 21 Mar 2013) New Revision: 5129 URL: http://trac.nginx.org/nginx/changeset/5129/nginx Log: Split clients: check length when parsing configuration. Modified: trunk/src/http/modules/ngx_http_split_clients_module.c Modified: trunk/src/http/modules/ngx_http_split_clients_module.c =================================================================== --- trunk/src/http/modules/ngx_http_split_clients_module.c 2013-03-21 16:06:13 UTC (rev 5128) +++ trunk/src/http/modules/ngx_http_split_clients_module.c 2013-03-21 16:06:53 UTC (rev 5129) @@ -218,7 +218,7 @@ part->percent = 0; } else { - if (value[0].data[value[0].len - 1] != '%') { + if (value[0].len == 0 || value[0].data[value[0].len - 1] != '%') { goto invalid; } From bernd.roessl at gmail.com Fri Mar 22 14:12:23 2013 From: bernd.roessl at gmail.com (=?ISO-8859-1?Q?Bernd_R=F6ssl?=) Date: Fri, 22 Mar 2013 15:12:23 +0100 Subject: upstream proxy support In-Reply-To: References: Message-ID: hi, currently there is no way to rewrite to a url containing a schema and proy_passing the request to a upstream since such a rewrite will end up in a redirect. 
This is a problem if nginx is running behind a proxy like squid which uses abolute url's to pass the request to. however, absolute uri's are valid in requests and commonly used in proxies, for details see http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 attached is a patch with a fix for ngx_http_rewrite_module.c which introduces a new flag for the rewrite directive called "noredirect" to allow redirect targets containing a schema in config this will look like this: location = '/foo' { rewrite .* "http://www.example.com/bar" noredirect; proxy_pass http://squid-host; } regards, bernd -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: upstream_proxy_support.patch Type: application/octet-stream Size: 1157 bytes Desc: not available URL: From mdounin at mdounin.ru Fri Mar 22 14:40:12 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 22 Mar 2013 18:40:12 +0400 Subject: upstream proxy support In-Reply-To: References: Message-ID: <20130322144012.GJ62550@mdounin.ru> Hello! On Fri, Mar 22, 2013 at 03:12:23PM +0100, Bernd R?ssl wrote: > hi, > > currently there is no way to rewrite to a url containing a schema and > proy_passing the request to a upstream since such a rewrite will end > up in a redirect. This is a problem if nginx is running behind a proxy > like squid which uses abolute url's to pass the request to. 
however, > absolute uri's are valid in requests and commonly used in proxies, for > details see http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 > > attached is a patch with a fix for ngx_http_rewrite_module.c which > introduces a new flag for the rewrite directive called "noredirect" to > allow redirect targets containing a schema > > in config this will look like this: > > location = '/foo' { > rewrite .* "http://www.example.com/bar" noredirect; > proxy_pass http://squid-host; > } Rewrite is expected to set r->uri, which in turn expected to contain path component of a full URL. It is bad idea to introduce flags which allow to violate this. Instead, if you need an ability to better control request line used, you may want to address this in proxy module. -- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Fri Mar 22 15:14:10 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 22 Mar 2013 15:14:10 +0000 Subject: [nginx] svn commit: r5130 - trunk/misc Message-ID: <20130322151410.EAB533F9DF8@mail.nginx.com> Author: mdounin Date: 2013-03-22 15:14:07 +0000 (Fri, 22 Mar 2013) New Revision: 5130 URL: http://trac.nginx.org/nginx/changeset/5130/nginx Log: Misc: switch to single export operation in "zip" target. While exporting parts of the tree might be better in some cases, it is awfully slow overseas, and also requires unlocking ssh key multiple times. Exporting the whole repo and removing directories not needed for zip is faster here. It is also a required step before we can switch to Mercurial. Modified: trunk/misc/GNUmakefile Modified: trunk/misc/GNUmakefile =================================================================== --- trunk/misc/GNUmakefile 2013-03-21 16:06:53 UTC (rev 5129) +++ trunk/misc/GNUmakefile 2013-03-22 15:14:07 UTC (rev 5130) @@ -124,22 +124,24 @@ rm -rf $(TEMP) rm -f $(NGINX).zip - mkdir -p $(TEMP)/$(NGINX)/docs + svn export -rHEAD . 
$(TEMP)/$(NGINX) + + mkdir -p $(TEMP)/$(NGINX)/docs.new mkdir -p $(TEMP)/$(NGINX)/logs mkdir -p $(TEMP)/$(NGINX)/temp - svn export -rHEAD conf $(TEMP)/$(NGINX)/conf/ sed -i '' -e "s/$$/`printf '\r'`/" $(TEMP)/$(NGINX)/conf/* - svn export -rHEAD contrib $(TEMP)/$(NGINX)/contrib/ - svn export -rHEAD docs/html $(TEMP)/$(NGINX)/html/ + mv $(TEMP)/$(NGINX)/docs/text/LICENSE $(TEMP)/$(NGINX)/docs.new + mv $(TEMP)/$(NGINX)/docs/text/README $(TEMP)/$(NGINX)/docs.new + mv $(TEMP)/$(NGINX)/docs/html $(TEMP)/$(NGINX) - $(MAKE) -f docs/GNUmakefile changes + rm -r $(TEMP)/$(NGINX)/docs + mv $(TEMP)/$(NGINX)/docs.new $(TEMP)/$(NGINX)/docs cp -p $(OBJS)/nginx.exe $(TEMP)/$(NGINX) - cp -p docs/text/LICENSE $(TEMP)/$(NGINX)/docs/ - cp -p docs/text/README $(TEMP)/$(NGINX)/docs/ + $(MAKE) -f docs/GNUmakefile changes mv $(TEMP)/$(NGINX)/CHANGES* $(TEMP)/$(NGINX)/docs/ cp -p $(OBJS)/lib/$(OPENSSL)/LICENSE \ @@ -155,6 +157,10 @@ touch -r $(OBJS)/lib/$(ZLIB)/README \ $(TEMP)/$(NGINX)/docs/zlib.LICENSE + rm -r $(TEMP)/$(NGINX)/auto + rm -r $(TEMP)/$(NGINX)/misc + rm -r $(TEMP)/$(NGINX)/src + cd $(TEMP) && zip -r ../$(NGINX).zip $(NGINX) From bernd.roessl at gmail.com Fri Mar 22 15:14:18 2013 From: bernd.roessl at gmail.com (=?ISO-8859-1?Q?Bernd_R=F6ssl?=) Date: Fri, 22 Mar 2013 16:14:18 +0100 Subject: upstream proxy support In-Reply-To: <20130322144012.GJ62550@mdounin.ru> References: <20130322144012.GJ62550@mdounin.ru> Message-ID: hi, do you mean something like a new proxy directive like for example "proxy_server"? upstream squid { server squid-host:3128; } upstream example { server www.example.com:80; } location = '/foo' { proxy_server squid; proxy_pass http://example/bar "; } if "proxy_server" has been set this will be used as the actual upstream and the upstream from the proxy_pass will be used for request building. bernd 2013/3/22 Maxim Dounin > Hello! 
> > On Fri, Mar 22, 2013 at 03:12:23PM +0100, Bernd R?ssl wrote: > > > hi, > > > > currently there is no way to rewrite to a url containing a schema and > > proy_passing the request to a upstream since such a rewrite will end > > up in a redirect. This is a problem if nginx is running behind a proxy > > like squid which uses abolute url's to pass the request to. however, > > absolute uri's are valid in requests and commonly used in proxies, for > > details see > http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 > > > > attached is a patch with a fix for ngx_http_rewrite_module.c which > > introduces a new flag for the rewrite directive called "noredirect" to > > allow redirect targets containing a schema > > > > in config this will look like this: > > > > location = '/foo' { > > rewrite .* "http://www.example.com/bar" noredirect; > > proxy_pass http://squid-host; > > } > > Rewrite is expected to set r->uri, which in turn expected to > contain path component of a full URL. It is bad idea to introduce > flags which allow to violate this. > > Instead, if you need an ability to better control request line > used, you may want to address this in proxy module. > > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Mar 22 15:14:46 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 22 Mar 2013 15:14:46 +0000 Subject: [nginx] svn commit: r5131 - trunk/misc Message-ID: <20130322151446.D28F53F9E08@mail.nginx.com> Author: mdounin Date: 2013-03-22 15:14:43 +0000 (Fri, 22 Mar 2013) New Revision: 5131 URL: http://trac.nginx.org/nginx/changeset/5131/nginx Log: Misc: removed unused "snapshot" target. 
Modified: trunk/misc/GNUmakefile Modified: trunk/misc/GNUmakefile =================================================================== --- trunk/misc/GNUmakefile 2013-03-22 15:14:07 UTC (rev 5130) +++ trunk/misc/GNUmakefile 2013-03-22 15:14:43 UTC (rev 5131) @@ -54,34 +54,6 @@ $(MAKE) -f misc/GNUmakefile release -snapshot: - rm -rf $(TEMP) - - mkdir -p $(TEMP) - svn export . $(TEMP)/$(NGINX) - - mv $(TEMP)/$(NGINX)/auto/configure $(TEMP)/$(NGINX) - - # delete incomplete sources - rm $(TEMP)/$(NGINX)/src/event/ngx_event_acceptex.c - rm $(TEMP)/$(NGINX)/src/event/ngx_event_connectex.c - rm $(TEMP)/$(NGINX)/src/event/modules/ngx_iocp_module.* - rm -r $(TEMP)/$(NGINX)/src/os/win32 - - rm -r $(TEMP)/$(NGINX)/src/mysql - - mv $(TEMP)/$(NGINX)/docs/text/LICENSE $(TEMP)/$(NGINX) - mv $(TEMP)/$(NGINX)/docs/text/README $(TEMP)/$(NGINX) - mv $(TEMP)/$(NGINX)/docs/html $(TEMP)/$(NGINX) - mv $(TEMP)/$(NGINX)/docs/man $(TEMP)/$(NGINX) - - $(MAKE) -f docs/GNUmakefile changes - - rm -r $(TEMP)/$(NGINX)/docs - rm -r $(TEMP)/$(NGINX)/misc - - tar -c -z -f $(NGINX).tar.gz --directory $(TEMP) $(NGINX) - win32: ./auto/configure \ --with-cc=cl \ From mdounin at mdounin.ru Fri Mar 22 15:34:32 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 22 Mar 2013 19:34:32 +0400 Subject: upstream proxy support In-Reply-To: References: <20130322144012.GJ62550@mdounin.ru> Message-ID: <20130322153432.GL62550@mdounin.ru> Hello! On Fri, Mar 22, 2013 at 04:14:18PM +0100, Bernd R?ssl wrote: > hi, > > do you mean something like a new proxy directive like for example > "proxy_server"? > > upstream squid { > server squid-host:3128; > } > upstream example { > server www.example.com:80; > } > > location = '/foo' { > proxy_server squid; > proxy_pass http://example/bar "; > } > > if "proxy_server" has been set this will be used as the actual upstream and > the upstream from the proxy_pass will be used for request building. 
Or something like proxy_uri, which would allow to redefine URI used in a request line. This way it will be more consitent with fastcgi_pass and friends. (It is not clear how this should be inherited though.) Note though that overall we are not really intrested in teaching nginx to talk to forward proxies, and probably simpliest solution for a specific problem for now would be to abuse rewrite problems and force it to work like with your original patch. Something like this should work (note leading space): location / { rewrite ^ " http://foo/bar" break; proxy_pass http://squid; } It should do roughly the same as your original patch, but doesn't pretend this is correct and supported. (Please don't post html here, use plain text. Thank you.) -- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Fri Mar 22 15:47:19 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 22 Mar 2013 15:47:19 +0000 Subject: [nginx] svn commit: r5132 - trunk/misc Message-ID: <20130322154720.8A9323F9C44@mail.nginx.com> Author: mdounin Date: 2013-03-22 15:47:18 +0000 (Fri, 22 Mar 2013) New Revision: 5132 URL: http://trac.nginx.org/nginx/changeset/5132/nginx Log: Misc: support for Mercurial repositories. Modified: trunk/misc/GNUmakefile Modified: trunk/misc/GNUmakefile =================================================================== --- trunk/misc/GNUmakefile 2013-03-22 15:14:43 UTC (rev 5131) +++ trunk/misc/GNUmakefile 2013-03-22 15:47:18 UTC (rev 5132) @@ -11,11 +11,8 @@ PCRE = pcre-8.32 -release: - rm -rf $(TEMP) +release: export - svn export -rHEAD . $(TEMP)/$(NGINX) - mv $(TEMP)/$(NGINX)/auto/configure $(TEMP)/$(NGINX) # delete incomplete sources @@ -39,7 +36,32 @@ tar -c -z -f $(NGINX).tar.gz --directory $(TEMP) $(NGINX) +export: + rm -rf $(TEMP) + + if [ -d .svn ]; then \ + svn export -rHEAD . 
$(TEMP)/$(NGINX); \ + else \ + hg archive -X '.hg*' $(TEMP)/$(NGINX); \ + fi + + RELEASE: + if [ -d .svn ]; then \ + $(MAKE) -f misc/GNUmakefile RELEASE.svn; \ + else \ + $(MAKE) -f misc/GNUmakefile RELEASE.hg; \ + fi + + $(MAKE) -f misc/GNUmakefile release + + +RELEASE.hg: + hg ci -m nginx-$(VER)-RELEASE + hg tag -m "release-$(VER) tag" release-$(VER) + + +RELEASE.svn: test -d $(TEMP) || mkdir -p $(TEMP) echo "nginx-$(VER)-RELEASE" > $(TEMP)/message @@ -51,9 +73,7 @@ svn up - $(MAKE) -f misc/GNUmakefile release - win32: ./auto/configure \ --with-cc=cl \ @@ -92,12 +112,10 @@ --with-mail_ssl_module \ --with-ipv6 -zip: - rm -rf $(TEMP) + +zip: export rm -f $(NGINX).zip - svn export -rHEAD . $(TEMP)/$(NGINX) - mkdir -p $(TEMP)/$(NGINX)/docs.new mkdir -p $(TEMP)/$(NGINX)/logs mkdir -p $(TEMP)/$(NGINX)/temp From bernd.roessl at gmail.com Fri Mar 22 17:23:35 2013 From: bernd.roessl at gmail.com (=?ISO-8859-1?Q?Bernd_R=F6ssl?=) Date: Fri, 22 Mar 2013 18:23:35 +0100 Subject: upstream proxy support In-Reply-To: <20130322153432.GL62550@mdounin.ru> References: <20130322144012.GJ62550@mdounin.ru> <20130322153432.GL62550@mdounin.ru> Message-ID: this was my first idea. unfortunately the url get encoded somewhere and preduces requests like GET %20http://uat2.welt.de/bigpfeeds/?type=contenttypes HTTP/1.1 of course, the forward proxy does not like this url and raises an error. however, i added the schema in upper letters which does the trick: location / { rewrite ^ HTTP://foo/bar break; proxy_pass http://squid; } so, if i understand you correctly you prefere a more general solution allowing to alter the $uri controlled by a boolen flag defined in the proxy module and in my case read by the rewrite module. But this way the rewrite module will depend on the proxy module, right? or do you not intend to add a fix for the "forward proxy problem" at all? bernd 2013/3/22 Maxim Dounin > Hello! 
> > On Fri, Mar 22, 2013 at 04:14:18PM +0100, Bernd R?ssl wrote: > > > hi, > > > > do you mean something like a new proxy directive like for example > > "proxy_server"? > > > > upstream squid { > > server squid-host:3128; > > } > > upstream example { > > server www.example.com:80; > > } > > > > location = '/foo' { > > proxy_server squid; > > proxy_pass http://example/bar "; > > } > > > > if "proxy_server" has been set this will be used as the actual upstream > and > > the upstream from the proxy_pass will be used for request building. > > Or something like proxy_uri, which would allow to redefine URI > used in a request line. This way it will be more consitent with > fastcgi_pass and friends. (It is not clear how this should be > inherited though.) > > Note though that overall we are not really intrested in teaching > nginx to talk to forward proxies, and probably simpliest > solution for a specific problem for now would be to abuse rewrite > problems and force it to work like with your original patch. > Something like this should work (note leading space): > > location / { > rewrite ^ " http://foo/bar" break; > proxy_pass http://squid; > } > > It should do roughly the same as your original patch, but doesn't > pretend this is correct and supported. > > (Please don't post html here, use plain text. Thank you.) > > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From francis at daoine.org Sat Mar 23 13:01:35 2013 From: francis at daoine.org (Francis Daly) Date: Sat, 23 Mar 2013 13:01:35 +0000 Subject: upstream proxy support In-Reply-To: References: Message-ID: <20130323130135.GP18002@craic.sysops.org> On Sat, Mar 23, 2013 at 12:00:02PM +0000, nginx-devel-request at nginx.org wrote: Hi there, if I can barge in to the thread... > so, if i understand you correctly you prefere a more general solution > allowing to alter the $uri controlled by a boolen flag defined in the proxy > module and in my case read by the rewrite module. But this way the rewrite > module will depend on the proxy module, right? As I understand things (as an onlooker): Right now, proxy_pass http://example/bar; means, approximately, (1) speak http not https; (2) use the upstream{} called example, or dns-resolve example, to find the IP address to connect to; (3) issue a http request that is something like GET /bar/something HTTP/1.0 Host: example (Read the Fine Source for the full details, and how other directives can change things there, if it matters.) The suggestion seems to be that you could create a new proxy_ directive to adjust the "GET" line that is sent, to add "http://your-preferred-hostname" in the right place. You may also want it to cause the Host: header to be modified, as well as whatever else your upstream and the rfc deem necessary. No rewrite/proxy linkage. No change to rewrite at all. > or do you not intend to add a fix for the "forward proxy problem" at all? Right now, proxy_pass speaks http to a http server, or http-over-ssl to a https server. It doesn't speak proxied-http to a http proxy server. Adding that new protocol is not currently an nginx priority. You seem to have a config file workaround for the current version; if that's good enough for you, then great! If not, then you'll probably want a module or patch to add support for "proxied http" as a protocol that nginx speaks as a client. 
If it does just enough to get your use case working, it is unlikely to be added to stock nginx. If it cleanly and correctly implements the protocol without impacting non-users, then it is more likely to be useful for more people. I haven't followed the mailing lists for very long, but I don't recall any other request for nginx to be a client of a http proxy. That suggests that it may not be a widely-requested feature. But if it is useful for you, and you can get it written, then there's no reason why it can't be kept working in future versions. All the best, f -- Francis Daly francis at daoine.org From dakota at brokenpipe.ru Sun Mar 24 17:12:53 2013 From: dakota at brokenpipe.ru (Marat Dakota) Date: Sun, 24 Mar 2013 21:12:53 +0400 Subject: Subrequests from body filters Message-ID: Hi, I'm writing a handler module. It makes subrequests and uses body filter for subrequests. Here is simplified handler code: static ngx_int_t ngx_http_my_handler(ngx_http_request_t *r) { // ... Sending headers. ngx_chain_t *out; ngx_buf_t *b; out = ngx_alloc_chain_link(r->pool); b = ngx_pcalloc(r->pool, sizeof(ngx_buf_t)); if (out == NULL || b == NULL) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } b->pos = "aaa"; b->last = "aaa" + 3; b->memory = 1; b->last_buf = 1; out->buf = b; out->next = NULL; // ngx_http_my_subrequest() creates ngx_str_t from second argument and calls ngx_http_subrequest(). if (ngx_http_my_subrequest(r, "/uri1") != NGX_OK) { return NGX_ERROR; } if (ngx_http_my_subrequest(r, "/uri2") != NGX_OK) { return NGX_ERROR; } return ngx_http_output_filter(r, out); } Here is subrequest body filter: static ngx_int_t ngx_http_my_body_filter(ngx_http_request_t *r, ngx_chain_t *in) { // ... Do something with subrequest's body in "in" argument. ngx_chain_t *out; ngx_buf_t *b; ngx_chain_t *cl; out = ngx_alloc_chain_link(r->pool); b = ngx_pcalloc(r->pool, sizeof(ngx_buf_t)); if (out == NULL || b == NULL) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } // HERE BE DRAGONS. 
b->pos = "bbb"; b->last = "bbb" + 3; b->memory = 1; b->last_buf = 1; out->buf = b; out->next = NULL; // Discard subrequest's body. for (cl = in; cl; cl = cl->next) { cl->buf->pos = cl->buf->last; cl->buf->file_pos = cl->buf->file_last; } // We just put "bbb" to main response for each call of this body filter. return ngx_http_output_filter(r->main, out); } And everything works just fine. For example, if ngx_http_my_body_filter is called twice for each (/uri1 and /uri2) subrequest started from ngx_http_my_handler, the result will be "aaabbbbbbbbbbbb". But if I replace "HERE BE DRAGONS" with a call to ngx_http_my_subrequest(r->main, "/uri3"), let's suppose this subrequest is being started only in second call of ngx_http_my_body_filter for /uri1 subrequest started from ngx_http_my_handler. The result will be "aaabbb", not "aaabbbbbbbbbbbbbbbbbb" as I expect (and logs say all three subrequests are processed ok). I guess my problem is about some magic with buffers and chains, but I can't figure out what to fix and where. I would be thankful for any help. -- Marat From dakota at brokenpipe.ru Sun Mar 24 21:15:25 2013 From: dakota at brokenpipe.ru (Marat Dakota) Date: Mon, 25 Mar 2013 01:15:25 +0400 Subject: Subrequests from body filters In-Reply-To: References: Message-ID: It looks like I've figured out. When I set b->last_buf to 0 for all the buffers and to 1 for just actual last buffer, everything is ok. But then another question happens. Even if all the buffers have b->flush set to 1, I see my response only in one single chunk, after all the subrequests finished. How to actually flush the response buffer by buffer? -- Marat On Sun, Mar 24, 2013 at 9:12 PM, Marat Dakota wrote: > Hi, > > I'm writing a handler module. It makes subrequests and uses body > filter for subrequests. > > Here is simplified handler code: > > static ngx_int_t > ngx_http_my_handler(ngx_http_request_t *r) { > > // ... Sending headers. 
> > ngx_chain_t *out; > ngx_buf_t *b; > > out = ngx_alloc_chain_link(r->pool); > b = ngx_pcalloc(r->pool, sizeof(ngx_buf_t)); > > if (out == NULL || b == NULL) { > return NGX_HTTP_INTERNAL_SERVER_ERROR; > } > > b->pos = "aaa"; > b->last = "aaa" + 3; > b->memory = 1; > b->last_buf = 1; > > out->buf = b; > out->next = NULL; > > // ngx_http_my_subrequest() creates ngx_str_t from second argument > and calls ngx_http_subrequest(). > if (ngx_http_my_subrequest(r, "/uri1") != NGX_OK) { > return NGX_ERROR; > } > > if (ngx_http_my_subrequest(r, "/uri2") != NGX_OK) { > return NGX_ERROR; > } > > return ngx_http_output_filter(r, out); > } > > > Here is subrequest body filter: > > static ngx_int_t > ngx_http_my_body_filter(ngx_http_request_t *r, ngx_chain_t *in) > { > // ... Do something with subrequest's body in "in" argument. > > ngx_chain_t *out; > ngx_buf_t *b; > ngx_chain_t *cl; > > out = ngx_alloc_chain_link(r->pool); > b = ngx_pcalloc(r->pool, sizeof(ngx_buf_t)); > > if (out == NULL || b == NULL) { > return NGX_HTTP_INTERNAL_SERVER_ERROR; > } > > // HERE BE DRAGONS. > > b->pos = "bbb"; > b->last = "bbb" + 3; > b->memory = 1; > b->last_buf = 1; > > out->buf = b; > out->next = NULL; > > // Discard subrequest's body. > for (cl = in; cl; cl = cl->next) { > cl->buf->pos = cl->buf->last; > cl->buf->file_pos = cl->buf->file_last; > } > > // We just put "bbb" to main response for each call of this body filter. > return ngx_http_output_filter(r->main, out); > } > > And everything works just fine. For example, if > ngx_http_my_body_filter is called twice for each (/uri1 and /uri2) > subrequest started from ngx_http_my_handler, the result will be > "aaabbbbbbbbbbbb". > > But if I replace "HERE BE DRAGONS" with a call to > ngx_http_my_subrequest(r->main, "/uri3"), let's suppose this > subrequest is being started only in second call of > ngx_http_my_body_filter for /uri1 subrequest started from > ngx_http_my_handler. 
The result will be "aaabbb", not > "aaabbbbbbbbbbbbbbbbbb" as I expect (and logs say all three > subrequests are processed ok). > > I guess my problem is about some magic with buffers and chains, but I > can't figure out what to fix and where. > > I would be thankful for any help. > > -- > Marat From dakota at brokenpipe.ru Sun Mar 24 21:39:15 2013 From: dakota at brokenpipe.ru (Marat Dakota) Date: Mon, 25 Mar 2013 01:39:15 +0400 Subject: Subrequests from body filters In-Reply-To: References: Message-ID: It looks like I've figured out again, I see buffers flushed when they contain \n character. Sorry to bother. -- Marat On Mon, Mar 25, 2013 at 1:15 AM, Marat Dakota wrote: > It looks like I've figured out. When I set b->last_buf to 0 for all > the buffers and to 1 for just actual last buffer, everything is ok. > > But then another question happens. Even if all the buffers have > b->flush set to 1, I see my response only in one single chunk, after > all the subrequests finished. How to actually flush the response > buffer by buffer? > > -- > Marat > > On Sun, Mar 24, 2013 at 9:12 PM, Marat Dakota wrote: >> Hi, >> >> I'm writing a handler module. It makes subrequests and uses body >> filter for subrequests. >> >> Here is simplified handler code: >> >> static ngx_int_t >> ngx_http_my_handler(ngx_http_request_t *r) { >> >> // ... Sending headers. >> >> ngx_chain_t *out; >> ngx_buf_t *b; >> >> out = ngx_alloc_chain_link(r->pool); >> b = ngx_pcalloc(r->pool, sizeof(ngx_buf_t)); >> >> if (out == NULL || b == NULL) { >> return NGX_HTTP_INTERNAL_SERVER_ERROR; >> } >> >> b->pos = "aaa"; >> b->last = "aaa" + 3; >> b->memory = 1; >> b->last_buf = 1; >> >> out->buf = b; >> out->next = NULL; >> >> // ngx_http_my_subrequest() creates ngx_str_t from second argument >> and calls ngx_http_subrequest(). 
>> if (ngx_http_my_subrequest(r, "/uri1") != NGX_OK) { >> return NGX_ERROR; >> } >> >> if (ngx_http_my_subrequest(r, "/uri2") != NGX_OK) { >> return NGX_ERROR; >> } >> >> return ngx_http_output_filter(r, out); >> } >> >> >> Here is subrequest body filter: >> >> static ngx_int_t >> ngx_http_my_body_filter(ngx_http_request_t *r, ngx_chain_t *in) >> { >> // ... Do something with subrequest's body in "in" argument. >> >> ngx_chain_t *out; >> ngx_buf_t *b; >> ngx_chain_t *cl; >> >> out = ngx_alloc_chain_link(r->pool); >> b = ngx_pcalloc(r->pool, sizeof(ngx_buf_t)); >> >> if (out == NULL || b == NULL) { >> return NGX_HTTP_INTERNAL_SERVER_ERROR; >> } >> >> // HERE BE DRAGONS. >> >> b->pos = "bbb"; >> b->last = "bbb" + 3; >> b->memory = 1; >> b->last_buf = 1; >> >> out->buf = b; >> out->next = NULL; >> >> // Discard subrequest's body. >> for (cl = in; cl; cl = cl->next) { >> cl->buf->pos = cl->buf->last; >> cl->buf->file_pos = cl->buf->file_last; >> } >> >> // We just put "bbb" to main response for each call of this body filter. >> return ngx_http_output_filter(r->main, out); >> } >> >> And everything works just fine. For example, if >> ngx_http_my_body_filter is called twice for each (/uri1 and /uri2) >> subrequest started from ngx_http_my_handler, the result will be >> "aaabbbbbbbbbbbb". >> >> But if I replace "HERE BE DRAGONS" with a call to >> ngx_http_my_subrequest(r->main, "/uri3"), let's suppose this >> subrequest is being started only in second call of >> ngx_http_my_body_filter for /uri1 subrequest started from >> ngx_http_my_handler. The result will be "aaabbb", not >> "aaabbbbbbbbbbbbbbbbbb" as I expect (and logs say all three >> subrequests are processed ok). >> >> I guess my problem is about some magic with buffers and chains, but I >> can't figure out what to fix and where. >> >> I would be thankful for any help. 
>> >> -- >> Marat From safe3q at gmail.com Mon Mar 25 07:34:56 2013 From: safe3q at gmail.com (David Shee) Date: Mon, 25 Mar 2013 15:34:56 +0800 Subject: ngx unescape uri bug In-Reply-To: References: Message-ID: I'm Zuwen Shi from China,I find a unescape uri bug in your program. The source code location is src\core\ngx_string.c->ngx_unescape_uri If I put a string "%%s%elect",it convert the string to "%slect",and %% to %,%el to l,actually the right convert is "%%s%elect". So,I patch the ngx_unescape_uri like below,the red part is which I modified. Nginx is a really nice project. void ngx_unescape_uri(u_char **dst, u_char **src, size_t size, ngx_uint_t type) { u_char *d, *s, ch, c, decoded; enum { sw_usual = 0, sw_quoted, sw_quoted_second } state; d = *dst; s = *src; state = 0; decoded = 0; while (size--) { ch = *s++; switch (state) { case sw_usual: if (ch == '?' && (type & (NGX_UNESCAPE_URI|NGX_UNESCAPE_REDIRECT))) { *d++ = ch; goto done; } if (ch == '%'&&size>1) { ch=*s; c = (u_char) (ch | 0x20); if ((ch >= '0' && ch <= '9')||(c >= 'a' && c <= 'f')) { ch=*(s+1); c = (u_char) (ch | 0x20); if ((ch >= '0' && ch <= '9')||(c >= 'a' && c <= 'f')) { state = sw_quoted; break; } } *d++ = '%'; break; } if (ch == '+') { *d++ = ' '; break; } *d++ = ch; break; case sw_quoted: if (ch >= '0' && ch <= '9') { decoded = (u_char) (ch - '0'); state = sw_quoted_second; break; } c = (u_char) (ch | 0x20); if (c >= 'a' && c <= 'f') { decoded = (u_char) (c - 'a' + 10); state = sw_quoted_second; break; } /* the invalid quoted character */ state = sw_usual; *d++ = ch; break; case sw_quoted_second: state = sw_usual; if (ch >= '0' && ch <= '9') { ch = (u_char) ((decoded << 4) + ch - '0'); if (type & NGX_UNESCAPE_REDIRECT) { if (ch > '%' && ch < 0x7f) { *d++ = ch; break; } *d++ = '%'; *d++ = *(s - 2); *d++ = *(s - 1); break; } *d++ = ch; break; } c = (u_char) (ch | 0x20); if (c >= 'a' && c <= 'f') { ch = (u_char) ((decoded << 4) + c - 'a' + 10); if (type & NGX_UNESCAPE_URI) { if (ch == '?') { 
*d++ = ch; goto done; } *d++ = ch; break; } if (type & NGX_UNESCAPE_REDIRECT) { if (ch == '?') { *d++ = ch; goto done; } if (ch > '%' && ch < 0x7f) { *d++ = ch; break; } *d++ = '%'; *d++ = *(s - 2); *d++ = *(s - 1); break; } *d++ = ch; break; } /* the invalid quoted character */ break; } } done: *dst = d; *src = s; } -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Mon Mar 25 11:56:13 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 25 Mar 2013 15:56:13 +0400 Subject: Subrequests from body filters In-Reply-To: References: Message-ID: <20130325115613.GP62550@mdounin.ru> Hello! On Sun, Mar 24, 2013 at 09:12:53PM +0400, Marat Dakota wrote: [...] > // We just put "bbb" to main response for each call of this body filter. > return ngx_http_output_filter(r->main, out); > } It is very wrong to call ngx_http_output_filter() of a main request from you body filter. Result is undefined. Instead, you should call next body filter with a modified chain. -- Maxim Dounin http://nginx.org/en/donation.html From ru at nginx.com Mon Mar 25 13:14:49 2013 From: ru at nginx.com (ru at nginx.com) Date: Mon, 25 Mar 2013 13:14:49 +0000 Subject: [nginx] svn commit: r5133 - trunk/src/http Message-ID: <20130325131449.E77A53F9FE9@mail.nginx.com> Author: ru Date: 2013-03-25 13:14:49 +0000 (Mon, 25 Mar 2013) New Revision: 5133 URL: http://trac.nginx.org/nginx/changeset/5133/nginx Log: Upstream: only call peer.free() if peer.get() selected a peer. 
Modified: trunk/src/http/ngx_http_upstream.c Modified: trunk/src/http/ngx_http_upstream.c =================================================================== --- trunk/src/http/ngx_http_upstream.c 2013-03-22 15:47:18 UTC (rev 5132) +++ trunk/src/http/ngx_http_upstream.c 2013-03-25 13:14:49 UTC (rev 5133) @@ -3147,14 +3147,16 @@ ngx_http_busy_unlock(u->conf->busy_lock, &u->busy_lock); #endif - if (ft_type == NGX_HTTP_UPSTREAM_FT_HTTP_404) { - state = NGX_PEER_NEXT; - } else { - state = NGX_PEER_FAILED; - } + if (u->peer.sockaddr) { - if (ft_type != NGX_HTTP_UPSTREAM_FT_NOLIVE) { + if (ft_type == NGX_HTTP_UPSTREAM_FT_HTTP_404) { + state = NGX_PEER_NEXT; + } else { + state = NGX_PEER_FAILED; + } + u->peer.free(&u->peer, u->peer.data, state); + u->peer.sockaddr = NULL; } if (ft_type == NGX_HTTP_UPSTREAM_FT_TIMEOUT) { @@ -3314,8 +3316,9 @@ u->finalize_request(r, rc); - if (u->peer.free) { + if (u->peer.free && u->peer.sockaddr) { u->peer.free(&u->peer, u->peer.data, 0); + u->peer.sockaddr = NULL; } if (u->peer.connection) { From ru at nginx.com Mon Mar 25 13:16:56 2013 From: ru at nginx.com (ru at nginx.com) Date: Mon, 25 Mar 2013 13:16:56 +0000 Subject: [nginx] svn commit: r5134 - in trunk/src/http: . modules Message-ID: <20130325131656.773D13FA131@mail.nginx.com> Author: ru Date: 2013-03-25 13:16:55 +0000 (Mon, 25 Mar 2013) New Revision: 5134 URL: http://trac.nginx.org/nginx/changeset/5134/nginx Log: Upstream: removed double-free workarounds in peer.free() methods. 
Modified: trunk/src/http/modules/ngx_http_upstream_keepalive_module.c trunk/src/http/modules/ngx_http_upstream_least_conn_module.c trunk/src/http/ngx_http_upstream_round_robin.c Modified: trunk/src/http/modules/ngx_http_upstream_keepalive_module.c =================================================================== --- trunk/src/http/modules/ngx_http_upstream_keepalive_module.c 2013-03-25 13:14:49 UTC (rev 5133) +++ trunk/src/http/modules/ngx_http_upstream_keepalive_module.c 2013-03-25 13:16:55 UTC (rev 5134) @@ -37,8 +37,6 @@ ngx_event_save_peer_session_pt original_save_session; #endif - ngx_uint_t failed; /* unsigned:1 */ - } ngx_http_upstream_keepalive_peer_data_t; @@ -220,8 +218,6 @@ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, "get keepalive peer"); - kp->failed = 0; - /* ask balancer */ rc = kp->original_get_peer(pc, kp->data); @@ -282,18 +278,12 @@ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, "free keepalive peer"); - /* remember failed state - peer.free() may be called more than once */ - - if (state & NGX_PEER_FAILED) { - kp->failed = 1; - } - /* cache valid connections */ u = kp->upstream; c = pc->connection; - if (kp->failed + if (state & NGX_PEER_FAILED || c == NULL || c->read->eof || c->read->error Modified: trunk/src/http/modules/ngx_http_upstream_least_conn_module.c =================================================================== --- trunk/src/http/modules/ngx_http_upstream_least_conn_module.c 2013-03-25 13:14:49 UTC (rev 5133) +++ trunk/src/http/modules/ngx_http_upstream_least_conn_module.c 2013-03-25 13:16:55 UTC (rev 5134) @@ -353,10 +353,6 @@ return; } - if (state == 0 && pc->tries == 0) { - return; - } - lcp->conns[lcp->rrp.current]--; lcp->free_rr_peer(pc, &lcp->rrp, state); Modified: trunk/src/http/ngx_http_upstream_round_robin.c =================================================================== --- trunk/src/http/ngx_http_upstream_round_robin.c 2013-03-25 13:14:49 UTC (rev 5133) +++ trunk/src/http/ngx_http_upstream_round_robin.c 
2013-03-25 13:16:55 UTC (rev 5134) @@ -584,10 +584,6 @@ ngx_log_debug2(NGX_LOG_DEBUG_HTTP, pc->log, 0, "free rr peer %ui %ui", pc->tries, state); - if (state == 0 && pc->tries == 0) { - return; - } - /* TODO: NGX_PEER_KEEPALIVE */ if (rrp->peers->single) { From ru at nginx.com Mon Mar 25 13:38:59 2013 From: ru at nginx.com (ru at nginx.com) Date: Mon, 25 Mar 2013 13:38:59 +0000 Subject: [nginx] svn commit: r5135 - trunk/src/mail Message-ID: <20130325133859.AC5A53F9DF8@mail.nginx.com> Author: ru Date: 2013-03-25 13:38:59 +0000 (Mon, 25 Mar 2013) New Revision: 5135 URL: http://trac.nginx.org/nginx/changeset/5135/nginx Log: Mail: IPv6 backends (ticket #323). Modified: trunk/src/mail/ngx_mail_auth_http_module.c Modified: trunk/src/mail/ngx_mail_auth_http_module.c =================================================================== --- trunk/src/mail/ngx_mail_auth_http_module.c 2013-03-25 13:16:55 UTC (rev 5134) +++ trunk/src/mail/ngx_mail_auth_http_module.c 2013-03-25 13:38:59 UTC (rev 5135) @@ -454,12 +454,15 @@ ngx_mail_auth_http_process_headers(ngx_mail_session_t *s, ngx_mail_auth_http_ctx_t *ctx) { - u_char *p; - time_t timer; - size_t len, size; - ngx_int_t rc, port, n; - ngx_addr_t *peer; - struct sockaddr_in *sin; + u_char *p; + time_t timer; + size_t len, size; + ngx_int_t rc, port, n; + ngx_addr_t *peer; + struct sockaddr_in *sin; +#if (NGX_HAVE_INET6) + struct sockaddr_in6 *sin6; +#endif ngx_log_debug0(NGX_LOG_DEBUG_MAIL, s->connection->log, 0, "mail auth http process headers"); @@ -772,17 +775,26 @@ return; } - /* AF_INET only */ + rc = ngx_parse_addr(s->connection->pool, peer, + ctx->addr.data, ctx->addr.len); - sin = ngx_pcalloc(s->connection->pool, sizeof(struct sockaddr_in)); - if (sin == NULL) { + switch (rc) { + case NGX_OK: + break; + + case NGX_DECLINED: + ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, + "auth http server %V sent invalid server " + "address:\"%V\"", + ctx->peer.name, &ctx->addr); + /* fall through */ + + default: 
ngx_destroy_pool(ctx->pool); ngx_mail_session_internal_server_error(s); return; } - sin->sin_family = AF_INET; - port = ngx_atoi(ctx->port.data, ctx->port.len); if (port == NGX_ERROR || port < 1 || port > 65535) { ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, @@ -794,22 +806,21 @@ return; } - sin->sin_port = htons((in_port_t) port); + switch (peer->sockaddr->sa_family) { - sin->sin_addr.s_addr = ngx_inet_addr(ctx->addr.data, ctx->addr.len); - if (sin->sin_addr.s_addr == INADDR_NONE) { - ngx_log_error(NGX_LOG_ERR, s->connection->log, 0, - "auth http server %V sent invalid server " - "address:\"%V\"", - ctx->peer.name, &ctx->addr); - ngx_destroy_pool(ctx->pool); - ngx_mail_session_internal_server_error(s); - return; +#if (NGX_HAVE_INET6) + case AF_INET6: + sin6 = (struct sockaddr_in6 *) peer->sockaddr; + sin6->sin6_port = htons((in_port_t) port); + break; +#endif + + default: /* AF_INET */ + sin = (struct sockaddr_in *) peer->sockaddr; + sin->sin_port = htons((in_port_t) port); + break; } - peer->sockaddr = (struct sockaddr *) sin; - peer->socklen = sizeof(struct sockaddr_in); - len = ctx->addr.len + 1 + ctx->port.len; peer->name.len = len; From dakota at brokenpipe.ru Mon Mar 25 13:40:37 2013 From: dakota at brokenpipe.ru (Marat Dakota) Date: Mon, 25 Mar 2013 17:40:37 +0400 Subject: Subrequests from body filters In-Reply-To: <20130325115613.GP62550@mdounin.ru> References: <20130325115613.GP62550@mdounin.ru> Message-ID: Hi, Thanks for this correction. But is it ok to call next body filter in subrequest's body filter to produce output to main request? I mean ngx_http_next_body_filter(r->main, out). -- Marat On Mon, Mar 25, 2013 at 3:56 PM, Maxim Dounin wrote: > Hello! > > On Sun, Mar 24, 2013 at 09:12:53PM +0400, Marat Dakota wrote: > > [...] > >> // We just put "bbb" to main response for each call of this body filter. 
>> return ngx_http_output_filter(r->main, out); >> } > > It is very wrong to call ngx_http_output_filter() of a main > request from you body filter. Result is undefined. Instead, you > should call next body filter with a modified chain. > > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From ru at nginx.com Mon Mar 25 13:41:30 2013 From: ru at nginx.com (ru at nginx.com) Date: Mon, 25 Mar 2013 13:41:30 +0000 Subject: [nginx] svn commit: r5136 - trunk/src/http Message-ID: <20130325134130.A2CAB3F9DF8@mail.nginx.com> Author: ru Date: 2013-03-25 13:41:30 +0000 (Mon, 25 Mar 2013) New Revision: 5136 URL: http://trac.nginx.org/nginx/changeset/5136/nginx Log: Upstream: removed sorting of upstream servers. Sorting of upstream servers by their weights is not required by current balancing algorithms. This will likely change mapping to backends served by ip_hash weighted upstreams. 
Modified: trunk/src/http/ngx_http_upstream_round_robin.c Modified: trunk/src/http/ngx_http_upstream_round_robin.c =================================================================== --- trunk/src/http/ngx_http_upstream_round_robin.c 2013-03-25 13:38:59 UTC (rev 5135) +++ trunk/src/http/ngx_http_upstream_round_robin.c 2013-03-25 13:41:30 UTC (rev 5136) @@ -10,8 +10,6 @@ #include -static ngx_int_t ngx_http_upstream_cmp_servers(const void *one, - const void *two); static ngx_http_upstream_rr_peer_t *ngx_http_upstream_get_peer( ngx_http_upstream_rr_peer_data_t *rrp); @@ -93,10 +91,6 @@ us->peer.data = peers; - ngx_sort(&peers->peer[0], (size_t) n, - sizeof(ngx_http_upstream_rr_peer_t), - ngx_http_upstream_cmp_servers); - /* backup servers */ n = 0; @@ -151,10 +145,6 @@ peers->next = backup; - ngx_sort(&backup->peer[0], (size_t) n, - sizeof(ngx_http_upstream_rr_peer_t), - ngx_http_upstream_cmp_servers); - return NGX_OK; } @@ -216,18 +206,6 @@ } -static ngx_int_t -ngx_http_upstream_cmp_servers(const void *one, const void *two) -{ - ngx_http_upstream_rr_peer_t *first, *second; - - first = (ngx_http_upstream_rr_peer_t *) one; - second = (ngx_http_upstream_rr_peer_t *) two; - - return (first->weight < second->weight); -} - - ngx_int_t ngx_http_upstream_init_round_robin_peer(ngx_http_request_t *r, ngx_http_upstream_srv_conf_t *us) From mdounin at mdounin.ru Mon Mar 25 13:47:12 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 25 Mar 2013 17:47:12 +0400 Subject: Subrequests from body filters In-Reply-To: References: <20130325115613.GP62550@mdounin.ru> Message-ID: <20130325134711.GZ62550@mdounin.ru> Hello! On Mon, Mar 25, 2013 at 05:40:37PM +0400, Marat Dakota wrote: > Hi, > > Thanks for this correction. > > But is it ok to call next body filter in subrequest's body filter to > produce output to main request? > I mean ngx_http_next_body_filter(r->main, out). No. You should call next body filter of the request you are working with. 
It's postpone filter responsibility to manage subrequests output, and if you try to do this yourself instead - result will be undefined. -- Maxim Dounin http://nginx.org/en/donation.html From dakota at brokenpipe.ru Mon Mar 25 13:52:53 2013 From: dakota at brokenpipe.ru (Marat Dakota) Date: Mon, 25 Mar 2013 17:52:53 +0400 Subject: Subrequests from body filters In-Reply-To: <20130325134711.GZ62550@mdounin.ru> References: <20130325115613.GP62550@mdounin.ru> <20130325134711.GZ62550@mdounin.ru> Message-ID: >> But is it ok to call next body filter in subrequest's body filter to >> produce output to main request? >> I mean ngx_http_next_body_filter(r->main, out). > > No. You should call next body filter of the request you are > working with. It's postpone filter responsibility to manage > subrequests output, and if you try to do this yourself instead - > result will be undefined. It seems to work as expected for me. How can I cause problems with this? -- Marat From vbart at nginx.com Mon Mar 25 14:15:56 2013 From: vbart at nginx.com (Valentin V. Bartenev) Date: Mon, 25 Mar 2013 18:15:56 +0400 Subject: [PATCH] Core: use NGX_FILE_ERROR when appropriate In-Reply-To: References: Message-ID: <201303251815.57040.vbart@nginx.com> On Thursday 21 March 2013 08:04:24 Piotr Sikora wrote: > Hey guys, > attached patch changes "if" tests to use "== NGX_FILE_ERROR" instead of > "== -1" or "!= NGX_OK", because NGX_FILE_ERROR is defined as -1 on UNIX, > but as 0 on Win32. The patch looks good. I'll commit it as soon as Maxim acknowledges. Thank you. wbr, Valentin V. Bartenev > > This isn't much of an issue in patched code (only "ngx_fd_info()" test > is actually reachable on Win32 and in worst case it might, but probably > doesn't, result in bogus error log entry), so you can treat this as style > fixes. 
> > Best regards, > Piotr Sikora > > > diff -r 3450eee1ee8d src/core/nginx.c > --- a/src/core/nginx.c Wed Mar 20 18:07:25 2013 +0000 > +++ b/src/core/nginx.c Wed Mar 20 20:56:23 2013 -0700 > @@ -637,7 +637,7 @@ > > ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, > ngx_core_module); > > - if (ngx_rename_file(ccf->pid.data, ccf->oldpid.data) != NGX_OK) { > + if (ngx_rename_file(ccf->pid.data, ccf->oldpid.data) == > NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > ngx_rename_file_n " %s to %s failed " > "before executing new binary process \"%s\"", > @@ -652,7 +652,9 @@ > pid = ngx_execute(cycle, &ctx); > > if (pid == NGX_INVALID_PID) { > - if (ngx_rename_file(ccf->oldpid.data, ccf->pid.data) != NGX_OK) { > + if (ngx_rename_file(ccf->oldpid.data, ccf->pid.data) > + == NGX_FILE_ERROR) > + { > ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > ngx_rename_file_n " %s back to %s failed after " > "an attempt to execute new binary process > \"%s\"", diff -r 3450eee1ee8d src/core/ngx_conf_file.c > --- a/src/core/ngx_conf_file.c Wed Mar 20 18:07:25 2013 +0000 > +++ b/src/core/ngx_conf_file.c Wed Mar 20 20:56:23 2013 -0700 > @@ -133,7 +133,7 @@ > > cf->conf_file = &conf_file; > > - if (ngx_fd_info(fd, &cf->conf_file->file.info) == -1) { > + if (ngx_fd_info(fd, &cf->conf_file->file.info) == NGX_FILE_ERROR) > { ngx_log_error(NGX_LOG_EMERG, cf->log, ngx_errno, > ngx_fd_info_n " \"%s\" failed", filename->data); > } > diff -r 3450eee1ee8d src/core/ngx_connection.c > --- a/src/core/ngx_connection.c Wed Mar 20 18:07:25 2013 +0000 > +++ b/src/core/ngx_connection.c Wed Mar 20 20:56:23 2013 -0700 > @@ -412,7 +412,7 @@ > } > > if (ngx_test_config) { > - if (ngx_delete_file(name) == -1) { > + if (ngx_delete_file(name) == NGX_FILE_ERROR) { > ngx_log_error(NGX_LOG_EMERG, cycle->log, > ngx_errno, ngx_delete_file_n " %s failed", name); } > @@ -739,7 +739,7 @@ > { > u_char *name = ls[i].addr_text.data + sizeof("unix:") - 1; > > - if (ngx_delete_file(name) == 
-1) { > + if (ngx_delete_file(name) == NGX_FILE_ERROR) { > ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno, > ngx_delete_file_n " %s failed", name); > } > diff -r 3450eee1ee8d src/core/ngx_cycle.c > --- a/src/core/ngx_cycle.c Wed Mar 20 18:07:25 2013 +0000 > +++ b/src/core/ngx_cycle.c Wed Mar 20 20:56:23 2013 -0700 > @@ -679,7 +679,7 @@ > ngx_log_error(NGX_LOG_WARN, cycle->log, 0, > "deleting socket %s", name); > > - if (ngx_delete_file(name) == -1) { > + if (ngx_delete_file(name) == NGX_FILE_ERROR) { > ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno, > ngx_delete_file_n " %s failed", name); > } > diff -r 3450eee1ee8d src/os/unix/ngx_process_cycle.c > --- a/src/os/unix/ngx_process_cycle.c Wed Mar 20 18:07:25 2013 +0000 > +++ b/src/os/unix/ngx_process_cycle.c Wed Mar 20 20:56:23 2013 -0700 > @@ -647,7 +647,7 @@ > > if (ngx_rename_file((char *) ccf->oldpid.data, > (char *) ccf->pid.data) > - != NGX_OK) > + == NGX_FILE_ERROR) > { > ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > ngx_rename_file_n " %s back to %s failed > " > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Mon Mar 25 14:21:51 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 25 Mar 2013 18:21:51 +0400 Subject: Subrequests from body filters In-Reply-To: References: <20130325115613.GP62550@mdounin.ru> <20130325134711.GZ62550@mdounin.ru> Message-ID: <20130325142151.GA62550@mdounin.ru> Hello! On Mon, Mar 25, 2013 at 05:52:53PM +0400, Marat Dakota wrote: > >> But is it ok to call next body filter in subrequest's body filter to > >> produce output to main request? > >> I mean ngx_http_next_body_filter(r->main, out). > > > > No. You should call next body filter of the request you are > > working with. 
It's postpone filter responsibility to manage > > subrequests output, and if you try to do this yourself instead - > > result will be undefined. > > It seems to work as expected for me. How can I cause problems with this? Undefined behaviour sometimes appear to work as expected. This doesn't mean it's correct though. Depending on the exact place in a filter chain where you did it and various other factors like timings, results may vary from "nothing bad might happen, as r == r->main anyway" to "response will completely incorrect as wrong filters will be applied to the response body". Most trivial thing to test is probably a subrequest order, which likely will be wrong in your case if first subrequest will take longer to handle than second one. -- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Mon Mar 25 14:41:28 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 25 Mar 2013 18:41:28 +0400 Subject: [PATCH] Core: use NGX_FILE_ERROR when appropriate In-Reply-To: <201303251815.57040.vbart@nginx.com> References: <201303251815.57040.vbart@nginx.com> Message-ID: <20130325144128.GB62550@mdounin.ru> Hello! On Mon, Mar 25, 2013 at 06:15:56PM +0400, Valentin V. Bartenev wrote: > On Thursday 21 March 2013 08:04:24 Piotr Sikora wrote: > > Hey guys, > > attached patch changes "if" tests to use "== NGX_FILE_ERROR" instead of > > "== -1" or "!= NGX_OK", because NGX_FILE_ERROR is defined as -1 on UNIX, > > but as 0 on Win32. > > The patch looks good. I'll commit it as soon as Maxim acknowledges. > Thank you. Looks fine, please commit. 
-- Maxim Dounin http://nginx.org/en/donation.html From dakota at brokenpipe.ru Mon Mar 25 14:46:09 2013 From: dakota at brokenpipe.ru (Marat Dakota) Date: Mon, 25 Mar 2013 18:46:09 +0400 Subject: Subrequests from body filters In-Reply-To: <20130325142151.GA62550@mdounin.ru> References: <20130325115613.GP62550@mdounin.ru> <20130325134711.GZ62550@mdounin.ru> <20130325142151.GA62550@mdounin.ru> Message-ID: >> >> But is it ok to call next body filter in subrequest's body filter to >> >> produce output to main request? >> >> I mean ngx_http_next_body_filter(r->main, out). >> > >> > No. You should call next body filter of the request you are >> > working with. It's postpone filter responsibility to manage >> > subrequests output, and if you try to do this yourself instead - >> > result will be undefined. >> >> It seems to work as expected for me. How can I cause problems with this? > > Undefined behaviour sometimes appear to work as expected. This > doesn't mean it's correct though. > > Depending on the exact place in a filter chain where you did it > and various other factors like timings, results may vary from > "nothing bad might happen, as r == r->main anyway" to "response > will completely incorrect as wrong filters will be applied to the > response body". > > Most trivial thing to test is probably a subrequest order, which > likely will be wrong in your case if first subrequest will take > longer to handle than second one. Subrequests order doesn't matter much for me. I feed my library (the one I write a Nginx module for) with a subrequests results in a whatever order and my library returns next chunk of response only when it is ready. My library has just one function to call. This function returns the next chunk of data (if any) to send as a response and/or a list of subrequests to make. In every call to subrequest body filter I pass subrequest's response to my library and get a new list of subrequests (if any) and a new chunk of final response (if any). 
And so on, until my library says it's done. And if I really do something wrong in terms of Nginx architecture, please, could you give me more details about how to achieve my goals correctly? Thank you for helping. -- Marat From ru at nginx.com Mon Mar 25 14:51:44 2013 From: ru at nginx.com (ru at nginx.com) Date: Mon, 25 Mar 2013 14:51:44 +0000 Subject: [nginx] svn commit: r5137 - trunk/src/http Message-ID: <20130325145145.0E3C43F9EFF@mail.nginx.com> Author: ru Date: 2013-03-25 14:51:44 +0000 (Mon, 25 Mar 2013) New Revision: 5137 URL: http://trac.nginx.org/nginx/changeset/5137/nginx Log: Upstream: removed rudiments of upstream connection caching. This functionality is now provided by ngx_http_upstream_keepalive_module. Modified: trunk/src/http/ngx_http_upstream_round_robin.c trunk/src/http/ngx_http_upstream_round_robin.h Modified: trunk/src/http/ngx_http_upstream_round_robin.c =================================================================== --- trunk/src/http/ngx_http_upstream_round_robin.c 2013-03-25 13:41:30 UTC (rev 5136) +++ trunk/src/http/ngx_http_upstream_round_robin.c 2013-03-25 14:51:44 UTC (rev 5137) @@ -373,7 +373,6 @@ ngx_int_t rc; ngx_uint_t i, n; - ngx_connection_t *c; ngx_http_upstream_rr_peer_t *peer; ngx_http_upstream_rr_peers_t *peers; @@ -382,26 +381,6 @@ /* ngx_lock_mutex(rrp->peers->mutex); */ - if (rrp->peers->last_cached) { - - /* cached connection */ - - c = rrp->peers->cached[rrp->peers->last_cached]; - rrp->peers->last_cached--; - - /* ngx_unlock_mutex(ppr->peers->mutex); */ - -#if (NGX_THREADS) - c->read->lock = c->read->own_lock; - c->write->lock = c->write->own_lock; -#endif - - pc->connection = c; - pc->cached = 1; - - return NGX_OK; - } - pc->cached = 0; pc->connection = NULL; Modified: trunk/src/http/ngx_http_upstream_round_robin.h =================================================================== --- trunk/src/http/ngx_http_upstream_round_robin.h 2013-03-25 13:41:30 UTC (rev 5136) +++ 
trunk/src/http/ngx_http_upstream_round_robin.h 2013-03-25 14:51:44 UTC (rev 5137) @@ -42,10 +42,8 @@ struct ngx_http_upstream_rr_peers_s { ngx_uint_t number; - ngx_uint_t last_cached; /* ngx_mutex_t *mutex; */ - ngx_connection_t **cached; ngx_uint_t total_weight; From mdounin at mdounin.ru Mon Mar 25 14:54:42 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 25 Mar 2013 18:54:42 +0400 Subject: Subrequests from body filters In-Reply-To: References: <20130325115613.GP62550@mdounin.ru> <20130325134711.GZ62550@mdounin.ru> <20130325142151.GA62550@mdounin.ru> Message-ID: <20130325145442.GE62550@mdounin.ru> Hello! On Mon, Mar 25, 2013 at 06:46:09PM +0400, Marat Dakota wrote: > >> >> But is it ok to call next body filter in subrequest's body filter to > >> >> produce output to main request? > >> >> I mean ngx_http_next_body_filter(r->main, out). > >> > > >> > No. You should call next body filter of the request you are > >> > working with. It's postpone filter responsibility to manage > >> > subrequests output, and if you try to do this yourself instead - > >> > result will be undefined. > >> > >> It seems to work as expected for me. How can I cause problems with this? > > > > Undefined behaviour sometimes appear to work as expected. This > > doesn't mean it's correct though. > > > > Depending on the exact place in a filter chain where you did it > > and various other factors like timings, results may vary from > > "nothing bad might happen, as r == r->main anyway" to "response > > will completely incorrect as wrong filters will be applied to the > > response body". > > > > Most trivial thing to test is probably a subrequest order, which > > likely will be wrong in your case if first subrequest will take > > longer to handle than second one. > > Subrequests order doesn't matter much for me. I feed my library (the > one I write a Nginx module for) with a subrequests results in a > whatever order and my library returns next chunk of response only when > it is ready. 
> > My library has just one function to call. This function returns the > next chunk of data (if any) to send as a response and/or a list of > subrequests to make. In every call to subrequest body filter I pass > subrequest's response to my library and get a new list of subrequests > (if any) and a new chunk of final response (if any). And so on, until > my library says it's done. > > And if I really do something wrong in terms of Nginx architecture, > please, could you give me more details about how to achieve my goals > correctly? I don't really see why you don't call ngx_http_next_body_filter(r, out), which is perfectly correct. -- Maxim Dounin http://nginx.org/en/donation.html From dakota at brokenpipe.ru Mon Mar 25 15:41:16 2013 From: dakota at brokenpipe.ru (Marat Dakota) Date: Mon, 25 Mar 2013 19:41:16 +0400 Subject: Subrequests from body filters In-Reply-To: <20130325145442.GE62550@mdounin.ru> References: <20130325115613.GP62550@mdounin.ru> <20130325134711.GZ62550@mdounin.ru> <20130325142151.GA62550@mdounin.ru> <20130325145442.GE62550@mdounin.ru> Message-ID: >> >> >> But is it ok to call next body filter in subrequest's body filter to >> >> >> produce output to main request? >> >> >> I mean ngx_http_next_body_filter(r->main, out). >> >> > >> >> > No. You should call next body filter of the request you are >> >> > working with. It's postpone filter responsibility to manage >> >> > subrequests output, and if you try to do this yourself instead - >> >> > result will be undefined. >> >> >> >> It seems to work as expected for me. How can I cause problems with this? >> > >> > Undefined behaviour sometimes appear to work as expected. This >> > doesn't mean it's correct though. 
>> > >> > Depending on the exact place in a filter chain where you did it >> > and various other factors like timings, results may vary from >> > "nothing bad might happen, as r == r->main anyway" to "response >> > will completely incorrect as wrong filters will be applied to the >> > response body". >> > >> > Most trivial thing to test is probably a subrequest order, which >> > likely will be wrong in your case if first subrequest will take >> > longer to handle than second one. >> >> Subrequests order doesn't matter much for me. I feed my library (the >> one I write a Nginx module for) with a subrequests results in a >> whatever order and my library returns next chunk of response only when >> it is ready. >> >> My library has just one function to call. This function returns the >> next chunk of data (if any) to send as a response and/or a list of >> subrequests to make. In every call to subrequest body filter I pass >> subrequest's response to my library and get a new list of subrequests >> (if any) and a new chunk of final response (if any). And so on, until >> my library says it's done. >> >> And if I really do something wrong in terms of Nginx architecture, >> please, could you give me more details about how to achieve my goals >> correctly? > > I don't really see why you don't call ngx_http_next_body_filter(r, > out), which is perfectly correct. Because I'm getting a mess with the chunk order. Let's suppose I've made two subrequests in a handler. ngx_http_subrequest(...); // first ngx_http_subrequest(...); // second If I call ngx_http_next_body_filter(r, out) in a subrequest body filter, I get the following. Let's suppose we've received a response from the second subrequest before any data from the first subrequest. And my library said I should send "aaa" as a response (and I passed "aaa" to ngx_http_next_body_filter). Then, we've got a response from the first subrequest and my library said I should send "bbb" as a response. 
I would expect "aaabbb" as the final result. But real result would be "bbbaaa" (because ngx_http_subrequest for first subrequest is earlier). How to deal with this? -- Marat From vbart at nginx.com Mon Mar 25 15:49:12 2013 From: vbart at nginx.com (vbart at nginx.com) Date: Mon, 25 Mar 2013 15:49:12 +0000 Subject: [nginx] svn commit: r5138 - in trunk/src: core os/unix Message-ID: <20130325154912.80B663F9C4E@mail.nginx.com> Author: vbart Date: 2013-03-25 15:49:11 +0000 (Mon, 25 Mar 2013) New Revision: 5138 URL: http://trac.nginx.org/nginx/changeset/5138/nginx Log: Use NGX_FILE_ERROR for handling file operations errors. On Win32 platforms 0 is used to indicate errors in file operations, so comparing against -1 is not portable. This was not much of an issue in patched code, since only ngx_fd_info() test is actually reachable on Win32 and in worst case it might result in bogus error log entry. Patch by Piotr Sikora. Modified: trunk/src/core/nginx.c trunk/src/core/ngx_conf_file.c trunk/src/core/ngx_connection.c trunk/src/core/ngx_cycle.c trunk/src/os/unix/ngx_process_cycle.c Modified: trunk/src/core/nginx.c =================================================================== --- trunk/src/core/nginx.c 2013-03-25 14:51:44 UTC (rev 5137) +++ trunk/src/core/nginx.c 2013-03-25 15:49:11 UTC (rev 5138) @@ -637,7 +637,7 @@ ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module); - if (ngx_rename_file(ccf->pid.data, ccf->oldpid.data) != NGX_OK) { + if (ngx_rename_file(ccf->pid.data, ccf->oldpid.data) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_rename_file_n " %s to %s failed " "before executing new binary process \"%s\"", @@ -652,7 +652,9 @@ pid = ngx_execute(cycle, &ctx); if (pid == NGX_INVALID_PID) { - if (ngx_rename_file(ccf->oldpid.data, ccf->pid.data) != NGX_OK) { + if (ngx_rename_file(ccf->oldpid.data, ccf->pid.data) + == NGX_FILE_ERROR) + { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_rename_file_n " %s back to 
%s failed after " "an attempt to execute new binary process \"%s\"", Modified: trunk/src/core/ngx_conf_file.c =================================================================== --- trunk/src/core/ngx_conf_file.c 2013-03-25 14:51:44 UTC (rev 5137) +++ trunk/src/core/ngx_conf_file.c 2013-03-25 15:49:11 UTC (rev 5138) @@ -133,7 +133,7 @@ cf->conf_file = &conf_file; - if (ngx_fd_info(fd, &cf->conf_file->file.info) == -1) { + if (ngx_fd_info(fd, &cf->conf_file->file.info) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cf->log, ngx_errno, ngx_fd_info_n " \"%s\" failed", filename->data); } Modified: trunk/src/core/ngx_connection.c =================================================================== --- trunk/src/core/ngx_connection.c 2013-03-25 14:51:44 UTC (rev 5137) +++ trunk/src/core/ngx_connection.c 2013-03-25 15:49:11 UTC (rev 5138) @@ -412,7 +412,7 @@ } if (ngx_test_config) { - if (ngx_delete_file(name) == -1) { + if (ngx_delete_file(name) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno, ngx_delete_file_n " %s failed", name); } @@ -739,7 +739,7 @@ { u_char *name = ls[i].addr_text.data + sizeof("unix:") - 1; - if (ngx_delete_file(name) == -1) { + if (ngx_delete_file(name) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno, ngx_delete_file_n " %s failed", name); } Modified: trunk/src/core/ngx_cycle.c =================================================================== --- trunk/src/core/ngx_cycle.c 2013-03-25 14:51:44 UTC (rev 5137) +++ trunk/src/core/ngx_cycle.c 2013-03-25 15:49:11 UTC (rev 5138) @@ -679,7 +679,7 @@ ngx_log_error(NGX_LOG_WARN, cycle->log, 0, "deleting socket %s", name); - if (ngx_delete_file(name) == -1) { + if (ngx_delete_file(name) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno, ngx_delete_file_n " %s failed", name); } Modified: trunk/src/os/unix/ngx_process_cycle.c =================================================================== --- 
trunk/src/os/unix/ngx_process_cycle.c 2013-03-25 14:51:44 UTC (rev 5137) +++ trunk/src/os/unix/ngx_process_cycle.c 2013-03-25 15:49:11 UTC (rev 5138) @@ -647,7 +647,7 @@ if (ngx_rename_file((char *) ccf->oldpid.data, (char *) ccf->pid.data) - != NGX_OK) + == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_rename_file_n " %s back to %s failed " From vbart at nginx.com Mon Mar 25 15:52:02 2013 From: vbart at nginx.com (Valentin V. Bartenev) Date: Mon, 25 Mar 2013 19:52:02 +0400 Subject: [PATCH] Core: use NGX_FILE_ERROR when appropriate In-Reply-To: <20130325144128.GB62550@mdounin.ru> References: <201303251815.57040.vbart@nginx.com> <20130325144128.GB62550@mdounin.ru> Message-ID: <201303251952.02364.vbart@nginx.com> On Monday 25 March 2013 18:41:28 Maxim Dounin wrote: > Hello! > > On Mon, Mar 25, 2013 at 06:15:56PM +0400, Valentin V. Bartenev wrote: > > On Thursday 21 March 2013 08:04:24 Piotr Sikora wrote: > > > Hey guys, > > > attached patch changes "if" tests to use "== NGX_FILE_ERROR" instead of > > > "== -1" or "!= NGX_OK", because NGX_FILE_ERROR is defined as -1 on > > > UNIX, but as 0 on Win32. > > > > The patch looks good. I'll commit it as soon as Maxim acknowledges. > > Thank you. > > Looks fine, please commit. Done: http://trac.nginx.org/nginx/changeset/5138/nginx wbr, Valentin V. Bartenev From mdounin at mdounin.ru Mon Mar 25 16:01:00 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 25 Mar 2013 20:01:00 +0400 Subject: Subrequests from body filters In-Reply-To: References: <20130325115613.GP62550@mdounin.ru> <20130325134711.GZ62550@mdounin.ru> <20130325142151.GA62550@mdounin.ru> <20130325145442.GE62550@mdounin.ru> Message-ID: <20130325160100.GG62550@mdounin.ru> Hello! On Mon, Mar 25, 2013 at 07:41:16PM +0400, Marat Dakota wrote: > >> >> >> But is it ok to call next body filter in subrequest's body filter to > >> >> >> produce output to main request? > >> >> >> I mean ngx_http_next_body_filter(r->main, out). 
> >> >> > > >> >> > No. You should call next body filter of the request you are > >> >> > working with. It's postpone filter responsibility to manage > >> >> > subrequests output, and if you try to do this yourself instead - > >> >> > result will be undefined. > >> >> > >> >> It seems to work as expected for me. How can I cause problems with this? > >> > > >> > Undefined behaviour sometimes appear to work as expected. This > >> > doesn't mean it's correct though. > >> > > >> > Depending on the exact place in a filter chain where you did it > >> > and various other factors like timings, results may vary from > >> > "nothing bad might happen, as r == r->main anyway" to "response > >> > will completely incorrect as wrong filters will be applied to the > >> > response body". > >> > > >> > Most trivial thing to test is probably a subrequest order, which > >> > likely will be wrong in your case if first subrequest will take > >> > longer to handle than second one. > >> > >> Subrequests order doesn't matter much for me. I feed my library (the > >> one I write a Nginx module for) with a subrequests results in a > >> whatever order and my library returns next chunk of response only when > >> it is ready. > >> > >> My library has just one function to call. This function returns the > >> next chunk of data (if any) to send as a response and/or a list of > >> subrequests to make. In every call to subrequest body filter I pass > >> subrequest's response to my library and get a new list of subrequests > >> (if any) and a new chunk of final response (if any). And so on, until > >> my library says it's done. > >> > >> And if I really do something wrong in terms of Nginx architecture, > >> please, could you give me more details about how to achieve my goals > >> correctly? > > > > I don't really see why you don't call ngx_http_next_body_filter(r, > > out), which is perfectly correct. > > Because I'm getting a mess with the chunk order. 
> > Let's suppose I've made two subrequests in a handler. > > ngx_http_subrequest(...); // first > ngx_http_subrequest(...); // second > > If I call ngx_http_next_body_filter(r, out) in a subrequest body > filter, I get the following. > Let's suppose we've received a response from the second subrequest > before any data from the first subrequest. And my library said I > should send "aaa" as a response (and I passed "aaa" to > ngx_http_next_body_filter). > Then, we've got a response from the first subrequest and my library > said I should send "bbb" as a response. I would expect "aaabbb" as the > final result. But real result would be "bbbaaa" (because > ngx_http_subrequest for first subrequest is earlier). > > How to deal with this? So the part of code in your body filter which tried to call ngx_http_output_filter(r->main, out) is actually not a part of a body filter, but a original request response generation, right? For such a case I would recommend looking at NGX_HTTP_SUBREQUEST_IN_MEMORY and NGX_HTTP_SUBREQUEST_WAITED functionality, and using post subrequest handler to trigger parent request content generation. -- Maxim Dounin http://nginx.org/en/donation.html From dakota at brokenpipe.ru Mon Mar 25 16:23:16 2013 From: dakota at brokenpipe.ru (Marat Dakota) Date: Mon, 25 Mar 2013 20:23:16 +0400 Subject: Subrequests from body filters In-Reply-To: <20130325160100.GG62550@mdounin.ru> References: <20130325115613.GP62550@mdounin.ru> <20130325134711.GZ62550@mdounin.ru> <20130325142151.GA62550@mdounin.ru> <20130325145442.GE62550@mdounin.ru> <20130325160100.GG62550@mdounin.ru> Message-ID: Yes, it is a part of the original request response generation. I'll have a look, thanks. Is there a place to read about NGX_HTTP_SUBREQUEST_IN_MEMORY and NGX_HTTP_SUBREQUEST_WAITED except for source code? And is my current approach wrong even in spite of the conclusion that it is the original request response generation? 
And is it ok to call ngx_http_output_filter(r->main, out) in a post subrequest handler? Thanks much! -- Marat On Mon, Mar 25, 2013 at 8:01 PM, Maxim Dounin wrote: > Hello! > > On Mon, Mar 25, 2013 at 07:41:16PM +0400, Marat Dakota wrote: > >> >> >> >> But is it ok to call next body filter in subrequest's body filter to >> >> >> >> produce output to main request? >> >> >> >> I mean ngx_http_next_body_filter(r->main, out). >> >> >> > >> >> >> > No. You should call next body filter of the request you are >> >> >> > working with. It's postpone filter responsibility to manage >> >> >> > subrequests output, and if you try to do this yourself instead - >> >> >> > result will be undefined. >> >> >> >> >> >> It seems to work as expected for me. How can I cause problems with this? >> >> > >> >> > Undefined behaviour sometimes appear to work as expected. This >> >> > doesn't mean it's correct though. >> >> > >> >> > Depending on the exact place in a filter chain where you did it >> >> > and various other factors like timings, results may vary from >> >> > "nothing bad might happen, as r == r->main anyway" to "response >> >> > will completely incorrect as wrong filters will be applied to the >> >> > response body". >> >> > >> >> > Most trivial thing to test is probably a subrequest order, which >> >> > likely will be wrong in your case if first subrequest will take >> >> > longer to handle than second one. >> >> >> >> Subrequests order doesn't matter much for me. I feed my library (the >> >> one I write a Nginx module for) with a subrequests results in a >> >> whatever order and my library returns next chunk of response only when >> >> it is ready. >> >> >> >> My library has just one function to call. This function returns the >> >> next chunk of data (if any) to send as a response and/or a list of >> >> subrequests to make. 
In every call to subrequest body filter I pass >> >> subrequest's response to my library and get a new list of subrequests >> >> (if any) and a new chunk of final response (if any). And so on, until >> >> my library says it's done. >> >> >> >> And if I really do something wrong in terms of Nginx architecture, >> >> please, could you give me more details about how to achieve my goals >> >> correctly? >> > >> > I don't really see why you don't call ngx_http_next_body_filter(r, >> > out), which is perfectly correct. >> >> Because I'm getting a mess with the chunk order. >> >> Let's suppose I've made two subrequests in a handler. >> >> ngx_http_subrequest(...); // first >> ngx_http_subrequest(...); // second >> >> If I call ngx_http_next_body_filter(r, out) in a subrequest body >> filter, I get the following. >> Let's suppose we've received a response from the second subrequest >> before any data from the first subrequest. And my library said I >> should send "aaa" as a response (and I passed "aaa" to >> ngx_http_next_body_filter). >> Then, we've got a response from the first subrequest and my library >> said I should send "bbb" as a response. I would expect "aaabbb" as the >> final result. But real result would be "bbbaaa" (because >> ngx_http_subrequest for first subrequest is earlier). >> >> How to deal with this? > > So the part of code in your body filter which tried to call > ngx_http_output_filter(r->main, out) is actually not a part of a > body filter, but a original request response generation, right? > > For such a case I would recommend looking at > NGX_HTTP_SUBREQUEST_IN_MEMORY and NGX_HTTP_SUBREQUEST_WAITED > functionality, and using post subrequest handler to trigger > parent request content generation. 
> > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Mon Mar 25 18:10:53 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 25 Mar 2013 22:10:53 +0400 Subject: Subrequests from body filters In-Reply-To: References: <20130325115613.GP62550@mdounin.ru> <20130325134711.GZ62550@mdounin.ru> <20130325142151.GA62550@mdounin.ru> <20130325145442.GE62550@mdounin.ru> <20130325160100.GG62550@mdounin.ru> Message-ID: <20130325181052.GH62550@mdounin.ru> Hello! On Mon, Mar 25, 2013 at 08:23:16PM +0400, Marat Dakota wrote: > Yes, it is a part of the original request response generation. > > I'll have a look, thanks. Is there a place to read about > NGX_HTTP_SUBREQUEST_IN_MEMORY and NGX_HTTP_SUBREQUEST_WAITED except > for source code? Probably no. > And is my current approach wrong even in spite of the conclusion that > it is the original request response generation? And is it ok to call > ngx_http_output_filter(r->main, out) in a post subrequest handler? It is at least incorrect to return ngx_http_output_filter() result as a body filter result. > > Thanks much! > > -- > Marat > > On Mon, Mar 25, 2013 at 8:01 PM, Maxim Dounin wrote: > > Hello! > > > > On Mon, Mar 25, 2013 at 07:41:16PM +0400, Marat Dakota wrote: > > > >> >> >> >> But is it ok to call next body filter in subrequest's body filter to > >> >> >> >> produce output to main request? > >> >> >> >> I mean ngx_http_next_body_filter(r->main, out). > >> >> >> > > >> >> >> > No. You should call next body filter of the request you are > >> >> >> > working with. It's postpone filter responsibility to manage > >> >> >> > subrequests output, and if you try to do this yourself instead - > >> >> >> > result will be undefined. > >> >> >> > >> >> >> It seems to work as expected for me. How can I cause problems with this? 
> >> >> > > >> >> > Undefined behaviour sometimes appear to work as expected. This > >> >> > doesn't mean it's correct though. > >> >> > > >> >> > Depending on the exact place in a filter chain where you did it > >> >> > and various other factors like timings, results may vary from > >> >> > "nothing bad might happen, as r == r->main anyway" to "response > >> >> > will completely incorrect as wrong filters will be applied to the > >> >> > response body". > >> >> > > >> >> > Most trivial thing to test is probably a subrequest order, which > >> >> > likely will be wrong in your case if first subrequest will take > >> >> > longer to handle than second one. > >> >> > >> >> Subrequests order doesn't matter much for me. I feed my library (the > >> >> one I write a Nginx module for) with a subrequests results in a > >> >> whatever order and my library returns next chunk of response only when > >> >> it is ready. > >> >> > >> >> My library has just one function to call. This function returns the > >> >> next chunk of data (if any) to send as a response and/or a list of > >> >> subrequests to make. In every call to subrequest body filter I pass > >> >> subrequest's response to my library and get a new list of subrequests > >> >> (if any) and a new chunk of final response (if any). And so on, until > >> >> my library says it's done. > >> >> > >> >> And if I really do something wrong in terms of Nginx architecture, > >> >> please, could you give me more details about how to achieve my goals > >> >> correctly? > >> > > >> > I don't really see why you don't call ngx_http_next_body_filter(r, > >> > out), which is perfectly correct. > >> > >> Because I'm getting a mess with the chunk order. > >> > >> Let's suppose I've made two subrequests in a handler. > >> > >> ngx_http_subrequest(...); // first > >> ngx_http_subrequest(...); // second > >> > >> If I call ngx_http_next_body_filter(r, out) in a subrequest body > >> filter, I get the following. 
> >> Let's suppose we've received a response from the second subrequest > >> before any data from the first subrequest. And my library said I > >> should send "aaa" as a response (and I passed "aaa" to > >> ngx_http_next_body_filter). > >> Then, we've got a response from the first subrequest and my library > >> said I should send "bbb" as a response. I would expect "aaabbb" as the > >> final result. But real result would be "bbbaaa" (because > >> ngx_http_subrequest for first subrequest is earlier). > >> > >> How to deal with this? > > > > So the part of code in your body filter which tried to call > > ngx_http_output_filter(r->main, out) is actually not a part of a > > body filter, but a original request response generation, right? > > > > For such a case I would recommend looking at > > NGX_HTTP_SUBREQUEST_IN_MEMORY and NGX_HTTP_SUBREQUEST_WAITED > > functionality, and using post subrequest handler to trigger > > parent request content generation. > > > > -- > > Maxim Dounin > > http://nginx.org/en/donation.html > > > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Maxim Dounin http://nginx.org/en/donation.html From pgnet.dev at gmail.com Mon Mar 25 19:06:37 2013 From: pgnet.dev at gmail.com (pgndev) Date: Mon, 25 Mar 2013 12:06:37 -0700 Subject: fyi, patch.spdy-70_1.3.14.txt failure applying to 1.3.14 source Message-ID: cd nginx-1.3.14 patch -p1 < ../patch.spdy-70_1.3.14.txt patching file src/http/ngx_http_request.c patching file src/http/ngx_http_request.c patching file src/http/ngx_http_request.c patching file src/core/ngx_connection.c patching file src/core/ngx_connection.h patching file src/http/ngx_http_request.c patching file src/http/ngx_http_upstream.c patching file 
src/http/ngx_http_request.c patching file src/http/ngx_http_request.h patching file src/http/modules/ngx_http_ssl_module.c patching file src/http/modules/ngx_http_gzip_filter_module.c patching file src/http/ngx_http.h patching file src/http/ngx_http_request.c patching file src/core/ngx_connection.c patching file src/event/ngx_event.c patching file src/event/ngx_event.h patching file src/http/modules/ngx_http_stub_status_module.c patching file src/http/ngx_http_request.c can't find file to patch at input line 1300 Perhaps you used the wrong -p or --strip option? The text leading up to this was: -------------------------- |# HG changeset patch |# User Valentin Bartenev |# Date 1363794516 -14400 |# Node ID a10cc9106dc7521e3b0b4f64f8e028907deaa8bd |# Parent ed600b1e18083c077430e6d4ba5c5420d738e4b2 |Win32: disable MSVC warning | |diff -r ed600b1e1808 -r a10cc9106dc7 src/os/win32/ngx_win32_config.h |--- a/src/os/win32/ngx_win32_config.h Wed Mar 20 19:48:36 2013 +0400 |+++ b/src/os/win32/ngx_win32_config.h Wed Mar 20 19:48:36 2013 +0400 -------------------------- File to patch: From vbart at nginx.com Mon Mar 25 19:27:53 2013 From: vbart at nginx.com (Valentin V. 
Bartenev) Date: Mon, 25 Mar 2013 23:27:53 +0400 Subject: fyi, patch.spdy-70_1.3.14.txt failure applying to 1.3.14 source In-Reply-To: References: Message-ID: <201303252327.53434.vbart@nginx.com> On Monday 25 March 2013 23:06:37 pgndev wrote: > cd nginx-1.3.14 > patch -p1 < ../patch.spdy-70_1.3.14.txt > patching file src/http/ngx_http_request.c > patching file src/http/ngx_http_request.c > patching file src/http/ngx_http_request.c > patching file src/core/ngx_connection.c > patching file src/core/ngx_connection.h > patching file src/http/ngx_http_request.c > patching file src/http/ngx_http_upstream.c > patching file src/http/ngx_http_request.c > patching file src/http/ngx_http_request.h > patching file src/http/modules/ngx_http_ssl_module.c > patching file src/http/modules/ngx_http_gzip_filter_module.c > patching file src/http/ngx_http.h > patching file src/http/ngx_http_request.c > patching file src/core/ngx_connection.c > patching file src/event/ngx_event.c > patching file src/event/ngx_event.h > patching file src/http/modules/ngx_http_stub_status_module.c > patching file src/http/ngx_http_request.c > can't find file to patch at input line 1300 > Perhaps you used the wrong -p or --strip option? > The text leading up to this was: > -------------------------- > > |# HG changeset patch > |# User Valentin Bartenev > |# Date 1363794516 -14400 > |# Node ID a10cc9106dc7521e3b0b4f64f8e028907deaa8bd > |# Parent ed600b1e18083c077430e6d4ba5c5420d738e4b2 > |Win32: disable MSVC warning > | > |diff -r ed600b1e1808 -r a10cc9106dc7 src/os/win32/ngx_win32_config.h > |--- a/src/os/win32/ngx_win32_config.h Wed Mar 20 19:48:36 2013 +0400 > |+++ b/src/os/win32/ngx_win32_config.h Wed Mar 20 19:48:36 2013 +0400 > > -------------------------- > File to patch: > Feel free to skip this patch of spdy patch series by using "-f" flag, since it is for Windows systems. Or please wait a bit for 1.3.15 with spdy inside. wbr, Valentin V. 
Bartenev -- http://nginx.org/en/donation.html From yaoweibin at gmail.com Tue Mar 26 03:22:44 2013 From: yaoweibin at gmail.com (Weibin Yao) Date: Tue, 26 Mar 2013 11:22:44 +0800 Subject: Request for comment: Where is the status module? Message-ID: Hi, folks, I noticed the http status module in the auto/options script: 227 --without-http_status_module) HTTP_STATUS=NO ;; But I can't find any source file with this module. Is this module reserved for the http status module? Do you have any plan to implement it? Thanks. -- Weibin Yao Developer @ Server Platform Team of Taobao -------------- next part -------------- An HTML attachment was scrubbed... URL: From ru at nginx.com Tue Mar 26 04:56:52 2013 From: ru at nginx.com (Ruslan Ermilov) Date: Tue, 26 Mar 2013 08:56:52 +0400 Subject: Request for comment: Where is the status module? In-Reply-To: References: Message-ID: <20130326045652.GC91875@lo0.su> On Tue, Mar 26, 2013 at 11:22:44AM +0800, Weibin Yao wrote: > Hi, folks, > I noticed the http status module in the auto/options script: > 227 ? ? ? ? --without-http_status_module) ? ?HTTP_STATUS=NO ? ? ? > ? ? ? ;; > But I can't find any source file with this module. Is this module reserved > for the http status module? Do you have any plan to implement it? > Thanks. These are relics of the ancient incomplete status module: http://trac.nginx.org/nginx/changeset/4265/nginx From yaoweibin at gmail.com Tue Mar 26 05:36:10 2013 From: yaoweibin at gmail.com (Weibin Yao) Date: Tue, 26 Mar 2013 13:36:10 +0800 Subject: Request for comment: Where is the status module? In-Reply-To: <20130326045652.GC91875@lo0.su> References: <20130326045652.GC91875@lo0.su> Message-ID: Thanks. Ruslan. In your roadmap (http://trac.nginx.org/nginx/roadmap), you are working on the new status module. Will it be a more powerful status module and can display the status with each virtual server. 
2013/3/26 Ruslan Ermilov > On Tue, Mar 26, 2013 at 11:22:44AM +0800, Weibin Yao wrote: > > Hi, folks, > > I noticed the http status module in the auto/options script: > > 227 --without-http_status_module) HTTP_STATUS=NO > > ;; > > But I can't find any source file with this module. Is this module > reserved > > for the http status module? Do you have any plan to implement it? > > Thanks. > > These are relics of the ancient incomplete status module: > > http://trac.nginx.org/nginx/changeset/4265/nginx > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- Weibin Yao Developer @ Server Platform Team of Taobao -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Tue Mar 26 13:03:03 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 26 Mar 2013 13:03:03 +0000 Subject: [nginx] svn commit: r5139 - trunk/docs/xml/nginx Message-ID: <20130326130304.27B533F9C18@mail.nginx.com> Author: mdounin Date: 2013-03-26 13:03:02 +0000 (Tue, 26 Mar 2013) New Revision: 5139 URL: http://trac.nginx.org/nginx/changeset/5139/nginx Log: nginx-1.3.15-RELEASE Modified: trunk/docs/xml/nginx/changes.xml Modified: trunk/docs/xml/nginx/changes.xml =================================================================== --- trunk/docs/xml/nginx/changes.xml 2013-03-25 15:49:11 UTC (rev 5138) +++ trunk/docs/xml/nginx/changes.xml 2013-03-26 13:03:02 UTC (rev 5139) @@ -5,6 +5,123 @@ + + + + +???????? ? ???????? ?????????? ??? ???????? ? ??? ?????-???? ?????? +?????? ?? ???????????? ? access_log ? ????? ?????? 400. + + +opening and closing a connection without sending any data in it +is no longer logged to access_log with error code 400. + + + + + +?????? ngx_http_spdy_module.
+??????? Automattic ?? ????????????? ??????????. +
+ +the ngx_http_spdy_module.
+Thanks to Automattic for sponsoring this work. +
+
+ + + +????????? limit_req_status ? limit_conn_status.
+??????? Nick Marden. +
+ +the "limit_req_status" and "limit_conn_status" directives.
+Thanks to Nick Marden. +
+
+ + + +????????? image_filter_interlace.
+??????? ????? ???????. +
+ +the "image_filter_interlace" directive.
+Thanks to Ian Babrou. +
+
+ + + +?????????? $connections_waiting ? ?????? ngx_http_stub_status_module. + + +$connections_waiting variable in the ngx_http_stub_status_module. + + + + + +?????? ???????? ??????-?????? ???????????? IPv6-???????. + + +the mail proxy module now supports IPv6 backends. + + + + + +??? ????????? ???????? ??????? ?? ?????? +???? ??????? ????? ???????????? ???????????; +?????? ????????? ? 1.3.9.
+??????? Piotr Sikora. +
+ +request body might be transmitted incorrectly +when retrying a request to the next upstream server; +the bug had appeared in 1.3.9.
+Thanks to Piotr Sikora. +
+
+ + + +? ????????? client_body_in_file_only; +?????? ????????? ? 1.3.9. + + +in the "client_body_in_file_only" directive; +the bug had appeared in 1.3.9. + + + + + +?????? ????? ????????, +???? ?????????????? ?????????? +? ??? ????????? ?????????? ??????????? DNS-??????.
+??????? Lanshun Zhou. +
+ +responses might hang +if subrequests were used +and a DNS error happened during subrequest processing.
+Thanks to Lanshun Zhou. +
+
+ + + +? ????????? ????? ????????????? ????????. + + +in backend usage accounting. + + + +
+ + @@ -195,7 +312,7 @@ -? ????????? client_body_in_file_only; +? ????????? client_body_in_file_only; ?????? ????????? ? 1.3.9. From mdounin at mdounin.ru Tue Mar 26 13:03:16 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 26 Mar 2013 13:03:16 +0000 Subject: [nginx] svn commit: r5140 - tags Message-ID: <20130326130316.5C6603F9C45@mail.nginx.com> Author: mdounin Date: 2013-03-26 13:03:14 +0000 (Tue, 26 Mar 2013) New Revision: 5140 URL: http://trac.nginx.org/nginx/changeset/5140/nginx Log: release-1.3.15 tag Added: tags/release-1.3.15/ From mdounin at mdounin.ru Tue Mar 26 15:25:08 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 26 Mar 2013 19:25:08 +0400 Subject: Request for comment: Where is the status module? In-Reply-To: References: <20130326045652.GC91875@lo0.su> Message-ID: <20130326152508.GU62550@mdounin.ru> Hello! On Tue, Mar 26, 2013 at 01:36:10PM +0800, Weibin Yao wrote: > Thanks. Ruslan. In your roadmap (http://trac.nginx.org/nginx/roadmap), you > are working on the new status module. Will it be a more powerful status > module and can display the status with each virtual server. It was postponed for a while as we don't feel ourself happy enough with the result. -- Maxim Dounin http://nginx.org/en/donation.html From mdounin at mdounin.ru Wed Mar 27 15:15:35 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Wed, 27 Mar 2013 15:15:35 +0000 Subject: [nginx] svn commit: r5141 - in trunk/src: core http/modules/perl Message-ID: <20130327151535.994A83F9FE6@mail.nginx.com> Author: mdounin Date: 2013-03-27 15:15:34 +0000 (Wed, 27 Mar 2013) New Revision: 5141 URL: http://trac.nginx.org/nginx/changeset/5141/nginx Log: Version bump. 
Modified: trunk/src/core/nginx.h trunk/src/http/modules/perl/nginx.pm Modified: trunk/src/core/nginx.h =================================================================== --- trunk/src/core/nginx.h 2013-03-26 13:03:14 UTC (rev 5140) +++ trunk/src/core/nginx.h 2013-03-27 15:15:34 UTC (rev 5141) @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1003015 -#define NGINX_VERSION "1.3.15" +#define nginx_version 1003016 +#define NGINX_VERSION "1.3.16" #define NGINX_VER "nginx/" NGINX_VERSION #define NGINX_VAR "NGINX" Modified: trunk/src/http/modules/perl/nginx.pm =================================================================== --- trunk/src/http/modules/perl/nginx.pm 2013-03-26 13:03:14 UTC (rev 5140) +++ trunk/src/http/modules/perl/nginx.pm 2013-03-27 15:15:34 UTC (rev 5141) @@ -50,7 +50,7 @@ HTTP_INSUFFICIENT_STORAGE ); -our $VERSION = '1.3.15'; +our $VERSION = '1.3.16'; require XSLoader; XSLoader::load('nginx', $VERSION); From mdounin at mdounin.ru Wed Mar 27 15:16:46 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Wed, 27 Mar 2013 15:16:46 +0000 Subject: [nginx] svn commit: r5142 - trunk/src/event Message-ID: <20130327151646.312293FAA59@mail.nginx.com> Author: mdounin Date: 2013-03-27 15:16:45 +0000 (Wed, 27 Mar 2013) New Revision: 5142 URL: http://trac.nginx.org/nginx/changeset/5142/nginx Log: Event connect: don't penalize AF_INET6 connections. Problems with setsockopt(TCP_NODELAY) and setsockopt(TCP_NOPUSH), as well as sendfile() syscall on Solaris, are specific to UNIX-domain sockets. Other address families, i.e. AF_INET and AF_INET6, are fine. 
Modified: trunk/src/event/ngx_event_connect.c Modified: trunk/src/event/ngx_event_connect.c =================================================================== --- trunk/src/event/ngx_event_connect.c 2013-03-27 15:15:34 UTC (rev 5141) +++ trunk/src/event/ngx_event_connect.c 2013-03-27 15:16:45 UTC (rev 5142) @@ -84,7 +84,7 @@ c->log_error = pc->log_error; - if (pc->sockaddr->sa_family != AF_INET) { + if (pc->sockaddr->sa_family == AF_UNIX) { c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED; c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED; From mdounin at mdounin.ru Wed Mar 27 15:18:34 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Wed, 27 Mar 2013 15:18:34 +0000 Subject: [nginx] svn commit: r5143 - trunk/src/http Message-ID: <20130327151834.93BAA3F9C12@mail.nginx.com> Author: mdounin Date: 2013-03-27 15:18:34 +0000 (Wed, 27 Mar 2013) New Revision: 5143 URL: http://trac.nginx.org/nginx/changeset/5143/nginx Log: Upstream: fixed tcp_nodelay with connection upgrade (ticket #325). Modified: trunk/src/http/ngx_http_upstream.c Modified: trunk/src/http/ngx_http_upstream.c =================================================================== --- trunk/src/http/ngx_http_upstream.c 2013-03-27 15:16:45 UTC (rev 5142) +++ trunk/src/http/ngx_http_upstream.c 2013-03-27 15:18:34 UTC (rev 5143) @@ -2413,32 +2413,39 @@ r->read_event_handler = ngx_http_upstream_upgraded_read_downstream; r->write_event_handler = ngx_http_upstream_upgraded_write_downstream; - if (clcf->tcp_nodelay && c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) { - ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); - + if (clcf->tcp_nodelay) { tcp_nodelay = 1; - if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, - (const void *) &tcp_nodelay, sizeof(int)) == -1) - { - ngx_connection_error(c, ngx_socket_errno, - "setsockopt(TCP_NODELAY) failed"); - ngx_http_upstream_finalize_request(r, u, 0); - return; + if (c->tcp_nodelay == NGX_TCP_NODELAY_UNSET) { + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "tcp_nodelay"); + + 
if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, + (const void *) &tcp_nodelay, sizeof(int)) == -1) + { + ngx_connection_error(c, ngx_socket_errno, + "setsockopt(TCP_NODELAY) failed"); + ngx_http_upstream_finalize_request(r, u, 0); + return; + } + + c->tcp_nodelay = NGX_TCP_NODELAY_SET; } - c->tcp_nodelay = NGX_TCP_NODELAY_SET; + if (u->peer.connection->tcp_nodelay == NGX_TCP_NODELAY_UNSET) { + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, u->peer.connection->log, 0, + "tcp_nodelay"); - if (setsockopt(u->peer.connection->fd, IPPROTO_TCP, TCP_NODELAY, - (const void *) &tcp_nodelay, sizeof(int)) == -1) - { - ngx_connection_error(u->peer.connection, ngx_socket_errno, - "setsockopt(TCP_NODELAY) failed"); - ngx_http_upstream_finalize_request(r, u, 0); - return; + if (setsockopt(u->peer.connection->fd, IPPROTO_TCP, TCP_NODELAY, + (const void *) &tcp_nodelay, sizeof(int)) == -1) + { + ngx_connection_error(u->peer.connection, ngx_socket_errno, + "setsockopt(TCP_NODELAY) failed"); + ngx_http_upstream_finalize_request(r, u, 0); + return; + } + + u->peer.connection->tcp_nodelay = NGX_TCP_NODELAY_SET; } - - u->peer.connection->tcp_nodelay = NGX_TCP_NODELAY_SET; } if (ngx_http_send_special(r, NGX_HTTP_FLUSH) == NGX_ERROR) { From piotr at cloudflare.com Wed Mar 27 20:54:57 2013 From: piotr at cloudflare.com (Piotr Sikora) Date: Wed, 27 Mar 2013 13:54:57 -0700 Subject: [PATCH] Add missing NGX_HAVE_UNIX_DOMAIN checks Message-ID: Hey, I've noticed that Maxim's recent commit (r5142) was missing a check for AF_UNIX existence (NGX_HAVE_UNIX_DOMAIN) and it seems that the check was missing in a few other places as well. Attached patch adds those, even though apparently they aren't really needed. 
Best regards, Piotr Sikora diff -r afea5259e05c src/event/ngx_event_connect.c --- a/src/event/ngx_event_connect.c Wed Mar 27 15:18:34 2013 +0000 +++ b/src/event/ngx_event_connect.c Wed Mar 27 13:41:59 2013 -0700 @@ -84,6 +84,7 @@ c->log_error = pc->log_error; +#if (NGX_HAVE_UNIX_DOMAIN) if (pc->sockaddr->sa_family == AF_UNIX) { c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED; c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED; @@ -93,6 +94,7 @@ c->sendfile = 0; #endif } +#endif rev = c->read; wev = c->write; diff -r afea5259e05c src/http/modules/ngx_http_proxy_module.c --- a/src/http/modules/ngx_http_proxy_module.c Wed Mar 27 15:18:34 2013 +0000 +++ b/src/http/modules/ngx_http_proxy_module.c Wed Mar 27 13:41:59 2013 -0700 @@ -3767,7 +3767,9 @@ static void ngx_http_proxy_set_vars(ngx_url_t *u, ngx_http_proxy_vars_t *v) { +#if (NGX_HAVE_UNIX_DOMAIN) if (u->family != AF_UNIX) { +#endif if (u->no_port || u->port == u->default_port) { @@ -3788,11 +3790,13 @@ v->key_start.len += v->host_header.len; +#if (NGX_HAVE_UNIX_DOMAIN) } else { ngx_str_set(&v->host_header, "localhost"); ngx_str_null(&v->port); v->key_start.len += sizeof("unix:") - 1 + u->host.len + 1; } +#endif v->uri = u->uri; } diff -r afea5259e05c src/mail/ngx_mail_auth_http_module.c --- a/src/mail/ngx_mail_auth_http_module.c Wed Mar 27 15:18:34 2013 +0000 +++ b/src/mail/ngx_mail_auth_http_module.c Wed Mar 27 13:41:59 2013 -0700 @@ -1416,12 +1416,16 @@ ahcf->peer = u.addrs; +#if (NGX_HAVE_UNIX_DOMAIN) if (u.family != AF_UNIX) { +#endif ahcf->host_header = u.host; +#if (NGX_HAVE_UNIX_DOMAIN) } else { ngx_str_set(&ahcf->host_header, "localhost"); } +#endif ahcf->uri = u.uri; From mdounin at mdounin.ru Wed Mar 27 21:07:43 2013 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 28 Mar 2013 01:07:43 +0400 Subject: [PATCH] Add missing NGX_HAVE_UNIX_DOMAIN checks In-Reply-To: References: Message-ID: <20130327210743.GL62550@mdounin.ru> Hello! 
On Wed, Mar 27, 2013 at 01:54:57PM -0700, Piotr Sikora wrote: > Hey, > I've noticed that Maxim's recent commit (r5142) was missing a check > for AF_UNIX existence (NGX_HAVE_UNIX_DOMAIN) and it seems > that the check was missing in a few other places as well. > > Attached patch adds those, even though apparently they aren't really needed. They aren't needed, and I omitted it intentionally. AF_UNIX is required to be defined by POSIX, and it is defined even on win32. I rather think about removing the #if's in places where they are used only to protect AF_UNIX constant. -- Maxim Dounin http://nginx.org/en/donation.html From yaoweibin at gmail.com Thu Mar 28 03:54:03 2013 From: yaoweibin at gmail.com (Weibin Yao) Date: Thu, 28 Mar 2013 11:54:03 +0800 Subject: Request for comment: Where is the status module? In-Reply-To: <20130326152508.GU62550@mdounin.ru> References: <20130326045652.GC91875@lo0.su> <20130326152508.GU62550@mdounin.ru> Message-ID: Thanks for this message. Maxim. 2013/3/26 Maxim Dounin > Hello! > > On Tue, Mar 26, 2013 at 01:36:10PM +0800, Weibin Yao wrote: > > > Thanks. Ruslan. In your roadmap (http://trac.nginx.org/nginx/roadmap), > you > > are working on the new status module. Will it be a more powerful status > > module and can display the status with each virtual server. > > It was postponed for a while as we don't feel ourself happy enough > with the result. > > -- > Maxim Dounin > http://nginx.org/en/donation.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- Weibin Yao Developer @ Server Platform Team of Taobao -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From ru at nginx.com Thu Mar 28 08:45:47 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 28 Mar 2013 08:45:47 +0000 Subject: [nginx] svn commit: r5144 - trunk/auto/lib/perl Message-ID: <20130328084547.755C53F9F0F@mail.nginx.com> Author: ru Date: 2013-03-28 08:45:45 +0000 (Thu, 28 Mar 2013) New Revision: 5144 URL: http://trac.nginx.org/nginx/changeset/5144/nginx Log: Configure: improved layout of the generated makefile. No functional changes. Modified: trunk/auto/lib/perl/make Modified: trunk/auto/lib/perl/make =================================================================== --- trunk/auto/lib/perl/make 2013-03-27 15:18:34 UTC (rev 5143) +++ trunk/auto/lib/perl/make 2013-03-28 08:45:45 UTC (rev 5144) @@ -5,11 +5,11 @@ cat << END >> $NGX_MAKEFILE -$NGX_OBJS/src/http/modules/perl/blib/arch/auto/nginx/nginx.so: \ - \$(CORE_DEPS) \$(HTTP_DEPS) \ - src/http/modules/perl/nginx.pm \ - src/http/modules/perl/nginx.xs \ - src/http/modules/perl/ngx_http_perl_module.h \ +$NGX_OBJS/src/http/modules/perl/blib/arch/auto/nginx/nginx.so: \\ + \$(CORE_DEPS) \$(HTTP_DEPS) \\ + src/http/modules/perl/nginx.pm \\ + src/http/modules/perl/nginx.xs \\ + src/http/modules/perl/ngx_http_perl_module.h \\ $NGX_OBJS/src/http/modules/perl/Makefile cp src/http/modules/perl/nginx.* $NGX_OBJS/src/http/modules/perl/ @@ -18,18 +18,18 @@ rm -rf $NGX_OBJS/install_perl -$NGX_OBJS/src/http/modules/perl/Makefile: \ - src/http/modules/perl/Makefile.PL \ +$NGX_OBJS/src/http/modules/perl/Makefile: \\ + src/http/modules/perl/Makefile.PL \\ src/http/modules/perl/nginx.pm cp -p src/http/modules/perl/nginx.* $NGX_OBJS/src/http/modules/perl/ cp -p src/http/modules/perl/typemap $NGX_OBJS/src/http/modules/perl/ cp -p src/http/modules/perl/Makefile.PL $NGX_OBJS/src/http/modules/perl/ - cd $NGX_OBJS/src/http/modules/perl \ - && NGX_PM_CFLAGS="\$(NGX_PM_CFLAGS) -g $NGX_CC_OPT" \ - NGX_INCS="$CORE_INCS $NGX_OBJS $HTTP_INCS" \ - $NGX_PERL Makefile.PL \ - LIB=$NGX_PERL_MODULES \ + cd 
$NGX_OBJS/src/http/modules/perl \\ + && NGX_PM_CFLAGS="\$(NGX_PM_CFLAGS) -g $NGX_CC_OPT" \\ + NGX_INCS="$CORE_INCS $NGX_OBJS $HTTP_INCS" \\ + $NGX_PERL Makefile.PL \\ + LIB=$NGX_PERL_MODULES \\ INSTALLSITEMAN3DIR=$NGX_PERL_MODULES_MAN END From ru at nginx.com Thu Mar 28 08:46:13 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 28 Mar 2013 08:46:13 +0000 Subject: [nginx] svn commit: r5145 - trunk/auto/lib/perl Message-ID: <20130328084613.276CA3F9F0F@mail.nginx.com> Author: ru Date: 2013-03-28 08:46:12 +0000 (Thu, 28 Mar 2013) New Revision: 5145 URL: http://trac.nginx.org/nginx/changeset/5145/nginx Log: Configure: improved make dependencies for perl module. Added missing dependencies for perl module's Makefile. Simplified dependencies for perl module nginx.so: it depends on Makefile that in turn depends on other perl bits. Modified: trunk/auto/lib/perl/make Modified: trunk/auto/lib/perl/make =================================================================== --- trunk/auto/lib/perl/make 2013-03-28 08:45:45 UTC (rev 5144) +++ trunk/auto/lib/perl/make 2013-03-28 08:46:12 UTC (rev 5145) @@ -7,12 +7,8 @@ $NGX_OBJS/src/http/modules/perl/blib/arch/auto/nginx/nginx.so: \\ \$(CORE_DEPS) \$(HTTP_DEPS) \\ - src/http/modules/perl/nginx.pm \\ - src/http/modules/perl/nginx.xs \\ src/http/modules/perl/ngx_http_perl_module.h \\ $NGX_OBJS/src/http/modules/perl/Makefile - cp src/http/modules/perl/nginx.* $NGX_OBJS/src/http/modules/perl/ - cd $NGX_OBJS/src/http/modules/perl && \$(MAKE) rm -rf $NGX_OBJS/install_perl @@ -20,7 +16,9 @@ $NGX_OBJS/src/http/modules/perl/Makefile: \\ src/http/modules/perl/Makefile.PL \\ - src/http/modules/perl/nginx.pm + src/http/modules/perl/nginx.pm \\ + src/http/modules/perl/nginx.xs \\ + src/http/modules/perl/typemap cp -p src/http/modules/perl/nginx.* $NGX_OBJS/src/http/modules/perl/ cp -p src/http/modules/perl/typemap $NGX_OBJS/src/http/modules/perl/ cp -p src/http/modules/perl/Makefile.PL $NGX_OBJS/src/http/modules/perl/ From ru at nginx.com 
Thu Mar 28 08:46:44 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 28 Mar 2013 08:46:44 +0000 Subject: [nginx] svn commit: r5146 - trunk/auto/lib/perl Message-ID: <20130328084645.55CC03F9FBB@mail.nginx.com> Author: ru Date: 2013-03-28 08:46:42 +0000 (Thu, 28 Mar 2013) New Revision: 5146 URL: http://trac.nginx.org/nginx/changeset/5146/nginx Log: Configure: fixed perl module make rules. Filename extension used for dynamically loaded perl modules isn't necessarily ".so" (e.g., it's ".bundle" on Mac OS X). This fixes "make" after "make" unnecessarily rebuilding perl module. Modified: trunk/auto/lib/perl/conf trunk/auto/lib/perl/make Modified: trunk/auto/lib/perl/conf =================================================================== --- trunk/auto/lib/perl/conf 2013-03-28 08:46:12 UTC (rev 5145) +++ trunk/auto/lib/perl/conf 2013-03-28 08:46:42 UTC (rev 5146) @@ -40,6 +40,8 @@ ngx_perl_ldopts=`$NGX_PERL -MExtUtils::Embed -e ldopts` + ngx_perl_dlext=`$NGX_PERL -MConfig -e 'print $Config{dlext}'` + if $NGX_PERL -V:usemultiplicity | grep define > /dev/null; then have=NGX_HAVE_PERL_MULTIPLICITY . 
auto/have echo " + perl interpreter multiplicity found" @@ -51,7 +53,7 @@ fi CORE_LINK="$CORE_LINK $ngx_perl_ldopts" - LINK_DEPS="$LINK_DEPS $NGX_OBJS/src/http/modules/perl/blib/arch/auto/nginx/nginx.so" + LINK_DEPS="$LINK_DEPS $NGX_OBJS/src/http/modules/perl/blib/arch/auto/nginx/nginx.$ngx_perl_dlext" if test -n "$NGX_PERL_MODULES"; then have=NGX_PERL_MODULES value="(u_char *) \"$NGX_PERL_MODULES\"" Modified: trunk/auto/lib/perl/make =================================================================== --- trunk/auto/lib/perl/make 2013-03-28 08:46:12 UTC (rev 5145) +++ trunk/auto/lib/perl/make 2013-03-28 08:46:42 UTC (rev 5146) @@ -5,7 +5,7 @@ cat << END >> $NGX_MAKEFILE -$NGX_OBJS/src/http/modules/perl/blib/arch/auto/nginx/nginx.so: \\ +$NGX_OBJS/src/http/modules/perl/blib/arch/auto/nginx/nginx.$ngx_perl_dlext: \\ \$(CORE_DEPS) \$(HTTP_DEPS) \\ src/http/modules/perl/ngx_http_perl_module.h \\ $NGX_OBJS/src/http/modules/perl/Makefile From ru at nginx.com Thu Mar 28 08:47:08 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 28 Mar 2013 08:47:08 +0000 Subject: [nginx] svn commit: r5147 - in trunk: auto/lib/perl src/http/modules/perl Message-ID: <20130328084708.5A7633F9F0F@mail.nginx.com> Author: ru Date: 2013-03-28 08:47:06 +0000 (Thu, 28 Mar 2013) New Revision: 5147 URL: http://trac.nginx.org/nginx/changeset/5147/nginx Log: Simplified nginx version maintenance. It's no longer necessary to update src/http/modules/perl/nginx.pm when version is bumped, as it's now derived from src/core/nginx.h. Modified: trunk/auto/lib/perl/make trunk/src/http/modules/perl/nginx.pm Modified: trunk/auto/lib/perl/make =================================================================== --- trunk/auto/lib/perl/make 2013-03-28 08:46:42 UTC (rev 5146) +++ trunk/auto/lib/perl/make 2013-03-28 08:47:06 UTC (rev 5147) @@ -3,6 +3,9 @@ # Copyright (C) Nginx, Inc. 
+v=`grep 'define NGINX_VERSION' src/core/nginx.h | sed -e 's/^.*"\(.*\)".*/\1/'` + + cat << END >> $NGX_MAKEFILE $NGX_OBJS/src/http/modules/perl/blib/arch/auto/nginx/nginx.$ngx_perl_dlext: \\ @@ -15,11 +18,14 @@ $NGX_OBJS/src/http/modules/perl/Makefile: \\ + src/core/nginx.h \\ src/http/modules/perl/Makefile.PL \\ src/http/modules/perl/nginx.pm \\ src/http/modules/perl/nginx.xs \\ src/http/modules/perl/typemap - cp -p src/http/modules/perl/nginx.* $NGX_OBJS/src/http/modules/perl/ + sed "s/%%VERSION%%/$v/" src/http/modules/perl/nginx.pm > \\ + $NGX_OBJS/src/http/modules/perl/nginx.pm + cp -p src/http/modules/perl/nginx.xs $NGX_OBJS/src/http/modules/perl/ cp -p src/http/modules/perl/typemap $NGX_OBJS/src/http/modules/perl/ cp -p src/http/modules/perl/Makefile.PL $NGX_OBJS/src/http/modules/perl/ Modified: trunk/src/http/modules/perl/nginx.pm =================================================================== --- trunk/src/http/modules/perl/nginx.pm 2013-03-28 08:46:42 UTC (rev 5146) +++ trunk/src/http/modules/perl/nginx.pm 2013-03-28 08:47:06 UTC (rev 5147) @@ -50,7 +50,7 @@ HTTP_INSUFFICIENT_STORAGE ); -our $VERSION = '1.3.16'; +our $VERSION = '%%VERSION%%'; require XSLoader; XSLoader::load('nginx', $VERSION); From ru at nginx.com Thu Mar 28 08:47:27 2013 From: ru at nginx.com (ru at nginx.com) Date: Thu, 28 Mar 2013 08:47:27 +0000 Subject: [nginx] svn commit: r5148 - in trunk: docs misc Message-ID: <20130328084727.B3B473F9C14@mail.nginx.com> Author: ru Date: 2013-03-28 08:47:25 +0000 (Thu, 28 Mar 2013) New Revision: 5148 URL: http://trac.nginx.org/nginx/changeset/5148/nginx Log: Configure: unified nginx version computation constructs. 
Modified: trunk/docs/GNUmakefile trunk/misc/GNUmakefile Modified: trunk/docs/GNUmakefile =================================================================== --- trunk/docs/GNUmakefile 2013-03-28 08:47:06 UTC (rev 5147) +++ trunk/docs/GNUmakefile 2013-03-28 08:47:25 UTC (rev 5148) @@ -1,6 +1,6 @@ VER= $(shell grep 'define NGINX_VERSION' src/core/nginx.h \ - | sed -e 's/^.*\"\(.*\)\"/\1/') + | sed -e 's/^.*"\(.*\)".*/\1/') NGINX= nginx-$(VER) TEMP= tmp CP= $(HOME)/java Modified: trunk/misc/GNUmakefile =================================================================== --- trunk/misc/GNUmakefile 2013-03-28 08:47:06 UTC (rev 5147) +++ trunk/misc/GNUmakefile 2013-03-28 08:47:25 UTC (rev 5148) @@ -1,6 +1,6 @@ VER = $(shell grep 'define NGINX_VERSION' src/core/nginx.h \ - | sed -e 's/^.*\"\(.*\)\"/\1/') + | sed -e 's/^.*"\(.*\)".*/\1/') NGINX = nginx-$(VER) TEMP = tmp REPO = $(shell svn info | sed -n 's/^Repository Root: //p') From ru at nginx.com Fri Mar 29 08:47:38 2013 From: ru at nginx.com (ru at nginx.com) Date: Fri, 29 Mar 2013 08:47:38 +0000 Subject: [nginx] svn commit: r5149 - trunk/src/http Message-ID: <20130329084738.E810E3F9C12@mail.nginx.com> Author: ru Date: 2013-03-29 08:47:37 +0000 (Fri, 29 Mar 2013) New Revision: 5149 URL: http://trac.nginx.org/nginx/changeset/5149/nginx Log: Pass PCRE_CASELESS to pcre_compile() for caseless matching. Previously, we sometimes passed constant value 1 that happens to match PCRE_CASELESS and thus was harmless. Modified: trunk/src/http/ngx_http_core_module.c Modified: trunk/src/http/ngx_http_core_module.c =================================================================== --- trunk/src/http/ngx_http_core_module.c 2013-03-28 08:47:25 UTC (rev 5148) +++ trunk/src/http/ngx_http_core_module.c 2013-03-29 08:47:37 UTC (rev 5149) @@ -3256,7 +3256,7 @@ #if (NGX_HAVE_CASELESS_FILESYSTEM) rc.options = NGX_REGEX_CASELESS; #else - rc.options = caseless; + rc.options = caseless ? 
NGX_REGEX_CASELESS : 0; #endif clcf->regex = ngx_http_regex_compile(cf, &rc); From mdounin at mdounin.ru Fri Mar 29 15:29:29 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 15:29:29 +0000 Subject: [nginx] svn commit: r5150 - in branches/stable-1.2/src: core http/modules/perl Message-ID: <20130329152930.69CF83F9FD0@mail.nginx.com> Author: mdounin Date: 2013-03-29 15:29:29 +0000 (Fri, 29 Mar 2013) New Revision: 5150 URL: http://trac.nginx.org/nginx/changeset/5150/nginx Log: Version bump. Modified: branches/stable-1.2/src/core/nginx.h branches/stable-1.2/src/http/modules/perl/nginx.pm Modified: branches/stable-1.2/src/core/nginx.h =================================================================== --- branches/stable-1.2/src/core/nginx.h 2013-03-29 08:47:37 UTC (rev 5149) +++ branches/stable-1.2/src/core/nginx.h 2013-03-29 15:29:29 UTC (rev 5150) @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1002007 -#define NGINX_VERSION "1.2.7" +#define nginx_version 1002008 +#define NGINX_VERSION "1.2.8" #define NGINX_VER "nginx/" NGINX_VERSION #define NGINX_VAR "NGINX" Modified: branches/stable-1.2/src/http/modules/perl/nginx.pm =================================================================== --- branches/stable-1.2/src/http/modules/perl/nginx.pm 2013-03-29 08:47:37 UTC (rev 5149) +++ branches/stable-1.2/src/http/modules/perl/nginx.pm 2013-03-29 15:29:29 UTC (rev 5150) @@ -50,7 +50,7 @@ HTTP_INSUFFICIENT_STORAGE ); -our $VERSION = '1.2.7'; +our $VERSION = '1.2.8'; require XSLoader; XSLoader::load('nginx', $VERSION); From mdounin at mdounin.ru Fri Mar 29 17:11:10 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:11:10 +0000 Subject: [nginx] svn commit: r5151 - in branches/stable-1.2: . 
src/http/modules Message-ID: <20130329171112.385923FA02D@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:11:09 +0000 (Fri, 29 Mar 2013) New Revision: 5151 URL: http://trac.nginx.org/nginx/changeset/5151/nginx Log: Merge of r5070: fixed false memset warning on Linux. Fixed false memset warning on Linux with -O3 (ticket #275). Prodded by John Leach. Modified: branches/stable-1.2/ branches/stable-1.2/src/http/modules/ngx_http_autoindex_module.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 15:29:29 UTC (rev 5150) +++ branches/stable-1.2 2013-03-29 17:11:09 UTC (rev 5151) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070 \ No newline at end of property Modified: branches/stable-1.2/src/http/modules/ngx_http_autoindex_module.c =================================================================== --- branches/stable-1.2/src/http/modules/ngx_http_autoindex_module.c 2013-03-29 15:29:29 UTC (rev 5150) +++ branches/stable-1.2/src/http/modules/ngx_http_autoindex_module.c 2013-03-29 17:11:09 UTC (rev 5151) @@ -489,8 +489,11 @@ } b->last = ngx_cpymem(b->last, "", sizeof("") - 1); - ngx_memset(b->last, ' ', NGX_HTTP_AUTOINDEX_NAME_LEN - len); - b->last += NGX_HTTP_AUTOINDEX_NAME_LEN - len; + 
+ if (NGX_HTTP_AUTOINDEX_NAME_LEN - len > 0) { + ngx_memset(b->last, ' ', NGX_HTTP_AUTOINDEX_NAME_LEN - len); + b->last += NGX_HTTP_AUTOINDEX_NAME_LEN - len; + } } *b->last++ = ' '; From mdounin at mdounin.ru Fri Mar 29 17:13:45 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:13:45 +0000 Subject: [nginx] svn commit: r5152 - in branches/stable-1.2: . auto/lib/perl Message-ID: <20130329171345.C0E2B3F9C18@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:13:45 +0000 (Fri, 29 Mar 2013) New Revision: 5152 URL: http://trac.nginx.org/nginx/changeset/5152/nginx Log: Merge of r5071: rebuild perl module on headers change. Configure: rebuild perl module nginx.so if headers are changed. Note: the "-p" argument of cp(1) dropped intentionally, to force nginx.so rebuild. It is considered too boring to properly list all dependencies in Makefile.PL. Modified: branches/stable-1.2/ branches/stable-1.2/auto/lib/perl/make Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:11:09 UTC (rev 5151) +++ branches/stable-1.2 2013-03-29 17:13:45 UTC (rev 5152) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071 \ No newline at end of property Modified: 
branches/stable-1.2/auto/lib/perl/make =================================================================== --- branches/stable-1.2/auto/lib/perl/make 2013-03-29 17:11:09 UTC (rev 5151) +++ branches/stable-1.2/auto/lib/perl/make 2013-03-29 17:13:45 UTC (rev 5152) @@ -6,11 +6,12 @@ cat << END >> $NGX_MAKEFILE $NGX_OBJS/src/http/modules/perl/blib/arch/auto/nginx/nginx.so: \ + \$(CORE_DEPS) \$(HTTP_DEPS) \ src/http/modules/perl/nginx.pm \ src/http/modules/perl/nginx.xs \ src/http/modules/perl/ngx_http_perl_module.h \ $NGX_OBJS/src/http/modules/perl/Makefile - cp -p src/http/modules/perl/nginx.* $NGX_OBJS/src/http/modules/perl/ + cp src/http/modules/perl/nginx.* $NGX_OBJS/src/http/modules/perl/ cd $NGX_OBJS/src/http/modules/perl && \$(MAKE) From mdounin at mdounin.ru Fri Mar 29 17:15:34 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:15:34 +0000 Subject: [nginx] svn commit: r5153 - in branches/stable-1.2: . src/http src/http/modules Message-ID: <20130329171534.DDE593F9C18@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:15:34 +0000 (Fri, 29 Mar 2013) New Revision: 5153 URL: http://trac.nginx.org/nginx/changeset/5153/nginx Log: Merge of r5078: removed zero termination of shm zone names. It was added in r2717 and no longer needed since r2721, where the termination was added to ngx_shm_alloc() and ngx_init_zone_pool(). Since then it only corrupted error messages about invalid zones. 
Modified: branches/stable-1.2/ branches/stable-1.2/src/http/modules/ngx_http_ssl_module.c branches/stable-1.2/src/http/ngx_http_file_cache.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:13:45 UTC (rev 5152) +++ branches/stable-1.2 2013-03-29 17:15:34 UTC (rev 5153) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078 \ No newline at end of property Modified: branches/stable-1.2/src/http/modules/ngx_http_ssl_module.c =================================================================== --- branches/stable-1.2/src/http/modules/ngx_http_ssl_module.c 2013-03-29 17:13:45 UTC (rev 5152) +++ branches/stable-1.2/src/http/modules/ngx_http_ssl_module.c 2013-03-29 17:15:34 UTC (rev 5153) @@ -593,7 +593,6 @@ for (j = sizeof("shared:") - 1; j < value[i].len; j++) { if (value[i].data[j] == ':') { - value[i].data[j] = '\0'; break; } Modified: branches/stable-1.2/src/http/ngx_http_file_cache.c =================================================================== --- branches/stable-1.2/src/http/ngx_http_file_cache.c 2013-03-29 17:13:45 UTC (rev 5152) +++ branches/stable-1.2/src/http/ngx_http_file_cache.c 2013-03-29 17:15:34 UTC (rev 5153) @@ -1674,8 +1674,6 @@ p = (u_char *) 
ngx_strchr(name.data, ':'); if (p) { - *p = '\0'; - name.len = p - name.data; p++; From mdounin at mdounin.ru Fri Mar 29 17:17:46 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:17:46 +0000 Subject: [nginx] svn commit: r5154 - in branches/stable-1.2: . src/event Message-ID: <20130329171746.CEB0C3F9C18@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:17:45 +0000 (Fri, 29 Mar 2013) New Revision: 5154 URL: http://trac.nginx.org/nginx/changeset/5154/nginx Log: Merge of r5082: SSL: retry "sess_id" and "id" allocations. SSL: retry "sess_id" and "id" allocations. In case of fully populated SSL session cache with no memory left for new allocations, ngx_ssl_new_session() will try to expire the oldest non-expired session and retry, but only in case when slab allocation fails for "cached_sess", not when slab allocation fails for either "sess_id" or "id", which can happen for number of reasons and results in new session not being cached. Patch fixes this by adding retry logic to "sess_id" & "id" allocations. Patch by Piotr Sikora. 
Modified: branches/stable-1.2/ branches/stable-1.2/src/event/ngx_event_openssl.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:15:34 UTC (rev 5153) +++ branches/stable-1.2 2013-03-29 17:17:45 UTC (rev 5154) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082 \ No newline at end of property Modified: branches/stable-1.2/src/event/ngx_event_openssl.c =================================================================== --- branches/stable-1.2/src/event/ngx_event_openssl.c 2013-03-29 17:15:34 UTC (rev 5153) +++ branches/stable-1.2/src/event/ngx_event_openssl.c 2013-03-29 17:17:45 UTC (rev 5154) @@ -1716,8 +1716,18 @@ } sess_id = ngx_slab_alloc_locked(shpool, sizeof(ngx_ssl_sess_id_t)); + if (sess_id == NULL) { - goto failed; + + /* drop the oldest non-expired session and try once more */ + + ngx_ssl_expire_sessions(cache, shpool, 0); + + sess_id = ngx_slab_alloc_locked(shpool, sizeof(ngx_ssl_sess_id_t)); + + if (sess_id == NULL) { + goto failed; + } } #if (NGX_PTR_SIZE == 8) @@ -1727,8 +1737,18 @@ #else id = ngx_slab_alloc_locked(shpool, sess->session_id_length); + if (id == NULL) { - goto failed; + + /* drop the oldest non-expired session and try once 
more */ + + ngx_ssl_expire_sessions(cache, shpool, 0); + + id = ngx_slab_alloc_locked(shpool, sess->session_id_length); + + if (id == NULL) { + goto failed; + } } #endif From mdounin at mdounin.ru Fri Mar 29 17:19:11 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:19:11 +0000 Subject: [nginx] svn commit: r5155 - in branches/stable-1.2: . src/http Message-ID: <20130329171911.DEF043F9C18@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:19:11 +0000 (Fri, 29 Mar 2013) New Revision: 5155 URL: http://trac.nginx.org/nginx/changeset/5155/nginx Log: Merge of r5083: fixed potential segfault in keepalive handler. Fixed potential segfault in ngx_http_keepalive_handler(). In case of error in the read event handling we close a connection by calling ngx_http_close_connection(), that also destroys connection pool. Thereafter, an attempt to free a buffer (added in r4892) that was allocated from the pool could cause SIGSEGV and is meaningless as well (the buffer already freed with the pool). 
Modified: branches/stable-1.2/ branches/stable-1.2/src/http/ngx_http_request.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:17:45 UTC (rev 5154) +++ branches/stable-1.2 2013-03-29 17:19:11 UTC (rev 5155) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083 \ No newline at end of property Modified: branches/stable-1.2/src/http/ngx_http_request.c =================================================================== --- branches/stable-1.2/src/http/ngx_http_request.c 2013-03-29 17:17:45 UTC (rev 5154) +++ branches/stable-1.2/src/http/ngx_http_request.c 2013-03-29 17:19:11 UTC (rev 5155) @@ -2743,6 +2743,7 @@ if (n == NGX_AGAIN) { if (ngx_handle_read_event(rev, 0) != NGX_OK) { ngx_http_close_connection(c); + return; } /* From mdounin at mdounin.ru Fri Mar 29 17:21:00 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:21:00 +0000 Subject: [nginx] svn commit: r5156 - in branches/stable-1.2: . 
src/http/modules Message-ID: <20130329172101.298DF3F9C0D@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:20:59 +0000 (Fri, 29 Mar 2013) New Revision: 5156 URL: http://trac.nginx.org/nginx/changeset/5156/nginx Log: Merge of r5098: mp4: fixed handling of too small mdat atoms. Mp4: fixed handling of too small mdat atoms (ticket #266). Patch by Gernot Vormayr (with minor changes). Modified: branches/stable-1.2/ branches/stable-1.2/src/http/modules/ngx_http_mp4_module.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:19:11 UTC (rev 5155) +++ branches/stable-1.2 2013-03-29 17:20:59 UTC (rev 5156) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098 \ No newline at end of property Modified: branches/stable-1.2/src/http/modules/ngx_http_mp4_module.c =================================================================== --- branches/stable-1.2/src/http/modules/ngx_http_mp4_module.c 2013-03-29 17:19:11 UTC (rev 5155) +++ branches/stable-1.2/src/http/modules/ngx_http_mp4_module.c 2013-03-29 17:20:59 UTC (rev 5156) @@ -750,6 +750,13 @@ *prev = &mp4->mdat_atom; + if (start_offset > mp4->mdat_data.buf->file_last) { + ngx_log_error(NGX_LOG_ERR, 
mp4->file.log, 0, + "start time is out mp4 mdat atom in \"%s\"", + mp4->file.name.data); + return NGX_ERROR; + } + adjustment = mp4->ftyp_size + mp4->moov_size + ngx_http_mp4_update_mdat_atom(mp4, start_offset) - start_offset; From mdounin at mdounin.ru Fri Mar 29 17:23:38 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:23:38 +0000 Subject: [nginx] svn commit: r5157 - in branches/stable-1.2: . src/http Message-ID: <20130329172339.6C38E3F9C0D@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:23:36 +0000 (Fri, 29 Mar 2013) New Revision: 5157 URL: http://trac.nginx.org/nginx/changeset/5157/nginx Log: Merge of r5109, r5128: removed unused prototype and macro. *) Removed unused prototype of ngx_http_find_server_conf(). This function prototype and its implementation was added in r90, but the implementation was removed in r97. *) Removed unused ngx_http_clear_variable() macro. Modified: branches/stable-1.2/ branches/stable-1.2/src/http/ngx_http.h branches/stable-1.2/src/http/ngx_http_variables.h Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:20:59 UTC (rev 5156) +++ branches/stable-1.2 2013-03-29 17:23:36 UTC (rev 5157) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098 
+/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5128 \ No newline at end of property Modified: branches/stable-1.2/src/http/ngx_http.h =================================================================== --- branches/stable-1.2/src/http/ngx_http.h 2013-03-29 17:20:59 UTC (rev 5156) +++ branches/stable-1.2/src/http/ngx_http.h 2013-03-29 17:23:36 UTC (rev 5157) @@ -94,7 +94,6 @@ ngx_str_t *args); -ngx_int_t ngx_http_find_server_conf(ngx_http_request_t *r); void ngx_http_update_location_config(ngx_http_request_t *r); void ngx_http_handler(ngx_http_request_t *r); void ngx_http_run_posted_requests(ngx_connection_t *c); Modified: branches/stable-1.2/src/http/ngx_http_variables.h =================================================================== --- branches/stable-1.2/src/http/ngx_http_variables.h 2013-03-29 17:20:59 UTC (rev 5156) +++ branches/stable-1.2/src/http/ngx_http_variables.h 2013-03-29 17:23:36 UTC (rev 5157) @@ -57,9 +57,6 @@ ngx_str_t *var, ngx_list_part_t *part, size_t prefix); -#define ngx_http_clear_variable(r, index) r->variables0[index].text.data = NULL; - - #if (NGX_PCRE) typedef struct { From mdounin at mdounin.ru Fri Mar 29 17:34:47 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:34:47 +0000 Subject: [nginx] svn commit: r5158 - in branches/stable-1.2: . src/http Message-ID: <20130329173448.972B93FA00C@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:34:45 +0000 (Fri, 29 Mar 2013) New Revision: 5158 URL: http://trac.nginx.org/nginx/changeset/5158/nginx Log: Merge of r5113, r5114: upstream: resolve errors handling. Upstream: call ngx_http_run_posted_requests() on resolve errors. 
If proxy_pass to a host with dynamic resolution was used to handle a subrequest, and host resolution failed, the main request wasn't run till something else happened on the connection. E.g. request to "/zzz" with the following configuration hanged: addition_types *; resolver 8.8.8.8; location /test { set $ihost xxx; proxy_pass http://$ihost; } location /zzz { add_after_body /test; return 200 "test"; } Report and original version of the patch by Lanshun Zhou, http://mailman.nginx.org/pipermail/nginx-devel/2013-March/003476.html. Modified: branches/stable-1.2/ branches/stable-1.2/src/http/ngx_http_upstream.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:23:36 UTC (rev 5157) +++ branches/stable-1.2 2013-03-29 17:34:45 UTC (rev 5158) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5128 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5128 \ No newline at end of property Modified: branches/stable-1.2/src/http/ngx_http_upstream.c =================================================================== --- branches/stable-1.2/src/http/ngx_http_upstream.c 2013-03-29 17:23:36 UTC (rev 5157) +++ branches/stable-1.2/src/http/ngx_http_upstream.c 2013-03-29 17:34:45 
UTC (rev 5158) @@ -865,11 +865,13 @@ static void ngx_http_upstream_resolve_handler(ngx_resolver_ctx_t *ctx) { + ngx_connection_t *c; ngx_http_request_t *r; ngx_http_upstream_t *u; ngx_http_upstream_resolved_t *ur; r = ctx->data; + c = r->connection; u = r->upstream; ur = u->resolved; @@ -881,7 +883,7 @@ ngx_resolver_strerror(ctx->state)); ngx_http_upstream_finalize_request(r, u, NGX_HTTP_BAD_GATEWAY); - return; + goto failed; } ur->naddrs = ctx->naddrs; @@ -906,13 +908,17 @@ if (ngx_http_upstream_create_round_robin_peer(r, ur) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_HTTP_INTERNAL_SERVER_ERROR); - return; + goto failed; } ngx_resolve_name_done(ctx); ur->ctx = NULL; ngx_http_upstream_connect(r, u); + +failed: + + ngx_http_run_posted_requests(c); } From mdounin at mdounin.ru Fri Mar 29 17:51:02 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:51:02 +0000 Subject: [nginx] svn commit: r5159 - in branches/stable-1.2: . src/core Message-ID: <20130329175102.9D7D13F9C14@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:51:00 +0000 (Fri, 29 Mar 2013) New Revision: 5159 URL: http://trac.nginx.org/nginx/changeset/5159/nginx Log: Merge of r5117, r5123: allocation errors handing during upgrade. *) Core: guard against failed allocation during binary upgrade. Patch by Piotr Sikora. *) Core: fixed resource leak if binary upgrade fails due to no memory. Found by Coverity (CID 992320). 
Modified: branches/stable-1.2/ branches/stable-1.2/src/core/nginx.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:34:45 UTC (rev 5158) +++ branches/stable-1.2 2013-03-29 17:51:00 UTC (rev 5159) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5128 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5128 \ No newline at end of property Modified: branches/stable-1.2/src/core/nginx.c =================================================================== --- branches/stable-1.2/src/core/nginx.c 2013-03-29 17:34:45 UTC (rev 5158) +++ branches/stable-1.2/src/core/nginx.c 2013-03-29 17:51:00 UTC (rev 5159) @@ -594,6 +594,10 @@ var = ngx_alloc(sizeof(NGINX_VAR) + cycle->listening.nelts * (NGX_INT32_LEN + 1) + 2, cycle->log); + if (var == NULL) { + ngx_free(env); + return NGX_INVALID_PID; + } p = ngx_cpymem(var, NGINX_VAR "=", sizeof(NGINX_VAR)); From mdounin at mdounin.ru Fri Mar 29 17:53:50 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:53:50 +0000 Subject: [nginx] svn commit: r5160 - in branches/stable-1.2: . 
src/http/modules Message-ID: <20130329175350.F0ECC3F9C14@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:53:47 +0000 (Fri, 29 Mar 2013) New Revision: 5160 URL: http://trac.nginx.org/nginx/changeset/5160/nginx Log: Merge of r5127: language in a comment. Fixed language in a comment preceding ngx_http_index_handler(). Modified: branches/stable-1.2/ branches/stable-1.2/src/http/modules/ngx_http_index_module.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:51:00 UTC (rev 5159) +++ branches/stable-1.2 2013-03-29 17:53:47 UTC (rev 5160) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5128 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5127-5128 \ No newline at end of property Modified: branches/stable-1.2/src/http/modules/ngx_http_index_module.c =================================================================== --- branches/stable-1.2/src/http/modules/ngx_http_index_module.c 2013-03-29 17:51:00 UTC (rev 5159) +++ branches/stable-1.2/src/http/modules/ngx_http_index_module.c 2013-03-29 17:53:47 UTC (rev 5160) @@ -85,12 +85,12 @@ /* * Try to open/test the first index file before the test of directory - * existence because 
valid requests should be much more than invalid ones. - * If the file open()/stat() would fail, then the directory stat() should - * be more quickly because some data is already cached in the kernel. + * existence because valid requests should prevail over invalid ones. + * If open()/stat() of a file will fail then stat() of a directory + * should be faster because kernel may have already cached some data. * Besides, Win32 may return ERROR_PATH_NOT_FOUND (NGX_ENOTDIR) at once. - * Unix has ENOTDIR error, however, it's less helpful than Win32's one: - * it only indicates that path contains an usual file in place of directory. + * Unix has ENOTDIR error; however, it's less helpful than Win32's one: + * it only indicates that path points to a regular file, not a directory. */ static ngx_int_t From mdounin at mdounin.ru Fri Mar 29 17:59:42 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 17:59:42 +0000 Subject: [nginx] svn commit: r5161 - in branches/stable-1.2: . src/http/modules Message-ID: <20130329175942.AF8A13FA0B7@mail.nginx.com> Author: mdounin Date: 2013-03-29 17:59:41 +0000 (Fri, 29 Mar 2013) New Revision: 5161 URL: http://trac.nginx.org/nginx/changeset/5161/nginx Log: Merge of r5129: split clients configuration parsing. Split clients: check length when parsing configuration. 
Modified: branches/stable-1.2/ branches/stable-1.2/src/http/modules/ngx_http_split_clients_module.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:53:47 UTC (rev 5160) +++ branches/stable-1.2 2013-03-29 17:59:41 UTC (rev 5161) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5127-5128 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5127-5129 \ No newline at end of property Modified: branches/stable-1.2/src/http/modules/ngx_http_split_clients_module.c =================================================================== --- branches/stable-1.2/src/http/modules/ngx_http_split_clients_module.c 2013-03-29 17:53:47 UTC (rev 5160) +++ branches/stable-1.2/src/http/modules/ngx_http_split_clients_module.c 2013-03-29 17:59:41 UTC (rev 5161) @@ -218,7 +218,7 @@ part->percent = 0; } else { - if (value[0].data[value[0].len - 1] != '%') { + if (value[0].len == 0 || value[0].data[value[0].len - 1] != '%') { goto invalid; } From mdounin at mdounin.ru Fri Mar 29 18:09:07 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 18:09:07 +0000 Subject: [nginx] svn commit: r5162 - in branches/stable-1.2: . 
misc Message-ID: <20130329180908.793D73F9FD0@mail.nginx.com> Author: mdounin Date: 2013-03-29 18:09:06 +0000 (Fri, 29 Mar 2013) New Revision: 5162 URL: http://trac.nginx.org/nginx/changeset/5162/nginx Log: Merge of r5130, r5131, r5132: support for Mercurial repositories. *) Misc: switch to single export operation in "zip" target. While exporting parts of the tree might be better in some cases, it is awfully slow overseas, and also requires unlocking ssh key multiple times. Exporting the whole repo and removing directories not needed for zip is faster here. It is also a required step before we can switch to Mercurial. *) Misc: removed unused "snapshot" target. *) Misc: support for Mercurial repositories. Modified: branches/stable-1.2/ branches/stable-1.2/misc/GNUmakefile Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 17:59:41 UTC (rev 5161) +++ branches/stable-1.2 2013-03-29 18:09:06 UTC (rev 5162) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5127-5129 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5127-5132 \ No newline at end of property Modified: branches/stable-1.2/misc/GNUmakefile 
=================================================================== --- branches/stable-1.2/misc/GNUmakefile 2013-03-29 17:59:41 UTC (rev 5161) +++ branches/stable-1.2/misc/GNUmakefile 2013-03-29 18:09:06 UTC (rev 5162) @@ -11,11 +11,8 @@ PCRE = pcre-8.32 -release: - rm -rf $(TEMP) +release: export - svn export -rHEAD . $(TEMP)/$(NGINX) - mv $(TEMP)/$(NGINX)/auto/configure $(TEMP)/$(NGINX) # delete incomplete sources @@ -39,7 +36,32 @@ tar -c -z -f $(NGINX).tar.gz --directory $(TEMP) $(NGINX) +export: + rm -rf $(TEMP) + + if [ -d .svn ]; then \ + svn export -rHEAD . $(TEMP)/$(NGINX); \ + else \ + hg archive -X '.hg*' $(TEMP)/$(NGINX); \ + fi + + RELEASE: + if [ -d .svn ]; then \ + $(MAKE) -f misc/GNUmakefile RELEASE.svn; \ + else \ + $(MAKE) -f misc/GNUmakefile RELEASE.hg; \ + fi + + $(MAKE) -f misc/GNUmakefile release + + +RELEASE.hg: + hg ci -m nginx-$(VER)-RELEASE + hg tag -m "release-$(VER) tag" release-$(VER) + + +RELEASE.svn: test -d $(TEMP) || mkdir -p $(TEMP) echo "nginx-$(VER)-RELEASE" > $(TEMP)/message @@ -52,37 +74,7 @@ svn up - $(MAKE) -f misc/GNUmakefile release - -snapshot: - rm -rf $(TEMP) - - mkdir -p $(TEMP) - svn export . 
$(TEMP)/$(NGINX) - - mv $(TEMP)/$(NGINX)/auto/configure $(TEMP)/$(NGINX) - - # delete incomplete sources - rm $(TEMP)/$(NGINX)/src/event/ngx_event_acceptex.c - rm $(TEMP)/$(NGINX)/src/event/ngx_event_connectex.c - rm $(TEMP)/$(NGINX)/src/event/modules/ngx_iocp_module.* - rm -r $(TEMP)/$(NGINX)/src/os/win32 - - rm -r $(TEMP)/$(NGINX)/src/mysql - - mv $(TEMP)/$(NGINX)/docs/text/LICENSE $(TEMP)/$(NGINX) - mv $(TEMP)/$(NGINX)/docs/text/README $(TEMP)/$(NGINX) - mv $(TEMP)/$(NGINX)/docs/html $(TEMP)/$(NGINX) - mv $(TEMP)/$(NGINX)/docs/man $(TEMP)/$(NGINX) - - $(MAKE) -f docs/GNUmakefile changes - - rm -r $(TEMP)/$(NGINX)/docs - rm -r $(TEMP)/$(NGINX)/misc - - tar -c -z -f $(NGINX).tar.gz --directory $(TEMP) $(NGINX) - win32: ./auto/configure \ --with-cc=cl \ @@ -120,26 +112,26 @@ --with-mail_ssl_module \ --with-ipv6 -zip: - rm -rf $(TEMP) + +zip: export rm -f $(NGINX).zip - mkdir -p $(TEMP)/$(NGINX)/docs + mkdir -p $(TEMP)/$(NGINX)/docs.new mkdir -p $(TEMP)/$(NGINX)/logs mkdir -p $(TEMP)/$(NGINX)/temp - svn export -rHEAD conf $(TEMP)/$(NGINX)/conf/ sed -i '' -e "s/$$/`printf '\r'`/" $(TEMP)/$(NGINX)/conf/* - svn export -rHEAD contrib $(TEMP)/$(NGINX)/contrib/ - svn export -rHEAD docs/html $(TEMP)/$(NGINX)/html/ + mv $(TEMP)/$(NGINX)/docs/text/LICENSE $(TEMP)/$(NGINX)/docs.new + mv $(TEMP)/$(NGINX)/docs/text/README $(TEMP)/$(NGINX)/docs.new + mv $(TEMP)/$(NGINX)/docs/html $(TEMP)/$(NGINX) - $(MAKE) -f docs/GNUmakefile changes + rm -r $(TEMP)/$(NGINX)/docs + mv $(TEMP)/$(NGINX)/docs.new $(TEMP)/$(NGINX)/docs cp -p $(OBJS)/nginx.exe $(TEMP)/$(NGINX) - cp -p docs/text/LICENSE $(TEMP)/$(NGINX)/docs/ - cp -p docs/text/README $(TEMP)/$(NGINX)/docs/ + $(MAKE) -f docs/GNUmakefile changes mv $(TEMP)/$(NGINX)/CHANGES* $(TEMP)/$(NGINX)/docs/ cp -p $(OBJS)/lib/$(OPENSSL)/LICENSE \ @@ -155,6 +147,10 @@ touch -r $(OBJS)/lib/$(ZLIB)/README \ $(TEMP)/$(NGINX)/docs/zlib.LICENSE + rm -r $(TEMP)/$(NGINX)/auto + rm -r $(TEMP)/$(NGINX)/misc + rm -r $(TEMP)/$(NGINX)/src + cd $(TEMP) && zip -r 
../$(NGINX).zip $(NGINX) From mdounin at mdounin.ru Fri Mar 29 18:16:30 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 18:16:30 +0000 Subject: [nginx] svn commit: r5163 - in branches/stable-1.2: . src/http src/http/modules Message-ID: <20130329181632.6FE573FA09B@mail.nginx.com> Author: mdounin Date: 2013-03-29 18:16:27 +0000 (Fri, 29 Mar 2013) New Revision: 5163 URL: http://trac.nginx.org/nginx/changeset/5163/nginx Log: Merge of r5133, r5134: peer.free() and peer.get() balance. *) Upstream: only call peer.free() if peer.get() selected a peer. *) Upstream: removed double-free workarounds in peer.free() methods. Modified: branches/stable-1.2/ branches/stable-1.2/src/http/modules/ngx_http_upstream_keepalive_module.c branches/stable-1.2/src/http/modules/ngx_http_upstream_least_conn_module.c branches/stable-1.2/src/http/ngx_http_upstream.c branches/stable-1.2/src/http/ngx_http_upstream_round_robin.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 18:09:06 UTC (rev 5162) +++ branches/stable-1.2 2013-03-29 18:16:27 UTC (rev 5163) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5127-5132 
+/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5127-5134 \ No newline at end of property Modified: branches/stable-1.2/src/http/modules/ngx_http_upstream_keepalive_module.c =================================================================== --- branches/stable-1.2/src/http/modules/ngx_http_upstream_keepalive_module.c 2013-03-29 18:09:06 UTC (rev 5162) +++ branches/stable-1.2/src/http/modules/ngx_http_upstream_keepalive_module.c 2013-03-29 18:16:27 UTC (rev 5163) @@ -37,8 +37,6 @@ ngx_event_save_peer_session_pt original_save_session; #endif - ngx_uint_t failed; /* unsigned:1 */ - } ngx_http_upstream_keepalive_peer_data_t; @@ -220,8 +218,6 @@ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, "get keepalive peer"); - kp->failed = 0; - /* ask balancer */ rc = kp->original_get_peer(pc, kp->data); @@ -282,18 +278,12 @@ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, "free keepalive peer"); - /* remember failed state - peer.free() may be called more than once */ - - if (state & NGX_PEER_FAILED) { - kp->failed = 1; - } - /* cache valid connections */ u = kp->upstream; c = pc->connection; - if (kp->failed + if (state & NGX_PEER_FAILED || c == NULL || c->read->eof || c->read->error Modified: branches/stable-1.2/src/http/modules/ngx_http_upstream_least_conn_module.c =================================================================== --- branches/stable-1.2/src/http/modules/ngx_http_upstream_least_conn_module.c 2013-03-29 18:09:06 UTC (rev 5162) +++ branches/stable-1.2/src/http/modules/ngx_http_upstream_least_conn_module.c 2013-03-29 18:16:27 UTC (rev 5163) @@ -353,10 +353,6 @@ return; } - if (state == 0 && pc->tries == 0) { - return; - } - 
lcp->conns[lcp->rrp.current]--; lcp->free_rr_peer(pc, &lcp->rrp, state); Modified: branches/stable-1.2/src/http/ngx_http_upstream.c =================================================================== --- branches/stable-1.2/src/http/ngx_http_upstream.c 2013-03-29 18:09:06 UTC (rev 5162) +++ branches/stable-1.2/src/http/ngx_http_upstream.c 2013-03-29 18:16:27 UTC (rev 5163) @@ -2846,14 +2846,16 @@ ngx_http_busy_unlock(u->conf->busy_lock, &u->busy_lock); #endif - if (ft_type == NGX_HTTP_UPSTREAM_FT_HTTP_404) { - state = NGX_PEER_NEXT; - } else { - state = NGX_PEER_FAILED; - } + if (u->peer.sockaddr) { - if (ft_type != NGX_HTTP_UPSTREAM_FT_NOLIVE) { + if (ft_type == NGX_HTTP_UPSTREAM_FT_HTTP_404) { + state = NGX_PEER_NEXT; + } else { + state = NGX_PEER_FAILED; + } + u->peer.free(&u->peer, u->peer.data, state); + u->peer.sockaddr = NULL; } if (ft_type == NGX_HTTP_UPSTREAM_FT_TIMEOUT) { @@ -3013,8 +3015,9 @@ u->finalize_request(r, rc); - if (u->peer.free) { + if (u->peer.free && u->peer.sockaddr) { u->peer.free(&u->peer, u->peer.data, 0); + u->peer.sockaddr = NULL; } if (u->peer.connection) { Modified: branches/stable-1.2/src/http/ngx_http_upstream_round_robin.c =================================================================== --- branches/stable-1.2/src/http/ngx_http_upstream_round_robin.c 2013-03-29 18:09:06 UTC (rev 5162) +++ branches/stable-1.2/src/http/ngx_http_upstream_round_robin.c 2013-03-29 18:16:27 UTC (rev 5163) @@ -584,10 +584,6 @@ ngx_log_debug2(NGX_LOG_DEBUG_HTTP, pc->log, 0, "free rr peer %ui %ui", pc->tries, state); - if (state == 0 && pc->tries == 0) { - return; - } - /* TODO: NGX_PEER_KEEPALIVE */ if (rrp->peers->single) { From mdounin at mdounin.ru Fri Mar 29 18:18:44 2013 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 29 Mar 2013 18:18:44 +0000 Subject: [nginx] svn commit: r5164 - in branches/stable-1.2: . 
src/core src/os/unix Message-ID: <20130329181845.953363F9E7F@mail.nginx.com> Author: mdounin Date: 2013-03-29 18:18:42 +0000 (Fri, 29 Mar 2013) New Revision: 5164 URL: http://trac.nginx.org/nginx/changeset/5164/nginx Log: Merge of r5138: use of NGX_FILE_ERROR. Use NGX_FILE_ERROR for handling file operations errors. On Win32 platforms 0 is used to indicate errors in file operations, so comparing against either -1 or NGX_OK is not portable. This was not much of an issue in patched code, since only ngx_fd_info() test is actually reachable on Win32 and in worst case it might result in bogus error log entry. Patch by Piotr Sikora. Modified: branches/stable-1.2/ branches/stable-1.2/src/core/nginx.c branches/stable-1.2/src/core/ngx_conf_file.c branches/stable-1.2/src/core/ngx_connection.c branches/stable-1.2/src/core/ngx_cycle.c branches/stable-1.2/src/os/unix/ngx_process_cycle.c Index: branches/stable-1.2 =================================================================== --- branches/stable-1.2 2013-03-29 18:16:27 UTC (rev 5163) +++ branches/stable-1.2 2013-03-29 18:18:42 UTC (rev 5164) Property changes on: branches/stable-1.2 ___________________________________________________________________ Modified: svn:mergeinfo ## -1 +1 ## -/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5127-5134 +/trunk:4611-4632,4636-4657,4671-4672,4674-4676,4682,4684-4699,4704-4706,4713,4736-4741,4754,4756-4771,4775,4777-4780,4782-4785,4795,4811-4820,4822-4824,4828-4835,4840-4844,4865-4872,4885-4887,4890-4896,4913-4925,4933-4934,4939,4944-4949,4961-4969,4973-4974,4976-4994,4997,4999-5005,5011-5025,5027-5031,5066,5070-5071,5078,5082-5083,5098,5109,5113-5114,5117,5123,5127-5134,5138 \ No 
newline at end of property Modified: branches/stable-1.2/src/core/nginx.c =================================================================== --- branches/stable-1.2/src/core/nginx.c 2013-03-29 18:16:27 UTC (rev 5163) +++ branches/stable-1.2/src/core/nginx.c 2013-03-29 18:18:42 UTC (rev 5164) @@ -637,7 +637,7 @@ ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module); - if (ngx_rename_file(ccf->pid.data, ccf->oldpid.data) != NGX_OK) { + if (ngx_rename_file(ccf->pid.data, ccf->oldpid.data) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_rename_file_n " %s to %s failed " "before executing new binary process \"%s\"", @@ -652,7 +652,9 @@ pid = ngx_execute(cycle, &ctx); if (pid == NGX_INVALID_PID) { - if (ngx_rename_file(ccf->oldpid.data, ccf->pid.data) != NGX_OK) { + if (ngx_rename_file(ccf->oldpid.data, ccf->pid.data) + == NGX_FILE_ERROR) + { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_rename_file_n " %s back to %s failed after " "an attempt to execute new binary process \"%s\"", Modified: branches/stable-1.2/src/core/ngx_conf_file.c =================================================================== --- branches/stable-1.2/src/core/ngx_conf_file.c 2013-03-29 18:16:27 UTC (rev 5163) +++ branches/stable-1.2/src/core/ngx_conf_file.c 2013-03-29 18:18:42 UTC (rev 5164) @@ -133,7 +133,7 @@ cf->conf_file = &conf_file; - if (ngx_fd_info(fd, &cf->conf_file->file.info) == -1) { + if (ngx_fd_info(fd, &cf->conf_file->file.info) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cf->log, ngx_errno, ngx_fd_info_n " \"%s\" failed", filename->data); } Modified: branches/stable-1.2/src/core/ngx_connection.c =================================================================== --- branches/stable-1.2/src/core/ngx_connection.c 2013-03-29 18:16:27 UTC (rev 5163) +++ branches/stable-1.2/src/core/ngx_connection.c 2013-03-29 18:18:42 UTC (rev 5164) @@ -412,7 +412,7 @@ } if (ngx_test_config) { - if (ngx_delete_file(name) == -1) { + if 
(ngx_delete_file(name) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno, ngx_delete_file_n " %s failed", name); } @@ -739,7 +739,7 @@ { u_char *name = ls[i].addr_text.data + sizeof("unix:") - 1; - if (ngx_delete_file(name) == -1) { + if (ngx_delete_file(name) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno, ngx_delete_file_n " %s failed", name); } Modified: branches/stable-1.2/src/core/ngx_cycle.c =================================================================== --- branches/stable-1.2/src/core/ngx_cycle.c 2013-03-29 18:16:27 UTC (rev 5163) +++ branches/stable-1.2/src/core/ngx_cycle.c 2013-03-29 18:18:42 UTC (rev 5164) @@ -679,7 +679,7 @@ ngx_log_error(NGX_LOG_WARN, cycle->log, 0, "deleting socket %s", name); - if (ngx_delete_file(name) == -1) { + if (ngx_delete_file(name) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno, ngx_delete_file_n " %s failed", name); } Modified: branches/stable-1.2/src/os/unix/ngx_process_cycle.c =================================================================== --- branches/stable-1.2/src/os/unix/ngx_process_cycle.c 2013-03-29 18:16:27 UTC (rev 5163) +++ branches/stable-1.2/src/os/unix/ngx_process_cycle.c 2013-03-29 18:18:42 UTC (rev 5164) @@ -647,7 +647,7 @@ if (ngx_rename_file((char *) ccf->oldpid.data, (char *) ccf->pid.data) - != NGX_OK) + == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, ngx_rename_file_n " %s back to %s failed "