(bug?) Timeout when proxy-pass 0 byte file
Maxim Dounin
mdounin at mdounin.ru
Fri Oct 2 18:37:14 MSD 2009
Hello!
On Fri, Oct 02, 2009 at 01:29:56AM +0400, Maxim Dounin wrote:
> Hello!
>
> On Wed, Sep 30, 2009 at 10:48:27PM +0400, Maxim Dounin wrote:
>
> > Hello!
> >
> > On Wed, Sep 30, 2009 at 12:42:52PM +0400, Maxim Dounin wrote:
> >
> > > On Wed, Sep 30, 2009 at 11:26:59AM +0800, tOmasEn wrote:
> >
> > [...]
> >
> > > > For the initial request, when there is no cache yet, everything is OK. The
> > > > following request to the same URL will wait until timeout.
> > >
> > > Ok, so the problem is the cache. Thanks, I'm able to reproduce it
> > > here. I'll take a look later today at how to fix it.
> >
> > Patch.
>
> No, this patch is wrong. Corrected patch attached.
No, this patch is wrong too, as it will trigger "[alert] ... zero
size buf in output" messages in the error log for empty cached
subrequests. Corrected patch attached.
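For reference, the alert comes from a sanity check in nginx's output code: a
buffer that carries no data is only let through when it is "special", that is,
marked flush, last_buf or sync. Here is a minimal sketch of that rule, using
the real ngx_buf_size()/ngx_buf_special() macros; the wrapper function is
hypothetical and only illustrates the check, it is not part of any of the
patches:

#include <ngx_config.h>
#include <ngx_core.h>

/* Illustration only: mirrors the condition behind the
 * "zero size buf in output" alert.  An empty buffer is acceptable as
 * long as it is special (flush, last_buf or sync set, and no data
 * attached); an empty buffer without any of those flags is treated as
 * a bug in the module that produced it. */
static ngx_int_t
buf_is_acceptable(ngx_buf_t *b)
{
    if (ngx_buf_size(b) == 0 && !ngx_buf_special(b)) {
        return NGX_ERROR;   /* this is the case the alert complains about */
    }

    return NGX_OK;
}

That is why the corrected patch below marks the empty cached buffer with
b->sync = 1 instead of leaving it bare.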
Maxim Dounin
>
> Maxim Dounin
>
> >
> > Maxim Dounin
> >
> > >
> > > Maxim Dounin
> > >
> > > [...]
> > >
> > > >
> > > > On Tue, Sep 29, 2009 at 10:48 PM, Maxim Dounin <mdounin at mdounin.ru> wrote:
> > > >
> > > > > Hello!
> > > > >
> > > > > On Tue, Sep 29, 2009 at 10:08:51PM +0800, tOmasEn wrote:
> > > > >
> > > > > > I've been experiencing very slow page loads for a while when using nginx
> > > > > > as a frontend (with proxy_pass).
> > > > > >
> > > > > > After some testing and debugging, I found that it always times out on the
> > > > > > response for a 0-byte file.
> > > > > >
> > > > > > So I think there might be a bug when nginx is running in proxy mode and
> > > > > > serving 0-byte files. The frontend seems to assume there should be more
> > > > > > data and waits until timeout, or something like this.
> > > > >
> > > > > Could you please provide your nginx -V output and a debug log?
> > > > >
> > > > > Maxim Dounin
> > > > >
> > > > > >
> > > > > > Btw, nginx is great. Thanks.
> > > > > >
> > > > > > tomasen
> > > > > >
> > > > > > --
> > > > > > Sent from my mobile device
> > > > > >
> > > > >
> > > > >
> > >
>
> > # HG changeset patch
> > # User Maxim Dounin <mdounin at mdounin.ru>
> > # Date 1254336299 -14400
> > # Node ID 3a18992ceed641398ac911ad7230924ba2f28929
> > # Parent 7688992d2abb6759b0a91c4b0cf86802d27cbc4a
> > Cache: send correct special buffer for empty responses.
> >
> > diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c
> > --- a/src/http/ngx_http_file_cache.c
> > +++ b/src/http/ngx_http_file_cache.c
> > @@ -798,7 +798,7 @@ ngx_http_cache_send(ngx_http_request_t *
> >
> >      size = c->length - c->body_start;
> >      if (size == 0) {
> > -        return rc;
> > +        return ngx_http_send_special(r, NGX_HTTP_LAST);
> >      }
> >
> >      b->file_pos = c->body_start;
>
> # HG changeset patch
> # User Maxim Dounin <mdounin at mdounin.ru>
> # Date 1254432452 -14400
> # Node ID 3d5336ab6a43f7243dd7aad3dde4f21c291009e8
> # Parent 7688992d2abb6759b0a91c4b0cf86802d27cbc4a
> Cache: fix sending of empty and stale responses.
>
> For empty cached responses the last buffer wasn't sent, and hence the client
> got a timeout instead of a response.
>
> For stale non-empty responses two last buffers were sent, resulting in a
> duplicate final chunk if chunked encoding was used.
>
> diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c
> --- a/src/http/ngx_http_file_cache.c
> +++ b/src/http/ngx_http_file_cache.c
> @@ -767,7 +767,6 @@ ngx_http_file_cache_update(ngx_http_requ
>  ngx_int_t
>  ngx_http_cache_send(ngx_http_request_t *r)
>  {
> -    off_t              size;
>      ngx_int_t          rc;
>      ngx_buf_t         *b;
>      ngx_chain_t        out;
> @@ -796,15 +795,10 @@ ngx_http_cache_send(ngx_http_request_t *
>          return rc;
>      }
>
> -    size = c->length - c->body_start;
> -    if (size == 0) {
> -        return rc;
> -    }
> -
>      b->file_pos = c->body_start;
>      b->file_last = c->length;
>
> -    b->in_file = size ? 1: 0;
> +    b->in_file = (c->length - c->body_start) ? 1: 0;
>      b->last_buf = (r == r->main) ? 1: 0;
>      b->last_in_chain = 1;
>
> diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
> --- a/src/http/ngx_http_upstream.c
> +++ b/src/http/ngx_http_upstream.c
> @@ -2948,7 +2948,7 @@ ngx_http_upstream_finalize_request(ngx_h
>
>      r->connection->log->action = "sending to client";
>
> -    if (rc == 0) {
> +    if (rc == 0 && !r->cached) {
>          rc = ngx_http_send_special(r, NGX_HTTP_LAST);
>      }
>
-------------- next part --------------
# HG changeset patch
# User Maxim Dounin <mdounin at mdounin.ru>
# Date 1254493235 -14400
# Node ID 85ace06b373969189cdeb96198107bc1e96996c8
# Parent 7688992d2abb6759b0a91c4b0cf86802d27cbc4a
Cache: fix sending of empty and stale responses.

For empty cached responses the last buffer wasn't sent, and hence the client
got a timeout instead of a response.

For stale non-empty responses two last buffers were sent, resulting in a
duplicate final chunk if chunked encoding was used.

diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c
--- a/src/http/ngx_http_file_cache.c
+++ b/src/http/ngx_http_file_cache.c
@@ -767,7 +767,6 @@ ngx_http_file_cache_update(ngx_http_requ
 ngx_int_t
 ngx_http_cache_send(ngx_http_request_t *r)
 {
-    off_t              size;
     ngx_int_t          rc;
     ngx_buf_t         *b;
     ngx_chain_t        out;
@@ -796,15 +795,16 @@ ngx_http_cache_send(ngx_http_request_t *
         return rc;
     }
 
-    size = c->length - c->body_start;
-    if (size == 0) {
-        return rc;
-    }
-
     b->file_pos = c->body_start;
     b->file_last = c->length;
 
-    b->in_file = size ? 1: 0;
+    if (c->length - c->body_start) {
+        b->in_file = 1;
+
+    } else {
+        b->sync = 1;
+    }
+
     b->last_buf = (r == r->main) ? 1: 0;
     b->last_in_chain = 1;
 
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
--- a/src/http/ngx_http_upstream.c
+++ b/src/http/ngx_http_upstream.c
@@ -2948,7 +2948,7 @@ ngx_http_upstream_finalize_request(ngx_h
 
     r->connection->log->action = "sending to client";
 
-    if (rc == 0) {
+    if (rc == 0 && !r->cached) {
         rc = ngx_http_send_special(r, NGX_HTTP_LAST);
     }
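For anyone applying the change by hand, the relevant block of
ngx_http_cache_send() reads roughly as follows once the patch is applied
(fragment assembled from the hunks above; surrounding code and error handling
omitted):

    b->file_pos = c->body_start;
    b->file_last = c->length;

    if (c->length - c->body_start) {
        /* non-empty cached body: serve it from the cache file */
        b->in_file = 1;

    } else {
        /* empty cached body: nothing to send, but mark the buffer sync
           so it counts as special and the output chain accepts it */
        b->sync = 1;
    }

    b->last_buf = (r == r->main) ? 1: 0;
    b->last_in_chain = 1;

Together with the !r->cached check added to
ngx_http_upstream_finalize_request(), the last buffer for a cached response
is now sent exactly once: empty cached responses complete immediately instead
of timing out, and stale responses no longer get a duplicate final chunk.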