[PATCH] Always use default server configs for large buffers allocation
Daniil Bondarev
xonatius at gmail.com
Mon Feb 20 07:51:02 UTC 2017
Hello,
On Sat, Feb 18, 2017 at 6:10 PM, Maxim Dounin <mdounin at mdounin.ru> wrote:
> Hello!
>
> On Thu, Feb 16, 2017 at 05:00:20PM -0800, Daniil Bondarev wrote:
>
>> # HG changeset patch
>> # User Daniil Bondarev <bondarev at amazon.com>
>> # Date 1485286710 28800
>> # Tue Jan 24 11:38:30 2017 -0800
>> # Node ID 8cd694e06443aaa1ed0601108681fa1c6f7297e0
>> # Parent d84f48e571e449ee6c072a8d52cdea8e06b88ef7
>> Always use default server configs for large buffers allocation
>>
>> A single HTTP connection can flip between the default server and
>> virtual servers: depending on the parsed Host header of the current
>> request, nginx switches the current request's configs. Currently this
>> behavior might cause a buffer overrun while adding a new buffer to
>> hc->busy, if hc->busy was allocated with a config containing fewer
>> large_client_header_buffers than the current one.
>>
>> This change makes nginx always use large_client_header_buffers from
>> the http_connection config, which is the default server config.
>>
>> diff -r d84f48e571e4 -r 8cd694e06443 src/http/ngx_http_request.c
>> --- a/src/http/ngx_http_request.c Tue Jan 24 17:02:19 2017 +0300
>> +++ b/src/http/ngx_http_request.c Tue Jan 24 11:38:30 2017 -0800
>> @@ -1447,7 +1447,9 @@ ngx_http_alloc_large_header_buffer(ngx_h
>>
>> old = request_line ? r->request_start : r->header_name_start;
>>
>> - cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module);
>> + hc = r->http_connection;
>> +
>> + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module);
>>
>> if (r->state != 0
>> && (size_t) (r->header_in->pos - old)
>> @@ -1456,8 +1458,6 @@ ngx_http_alloc_large_header_buffer(ngx_h
>> return NGX_DECLINED;
>> }
>>
>> - hc = r->http_connection;
>> -
>> if (hc->nfree) {
>> b = hc->free[--hc->nfree];
>
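To restate the failure mode from the quoted commit message: hc->busy is
sized only once, using whatever server config is current at that moment,
while the bound check on later requests of the same keepalive connection
uses the then-current virtual server's config. A simplified sketch of the
pre-patch path in ngx_http_alloc_large_header_buffer() (the hc->free reuse
branch and error checks are elided):

    cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module);
    /* cscf follows the virtual server selected by the Host header of the
     * current request, so it can differ between requests on one connection */

    if (hc->nbusy < cscf->large_client_header_buffers.num) {

        if (hc->busy == NULL) {
            /* sized only once, with whatever num is in effect right now */
            hc->busy = ngx_palloc(r->connection->pool,
                cscf->large_client_header_buffers.num * sizeof(ngx_buf_t *));
        }

        b = ngx_create_temp_buf(r->connection->pool,
                                cscf->large_client_header_buffers.size);

        /* a later request that selects a server with a larger num still
         * passes the bound check above even though hc->busy was allocated
         * smaller, so this write runs past the end of the array */
        hc->busy[hc->nbusy++] = b;
    }
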
> Thanks for spotting this.
>
> Another part of this problem is with hc->free. If the number of
> large header buffers configured in a virtual server is less than
> the one in the default server, saving buffers from hc->busy to
> hc->free for pipelined requests will also overflow the allocated
> buffer. To fix this as well, the patch should be extended with
> something like this:
>
> @@ -2876,7 +2875,7 @@ ngx_http_set_keepalive(ngx_http_request_
> * Now we would move the large header buffers to the free list.
> */
>
> - cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module);
> + cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module);
>
> if (hc->free == NULL) {
> hc->free = ngx_palloc(c->pool,
>
Ah, good catch!
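
For the record, the overflow here comes from the keepalive path sizing
hc->free with the current (virtual server's) config, while hc->busy may
hold buffers counted against the default server's larger num. A simplified
view of the pre-patch code in ngx_http_set_keepalive() (error handling
elided):

    cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module);

    if (hc->free == NULL) {
        /* sized with the virtual server's num ... */
        hc->free = ngx_palloc(c->pool,
            cscf->large_client_header_buffers.num * sizeof(ngx_buf_t *));
    }

    /* ... but hc->busy can hold buffers allocated under the default
     * server's larger num, so this loop can write past the end of hc->free */
    for (i = 0; i < hc->nbusy - 1; i++) {
        f = hc->busy[i];
        hc->free[hc->nfree++] = f;
        f->pos = f->start;
        f->last = f->start;
    }

    hc->busy[0] = b;
    hc->nbusy = 1;
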
> An alternative approach would be to convert hc->busy / hc->free to
> use chain links. This preserves the ability to have different
> large_client_header_buffers settings in different virtual hosts,
> as long as the host is known before the limit is reached. Patch:
>
> # HG changeset patch
> # User Maxim Dounin <mdounin at mdounin.ru>
> # Date 1487467695 -10800
> # Sun Feb 19 04:28:15 2017 +0300
> # Node ID 3412c0c019f0f24432bac8b3e65ec03ba69073cb
> # Parent 87cf6ddb41c216876d13cffa5e637a61b159362c
> Converted hc->busy/hc->free to use chain links.
>
> Most notably, this fixes possible buffer overflows if the number of
> buffers in a virtual server differs from the one in the default server.
>
> Reported by Daniil Bondarev.
>
> diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c
> --- a/src/http/ngx_http_request.c
> +++ b/src/http/ngx_http_request.c
> @@ -549,7 +549,7 @@ ngx_http_create_request(ngx_connection_t
>
> ngx_set_connection_log(r->connection, clcf->error_log);
>
> - r->header_in = hc->nbusy ? hc->busy[0] : c->buffer;
> + r->header_in = hc->busy ? hc->busy->buf : c->buffer;
>
> if (ngx_list_init(&r->headers_out.headers, r->pool, 20,
> sizeof(ngx_table_elt_t))
> @@ -1431,6 +1431,7 @@ ngx_http_alloc_large_header_buffer(ngx_h
> {
> u_char *old, *new;
> ngx_buf_t *b;
> + ngx_chain_t *cl;
> ngx_http_connection_t *hc;
> ngx_http_core_srv_conf_t *cscf;
>
> @@ -1460,8 +1461,11 @@ ngx_http_alloc_large_header_buffer(ngx_h
>
> hc = r->http_connection;
>
> - if (hc->nfree) {
> - b = hc->free[--hc->nfree];
> + if (hc->free) {
> + cl = hc->free;
> + hc->free = cl->next;
> +
> + b = cl->buf;
>
> ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
> "http large header free: %p %uz",
> @@ -1469,20 +1473,19 @@ ngx_http_alloc_large_header_buffer(ngx_h
>
> } else if (hc->nbusy < cscf->large_client_header_buffers.num) {
>
> - if (hc->busy == NULL) {
> - hc->busy = ngx_palloc(r->connection->pool,
> - cscf->large_client_header_buffers.num * sizeof(ngx_buf_t *));
> - if (hc->busy == NULL) {
> - return NGX_ERROR;
> - }
> - }
> -
> b = ngx_create_temp_buf(r->connection->pool,
> cscf->large_client_header_buffers.size);
> if (b == NULL) {
> return NGX_ERROR;
> }
>
> + cl = ngx_alloc_chain_link(r->connection->pool);
> + if (cl == NULL) {
> + return NGX_ERROR;
> + }
> +
> + cl->buf = b;
> +
> ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
> "http large header alloc: %p %uz",
> b->pos, b->end - b->last);
> @@ -1491,7 +1494,9 @@ ngx_http_alloc_large_header_buffer(ngx_h
> return NGX_DECLINED;
> }
>
> - hc->busy[hc->nbusy++] = b;
> + cl->next = hc->busy;
> + hc->busy = cl;
> + hc->nbusy++;
>
> if (r->state == 0) {
> /*
> @@ -2835,12 +2840,11 @@ static void
> ngx_http_set_keepalive(ngx_http_request_t *r)
> {
> int tcp_nodelay;
> - ngx_int_t i;
> ngx_buf_t *b, *f;
> + ngx_chain_t *cl, *ln;
> ngx_event_t *rev, *wev;
> ngx_connection_t *c;
> ngx_http_connection_t *hc;
> - ngx_http_core_srv_conf_t *cscf;
> ngx_http_core_loc_conf_t *clcf;
>
> c = r->connection;
> @@ -2876,27 +2880,15 @@ ngx_http_set_keepalive(ngx_http_request_
> * Now we would move the large header buffers to the free list.
> */
>
> - cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module);
> -
> - if (hc->free == NULL) {
> - hc->free = ngx_palloc(c->pool,
> - cscf->large_client_header_buffers.num * sizeof(ngx_buf_t *));
> -
> - if (hc->free == NULL) {
> - ngx_http_close_request(r, 0);
> - return;
> - }
> - }
> -
> - for (i = 0; i < hc->nbusy - 1; i++) {
> - f = hc->busy[i];
> - hc->free[hc->nfree++] = f;
> + for (cl = hc->busy; cl->next; cl = cl->next) {
> + f = cl->next->buf;
> f->pos = f->start;
> f->last = f->start;
> }
>
> - hc->busy[0] = b;
> - hc->nbusy = 1;
> + cl->next = hc->free;
> + hc->free = hc->busy->next;
> + hc->busy->next = NULL;
> }
> }
>
> @@ -2966,28 +2958,24 @@ ngx_http_set_keepalive(ngx_http_request_
> b->last = b->start;
> }
>
> - ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "hc free: %p %i",
> - hc->free, hc->nfree);
> -
> - if (hc->free) {
> - for (i = 0; i < hc->nfree; i++) {
> - ngx_pfree(c->pool, hc->free[i]->start);
> - hc->free[i] = NULL;
> - }
> -
> - hc->nfree = 0;
> + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, "hc free: %p",
> + hc->free);
> +
> + for (cl = hc->free; cl; /* void */) {
> + ln = cl;
> + cl = cl->next;
> + ngx_pfree(c->pool, ln->buf->start);
> + ngx_free_chain(r->pool, ln);
> }
>
> ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "hc busy: %p %i",
> hc->busy, hc->nbusy);
>
> - if (hc->busy) {
> - for (i = 0; i < hc->nbusy; i++) {
> - ngx_pfree(c->pool, hc->busy[i]->start);
> - hc->busy[i] = NULL;
> - }
> -
> - hc->nbusy = 0;
> + for (cl = hc->busy; cl; /* void */) {
> + ln = cl;
> + cl = cl->next;
> + ngx_pfree(c->pool, ln->buf->start);
> + ngx_free_chain(r->pool, ln);
> }
>
> #if (NGX_HTTP_SSL)
> diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h
> --- a/src/http/ngx_http_request.h
> +++ b/src/http/ngx_http_request.h
> @@ -309,11 +309,10 @@ typedef struct {
> #endif
> #endif
>
> - ngx_buf_t **busy;
> + ngx_chain_t *busy;
> ngx_int_t nbusy;
>
> - ngx_buf_t **free;
> - ngx_int_t nfree;
> + ngx_chain_t *free;
>
> unsigned ssl:1;
> unsigned proxy_protocol:1;
>
Sure, that works (: Thanks!
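
To spell out why the chain-link version is immune to the mismatch: there is
no fixed-size array whose capacity could disagree between server configs.
hc->nbusy is still checked against the current vhost's num, but it no
longer has to match the size of any earlier allocation, and the keepalive
path just splices the tail of hc->busy onto hc->free. For reference, the
allocation side from the patch above, with comments added:

    cl = ngx_alloc_chain_link(r->connection->pool);
    if (cl == NULL) {
        return NGX_ERROR;
    }

    cl->buf = b;

    /* prepend to the busy list: no array behind it, so nothing can be
     * overflowed even when large_client_header_buffers.num differs
     * between virtual servers */
    cl->next = hc->busy;
    hc->busy = cl;
    hc->nbusy++;
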
> --
> Maxim Dounin
> http://nginx.org/