[PATCH] HTTP/2: make http2 server support http1
Haitao Lv
i at lvht.net
Sun Mar 18 16:57:23 UTC 2018
ping @wbr
> On Mar 13, 2018, at 01:11, Valentin V. Bartenev <vbart at nginx.com> wrote:
>
> I will look when time permits.
>
> But at first glance the patch still looks more complicated than it should be.
>
> wbr, Valentin V. Bartenev
>
>
> On Tuesday 13 March 2018 00:44:28 吕海涛 wrote:
>> Is there anyone who would like to review this patch?
>>
>> Sent from my iPhone
>>
>>> On Mar 8, 2018, at 08:42, Haitao Lv <i at lvht.net> wrote:
>>>
>>> Sorry for the noise, but I had to fix a buffer overflow bug.
>>> Here is the latest patch.
>>>
>>> Please review it and share your comments. Thank you.
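>>>
>>> To summarize the idea: before handing the connection to ngx_http_v2_init(),
>>> the new handler reads only the first three bytes and compares them with the
>>> start of the HTTP/2 connection preface ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n").
>>> If they match, the connection is treated as HTTP/2; otherwise it falls back
>>> to ngx_http_wait_request_handler(). A rough standalone sketch of just the
>>> check (illustration only, not nginx code):
>>>
>>>     #include <stdio.h>
>>>     #include <string.h>
>>>
>>>     /* first bytes of the h2 preface "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" */
>>>     #define H2_PREFACE_START  "PRI"
>>>
>>>     static int
>>>     looks_like_h2(const unsigned char *buf, size_t len)
>>>     {
>>>         return len >= sizeof(H2_PREFACE_START) - 1
>>>                && memcmp(buf, H2_PREFACE_START,
>>>                          sizeof(H2_PREFACE_START) - 1) == 0;
>>>     }
>>>
>>>     int
>>>     main(void)
>>>     {
>>>         printf("%d\n", looks_like_h2((const unsigned char *) "PRI * HTTP/2.0", 14));  /* 1 */
>>>         printf("%d\n", looks_like_h2((const unsigned char *) "GET / HTTP/1.1", 14));  /* 0 */
>>>         return 0;
>>>     }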
>>>
>>>
>>> diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c
>>> index 89cfe77a..c51d8ace 100644
>>> --- a/src/http/ngx_http_request.c
>>> +++ b/src/http/ngx_http_request.c
>>> @@ -17,6 +17,10 @@ static ssize_t ngx_http_read_request_header(ngx_http_request_t *r);
>>> static ngx_int_t ngx_http_alloc_large_header_buffer(ngx_http_request_t *r,
>>>     ngx_uint_t request_line);
>>>
>>> +#if (NGX_HTTP_V2)
>>> +static void ngx_http_wait_v2_preface_handler(ngx_event_t *rev);
>>> +#endif
>>> +
>>> static ngx_int_t ngx_http_process_header_line(ngx_http_request_t *r,
>>>     ngx_table_elt_t *h, ngx_uint_t offset);
>>> static ngx_int_t ngx_http_process_unique_header_line(ngx_http_request_t *r,
>>> @@ -321,7 +325,7 @@ ngx_http_init_connection(ngx_connection_t *c)
>>>
>>> #if (NGX_HTTP_V2)
>>>     if (hc->addr_conf->http2) {
>>> -        rev->handler = ngx_http_v2_init;
>>> +        rev->handler = ngx_http_wait_v2_preface_handler;
>>>     }
>>> #endif
>>>
>>> @@ -377,6 +381,110 @@ ngx_http_init_connection(ngx_connection_t *c)
>>> }
>>>
>>>
>>> +#if (NGX_HTTP_V2)
>>> +static void
>>> +ngx_http_wait_v2_preface_handler(ngx_event_t *rev)
>>> +{
>>> +    size_t                size;
>>> +    ssize_t               n;
>>> +    ngx_buf_t            *b;
>>> +    ngx_connection_t     *c;
>>> +    static const u_char   preface[] = "PRI";
>>> +
>>> +    c = rev->data;
>>> +    size = sizeof(preface) - 1;
>>> +
>>> +    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
>>> +                   "http wait h2 preface handler");
>>> +
>>> +    if (rev->timedout) {
>>> +        ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out");
>>> +        ngx_http_close_connection(c);
>>> +        return;
>>> +    }
>>> +
>>> +    if (c->close) {
>>> +        ngx_http_close_connection(c);
>>> +        return;
>>> +    }
>>> +
>>> +    b = c->buffer;
>>> +
>>> +    if (b == NULL) {
>>> +        b = ngx_create_temp_buf(c->pool, size);
>>> +        if (b == NULL) {
>>> +            ngx_http_close_connection(c);
>>> +            return;
>>> +        }
>>> +
>>> +        c->buffer = b;
>>> +
>>> +    } else if (b->start == NULL) {
>>> +
>>> +        b->start = ngx_palloc(c->pool, size);
>>> +        if (b->start == NULL) {
>>> +            ngx_http_close_connection(c);
>>> +            return;
>>> +        }
>>> +
>>> +        b->pos = b->start;
>>> +        b->last = b->start;
>>> +        b->end = b->last + size;
>>> +    }
>>> +
>>> +    n = c->recv(c, b->last, b->end - b->last);
>>> +
>>> +    if (n == NGX_AGAIN) {
>>> +
>>> +        if (!rev->timer_set) {
>>> +            ngx_add_timer(rev, c->listening->post_accept_timeout);
>>> +            ngx_reusable_connection(c, 1);
>>> +        }
>>> +
>>> +        if (ngx_handle_read_event(rev, 0) != NGX_OK) {
>>> +            ngx_http_close_connection(c);
>>> +            return;
>>> +        }
>>> +
>>> +        /*
>>> +         * Avoid holding c->buffer's memory for an idle connection,
>>> +         * but only free it if nothing has been read into it yet.
>>> +         */
>>> +        if (b->last == b->start && ngx_pfree(c->pool, b->start) == NGX_OK) {
>>> +            b->start = NULL;
>>> +        }
>>> +
>>> +        return;
>>> +    }
>>> +
>>> +    if (n == NGX_ERROR) {
>>> +        ngx_http_close_connection(c);
>>> +        return;
>>> +    }
>>> +
>>> +    if (n == 0) {
>>> +        ngx_log_error(NGX_LOG_INFO, c->log, 0,
>>> +                      "client closed connection");
>>> +        ngx_http_close_connection(c);
>>> +        return;
>>> +    }
>>> +
>>> +    b->last += n;
>>> +
>>> +    if (b->last == b->end) {
>>> +        /* b will be freed in ngx_http_v2_init/ngx_http_wait_request_handler */
>>> +
>>> +        if (ngx_strncmp(b->start, preface, size) == 0) {
>>> +            ngx_http_v2_init(rev);
>>> +        } else {
>>> +            rev->handler = ngx_http_wait_request_handler;
>>> +            ngx_http_wait_request_handler(rev);
>>> +        }
>>> +    }
>>> +}
>>> +#endif
>>> +
>>> +
>>> static void
>>> ngx_http_wait_request_handler(ngx_event_t *rev)
>>> {
>>> @@ -430,6 +538,22 @@ ngx_http_wait_request_handler(ngx_event_t *rev)
>>>         b->pos = b->start;
>>>         b->last = b->start;
>>>         b->end = b->last + size;
>>> +    } else {
>>> +        /* keep the bytes read during h2 preface detection */
>>> +        n = b->last - b->start;
>>> +        p = ngx_palloc(c->pool, size + n);
>>> +        if (p == NULL) {
>>> +            ngx_http_close_connection(c);
>>> +            return;
>>> +        }
>>> +
>>> +        ngx_memcpy(p, b->start, n);
>>> +        ngx_pfree(c->pool, b->start);
>>> +
>>> +        b->start = p;
>>> +        b->pos = b->start;
>>> +        b->last = b->start + n;
>>> +        b->end = b->last + size;
>>>     }
>>>
>>>     n = c->recv(c, b->last, size);
>>> diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c
>>> index d9df0f90..e36bf382 100644
>>> --- a/src/http/v2/ngx_http_v2.c
>>> +++ b/src/http/v2/ngx_http_v2.c
>>> @@ -231,6 +231,8 @@ static ngx_http_v2_parse_header_t ngx_http_v2_parse_headers[] = {
>>> void
>>> ngx_http_v2_init(ngx_event_t *rev)
>>> {
>>> +    size_t                     size;
>>> +    ngx_buf_t                 *b;
>>>     ngx_connection_t          *c;
>>>     ngx_pool_cleanup_t        *cln;
>>>     ngx_http_connection_t     *hc;
>>> @@ -262,6 +264,23 @@ ngx_http_v2_init(ngx_event_t *rev)
>>>         return;
>>>     }
>>>
>>> +    b = c->buffer;
>>> +
>>> +    if (b != NULL) {
>>> +        size = b->last - b->start;
>>> +
>>> +        if (size > h2mcf->recv_buffer_size) {
>>> +            size = h2mcf->recv_buffer_size;
>>> +        }
>>> +
>>> +        ngx_memcpy(h2mcf->recv_buffer, b->start, size);
>>> +        h2c->state.buffer_used = size;
>>> +
>>> +        ngx_pfree(c->pool, b->start);
>>> +        ngx_pfree(c->pool, b);
>>> +        c->buffer = NULL;
>>> +    }
>>> +
>>>     h2c->connection = c;
>>>     h2c->http_connection = hc;
>>>
>>> @@ -381,13 +400,15 @@ ngx_http_v2_read_handler(ngx_event_t *rev)
>>>     h2mcf = ngx_http_get_module_main_conf(h2c->http_connection->conf_ctx,
>>>                                           ngx_http_v2_module);
>>>
>>> -    available = h2mcf->recv_buffer_size - 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE;
>>> +    available = h2mcf->recv_buffer_size - h2c->state.buffer_used - 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE;
>>>
>>>     do {
>>>         p = h2mcf->recv_buffer;
>>>
>>> -        ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE);
>>>         end = p + h2c->state.buffer_used;
>>> +        if (h2c->state.buffer_used == 0) {
>>> +            ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE);
>>> +        }
>>>
>>>         n = c->recv(c, end, available);
>>>
>>>
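>>>
>>> A note on the ngx_http_request.c side, in case it helps the review: when
>>> the connection turns out to be HTTP/1.x, c->buffer already holds the bytes
>>> read during detection, so ngx_http_wait_request_handler() now grows the
>>> buffer to the configured header buffer size while keeping those bytes.
>>> A standalone sketch of just that step, using plain malloc() instead of the
>>> pool allocator (illustration only, not nginx code):
>>>
>>>     #include <stdlib.h>
>>>     #include <string.h>
>>>
>>>     /* grow a buffer to "used + size" bytes, preserving the "used"
>>>      * bytes that were already read during protocol detection */
>>>     static unsigned char *
>>>     grow_keeping_prefix(unsigned char *old, size_t used, size_t size)
>>>     {
>>>         unsigned char  *p;
>>>
>>>         p = malloc(used + size);
>>>         if (p == NULL) {
>>>             return NULL;
>>>         }
>>>
>>>         memcpy(p, old, used);
>>>         free(old);
>>>
>>>         return p;
>>>     }
>>>
>>>     int
>>>     main(void)
>>>     {
>>>         unsigned char  *b = malloc(3);
>>>
>>>         if (b == NULL) {
>>>             return 1;
>>>         }
>>>
>>>         memcpy(b, "GET", 3);
>>>
>>>         b = grow_keeping_prefix(b, 3, 1024);   /* b[0..2] is still "GET" */
>>>         free(b);
>>>
>>>         return 0;
>>>     }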
>>>
>>>
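>>>
>>> And on the ngx_http_v2.c side: the carried-over bytes are copied to the
>>> start of recv_buffer, so the read handler has to subtract state.buffer_used
>>> from the space it lets recv() fill, otherwise it would write past the old
>>> limit. Rough arithmetic with illustrative numbers (not the real
>>> configuration values):
>>>
>>>     #include <assert.h>
>>>     #include <stddef.h>
>>>
>>>     int
>>>     main(void)
>>>     {
>>>         size_t  recv_buffer_size = 65536;  /* example value */
>>>         size_t  state_buffer = 16;         /* like NGX_HTTP_V2_STATE_BUFFER_SIZE */
>>>         size_t  buffer_used = 3;           /* bytes already placed in recv_buffer */
>>>
>>>         /* recv() starts writing at recv_buffer + buffer_used, so the
>>>          * room it may use shrinks by buffer_used */
>>>         size_t  available = recv_buffer_size - buffer_used - 2 * state_buffer;
>>>
>>>         /* the write range still ends where it used to end */
>>>         assert(buffer_used + available == recv_buffer_size - 2 * state_buffer);
>>>
>>>         return 0;
>>>     }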
>>>> On Mar 6, 2018, at 03:14, Maxim Dounin <mdounin at mdounin.ru> wrote:
>>>>
>>>> Hello!
>>>>
>>>> On Mon, Mar 05, 2018 at 11:52:57PM +0800, Haitao Lv wrote:
>>>>
>>>> [...]
>>>>
>>>>>> Overall, the patch looks like a hack and introduces too much
>>>>>> complexity for this feature. While I understand the reasoning,
>>>>>> the proposed implementation cannot be accepted.
>>>>>
>>>>> Could you clarify whether it is this feature that is not accepted, or only this patch?
>>>>>
>>>>> If this feature is not needed, I will terminate this thread.
>>>>>
>>>>> If it is only this patch that looks like a hack, would you offer any advice on
>>>>> how to write it more cleanly?
>>>>
>>>> We've previously discussed this with Valentin, and our position is
>>>> as follows:
>>>>
>>>> - The feature itself (autodetection between HTTP/2 and HTTP/1.x
>>>> protocols) might be usable, and we can consider adding it if
>>>> there will be a good and simple enough patch. (Moreover, we
>>>> think that this probably should be the default if "listen ...
>>>> http2" is configured - that is, no "http1" option.)
>>>>
>>>> - The patch suggested certainly doesn't meet the above criteria,
>>>> and it does not look like it can be fixed.
>>>>
>>>> We don't know if a good and simple enough implementation is at all
>>>> possible though. One of the possible approaches was already
>>>> proposed by Valentin (detect HTTP/2 or HTTP/1.x before starting
>>>> processing, may be similar to how we handle http-to-https
>>>> requests), but it's not immediately clear if it will work or not.
>>>> Sorry, but please don't expect any of us to provide further
>>>> guidance.
>>>>
>>
>>
>>
>>