QUIC: reworked congestion control mechanism.
Roman Arutyunyan
arut at nginx.com
Wed Dec 7 15:01:52 UTC 2022
Hi,
Thanks for the patch.
On Tue, Dec 06, 2022 at 02:35:37PM +0000, 朱宇 wrote:
> Hi,
>
> # HG changeset patch
> # User Yu Zhu <lishu.zy at alibaba-inc.com>
> # Date 1670326031 -28800
> # Tue Dec 06 19:27:11 2022 +0800
> # Branch quic
> # Node ID 9a47ff1223bb32c8ddb146d731b395af89769a97
> # Parent 1a320805265db14904ca9deaae8330f4979619ce
> QUIC: reworked congestion control mechanism.
>
> 1. move RTT measurement and congestion control to struct ngx_quic_path_t,
> because RTT and congestion control are properties of the path.
I think this part should be moved out to a separate patch.
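For reference, here's roughly how I'd expect ngx_quic_path_t to look after
that first patch (just a sketch; the RTT field names are assumed to mirror
what ngx_quic_connection_t has today):

    struct ngx_quic_path_s {
        /* existing path fields unchanged */

        /* moved from ngx_quic_connection_t */
        ngx_msec_t                first_rtt;
        ngx_msec_t                latest_rtt;
        ngx_msec_t                avg_rtt;
        ngx_msec_t                min_rtt;
        ngx_msec_t                rttvar;

        ngx_quic_congestion_t     congestion;
    };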
> 2. introduced struct "ngx_quic_congestion_ops_t" to wrap congestion control
> callback functions and extracted the Reno algorithm from ngx_event_quic_ack.c.
The biggest question about this part is how extensible this approach is.
We are planning to implement more congestion control algorithms in the
future and need a framework that would allow us to do that. Even CUBIC
needs more data fields than we have now, and BBR will probably need much
more than that. I'm not sure how we would add those fields given the
proposed modular design. Also, we need to make sure the API is sufficient
for future algorithms.
I suggest that we start with the first part, which moves congestion control
to the path object. Until we have at least one other congestion control
algorithm supported, it's hard to come up with a good API for it, so I
think we can postpone the second part until then.
Also, I think CUBIC can be hardcoded into the existing Reno code without
a modular redesign.
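To illustrate what I mean, CUBIC state could simply live next to the Reno
fields, with a branch where the window is grown. This is only a sketch;
the CUBIC fields, the "cubic" flag and the ngx_quic_cubic_window() helper
below are hypothetical:

    typedef struct {
        size_t                    in_flight;
        size_t                    window;
        size_t                    ssthresh;
        ngx_msec_t                recovery_start;

        /* hypothetical CUBIC state, unused when running plain Reno */
        size_t                    w_max;         /* window before last loss */
        ngx_msec_t                epoch_start;   /* start of growth epoch */
    } ngx_quic_congestion_t;

    /* in the ack handler, congestion avoidance branch */

    if (qc->conf->cubic) {                        /* hypothetical flag */
        cg->window = ngx_quic_cubic_window(cg, ngx_current_msec);

    } else {
        cg->window += qc->tp.max_udp_payload_size * f->plen / cg->window;
    }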
> No functional changes.
[..]
> diff -r 1a320805265d -r 9a47ff1223bb src/event/quic/congestion/ngx_quic_reno.c
> --- /dev/null Thu Jan 01 00:00:00 1970 +0000
> +++ b/src/event/quic/congestion/ngx_quic_reno.c Tue Dec 06 19:27:11 2022 +0800
> @@ -0,0 +1,133 @@
> +
> +/*
> + * Copyright (C) Nginx, Inc.
> + */
> +
> +
> +#include <ngx_config.h>
> +#include <ngx_core.h>
> +#include <ngx_event.h>
> +#include <ngx_event_quic_connection.h>
> +
> +
> +static void ngx_quic_reno_on_init(ngx_connection_t *c, ngx_quic_congestion_t *cg);
> +static ngx_int_t ngx_quic_reno_on_ack(ngx_connection_t *c, ngx_quic_frame_t *f);
> +static ngx_int_t ngx_quic_reno_on_lost(ngx_connection_t *c, ngx_quic_frame_t *f);
> +
> +
> +ngx_quic_congestion_ops_t ngx_quic_reno = {
> + ngx_string("reno"),
> + ngx_quic_reno_on_init,
> + ngx_quic_reno_on_ack,
> + ngx_quic_reno_on_lost
> +};
> +
> +
> +static void
> +ngx_quic_reno_on_init(ngx_connection_t *c, ngx_quic_congestion_t *cg)
> +{
> + ngx_quic_connection_t *qc;
> +
> + qc = ngx_quic_get_connection(c);
> +
> + cg->window = ngx_min(10 * qc->tp.max_udp_payload_size,
> + ngx_max(2 * qc->tp.max_udp_payload_size,
> + 14720));
> + cg->ssthresh = (size_t) -1;
> + cg->recovery_start = ngx_current_msec;
> +}
> +
> +
> +static ngx_int_t
> +ngx_quic_reno_on_ack(ngx_connection_t *c, ngx_quic_frame_t *f)
> +{
> + ngx_msec_t timer;
> + ngx_quic_path_t *path;
> + ngx_quic_connection_t *qc;
> + ngx_quic_congestion_t *cg;
> +
> + qc = ngx_quic_get_connection(c);
> + path = qc->path;
What if the packet was sent on a different path?
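One way to handle this could be to remember the path at send time and use
it here instead of assuming qc->path; the "path" member on the frame below
is hypothetical:

    /* when the packet carrying the frame is sent */
    f->path = qc->path;

    /* in ngx_quic_reno_on_ack() and ngx_quic_reno_on_lost() */
    path = f->path;
    cg = &path->congestion;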
> +
> + cg = &path->congestion;
> +
> + cg->in_flight -= f->plen;
> +
> + timer = f->last - cg->recovery_start;
> +
> + if ((ngx_msec_int_t) timer <= 0) {
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion ack recovery win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> +
> + return NGX_DONE;
> + }
> +
> + if (cg->window < cg->ssthresh) {
> + cg->window += f->plen;
> +
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion slow start win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> +
> + } else {
> + cg->window += qc->tp.max_udp_payload_size * f->plen / cg->window;
> +
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion avoidance win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> + }
> +
> + /* prevent recovery_start from wrapping */
> +
> + timer = cg->recovery_start - ngx_current_msec + qc->tp.max_idle_timeout * 2;
> +
> + if ((ngx_msec_int_t) timer < 0) {
> + cg->recovery_start = ngx_current_msec - qc->tp.max_idle_timeout * 2;
> + }
> +
> + return NGX_OK;
> +}
> +
> +
> +static ngx_int_t
> +ngx_quic_reno_on_lost(ngx_connection_t *c, ngx_quic_frame_t *f)
> +{
> + ngx_msec_t timer;
> + ngx_quic_path_t *path;
> + ngx_quic_connection_t *qc;
> + ngx_quic_congestion_t *cg;
> +
> + qc = ngx_quic_get_connection(c);
> + path = qc->path;
Same here.
> +
> + cg = &path->congestion;
> +
> + cg->in_flight -= f->plen;
> + f->plen = 0;
> +
> + timer = f->last - cg->recovery_start;
> +
> + if ((ngx_msec_int_t) timer <= 0) {
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion lost recovery win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> +
> + return NGX_DONE;
> + }
> +
> + cg->recovery_start = ngx_current_msec;
> + cg->window /= 2;
> +
> + if (cg->window < qc->tp.max_udp_payload_size * 2) {
> + cg->window = qc->tp.max_udp_payload_size * 2;
> + }
> +
> + cg->ssthresh = cg->window;
> +
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion lost win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> +
> + return NGX_OK;
> +}
[..]
--
Roman Arutyunyan