[PATCH] Dynamic rate limiting for limit_req module
J Carter
jordanc.carter at outlook.com
Sat Jan 7 06:04:09 UTC 2023
Hello,
Please find below my revised patch with style and code bug fixes
included. The only notable change in behavior is that invalid rate
values are now simply ignored, in the same fashion as an empty key, and
that complex value evaluation is skipped entirely when something other
than a variable is used as the rate.
Here is a brief overview of the issue and of how I've resolved it to
match the behavior of the current best solution for multiple rates.
> "The fundamental problem with dynamic rates in limit_req, which is
> a leaky bucket limiter, is that the rate is a property which
> applies to multiple requests as accumulated in the bucket (and
> this is basically why it is configured in the limit_req_zone, and
> not in limit_req), while the dynamically calculated rate is a
> property of the last request.
>
> This can easily result in counter-intuitive behaviour.
> For example, consider 5 requests with 1 second interval, assuming
> burst 2, and rate being 1r/m or 100r/s depending on some request
> properties:
>
> - 1st request, rate 1r/m, request is accepted
> - 2nd request, rate 1r/m, request is accepted
> - 3rd request, rate 1r/m, request is rejected, since there is no
> room in the bucket
> - 4th request, rate 100r/s, request is accepted
> - 5th request, rate 1r/m, request is accepted (unexpectedly)
>
> Note that the 5th request is accepted, while it is mostly
> equivalent to the 3rd request, and one could expect it to be
> rejected. But it happens to be accepted because the 4th
> request "cleared" the bucket."
No additional logic was required to fix this. Simply appending the $rate
variable to the key is sufficient for a user to avoid unwanted changes to
the accumulated excess when the rate jumps between extremes (as seen above).
Instead of:
limit_req_zone $binary_remote_addr zone=one:10m rate=$rate;
use this:
limit_req_zone $binary_remote_addr$rate zone=one:10m rate=$rate;
This ensures that a change in the rate variable's value starts new
accounting of excess, since a new state is created to reflect the new key
- this follows the behavior previously given in this thread for a static
set of rates. As an additional benefit, the syntax is more compact and
readable within the nginx configuration, at the cost of a slight memory
overhead.
(the solution previously given for multiple defined/static rates)
> ...
> map $traffic_tier $limit_free {
> free $binary_remote_addr;
> }
>
> map $traffic_tier $limit_basic {
> basic $binary_remote_addr;
> }
>
> map $traffic_tier $limit_premium {
> premium $binary_remote_addr;
> }
>
> limit_req_zone $limit_free zone=free:10m rate=1r/m;
> limit_req_zone $limit_basic zone=basic:10m rate=2r/m;
> limit_req_zone $limit_premium zone=premium:10m rate=1r/s;
>
> limit_req zone=free (burst=x nodelay);
> limit_req zone=basic (burst=x nodelay);
> limit_req zone=premium (burst=x nodelay);
to this:
...
map $traffic_tier $rate {
    free      1r/m;
    basic     2r/m;
    premium   1r/s;
}

limit_req_zone $binary_remote_addr$rate zone=one:10m rate=$rate;
limit_req zone=one burst=x nodelay;
(Appending $rate to the key is optional - if your rates don't flip-flop
between extremes, omitting it will save memory and allow more states.)
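
To illustrate why this works, here is a minimal standalone sketch (plain C,
not nginx code - bucket_t and find_bucket are made-up names). limit_req
keeps one node of accumulated excess per distinct key, so appending the
rate to the key means a rate change selects a different node whose excess
starts from zero:

#include <stdio.h>
#include <string.h>

typedef struct {
    char  key[64];     /* e.g. $binary_remote_addr with the rate appended */
    long  excess;      /* accumulated excess, 1 corresponds to 0.001 requests */
} bucket_t;

static bucket_t  buckets[16];
static int       nbuckets;

static bucket_t *
find_bucket(const char *key)
{
    int  i;

    for (i = 0; i < nbuckets; i++) {
        if (strcmp(buckets[i].key, key) == 0) {
            return &buckets[i];               /* existing state for this key */
        }
    }

    /* unseen key (e.g. same client, new rate): fresh state, excess = 0 */
    strncpy(buckets[nbuckets].key, key, sizeof(buckets[nbuckets].key) - 1);
    return &buckets[nbuckets++];
}

int
main(void)
{
    bucket_t  *slow, *fast;

    slow = find_bucket("10.0.0.1" "1r/m");    /* key = addr + rate, as above */
    slow->excess = 2000;                      /* excess built up at 1r/m */

    fast = find_bucket("10.0.0.1" "100r/s");  /* rate changed -> new key */

    printf("%ld %ld\n", slow->excess, fast->excess);    /* prints: 2000 0 */
    return 0;
}

The real module of course uses a red-black tree in the shared zone rather
than a linear scan, but the keying behavior is the same.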
The compact syntax is merely a side benefit - the main purpose of the
patch is being able to obtain a rate limit, in the form of a string of
some description, from a request, or from any variable that is populated
in time for the limit_req module to use directly as the rate limit value.
The approach detailed above works for that too.
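
For reference, this is roughly how the handler change below interprets the
evaluated rate string at request time, shown as a minimal standalone sketch
(plain C rather than the patch itself; parse_rate is a made-up name). The
module stores rates in integer units where 1 corresponds to 0.001 r/s, and
anything that doesn't parse is simply ignored:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* returns the rate in internal units (1 == 0.001 r/s), or 0 to ignore it */
static unsigned
parse_rate(const char *s)
{
    size_t    len = strlen(s);
    unsigned  rate, scale = 1;

    /* the shortest valid value is "rate=1r/s" (9 characters) */
    if (len < 9 || strncmp(s, "rate=", 5) != 0) {
        return 0;
    }

    rate = (unsigned) atoi(s + 5);       /* digits between "rate=" and the unit */
    if (rate == 0) {
        return 0;
    }

    if (strcmp(s + len - 3, "r/m") == 0) {
        scale = 60;

    } else if (strcmp(s + len - 3, "r/s") != 0) {
        return 0;                        /* unknown unit: ignore, like an empty key */
    }

    return rate * 1000 / scale;
}

int
main(void)
{
    printf("%u\n", parse_rate("rate=1r/m"));      /* 16     */
    printf("%u\n", parse_rate("rate=100r/s"));    /* 100000 */
    printf("%u\n", parse_rate("rate=bogus"));     /* 0 - ignored */
    return 0;
}

In the patch this per-request parsing only happens when the rate argument
actually contains variables; a purely static rate= is still parsed once at
configuration time (ctx->rate != 0), so there is no extra cost for existing
configurations.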
debian@debian:~/projects/nginx-merc/nginx-tests$ prove limit_req*
limit_req2.t ......... ok
limit_req_delay.t .... ok
limit_req_dry_run.t .. ok
limit_req.t .......... ok
All tests successful.
Files=4, Tests=40, 5 wallclock secs ( 0.05 usr 0.01 sys + 0.48 cusr 0.12 csys = 0.66 CPU)
Result: PASS
# HG changeset patch
# User jordanc.carter at outlook.com
# Date 1673064770 0
# Sat Jan 07 04:12:50 2023 +0000
# Branch dynamic-rate-limiting
# Node ID f80741fb734e0a4f83f2f96436ff300c4f3125aa
# Parent 07b0bee87f32be91a33210bc06973e07c4c1dac9
Variables support for limit_req_zone's rate
diff -r 07b0bee87f32 -r f80741fb734e src/http/modules/ngx_http_limit_req_module.c
--- a/src/http/modules/ngx_http_limit_req_module.c Wed Dec 21 14:53:27 2022 +0300
+++ b/src/http/modules/ngx_http_limit_req_module.c Sat Jan 07 04:12:50 2023 +0000
@@ -26,6 +26,7 @@
/* integer value, 1 corresponds to 0.001 r/s */
ngx_uint_t excess;
ngx_uint_t count;
+ ngx_uint_t rate;
u_char data[1];
} ngx_http_limit_req_node_t;
@@ -41,6 +42,7 @@
ngx_http_limit_req_shctx_t *sh;
ngx_slab_pool_t *shpool;
/* integer value, 1 corresponds to 0.001 r/s */
+ ngx_http_complex_value_t cvr;
ngx_uint_t rate;
ngx_http_complex_value_t key;
ngx_http_limit_req_node_t *node;
@@ -66,9 +68,9 @@
static void ngx_http_limit_req_delay(ngx_http_request_t *r);
static ngx_int_t ngx_http_limit_req_lookup(ngx_http_limit_req_limit_t *limit,
- ngx_uint_t hash, ngx_str_t *key, ngx_uint_t *ep, ngx_uint_t account);
+ ngx_uint_t hash, ngx_str_t *key, ngx_uint_t *ep, ngx_uint_t account, ngx_uint_t rate);
static ngx_msec_t ngx_http_limit_req_account(ngx_http_limit_req_limit_t *limits,
- ngx_uint_t n, ngx_uint_t *ep, ngx_http_limit_req_limit_t **limit);
+ ngx_uint_t n, ngx_uint_t *ep, ngx_http_limit_req_limit_t **limit, ngx_uint_t rate);
static void ngx_http_limit_req_unlock(ngx_http_limit_req_limit_t *limits,
ngx_uint_t n);
static void ngx_http_limit_req_expire(ngx_http_limit_req_ctx_t *ctx,
@@ -195,10 +197,12 @@
ngx_http_limit_req_handler(ngx_http_request_t *r)
{
uint32_t hash;
- ngx_str_t key;
+ ngx_str_t key, s;
ngx_int_t rc;
ngx_uint_t n, excess;
+ ngx_uint_t scale, rate;
ngx_msec_t delay;
+ u_char *p;
ngx_http_limit_req_ctx_t *ctx;
ngx_http_limit_req_conf_t *lrcf;
ngx_http_limit_req_limit_t *limit, *limits;
@@ -211,6 +215,7 @@
limits = lrcf->limits.elts;
excess = 0;
+ rate = 0;
rc = NGX_DECLINED;
@@ -243,10 +248,44 @@
hash = ngx_crc32_short(key.data, key.len);
+ if (ctx->rate == 0) {
+
+ scale = 1;
+
+ if (ngx_http_complex_value(r, &ctx->cvr, &s) != NGX_OK) {
+ ngx_http_limit_req_unlock(limits, n);
+ return NGX_HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ if (s.len < 9) {
+ continue;
+ }
+
+ rate = (ngx_uint_t) ngx_atoi(s.data + 5, s.len - 8);
+
+ if (rate == 0) {
+ continue;
+ }
+
+ p = s.data + s.len - 3;
+
+ if (ngx_strncmp(p, "r/m", 3) == 0) {
+ scale = 60;
+
+ } else if (ngx_strncmp(p, "r/s", 3) != 0) {
+ continue;
+ }
+
+ rate = rate * 1000 / scale;
+
+ } else {
+ rate = ctx->rate;
+ }
+
ngx_shmtx_lock(&ctx->shpool->mutex);
rc = ngx_http_limit_req_lookup(limit, hash, &key, &excess,
- (n == lrcf->limits.nelts - 1));
+ (n == lrcf->limits.nelts - 1), rate);
ngx_shmtx_unlock(&ctx->shpool->mutex);
@@ -291,7 +330,7 @@
excess = 0;
}
- delay = ngx_http_limit_req_account(limits, n, &excess, &limit);
+ delay = ngx_http_limit_req_account(limits, n, &excess, &limit, rate);
if (!delay) {
r->main->limit_req_status = NGX_HTTP_LIMIT_REQ_PASSED;
@@ -403,7 +442,7 @@
static ngx_int_t
ngx_http_limit_req_lookup(ngx_http_limit_req_limit_t *limit, ngx_uint_t hash,
- ngx_str_t *key, ngx_uint_t *ep, ngx_uint_t account)
+ ngx_str_t *key, ngx_uint_t *ep, ngx_uint_t account, ngx_uint_t rate)
{
size_t size;
ngx_int_t rc, excess;
@@ -451,7 +490,9 @@
ms = 0;
}
- excess = lr->excess - ctx->rate * ms / 1000 + 1000;
+ excess = lr->excess - rate * ms / 1000 + 1000;
+
+ lr->rate = rate;
if (excess < 0) {
excess = 0;
@@ -510,6 +551,7 @@
lr->len = (u_short) key->len;
lr->excess = 0;
+ lr->rate = rate;
ngx_memcpy(lr->data, key->data, key->len);
@@ -534,7 +576,7 @@
static ngx_msec_t
ngx_http_limit_req_account(ngx_http_limit_req_limit_t *limits, ngx_uint_t n,
- ngx_uint_t *ep, ngx_http_limit_req_limit_t **limit)
+ ngx_uint_t *ep, ngx_http_limit_req_limit_t **limit, ngx_uint_t rate)
{
ngx_int_t excess;
ngx_msec_t now, delay, max_delay;
@@ -548,8 +590,7 @@
max_delay = 0;
} else {
- ctx = (*limit)->shm_zone->data;
- max_delay = (excess - (*limit)->delay) * 1000 / ctx->rate;
+ max_delay = (excess - (*limit)->delay) * 1000 / rate;
}
while (n--) {
@@ -572,7 +613,7 @@
ms = 0;
}
- excess = lr->excess - ctx->rate * ms / 1000 + 1000;
+ excess = lr->excess - lr->rate * ms / 1000 + 1000;
if (excess < 0) {
excess = 0;
@@ -593,7 +634,7 @@
continue;
}
- delay = (excess - limits[n].delay) * 1000 / ctx->rate;
+ delay = (excess - limits[n].delay) * 1000 / lr->rate;
if (delay > max_delay) {
max_delay = delay;
@@ -676,7 +717,7 @@
return;
}
- excess = lr->excess - ctx->rate * ms / 1000;
+ excess = lr->excess - lr->rate * ms / 1000;
if (excess > 0) {
return;
@@ -860,7 +901,7 @@
}
size = 0;
- rate = 1;
+ rate = 0;
scale = 1;
name.len = 0;
@@ -902,6 +943,20 @@
if (ngx_strncmp(value[i].data, "rate=", 5) == 0) {
+ ngx_memzero(&ccv, sizeof(ngx_http_compile_complex_value_t));
+
+ ccv.cf = cf;
+ ccv.value = &value[i];
+ ccv.complex_value = &ctx->cvr;
+
+ if (ngx_http_compile_complex_value(&ccv) != NGX_OK) {
+ return NGX_CONF_ERROR;
+ }
+
+ if (ctx->cvr.lengths != NULL) {
+ continue;
+ }
+
len = value[i].len;
p = value[i].data + len - 3;