[PATCH] Cache: provide naive feedback loop for uncacheable URLs
Piotr Sikora
piotr at cloudflare.com
Mon Apr 27 23:38:47 UTC 2015
# HG changeset patch
# User Piotr Sikora <piotr at cloudflare.com>
# Date 1430177825 25200
# Mon Apr 27 16:37:05 2015 -0700
# Node ID 1e7878138228e3decb868f38eddc38937d55f403
# Parent 96e22e4f1b03ff15a774c6ed34d74b897af32c55
Cache: provide naive feedback loop for uncacheable URLs.
The cache lock provides a mechanism that allows only one request
for the same URL to be sent upstream at a time. All other requests
must wait until the lock is released or no longer needed.

While this works as designed and protects the upstream from a
request stampede when the response is cacheable, it results in a
huge performance degradation for popular but uncacheable URLs,
due to the artificial delay added to the upstream requests.

To make things even worse, the lock acquisition is retried every
500ms and is prone to lock starvation: there is no queue, so newly
accepted requests can jump ahead and acquire the lock before any
of the already pending requests get a chance to retry, which can
lead to huge spikes in response times and even timeouts.

This feedback loop provides a mechanism that allows requests for
URLs known to be uncacheable to skip the cache lock immediately.
Signed-off-by: Piotr Sikora <piotr at cloudflare.com>
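
For context, here is a simplified, self-contained sketch of the
feedback loop described above (illustrative only, not nginx code;
the struct and function names are made up for the example): a
per-URL node remembers whether the last upstream response turned
out to be cacheable, and later requests consult that flag before
deciding to wait on the cache lock.

    /*
     * Minimal sketch of the feedback loop, assuming a shared per-URL
     * node with "updating" and "cacheable" flags (names illustrative).
     */
    #include <stdio.h>

    typedef struct {
        unsigned  updating:1;   /* a request is currently fetching upstream    */
        unsigned  cacheable:1;  /* verdict from the previous upstream response */
    } cache_node_t;

    /* Returns 1 if the caller must wait for the lock, 0 if it may go upstream. */
    static int
    cache_lock(cache_node_t *node)
    {
        if (!node->cacheable) {
            /* known-uncacheable URL: don't serialize requests behind the lock */
            return 0;
        }

        if (!node->updating) {
            node->updating = 1;     /* this request becomes the lock holder */
            return 0;
        }

        return 1;                   /* cacheable and already being updated: wait */
    }

    /* Called once the upstream response has been classified. */
    static void
    cache_free(cache_node_t *node, int cacheable)
    {
        node->updating = 0;
        node->cacheable = cacheable;    /* feed the verdict back into the node */
    }

    int
    main(void)
    {
        cache_node_t  node = { .updating = 0, .cacheable = 1 };

        /* first request: acquires the lock, upstream response is uncacheable */
        printf("request #1 waits: %d\n", cache_lock(&node));
        cache_free(&node, 0);

        /* second request: skips the lock thanks to the recorded feedback */
        printf("request #2 waits: %d\n", cache_lock(&node));

        return 0;
    }

As in the patch below, the flag is reset to "cacheable" whenever
the cache node is renewed or successfully updated, so a URL that
later becomes cacheable again is not permanently exempted from
the lock.
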
diff -r 96e22e4f1b03 -r 1e7878138228 src/http/ngx_http_cache.h
--- a/src/http/ngx_http_cache.h Mon Apr 27 18:51:18 2015 +0300
+++ b/src/http/ngx_http_cache.h Mon Apr 27 16:37:05 2015 -0700
@@ -50,7 +50,8 @@ typedef struct {
unsigned exists:1;
unsigned updating:1;
unsigned deleting:1;
- /* 11 unused bits */
+ unsigned cacheable:1;
+ /* 10 unused bits */
ngx_file_uniq_t uniq;
time_t expire;
@@ -171,7 +172,8 @@ ngx_int_t ngx_http_file_cache_set_header
void ngx_http_file_cache_update(ngx_http_request_t *r, ngx_temp_file_t *tf);
void ngx_http_file_cache_update_header(ngx_http_request_t *r);
ngx_int_t ngx_http_cache_send(ngx_http_request_t *);
-void ngx_http_file_cache_free(ngx_http_cache_t *c, ngx_temp_file_t *tf);
+void ngx_http_file_cache_free(ngx_http_cache_t *c, ngx_temp_file_t *tf,
+ ngx_uint_t cacheable);
time_t ngx_http_file_cache_valid(ngx_array_t *cache_valid, ngx_uint_t status);
char *ngx_http_file_cache_set_slot(ngx_conf_t *cf, ngx_command_t *cmd,
diff -r 96e22e4f1b03 -r 1e7878138228 src/http/ngx_http_file_cache.c
--- a/src/http/ngx_http_file_cache.c Mon Apr 27 18:51:18 2015 +0300
+++ b/src/http/ngx_http_file_cache.c Mon Apr 27 16:37:05 2015 -0700
@@ -399,6 +399,7 @@ static ngx_int_t
ngx_http_file_cache_lock(ngx_http_request_t *r, ngx_http_cache_t *c)
{
ngx_msec_t now, timer;
+ ngx_uint_t cacheable;
ngx_http_file_cache_t *cache;
if (!c->lock) {
@@ -412,6 +413,7 @@ ngx_http_file_cache_lock(ngx_http_reques
ngx_shmtx_lock(&cache->shpool->mutex);
timer = c->node->lock_time - now;
+ cacheable = c->node->cacheable;
if (!c->node->updating || (ngx_msec_int_t) timer <= 0) {
c->node->updating = 1;
@@ -422,11 +424,11 @@ ngx_http_file_cache_lock(ngx_http_reques
ngx_shmtx_unlock(&cache->shpool->mutex);
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
- "http file cache lock u:%d wt:%M",
- c->updating, c->wait_time);
-
- if (c->updating) {
+ ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+ "http file cache lock u:%d c:%d wt:%M",
+ c->updating, cacheable, c->wait_time);
+
+ if (c->updating || !cacheable) {
return NGX_DECLINED;
}
@@ -880,6 +882,7 @@ renew:
fcn->uniq = 0;
fcn->body_start = 0;
fcn->fs_size = 0;
+ fcn->cacheable = 1;
done:
@@ -1300,6 +1303,7 @@ ngx_http_file_cache_update_variant(ngx_h
c->node->count--;
c->node->updating = 0;
+ c->node->cacheable = 1;
c->node = NULL;
ngx_shmtx_unlock(&cache->shpool->mutex);
@@ -1389,6 +1393,7 @@ ngx_http_file_cache_update(ngx_http_requ
}
c->node->updating = 0;
+ c->node->cacheable = 1;
ngx_shmtx_unlock(&cache->shpool->mutex);
}
@@ -1578,7 +1583,8 @@ ngx_http_cache_send(ngx_http_request_t *
void
-ngx_http_file_cache_free(ngx_http_cache_t *c, ngx_temp_file_t *tf)
+ngx_http_file_cache_free(ngx_http_cache_t *c, ngx_temp_file_t *tf,
+ ngx_uint_t cacheable)
{
ngx_http_file_cache_t *cache;
ngx_http_file_cache_node_t *fcn;
@@ -1599,6 +1605,7 @@ ngx_http_file_cache_free(ngx_http_cache_
if (c->updating && fcn->lock_time == c->lock_time) {
fcn->updating = 0;
+ fcn->cacheable = cacheable;
}
if (c->error) {
@@ -1609,7 +1616,9 @@ ngx_http_file_cache_free(ngx_http_cache_
fcn->valid_msec = c->valid_msec;
}
- } else if (!fcn->exists && fcn->count == 0 && c->min_uses == 1) {
+ } else if (!fcn->exists && fcn->count == 0 && c->min_uses == 1
+ && (!c->lock || fcn->cacheable))
+ {
ngx_queue_remove(&fcn->queue);
ngx_rbtree_delete(&cache->sh->rbtree, &fcn->node);
ngx_slab_free_locked(cache->shpool, fcn);
@@ -1658,7 +1667,7 @@ ngx_http_file_cache_cleanup(void *data)
"stalled cache updating, error:%ui", c->error);
}
- ngx_http_file_cache_free(c, NULL);
+ ngx_http_file_cache_free(c, NULL, 1);
}
diff -r 96e22e4f1b03 -r 1e7878138228 src/http/ngx_http_upstream.c
--- a/src/http/ngx_http_upstream.c Mon Apr 27 18:51:18 2015 +0300
+++ b/src/http/ngx_http_upstream.c Mon Apr 27 16:37:05 2015 -0700
@@ -2353,7 +2353,8 @@ ngx_http_upstream_intercept_errors(ngx_h
r->cache->error = status;
}
- ngx_http_file_cache_free(r->cache, u->pipe->temp_file);
+ ngx_http_file_cache_free(r->cache, u->pipe->temp_file,
+ u->cacheable);
}
#endif
ngx_http_upstream_finalize_request(r, u, status);
@@ -2835,7 +2836,7 @@ ngx_http_upstream_send_response(ngx_http
"http cacheable: %d", u->cacheable);
if (u->cacheable == 0 && r->cache) {
- ngx_http_file_cache_free(r->cache, u->pipe->temp_file);
+ ngx_http_file_cache_free(r->cache, u->pipe->temp_file, 0);
}
#endif
@@ -3643,11 +3644,11 @@ ngx_http_upstream_process_request(ngx_ht
ngx_http_file_cache_update(r, tf);
} else {
- ngx_http_file_cache_free(r->cache, tf);
+ ngx_http_file_cache_free(r->cache, tf, 1);
}
} else if (p->upstream_error) {
- ngx_http_file_cache_free(r->cache, p->temp_file);
+ ngx_http_file_cache_free(r->cache, p->temp_file, 1);
}
}
@@ -4029,7 +4030,7 @@ ngx_http_upstream_finalize_request(ngx_h
}
}
- ngx_http_file_cache_free(r->cache, u->pipe->temp_file);
+ ngx_http_file_cache_free(r->cache, u->pipe->temp_file, u->cacheable);
}
#endif