[PATCH] Using worker specific counters for http_stub_status module
debayang.qdt
debayang.qdt at qualcommdatacenter.com
Fri Jan 5 13:34:56 UTC 2018
When the http_stub_status_module is enabled, a performance impact is seen on some platforms with several worker processes running under increased workload.
The atomic updates of the shared memory counters maintained by this module contend with each other - this contention can be eliminated by
keeping worker-process-specific counters and only summing them up when a client requests the status.
The patch below is an attempt to do so - it bypasses the contention and improves performance on such platforms.
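For context, a minimal standalone sketch of the idea follows (illustration only, not nginx code; the names counter_slot_t, counter_add and counter_sum are invented for the example, and GCC's __sync_fetch_and_add stands in for ngx_atomic_fetch_add): each worker does its atomic add on its own cache-line-sized slot, so the update never bounces a cache line between workers, and the status handler sums all slots on read.

#include <stdio.h>
#include <stdint.h>

#define SLOT_SIZE    128    /* >= cache line size, like NGX_COUNTER_SLOT_SIZE */
#define MAX_WORKERS  8      /* stands in for NGX_MAX_PROCESSES */

typedef struct {
    uint64_t       value;
    unsigned char  pad[SLOT_SIZE - sizeof(uint64_t)];   /* one slot per cache line */
} counter_slot_t;

static counter_slot_t stat_requests[MAX_WORKERS];

/* each worker touches only its own slot */
static void
counter_add(counter_slot_t *c, int slot, int64_t add)
{
    (void) __sync_fetch_and_add(&c[slot].value, add);
}

/* the status handler aggregates across all slots on lookup */
static uint64_t
counter_sum(counter_slot_t *c)
{
    uint64_t  sum = 0;
    int       i;

    for (i = 0; i < MAX_WORKERS; i++) {
        sum += c[i].value;
    }

    return sum;
}

int
main(void)
{
    counter_add(stat_requests, 0, 1);    /* worker in slot 0 */
    counter_add(stat_requests, 3, 1);    /* worker in slot 3 */

    printf("requests: %llu\n", (unsigned long long) counter_sum(stat_requests));
    return 0;
}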
# HG changeset patch
# User Debayan Ghosh <debayang.qdt at qualcommdatacenter.com>
# Date 1515158399 0
# Fri Jan 05 13:19:59 2018 +0000
# Node ID 5ab36b546338d5cc34769e1a70ddc754bfc5e9ea
# Parent 6d2e92acb013224e6ef2c71c9e61ab07f0b03271
Using worker specific counters for http_stub_status module.
Eliminate the shared memory contention by using worker-specific counters for the http_stub_status module and aggregating them only on lookup.
diff -r 6d2e92acb013 -r 5ab36b546338 src/core/ngx_connection.c
--- a/src/core/ngx_connection.c Thu Dec 28 12:01:05 2017 +0200
+++ b/src/core/ngx_connection.c Fri Jan 05 13:19:59 2018 +0000
@@ -1211,7 +1211,8 @@
ngx_cycle->reusable_connections_n--;
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_waiting, -1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_waiting,
+ ngx_process_slot, -1);
#endif
}
@@ -1225,7 +1226,8 @@
ngx_cycle->reusable_connections_n++;
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_waiting, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_waiting,
+ ngx_process_slot, 1);
#endif
}
}
diff -r 6d2e92acb013 -r 5ab36b546338 src/event/ngx_event.c
--- a/src/event/ngx_event.c Thu Dec 28 12:01:05 2017 +0200
+++ b/src/event/ngx_event.c Fri Jan 05 13:19:59 2018 +0000
@@ -481,7 +481,7 @@
/* cl should be equal to or greater than cache line size */
- cl = 128;
+ cl = NGX_COUNTER_SLOT_SIZE;
size = cl /* ngx_accept_mutex */
+ cl /* ngx_connection_counter */
@@ -489,13 +489,13 @@
#if (NGX_STAT_STUB)
- size += cl /* ngx_stat_accepted */
- + cl /* ngx_stat_handled */
- + cl /* ngx_stat_requests */
- + cl /* ngx_stat_active */
- + cl /* ngx_stat_reading */
- + cl /* ngx_stat_writing */
- + cl; /* ngx_stat_waiting */
+ size += cl * NGX_MAX_PROCESSES /* ngx_stat_accepted */
+ + cl * NGX_MAX_PROCESSES /* ngx_stat_handled */
+ + cl * NGX_MAX_PROCESSES /* ngx_stat_requests */
+ + cl * NGX_MAX_PROCESSES /* ngx_stat_active */
+ + cl * NGX_MAX_PROCESSES /* ngx_stat_reading */
+ + cl * NGX_MAX_PROCESSES /* ngx_stat_writing */
+ + cl * NGX_MAX_PROCESSES; /* ngx_stat_waiting */
#endif
@@ -535,13 +535,13 @@
#if (NGX_STAT_STUB)
- ngx_stat_accepted = (ngx_atomic_t *) (shared + 3 * cl);
- ngx_stat_handled = (ngx_atomic_t *) (shared + 4 * cl);
- ngx_stat_requests = (ngx_atomic_t *) (shared + 5 * cl);
- ngx_stat_active = (ngx_atomic_t *) (shared + 6 * cl);
- ngx_stat_reading = (ngx_atomic_t *) (shared + 7 * cl);
- ngx_stat_writing = (ngx_atomic_t *) (shared + 8 * cl);
- ngx_stat_waiting = (ngx_atomic_t *) (shared + 9 * cl);
+ ngx_stat_accepted = (ngx_atomic_t *) (shared + 3 * cl);
+ ngx_stat_handled = (ngx_atomic_t *) ((u_char*) ngx_stat_accepted + (cl * NGX_MAX_PROCESSES));
+ ngx_stat_requests = (ngx_atomic_t *) ((u_char*) ngx_stat_handled + (cl * NGX_MAX_PROCESSES));
+ ngx_stat_active = (ngx_atomic_t *) ((u_char*) ngx_stat_requests + (cl * NGX_MAX_PROCESSES));
+ ngx_stat_reading = (ngx_atomic_t *) ((u_char*) ngx_stat_active + (cl * NGX_MAX_PROCESSES));
+ ngx_stat_writing = (ngx_atomic_t *) ((u_char*) ngx_stat_reading + (cl * NGX_MAX_PROCESSES));
+ ngx_stat_waiting = (ngx_atomic_t *) ((u_char*) ngx_stat_writing + (cl * NGX_MAX_PROCESSES));
#endif
diff -r 6d2e92acb013 -r 5ab36b546338 src/event/ngx_event_accept.c
--- a/src/event/ngx_event_accept.c Thu Dec 28 12:01:05 2017 +0200
+++ b/src/event/ngx_event_accept.c Fri Jan 05 13:19:59 2018 +0000
@@ -135,7 +135,8 @@
}
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_accepted,
+ ngx_process_slot, 1);
#endif
ngx_accept_disabled = ngx_cycle->connection_n / 8
@@ -155,7 +156,8 @@
c->type = SOCK_STREAM;
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_active,
+ ngx_process_slot, 1);
#endif
c->pool = ngx_create_pool(ls->pool_size, ev->log);
@@ -262,7 +264,8 @@
c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_handled,
+ ngx_process_slot, 1);
#endif
if (ls->addr_ntop) {
@@ -421,7 +424,8 @@
}
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_accepted,
+ ngx_process_slot, 1);
#endif
#if (NGX_HAVE_MSGHDR_MSG_CONTROL)
@@ -449,7 +453,8 @@
}
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_active,
+ ngx_process_slot, 1);
#endif
c->pool = ngx_create_pool(ls->pool_size, ev->log);
@@ -589,7 +594,8 @@
c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_handled,
+ ngx_process_slot, 1);
#endif
if (ls->addr_ntop) {
@@ -766,7 +772,8 @@
}
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_active,
+ ngx_process_slot, -1);
#endif
}
diff -r 6d2e92acb013 -r 5ab36b546338 src/http/modules/ngx_http_stub_status_module.c
--- a/src/http/modules/ngx_http_stub_status_module.c Thu Dec 28 12:01:05 2017 +0200
+++ b/src/http/modules/ngx_http_stub_status_module.c Fri Jan 05 13:19:59 2018 +0000
@@ -79,6 +79,17 @@
ngx_http_null_variable
};
+static ngx_atomic_int_t
+ngx_http_get_aggregated_status(ngx_atomic_t* ctr) {
+ ngx_atomic_int_t sum = 0;
+ int i;
+ for (i = 0; i < NGX_MAX_PROCESSES; i++) {
+ sum += *(ngx_atomic_t*) ((u_char*) ctr + (i * NGX_COUNTER_SLOT_SIZE));
+ }
+
+ return sum;
+}
static ngx_int_t
ngx_http_stub_status_handler(ngx_http_request_t *r)
@@ -126,13 +137,14 @@
out.buf = b;
out.next = NULL;
- ap = *ngx_stat_accepted;
- hn = *ngx_stat_handled;
- ac = *ngx_stat_active;
- rq = *ngx_stat_requests;
- rd = *ngx_stat_reading;
- wr = *ngx_stat_writing;
- wa = *ngx_stat_waiting;
+
+ ap = ngx_http_get_aggregated_status(ngx_stat_accepted);
+ hn = ngx_http_get_aggregated_status(ngx_stat_handled);
+ ac = ngx_http_get_aggregated_status(ngx_stat_active);
+ rq = ngx_http_get_aggregated_status(ngx_stat_requests);
+ rd = ngx_http_get_aggregated_status(ngx_stat_reading);
+ wr = ngx_http_get_aggregated_status(ngx_stat_writing);
+ wa = ngx_http_get_aggregated_status(ngx_stat_waiting);
b->last = ngx_sprintf(b->last, "Active connections: %uA \n", ac);
diff -r 6d2e92acb013 -r 5ab36b546338 src/http/ngx_http_request.c
--- a/src/http/ngx_http_request.c Thu Dec 28 12:01:05 2017 +0200
+++ b/src/http/ngx_http_request.c Fri Jan 05 13:19:59 2018 +0000
@@ -617,9 +617,11 @@
r->log_handler = ngx_http_log_error_handler;
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_reading, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_reading,
+ ngx_process_slot, 1);
r->stat_reading = 1;
- (void) ngx_atomic_fetch_add(ngx_stat_requests, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_requests,
+ ngx_process_slot, 1);
#endif
return r;
@@ -1935,9 +1937,11 @@
}
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_reading, -1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_reading,
+ ngx_process_slot, -1);
r->stat_reading = 0;
- (void) ngx_atomic_fetch_add(ngx_stat_writing, 1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_writing,
+ ngx_process_slot, 1);
r->stat_writing = 1;
#endif
@@ -3491,11 +3495,13 @@
#if (NGX_STAT_STUB)
if (r->stat_reading) {
- (void) ngx_atomic_fetch_add(ngx_stat_reading, -1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_reading,
+ ngx_process_slot, -1);
}
if (r->stat_writing) {
- (void) ngx_atomic_fetch_add(ngx_stat_writing, -1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_writing,
+ ngx_process_slot, -1);
}
#endif
@@ -3584,7 +3590,8 @@
#endif
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_active,
+ ngx_process_slot, -1);
#endif
c->destroyed = 1;
diff -r 6d2e92acb013 -r 5ab36b546338 src/mail/ngx_mail_handler.c
--- a/src/mail/ngx_mail_handler.c Thu Dec 28 12:01:05 2017 +0200
+++ b/src/mail/ngx_mail_handler.c Fri Jan 05 13:19:59 2018 +0000
@@ -838,7 +838,8 @@
#endif
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_active,
+ ngx_process_slot, -1);
#endif
c->destroyed = 1;
diff -r 6d2e92acb013 -r 5ab36b546338 src/os/unix/ngx_atomic.h
--- a/src/os/unix/ngx_atomic.h Thu Dec 28 12:01:05 2017 +0200
+++ b/src/os/unix/ngx_atomic.h Fri Jan 05 13:19:59 2018 +0000
@@ -309,5 +309,9 @@
#define ngx_trylock(lock) (*(lock) == 0 && ngx_atomic_cmp_set(lock, 0, 1))
#define ngx_unlock(lock) *(lock) = 0
+#define NGX_COUNTER_SLOT_SIZE 128
+#define ngx_worker_atomic_fetch_add(value, worker, add) \
+ ngx_atomic_fetch_add((ngx_atomic_t*)((u_char*) value + \
+ (worker * NGX_COUNTER_SLOT_SIZE)), add)
#endif /* _NGX_ATOMIC_H_INCLUDED_ */
diff -r 6d2e92acb013 -r 5ab36b546338 src/stream/ngx_stream_handler.c
--- a/src/stream/ngx_stream_handler.c Thu Dec 28 12:01:05 2017 +0200
+++ b/src/stream/ngx_stream_handler.c Fri Jan 05 13:19:59 2018 +0000
@@ -345,7 +345,8 @@
#endif
#if (NGX_STAT_STUB)
- (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
+ (void) ngx_worker_atomic_fetch_add(ngx_stat_active,
+ ngx_process_slot, -1);
#endif
pool = c->pool;