[PATCH] Free the shared memory only when reconfiguration is successful

Zhihua Cao czhihua at vmware.com
Wed Oct 11 08:45:04 UTC 2017


# HG changeset patch
# User Zhihua Cao <czhihua at vmware.com>
# Date 1507710209 25200
#      Wed Oct 11 01:23:29 2017 -0700
# Node ID 648b1cca8f50d83eea02a6cc2c105ae95a3f3d72
# Parent  3012fcb69db4f35dd5851e3156625dc18a823fce
Free the shared memory only when reconfiguration is successful

If nginx reconfiguration fails, the master process may crash when it is killed.
During reconfiguration, shared memory zones from the old cycle that cannot be
reused by the new configuration are freed immediately, which unmaps them from
the master's address space.  The reconfiguration may still fail after that
(for example, opening a listening socket can fail because the address is
already in use), and nginx then rolls back to the old configuration.  When the
nginx master process is later killed, it still accesses the address that was
unmapped from its address space, and a segmentation fault occurs.
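
For illustration only (this is not nginx code, just a minimal standalone
POSIX sketch; the file name and strings are made up): an anonymous shared
mapping is unmapped as soon as it is judged unusable, the simulated
reconfiguration then fails and rolls back, and the later access through the
old pointer faults, which is the failure mode described above.

    /* sketch.c -- illustrative only, not nginx code */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int
    main(void)
    {
        size_t  size = 4096;
        char   *zone;

        /* the old cycle maps a shared zone, as ngx_shm_alloc() does */
        zone = mmap(NULL, size, PROT_READ|PROT_WRITE,
                    MAP_ANON|MAP_SHARED, -1, 0);
        if (zone == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        strcpy(zone, "state owned by the old configuration");

        /* reconfiguration: the zone is not reused by the new
           configuration, so it is unmapped right away (the behaviour
           this patch defers until the reconfiguration has succeeded) */
        munmap(zone, size);

        /* the reconfiguration then fails (e.g. bind(): address already
           in use) and nginx rolls back to the old cycle, which still
           holds the old pointer */

        /* any later access through that pointer, e.g. while the master
           is being shut down, raises SIGSEGV */
        printf("%s\n", zone);

        return 0;
    }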

diff -r 3012fcb69db4 -r 648b1cca8f50 src/core/ngx_cycle.c
--- a/src/core/ngx_cycle.c	Tue Oct 10 18:22:51 2017 +0300
+++ b/src/core/ngx_cycle.c	Wed Oct 11 01:23:29 2017 -0700
@@ -16,6 +16,7 @@
 static ngx_int_t ngx_test_lockfile(u_char *file, ngx_log_t *log);
 static void ngx_clean_old_cycles(ngx_event_t *ev);
 static void ngx_shutdown_timer_handler(ngx_event_t *ev);
+static void ngx_shared_memory_reset_stale(ngx_cycle_t *cycle);
 
 
 volatile ngx_cycle_t  *ngx_cycle;
@@ -470,7 +471,8 @@
                 goto shm_zone_found;
             }
 
-            ngx_shm_free(&oshm_zone[n].shm);
+            /* don't free the old shm zone here, just mark it stale */
+            oshm_zone[n].stale = 1;
 
             break;
         }
@@ -648,6 +650,12 @@
             i = 0;
         }
 
+        /* the reconfiguration succeeded, free the stale shared memory zones */
+        if (oshm_zone[i].stale) {
+            ngx_shm_free(&oshm_zone[i].shm);
+            continue;
+        }
+
         part = &cycle->shared_memory.part;
         shm_zone = part->elts;
 
@@ -798,6 +806,8 @@
 
 failed:
 
+    ngx_shared_memory_reset_stale(old_cycle);
+
     if (!ngx_is_init_cycle(old_cycle)) {
         old_ccf = (ngx_core_conf_t *) ngx_get_conf(old_cycle->conf_ctx,
                                                    ngx_core_module);
@@ -1201,6 +1211,34 @@
 }
 
 
+static void
+ngx_shared_memory_reset_stale(ngx_cycle_t *cycle)
+{
+    ngx_uint_t        i;
+    ngx_list_part_t  *part;
+    ngx_shm_zone_t   *shm_zone;
+
+    part = &cycle->shared_memory.part;
+    shm_zone = part->elts;
+
+    for (i = 0; /* void */ ; i++) {
+
+        if (i >= part->nelts) {
+            if (part->next == NULL) {
+                return;
+            }
+            part = part->next;
+            shm_zone = part->elts;
+            i = 0;
+        }
+
+        if (shm_zone[i].stale) {
+            shm_zone[i].stale = 0;
+        }
+    }
+}
+
+
 ngx_shm_zone_t *
 ngx_shared_memory_add(ngx_conf_t *cf, ngx_str_t *name, size_t size, void *tag)
 {
@@ -1269,6 +1307,7 @@
     shm_zone->init = NULL;
     shm_zone->tag = tag;
     shm_zone->noreuse = 0;
+    shm_zone->stale = 0;
 
     return shm_zone;
 }
diff -r 3012fcb69db4 -r 648b1cca8f50 src/core/ngx_cycle.h
--- a/src/core/ngx_cycle.h	Tue Oct 10 18:22:51 2017 +0300
+++ b/src/core/ngx_cycle.h	Wed Oct 11 01:23:29 2017 -0700
@@ -31,7 +31,8 @@
     ngx_shm_t                 shm;
     ngx_shm_zone_init_pt      init;
     void                     *tag;
-    ngx_uint_t                noreuse;  /* unsigned  noreuse:1; */
+    ngx_uint_t                noreuse:1;
+    ngx_uint_t                stale:1;
 };
 
 

