From pluknet at nginx.com Mon Jun 4 16:00:43 2018 From: pluknet at nginx.com (Sergey Kandaurov) Date: Mon, 04 Jun 2018 16:00:43 +0000 Subject: [nginx] Leave chain in ngx_chain_add_copy() in consistent state on errors. Message-ID: details: http://hg.nginx.org/nginx/rev/da9941c9b01b branches: changeset: 7282:da9941c9b01b user: Sergey Kandaurov date: Mon Jun 04 18:47:54 2018 +0300 description: Leave chain in ngx_chain_add_copy() in consistent state on errors. diffstat: src/core/ngx_buf.c | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (11 lines): diff -r bd6563e81cea -r da9941c9b01b src/core/ngx_buf.c --- a/src/core/ngx_buf.c Wed May 30 15:40:34 2018 +0300 +++ b/src/core/ngx_buf.c Mon Jun 04 18:47:54 2018 +0300 @@ -137,6 +137,7 @@ ngx_chain_add_copy(ngx_pool_t *pool, ngx while (in) { cl = ngx_alloc_chain_link(pool); if (cl == NULL) { + *ll = NULL; return NGX_ERROR; } From arut at nginx.com Mon Jun 4 18:46:25 2018 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 04 Jun 2018 18:46:25 +0000 Subject: [nginx] Events: fixed handling zero-length client address. Message-ID: details: http://hg.nginx.org/nginx/rev/d0b897c0bb5b branches: changeset: 7283:d0b897c0bb5b user: Roman Arutyunyan date: Fri Jun 01 16:53:02 2018 +0300 description: Events: fixed handling zero-length client address. On Linux recvmsg() syscall may return a zero-length client address when receiving a datagram from an unbound unix datagram socket. It is usually assumed that socket address has at least the sa_family member. Zero-length socket address caused buffer over-read in functions which receive socket address, for example ngx_sock_ntop(). Typically the over-read resulted in unexpected socket family followed by session close. Now a fake socket address is allocated instead of a zero-length client address. diffstat: src/event/ngx_event_accept.c | 12 ++++++++++++ 1 files changed, 12 insertions(+), 0 deletions(-) diffs (22 lines): diff -r da9941c9b01b -r d0b897c0bb5b src/event/ngx_event_accept.c --- a/src/event/ngx_event_accept.c Mon Jun 04 18:47:54 2018 +0300 +++ b/src/event/ngx_event_accept.c Fri Jun 01 16:53:02 2018 +0300 @@ -448,6 +448,18 @@ ngx_event_recvmsg(ngx_event_t *ev) c->socklen = sizeof(ngx_sockaddr_t); } + if (c->socklen == 0) { + + /* + * on Linux recvmsg() returns zero msg_namelen + * when receiving packets from unbound AF_UNIX sockets + */ + + c->socklen = sizeof(struct sockaddr); + ngx_memzero(&sa, sizeof(struct sockaddr)); + sa.sockaddr.sa_family = ls->sockaddr->sa_family; + } + #if (NGX_STAT_STUB) (void) ngx_atomic_fetch_add(ngx_stat_active, 1); #endif From arut at nginx.com Mon Jun 4 18:46:27 2018 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 04 Jun 2018 18:46:27 +0000 Subject: [nginx] Events: get remote addresses before creating udp connection. Message-ID: details: http://hg.nginx.org/nginx/rev/52aacc8ddcc5 branches: changeset: 7284:52aacc8ddcc5 user: Roman Arutyunyan date: Fri Jun 01 13:12:57 2018 +0300 description: Events: get remote addresses before creating udp connection. Previously, ngx_event_recvmsg() got remote socket addresses after creating the connection object. In preparation to handling multiple UDP packets in a single session, this code was moved up. 
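For reference, the address handling introduced by these changesets reduces to a small standalone pattern: clamp the length reported by recvmsg() and, when the kernel reports a zero-length name (an unbound AF_UNIX datagram sender on Linux), substitute a zeroed sockaddr carrying the listening socket's address family. The sketch below is a simplified illustration only, not nginx code; the helper name and the use of sockaddr_storage are assumptions made here for brevity.

#include <string.h>
#include <sys/socket.h>

/*
 * Simplified sketch of the remote-address normalization performed in
 * ngx_event_recvmsg(): "sa" is the buffer passed as msg_name to recvmsg(),
 * "namelen" is msg_namelen after the call, and "listen_family" is the
 * address family of the listening socket.
 */
static socklen_t
normalize_peer_addr(struct sockaddr_storage *sa, socklen_t namelen,
    sa_family_t listen_family)
{
    if (namelen > (socklen_t) sizeof(struct sockaddr_storage)) {
        namelen = sizeof(struct sockaddr_storage);
    }

    if (namelen == 0) {
        /* unbound AF_UNIX datagram sender: fabricate a minimal address */
        memset(sa, 0, sizeof(struct sockaddr));
        ((struct sockaddr *) sa)->sa_family = listen_family;
        namelen = sizeof(struct sockaddr);
    }

    return namelen;
}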
diffstat: src/event/ngx_event_accept.c | 205 ++++++++++++++++++++++-------------------- 1 files changed, 108 insertions(+), 97 deletions(-) diffs (266 lines): diff -r d0b897c0bb5b -r 52aacc8ddcc5 src/event/ngx_event_accept.c --- a/src/event/ngx_event_accept.c Fri Jun 01 16:53:02 2018 +0300 +++ b/src/event/ngx_event_accept.c Fri Jun 01 13:12:57 2018 +0300 @@ -328,10 +328,12 @@ ngx_event_recvmsg(ngx_event_t *ev) ssize_t n; ngx_log_t *log; ngx_err_t err; + socklen_t socklen, local_socklen; ngx_event_t *rev, *wev; struct iovec iov[1]; struct msghdr msg; - ngx_sockaddr_t sa; + ngx_sockaddr_t sa, lsa; + struct sockaddr *sockaddr, *local_sockaddr; ngx_listening_t *ls; ngx_event_conf_t *ecf; ngx_connection_t *c, *lc; @@ -420,10 +422,6 @@ ngx_event_recvmsg(ngx_event_t *ev) return; } -#if (NGX_STAT_STUB) - (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1); -#endif - #if (NGX_HAVE_MSGHDR_MSG_CONTROL) if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) { ngx_log_error(NGX_LOG_ALERT, ev->log, 0, @@ -432,6 +430,102 @@ ngx_event_recvmsg(ngx_event_t *ev) } #endif + sockaddr = msg.msg_name; + socklen = msg.msg_namelen; + + if (socklen > (socklen_t) sizeof(ngx_sockaddr_t)) { + socklen = sizeof(ngx_sockaddr_t); + } + + if (socklen == 0) { + + /* + * on Linux recvmsg() returns zero msg_namelen + * when receiving packets from unbound AF_UNIX sockets + */ + + socklen = sizeof(struct sockaddr); + ngx_memzero(&sa, sizeof(struct sockaddr)); + sa.sockaddr.sa_family = ls->sockaddr->sa_family; + } + + local_sockaddr = ls->sockaddr; + local_socklen = ls->socklen; + +#if (NGX_HAVE_MSGHDR_MSG_CONTROL) + + if (ls->wildcard) { + struct cmsghdr *cmsg; + + ngx_memcpy(&lsa, local_sockaddr, local_socklen); + local_sockaddr = &lsa.sockaddr; + + for (cmsg = CMSG_FIRSTHDR(&msg); + cmsg != NULL; + cmsg = CMSG_NXTHDR(&msg, cmsg)) + { + +#if (NGX_HAVE_IP_RECVDSTADDR) + + if (cmsg->cmsg_level == IPPROTO_IP + && cmsg->cmsg_type == IP_RECVDSTADDR + && local_sockaddr->sa_family == AF_INET) + { + struct in_addr *addr; + struct sockaddr_in *sin; + + addr = (struct in_addr *) CMSG_DATA(cmsg); + sin = (struct sockaddr_in *) local_sockaddr; + sin->sin_addr = *addr; + + break; + } + +#elif (NGX_HAVE_IP_PKTINFO) + + if (cmsg->cmsg_level == IPPROTO_IP + && cmsg->cmsg_type == IP_PKTINFO + && local_sockaddr->sa_family == AF_INET) + { + struct in_pktinfo *pkt; + struct sockaddr_in *sin; + + pkt = (struct in_pktinfo *) CMSG_DATA(cmsg); + sin = (struct sockaddr_in *) local_sockaddr; + sin->sin_addr = pkt->ipi_addr; + + break; + } + +#endif + +#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO) + + if (cmsg->cmsg_level == IPPROTO_IPV6 + && cmsg->cmsg_type == IPV6_PKTINFO + && local_sockaddr->sa_family == AF_INET6) + { + struct in6_pktinfo *pkt6; + struct sockaddr_in6 *sin6; + + pkt6 = (struct in6_pktinfo *) CMSG_DATA(cmsg); + sin6 = (struct sockaddr_in6 *) local_sockaddr; + sin6->sin6_addr = pkt6->ipi6_addr; + + break; + } + +#endif + + } + } + +#endif + +#if (NGX_STAT_STUB) + (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1); +#endif + ngx_accept_disabled = ngx_cycle->connection_n / 8 - ngx_cycle->free_connection_n; @@ -442,23 +536,7 @@ ngx_event_recvmsg(ngx_event_t *ev) c->shared = 1; c->type = SOCK_DGRAM; - c->socklen = msg.msg_namelen; - - if (c->socklen > (socklen_t) sizeof(ngx_sockaddr_t)) { - c->socklen = sizeof(ngx_sockaddr_t); - } - - if (c->socklen == 0) { - - /* - * on Linux recvmsg() returns zero msg_namelen - * when receiving packets from unbound AF_UNIX sockets - */ - - c->socklen = sizeof(struct sockaddr); - ngx_memzero(&sa, sizeof(struct 
sockaddr)); - sa.sockaddr.sa_family = ls->sockaddr->sa_family; - } + c->socklen = socklen; #if (NGX_STAT_STUB) (void) ngx_atomic_fetch_add(ngx_stat_active, 1); @@ -470,13 +548,13 @@ ngx_event_recvmsg(ngx_event_t *ev) return; } - c->sockaddr = ngx_palloc(c->pool, c->socklen); + c->sockaddr = ngx_palloc(c->pool, socklen); if (c->sockaddr == NULL) { ngx_close_accepted_connection(c); return; } - ngx_memcpy(c->sockaddr, msg.msg_name, c->socklen); + ngx_memcpy(c->sockaddr, sockaddr, socklen); log = ngx_palloc(c->pool, sizeof(ngx_log_t)); if (log == NULL) { @@ -491,87 +569,20 @@ ngx_event_recvmsg(ngx_event_t *ev) c->log = log; c->pool->log = log; - c->listening = ls; - c->local_sockaddr = ls->sockaddr; - c->local_socklen = ls->socklen; - -#if (NGX_HAVE_MSGHDR_MSG_CONTROL) - if (ls->wildcard) { - struct cmsghdr *cmsg; - struct sockaddr *sockaddr; - - sockaddr = ngx_palloc(c->pool, c->local_socklen); - if (sockaddr == NULL) { + if (local_sockaddr == &lsa.sockaddr) { + local_sockaddr = ngx_palloc(c->pool, local_socklen); + if (local_sockaddr == NULL) { ngx_close_accepted_connection(c); return; } - ngx_memcpy(sockaddr, c->local_sockaddr, c->local_socklen); - c->local_sockaddr = sockaddr; - - for (cmsg = CMSG_FIRSTHDR(&msg); - cmsg != NULL; - cmsg = CMSG_NXTHDR(&msg, cmsg)) - { - -#if (NGX_HAVE_IP_RECVDSTADDR) - - if (cmsg->cmsg_level == IPPROTO_IP - && cmsg->cmsg_type == IP_RECVDSTADDR - && sockaddr->sa_family == AF_INET) - { - struct in_addr *addr; - struct sockaddr_in *sin; - - addr = (struct in_addr *) CMSG_DATA(cmsg); - sin = (struct sockaddr_in *) sockaddr; - sin->sin_addr = *addr; - - break; - } - -#elif (NGX_HAVE_IP_PKTINFO) - - if (cmsg->cmsg_level == IPPROTO_IP - && cmsg->cmsg_type == IP_PKTINFO - && sockaddr->sa_family == AF_INET) - { - struct in_pktinfo *pkt; - struct sockaddr_in *sin; - - pkt = (struct in_pktinfo *) CMSG_DATA(cmsg); - sin = (struct sockaddr_in *) sockaddr; - sin->sin_addr = pkt->ipi_addr; - - break; - } - -#endif - -#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO) - - if (cmsg->cmsg_level == IPPROTO_IPV6 - && cmsg->cmsg_type == IPV6_PKTINFO - && sockaddr->sa_family == AF_INET6) - { - struct in6_pktinfo *pkt6; - struct sockaddr_in6 *sin6; - - pkt6 = (struct in6_pktinfo *) CMSG_DATA(cmsg); - sin6 = (struct sockaddr_in6 *) sockaddr; - sin6->sin6_addr = pkt6->ipi6_addr; - - break; - } - -#endif - - } + ngx_memcpy(local_sockaddr, &lsa, local_socklen); } -#endif + c->local_sockaddr = local_sockaddr; + c->local_socklen = local_socklen; c->buffer = ngx_create_temp_buf(c->pool, n); if (c->buffer == NULL) { From arut at nginx.com Mon Jun 4 18:46:28 2018 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 04 Jun 2018 18:46:28 +0000 Subject: [nginx] Events: moved ngx_recvmsg() to new file src/event/ngx_event_udp.c. Message-ID: details: http://hg.nginx.org/nginx/rev/88a624c9b491 branches: changeset: 7285:88a624c9b491 user: Roman Arutyunyan date: Fri Jun 01 16:55:49 2018 +0300 description: Events: moved ngx_recvmsg() to new file src/event/ngx_event_udp.c. 
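A large part of the relocated code is the recvmsg() control-message parsing that recovers a datagram's destination address on wildcard listeners. Stripped of nginx types, the Linux IP_PKTINFO branch of that loop looks roughly like the sketch below; it is an illustration only (the helper name is invented here) and assumes IP_PKTINFO was previously enabled on the socket with setsockopt().

#define _GNU_SOURCE
#include <netinet/in.h>
#include <sys/socket.h>

/*
 * Illustrative standalone version of the IP_PKTINFO branch moved into
 * src/event/ngx_event_udp.c: walk the ancillary data returned by recvmsg()
 * and copy the datagram's destination address into a sockaddr_in that was
 * initialized from the wildcard listening address.
 */
static void
fill_local_addr(struct msghdr *msg, struct sockaddr_in *local)
{
    struct cmsghdr     *cmsg;
    struct in_pktinfo  *pkt;

    for (cmsg = CMSG_FIRSTHDR(msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(msg, cmsg))
    {
        if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_PKTINFO) {
            pkt = (struct in_pktinfo *) CMSG_DATA(cmsg);
            local->sin_addr = pkt->ipi_addr;
            break;
        }
    }
}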
diffstat: auto/sources | 1 + src/event/ngx_event.h | 4 + src/event/ngx_event_accept.c | 357 +---------------------------------------- src/event/ngx_event_udp.c | 376 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 384 insertions(+), 354 deletions(-) diffs (803 lines): diff -r 52aacc8ddcc5 -r 88a624c9b491 auto/sources --- a/auto/sources Fri Jun 01 13:12:57 2018 +0300 +++ b/auto/sources Fri Jun 01 16:55:49 2018 +0300 @@ -95,6 +95,7 @@ EVENT_SRCS="src/event/ngx_event.c \ src/event/ngx_event_timer.c \ src/event/ngx_event_posted.c \ src/event/ngx_event_accept.c \ + src/event/ngx_event_udp.c \ src/event/ngx_event_connect.c \ src/event/ngx_event_pipe.c" diff -r 52aacc8ddcc5 -r 88a624c9b491 src/event/ngx_event.h --- a/src/event/ngx_event.h Fri Jun 01 13:12:57 2018 +0300 +++ b/src/event/ngx_event.h Fri Jun 01 16:55:49 2018 +0300 @@ -508,7 +508,11 @@ void ngx_event_accept(ngx_event_t *ev); void ngx_event_recvmsg(ngx_event_t *ev); #endif ngx_int_t ngx_trylock_accept_mutex(ngx_cycle_t *cycle); +ngx_int_t ngx_enable_accept_events(ngx_cycle_t *cycle); u_char *ngx_accept_log_error(ngx_log_t *log, u_char *buf, size_t len); +#if (NGX_DEBUG) +void ngx_debug_accepted_connection(ngx_event_conf_t *ecf, ngx_connection_t *c); +#endif void ngx_process_events_and_timers(ngx_cycle_t *cycle); diff -r 52aacc8ddcc5 -r 88a624c9b491 src/event/ngx_event_accept.c --- a/src/event/ngx_event_accept.c Fri Jun 01 13:12:57 2018 +0300 +++ b/src/event/ngx_event_accept.c Fri Jun 01 16:55:49 2018 +0300 @@ -10,13 +10,8 @@ #include -static ngx_int_t ngx_enable_accept_events(ngx_cycle_t *cycle); static ngx_int_t ngx_disable_accept_events(ngx_cycle_t *cycle, ngx_uint_t all); static void ngx_close_accepted_connection(ngx_connection_t *c); -#if (NGX_DEBUG) -static void ngx_debug_accepted_connection(ngx_event_conf_t *ecf, - ngx_connection_t *c); -#endif void @@ -320,352 +315,6 @@ ngx_event_accept(ngx_event_t *ev) } -#if !(NGX_WIN32) - -void -ngx_event_recvmsg(ngx_event_t *ev) -{ - ssize_t n; - ngx_log_t *log; - ngx_err_t err; - socklen_t socklen, local_socklen; - ngx_event_t *rev, *wev; - struct iovec iov[1]; - struct msghdr msg; - ngx_sockaddr_t sa, lsa; - struct sockaddr *sockaddr, *local_sockaddr; - ngx_listening_t *ls; - ngx_event_conf_t *ecf; - ngx_connection_t *c, *lc; - static u_char buffer[65535]; - -#if (NGX_HAVE_MSGHDR_MSG_CONTROL) - -#if (NGX_HAVE_IP_RECVDSTADDR) - u_char msg_control[CMSG_SPACE(sizeof(struct in_addr))]; -#elif (NGX_HAVE_IP_PKTINFO) - u_char msg_control[CMSG_SPACE(sizeof(struct in_pktinfo))]; -#endif - -#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO) - u_char msg_control6[CMSG_SPACE(sizeof(struct in6_pktinfo))]; -#endif - -#endif - - if (ev->timedout) { - if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) { - return; - } - - ev->timedout = 0; - } - - ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module); - - if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) { - ev->available = ecf->multi_accept; - } - - lc = ev->data; - ls = lc->listening; - ev->ready = 0; - - ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, - "recvmsg on %V, ready: %d", &ls->addr_text, ev->available); - - do { - ngx_memzero(&msg, sizeof(struct msghdr)); - - iov[0].iov_base = (void *) buffer; - iov[0].iov_len = sizeof(buffer); - - msg.msg_name = &sa; - msg.msg_namelen = sizeof(ngx_sockaddr_t); - msg.msg_iov = iov; - msg.msg_iovlen = 1; - -#if (NGX_HAVE_MSGHDR_MSG_CONTROL) - - if (ls->wildcard) { - -#if (NGX_HAVE_IP_RECVDSTADDR || NGX_HAVE_IP_PKTINFO) - if (ls->sockaddr->sa_family == AF_INET) { - 
msg.msg_control = &msg_control; - msg.msg_controllen = sizeof(msg_control); - } -#endif - -#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO) - if (ls->sockaddr->sa_family == AF_INET6) { - msg.msg_control = &msg_control6; - msg.msg_controllen = sizeof(msg_control6); - } -#endif - } - -#endif - - n = recvmsg(lc->fd, &msg, 0); - - if (n == -1) { - err = ngx_socket_errno; - - if (err == NGX_EAGAIN) { - ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err, - "recvmsg() not ready"); - return; - } - - ngx_log_error(NGX_LOG_ALERT, ev->log, err, "recvmsg() failed"); - - return; - } - -#if (NGX_HAVE_MSGHDR_MSG_CONTROL) - if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) { - ngx_log_error(NGX_LOG_ALERT, ev->log, 0, - "recvmsg() truncated data"); - continue; - } -#endif - - sockaddr = msg.msg_name; - socklen = msg.msg_namelen; - - if (socklen > (socklen_t) sizeof(ngx_sockaddr_t)) { - socklen = sizeof(ngx_sockaddr_t); - } - - if (socklen == 0) { - - /* - * on Linux recvmsg() returns zero msg_namelen - * when receiving packets from unbound AF_UNIX sockets - */ - - socklen = sizeof(struct sockaddr); - ngx_memzero(&sa, sizeof(struct sockaddr)); - sa.sockaddr.sa_family = ls->sockaddr->sa_family; - } - - local_sockaddr = ls->sockaddr; - local_socklen = ls->socklen; - -#if (NGX_HAVE_MSGHDR_MSG_CONTROL) - - if (ls->wildcard) { - struct cmsghdr *cmsg; - - ngx_memcpy(&lsa, local_sockaddr, local_socklen); - local_sockaddr = &lsa.sockaddr; - - for (cmsg = CMSG_FIRSTHDR(&msg); - cmsg != NULL; - cmsg = CMSG_NXTHDR(&msg, cmsg)) - { - -#if (NGX_HAVE_IP_RECVDSTADDR) - - if (cmsg->cmsg_level == IPPROTO_IP - && cmsg->cmsg_type == IP_RECVDSTADDR - && local_sockaddr->sa_family == AF_INET) - { - struct in_addr *addr; - struct sockaddr_in *sin; - - addr = (struct in_addr *) CMSG_DATA(cmsg); - sin = (struct sockaddr_in *) local_sockaddr; - sin->sin_addr = *addr; - - break; - } - -#elif (NGX_HAVE_IP_PKTINFO) - - if (cmsg->cmsg_level == IPPROTO_IP - && cmsg->cmsg_type == IP_PKTINFO - && local_sockaddr->sa_family == AF_INET) - { - struct in_pktinfo *pkt; - struct sockaddr_in *sin; - - pkt = (struct in_pktinfo *) CMSG_DATA(cmsg); - sin = (struct sockaddr_in *) local_sockaddr; - sin->sin_addr = pkt->ipi_addr; - - break; - } - -#endif - -#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO) - - if (cmsg->cmsg_level == IPPROTO_IPV6 - && cmsg->cmsg_type == IPV6_PKTINFO - && local_sockaddr->sa_family == AF_INET6) - { - struct in6_pktinfo *pkt6; - struct sockaddr_in6 *sin6; - - pkt6 = (struct in6_pktinfo *) CMSG_DATA(cmsg); - sin6 = (struct sockaddr_in6 *) local_sockaddr; - sin6->sin6_addr = pkt6->ipi6_addr; - - break; - } - -#endif - - } - } - -#endif - -#if (NGX_STAT_STUB) - (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1); -#endif - - ngx_accept_disabled = ngx_cycle->connection_n / 8 - - ngx_cycle->free_connection_n; - - c = ngx_get_connection(lc->fd, ev->log); - if (c == NULL) { - return; - } - - c->shared = 1; - c->type = SOCK_DGRAM; - c->socklen = socklen; - -#if (NGX_STAT_STUB) - (void) ngx_atomic_fetch_add(ngx_stat_active, 1); -#endif - - c->pool = ngx_create_pool(ls->pool_size, ev->log); - if (c->pool == NULL) { - ngx_close_accepted_connection(c); - return; - } - - c->sockaddr = ngx_palloc(c->pool, socklen); - if (c->sockaddr == NULL) { - ngx_close_accepted_connection(c); - return; - } - - ngx_memcpy(c->sockaddr, sockaddr, socklen); - - log = ngx_palloc(c->pool, sizeof(ngx_log_t)); - if (log == NULL) { - ngx_close_accepted_connection(c); - return; - } - - *log = ls->log; - - c->send = ngx_udp_send; - c->send_chain = ngx_udp_send_chain; - 
- c->log = log; - c->pool->log = log; - c->listening = ls; - - if (local_sockaddr == &lsa.sockaddr) { - local_sockaddr = ngx_palloc(c->pool, local_socklen); - if (local_sockaddr == NULL) { - ngx_close_accepted_connection(c); - return; - } - - ngx_memcpy(local_sockaddr, &lsa, local_socklen); - } - - c->local_sockaddr = local_sockaddr; - c->local_socklen = local_socklen; - - c->buffer = ngx_create_temp_buf(c->pool, n); - if (c->buffer == NULL) { - ngx_close_accepted_connection(c); - return; - } - - c->buffer->last = ngx_cpymem(c->buffer->last, buffer, n); - - rev = c->read; - wev = c->write; - - wev->ready = 1; - - rev->log = log; - wev->log = log; - - /* - * TODO: MT: - ngx_atomic_fetch_add() - * or protection by critical section or light mutex - * - * TODO: MP: - allocated in a shared memory - * - ngx_atomic_fetch_add() - * or protection by critical section or light mutex - */ - - c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1); - -#if (NGX_STAT_STUB) - (void) ngx_atomic_fetch_add(ngx_stat_handled, 1); -#endif - - if (ls->addr_ntop) { - c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len); - if (c->addr_text.data == NULL) { - ngx_close_accepted_connection(c); - return; - } - - c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen, - c->addr_text.data, - ls->addr_text_max_len, 0); - if (c->addr_text.len == 0) { - ngx_close_accepted_connection(c); - return; - } - } - -#if (NGX_DEBUG) - { - ngx_str_t addr; - u_char text[NGX_SOCKADDR_STRLEN]; - - ngx_debug_accepted_connection(ecf, c); - - if (log->log_level & NGX_LOG_DEBUG_EVENT) { - addr.data = text; - addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text, - NGX_SOCKADDR_STRLEN, 1); - - ngx_log_debug4(NGX_LOG_DEBUG_EVENT, log, 0, - "*%uA recvmsg: %V fd:%d n:%z", - c->number, &addr, c->fd, n); - } - - } -#endif - - log->data = NULL; - log->handler = NULL; - - ls->handler(c); - - if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { - ev->available -= n; - } - - } while (ev->available); -} - -#endif - - ngx_int_t ngx_trylock_accept_mutex(ngx_cycle_t *cycle) { @@ -704,7 +353,7 @@ ngx_trylock_accept_mutex(ngx_cycle_t *cy } -static ngx_int_t +ngx_int_t ngx_enable_accept_events(ngx_cycle_t *cycle) { ngx_uint_t i; @@ -779,7 +428,7 @@ ngx_close_accepted_connection(ngx_connec fd = c->fd; c->fd = (ngx_socket_t) -1; - if (!c->shared && ngx_close_socket(fd) == -1) { + if (ngx_close_socket(fd) == -1) { ngx_log_error(NGX_LOG_ALERT, c->log, ngx_socket_errno, ngx_close_socket_n " failed"); } @@ -804,7 +453,7 @@ ngx_accept_log_error(ngx_log_t *log, u_c #if (NGX_DEBUG) -static void +void ngx_debug_accepted_connection(ngx_event_conf_t *ecf, ngx_connection_t *c) { struct sockaddr_in *sin; diff -r 52aacc8ddcc5 -r 88a624c9b491 src/event/ngx_event_udp.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/event/ngx_event_udp.c Fri Jun 01 16:55:49 2018 +0300 @@ -0,0 +1,376 @@ + +/* + * Copyright (C) Roman Arutyunyan + * Copyright (C) Nginx, Inc. 
+ */ + + +#include +#include +#include + + +#if !(NGX_WIN32) + +static void ngx_close_accepted_udp_connection(ngx_connection_t *c); + + +void +ngx_event_recvmsg(ngx_event_t *ev) +{ + ssize_t n; + ngx_log_t *log; + ngx_err_t err; + socklen_t socklen, local_socklen; + ngx_event_t *rev, *wev; + struct iovec iov[1]; + struct msghdr msg; + ngx_sockaddr_t sa, lsa; + struct sockaddr *sockaddr, *local_sockaddr; + ngx_listening_t *ls; + ngx_event_conf_t *ecf; + ngx_connection_t *c, *lc; + static u_char buffer[65535]; + +#if (NGX_HAVE_MSGHDR_MSG_CONTROL) + +#if (NGX_HAVE_IP_RECVDSTADDR) + u_char msg_control[CMSG_SPACE(sizeof(struct in_addr))]; +#elif (NGX_HAVE_IP_PKTINFO) + u_char msg_control[CMSG_SPACE(sizeof(struct in_pktinfo))]; +#endif + +#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO) + u_char msg_control6[CMSG_SPACE(sizeof(struct in6_pktinfo))]; +#endif + +#endif + + if (ev->timedout) { + if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) { + return; + } + + ev->timedout = 0; + } + + ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module); + + if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) { + ev->available = ecf->multi_accept; + } + + lc = ev->data; + ls = lc->listening; + ev->ready = 0; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "recvmsg on %V, ready: %d", &ls->addr_text, ev->available); + + do { + ngx_memzero(&msg, sizeof(struct msghdr)); + + iov[0].iov_base = (void *) buffer; + iov[0].iov_len = sizeof(buffer); + + msg.msg_name = &sa; + msg.msg_namelen = sizeof(ngx_sockaddr_t); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + +#if (NGX_HAVE_MSGHDR_MSG_CONTROL) + + if (ls->wildcard) { + +#if (NGX_HAVE_IP_RECVDSTADDR || NGX_HAVE_IP_PKTINFO) + if (ls->sockaddr->sa_family == AF_INET) { + msg.msg_control = &msg_control; + msg.msg_controllen = sizeof(msg_control); + } +#endif + +#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO) + if (ls->sockaddr->sa_family == AF_INET6) { + msg.msg_control = &msg_control6; + msg.msg_controllen = sizeof(msg_control6); + } +#endif + } + +#endif + + n = recvmsg(lc->fd, &msg, 0); + + if (n == -1) { + err = ngx_socket_errno; + + if (err == NGX_EAGAIN) { + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err, + "recvmsg() not ready"); + return; + } + + ngx_log_error(NGX_LOG_ALERT, ev->log, err, "recvmsg() failed"); + + return; + } + +#if (NGX_HAVE_MSGHDR_MSG_CONTROL) + if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) { + ngx_log_error(NGX_LOG_ALERT, ev->log, 0, + "recvmsg() truncated data"); + continue; + } +#endif + + sockaddr = msg.msg_name; + socklen = msg.msg_namelen; + + if (socklen > (socklen_t) sizeof(ngx_sockaddr_t)) { + socklen = sizeof(ngx_sockaddr_t); + } + + if (socklen == 0) { + + /* + * on Linux recvmsg() returns zero msg_namelen + * when receiving packets from unbound AF_UNIX sockets + */ + + socklen = sizeof(struct sockaddr); + ngx_memzero(&sa, sizeof(struct sockaddr)); + sa.sockaddr.sa_family = ls->sockaddr->sa_family; + } + + local_sockaddr = ls->sockaddr; + local_socklen = ls->socklen; + +#if (NGX_HAVE_MSGHDR_MSG_CONTROL) + + if (ls->wildcard) { + struct cmsghdr *cmsg; + + ngx_memcpy(&lsa, local_sockaddr, local_socklen); + local_sockaddr = &lsa.sockaddr; + + for (cmsg = CMSG_FIRSTHDR(&msg); + cmsg != NULL; + cmsg = CMSG_NXTHDR(&msg, cmsg)) + { + +#if (NGX_HAVE_IP_RECVDSTADDR) + + if (cmsg->cmsg_level == IPPROTO_IP + && cmsg->cmsg_type == IP_RECVDSTADDR + && local_sockaddr->sa_family == AF_INET) + { + struct in_addr *addr; + struct sockaddr_in *sin; + + addr = (struct in_addr *) CMSG_DATA(cmsg); + sin = (struct sockaddr_in 
*) local_sockaddr; + sin->sin_addr = *addr; + + break; + } + +#elif (NGX_HAVE_IP_PKTINFO) + + if (cmsg->cmsg_level == IPPROTO_IP + && cmsg->cmsg_type == IP_PKTINFO + && local_sockaddr->sa_family == AF_INET) + { + struct in_pktinfo *pkt; + struct sockaddr_in *sin; + + pkt = (struct in_pktinfo *) CMSG_DATA(cmsg); + sin = (struct sockaddr_in *) local_sockaddr; + sin->sin_addr = pkt->ipi_addr; + + break; + } + +#endif + +#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO) + + if (cmsg->cmsg_level == IPPROTO_IPV6 + && cmsg->cmsg_type == IPV6_PKTINFO + && local_sockaddr->sa_family == AF_INET6) + { + struct in6_pktinfo *pkt6; + struct sockaddr_in6 *sin6; + + pkt6 = (struct in6_pktinfo *) CMSG_DATA(cmsg); + sin6 = (struct sockaddr_in6 *) local_sockaddr; + sin6->sin6_addr = pkt6->ipi6_addr; + + break; + } + +#endif + + } + } + +#endif + +#if (NGX_STAT_STUB) + (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1); +#endif + + ngx_accept_disabled = ngx_cycle->connection_n / 8 + - ngx_cycle->free_connection_n; + + c = ngx_get_connection(lc->fd, ev->log); + if (c == NULL) { + return; + } + + c->shared = 1; + c->type = SOCK_DGRAM; + c->socklen = socklen; + +#if (NGX_STAT_STUB) + (void) ngx_atomic_fetch_add(ngx_stat_active, 1); +#endif + + c->pool = ngx_create_pool(ls->pool_size, ev->log); + if (c->pool == NULL) { + ngx_close_accepted_udp_connection(c); + return; + } + + c->sockaddr = ngx_palloc(c->pool, socklen); + if (c->sockaddr == NULL) { + ngx_close_accepted_udp_connection(c); + return; + } + + ngx_memcpy(c->sockaddr, sockaddr, socklen); + + log = ngx_palloc(c->pool, sizeof(ngx_log_t)); + if (log == NULL) { + ngx_close_accepted_udp_connection(c); + return; + } + + *log = ls->log; + + c->send = ngx_udp_send; + c->send_chain = ngx_udp_send_chain; + + c->log = log; + c->pool->log = log; + c->listening = ls; + + if (local_sockaddr == &lsa.sockaddr) { + local_sockaddr = ngx_palloc(c->pool, local_socklen); + if (local_sockaddr == NULL) { + ngx_close_accepted_udp_connection(c); + return; + } + + ngx_memcpy(local_sockaddr, &lsa, local_socklen); + } + + c->local_sockaddr = local_sockaddr; + c->local_socklen = local_socklen; + + c->buffer = ngx_create_temp_buf(c->pool, n); + if (c->buffer == NULL) { + ngx_close_accepted_udp_connection(c); + return; + } + + c->buffer->last = ngx_cpymem(c->buffer->last, buffer, n); + + rev = c->read; + wev = c->write; + + wev->ready = 1; + + rev->log = log; + wev->log = log; + + /* + * TODO: MT: - ngx_atomic_fetch_add() + * or protection by critical section or light mutex + * + * TODO: MP: - allocated in a shared memory + * - ngx_atomic_fetch_add() + * or protection by critical section or light mutex + */ + + c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1); + +#if (NGX_STAT_STUB) + (void) ngx_atomic_fetch_add(ngx_stat_handled, 1); +#endif + + if (ls->addr_ntop) { + c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len); + if (c->addr_text.data == NULL) { + ngx_close_accepted_udp_connection(c); + return; + } + + c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen, + c->addr_text.data, + ls->addr_text_max_len, 0); + if (c->addr_text.len == 0) { + ngx_close_accepted_udp_connection(c); + return; + } + } + +#if (NGX_DEBUG) + { + ngx_str_t addr; + u_char text[NGX_SOCKADDR_STRLEN]; + + ngx_debug_accepted_connection(ecf, c); + + if (log->log_level & NGX_LOG_DEBUG_EVENT) { + addr.data = text; + addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text, + NGX_SOCKADDR_STRLEN, 1); + + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, log, 0, + "*%uA recvmsg: %V fd:%d n:%z", + 
c->number, &addr, c->fd, n); + } + + } +#endif + + log->data = NULL; + log->handler = NULL; + + ls->handler(c); + + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { + ev->available -= n; + } + + } while (ev->available); +} + + +static void +ngx_close_accepted_udp_connection(ngx_connection_t *c) +{ + ngx_free_connection(c); + + c->fd = (ngx_socket_t) -1; + + if (c->pool) { + ngx_destroy_pool(c->pool); + } + +#if (NGX_STAT_STUB) + (void) ngx_atomic_fetch_add(ngx_stat_active, -1); +#endif +} + +#endif From arut at nginx.com Mon Jun 4 18:46:30 2018 From: arut at nginx.com (Roman Arutyunyan) Date: Mon, 04 Jun 2018 18:46:30 +0000 Subject: [nginx] Stream: udp streams. Message-ID: details: http://hg.nginx.org/nginx/rev/d27aa9060c95 branches: changeset: 7286:d27aa9060c95 user: Roman Arutyunyan date: Mon Jun 04 19:50:00 2018 +0300 description: Stream: udp streams. Previously, only one client packet could be processed in a udp stream session even though multiple response packets were supported. Now multiple packets coming from the same client address and port are delivered to the same stream session. If it's required to maintain a single stream of data, nginx should be configured in a way that all packets from a client are delivered to the same worker. On Linux and DragonFly BSD the "reuseport" parameter should be specified for this. Other systems do not currently provide appropriate mechanisms. For these systems a single stream of udp packets is only guaranteed in single-worker configurations. The proxy_response directive now specifies how many packets are expected in response to a single client packet. diffstat: src/core/ngx_connection.c | 4 + src/core/ngx_connection.h | 5 + src/core/ngx_core.h | 1 + src/event/ngx_event.h | 2 + src/event/ngx_event_udp.c | 265 +++++++++++++++++++++++++++++++++++ src/stream/ngx_stream_proxy_module.c | 134 ++++++++++------- src/stream/ngx_stream_upstream.h | 1 + 7 files changed, 358 insertions(+), 54 deletions(-) diffs (651 lines): diff -r 88a624c9b491 -r d27aa9060c95 src/core/ngx_connection.c --- a/src/core/ngx_connection.c Fri Jun 01 16:55:49 2018 +0300 +++ b/src/core/ngx_connection.c Mon Jun 04 19:50:00 2018 +0300 @@ -72,6 +72,10 @@ ngx_create_listening(ngx_conf_t *cf, str ngx_memcpy(ls->addr_text.data, text, len); +#if !(NGX_WIN32) + ngx_rbtree_init(&ls->rbtree, &ls->sentinel, ngx_udp_rbtree_insert_value); +#endif + ls->fd = (ngx_socket_t) -1; ls->type = SOCK_STREAM; diff -r 88a624c9b491 -r d27aa9060c95 src/core/ngx_connection.h --- a/src/core/ngx_connection.h Fri Jun 01 16:55:49 2018 +0300 +++ b/src/core/ngx_connection.h Mon Jun 04 19:50:00 2018 +0300 @@ -51,6 +51,9 @@ struct ngx_listening_s { ngx_listening_t *previous; ngx_connection_t *connection; + ngx_rbtree_t rbtree; + ngx_rbtree_node_t sentinel; + ngx_uint_t worker; unsigned open:1; @@ -151,6 +154,8 @@ struct ngx_connection_s { ngx_ssl_connection_t *ssl; #endif + ngx_udp_connection_t *udp; + struct sockaddr *local_sockaddr; socklen_t local_socklen; diff -r 88a624c9b491 -r d27aa9060c95 src/core/ngx_core.h --- a/src/core/ngx_core.h Fri Jun 01 16:55:49 2018 +0300 +++ b/src/core/ngx_core.h Mon Jun 04 19:50:00 2018 +0300 @@ -27,6 +27,7 @@ typedef struct ngx_connection_s ngx typedef struct ngx_thread_task_s ngx_thread_task_t; typedef struct ngx_ssl_s ngx_ssl_t; typedef struct ngx_ssl_connection_s ngx_ssl_connection_t; +typedef struct ngx_udp_connection_s ngx_udp_connection_t; typedef void (*ngx_event_handler_pt)(ngx_event_t *ev); typedef void (*ngx_connection_handler_pt)(ngx_connection_t *c); diff -r 88a624c9b491 -r 
d27aa9060c95 src/event/ngx_event.h --- a/src/event/ngx_event.h Fri Jun 01 16:55:49 2018 +0300 +++ b/src/event/ngx_event.h Mon Jun 04 19:50:00 2018 +0300 @@ -506,6 +506,8 @@ extern ngx_module_t ngx_event_ void ngx_event_accept(ngx_event_t *ev); #if !(NGX_WIN32) void ngx_event_recvmsg(ngx_event_t *ev); +void ngx_udp_rbtree_insert_value(ngx_rbtree_node_t *temp, + ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel); #endif ngx_int_t ngx_trylock_accept_mutex(ngx_cycle_t *cycle); ngx_int_t ngx_enable_accept_events(ngx_cycle_t *cycle); diff -r 88a624c9b491 -r d27aa9060c95 src/event/ngx_event_udp.c --- a/src/event/ngx_event_udp.c Fri Jun 01 16:55:49 2018 +0300 +++ b/src/event/ngx_event_udp.c Mon Jun 04 19:50:00 2018 +0300 @@ -12,13 +12,28 @@ #if !(NGX_WIN32) +struct ngx_udp_connection_s { + ngx_rbtree_node_t node; + ngx_connection_t *connection; + ngx_buf_t *buffer; +}; + + static void ngx_close_accepted_udp_connection(ngx_connection_t *c); +static ssize_t ngx_udp_shared_recv(ngx_connection_t *c, u_char *buf, + size_t size); +static ngx_int_t ngx_insert_udp_connection(ngx_connection_t *c); +static void ngx_delete_udp_connection(void *data); +static ngx_connection_t *ngx_lookup_udp_connection(ngx_listening_t *ls, + struct sockaddr *sockaddr, socklen_t socklen, + struct sockaddr *local_sockaddr, socklen_t local_socklen); void ngx_event_recvmsg(ngx_event_t *ev) { ssize_t n; + ngx_buf_t buf; ngx_log_t *log; ngx_err_t err; socklen_t socklen, local_socklen; @@ -215,6 +230,43 @@ ngx_event_recvmsg(ngx_event_t *ev) #endif + c = ngx_lookup_udp_connection(ls, sockaddr, socklen, local_sockaddr, + local_socklen); + + if (c) { + +#if (NGX_DEBUG) + if (c->log->log_level & NGX_LOG_DEBUG_EVENT) { + ngx_log_handler_pt handler; + + handler = c->log->handler; + c->log->handler = NULL; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0, + "recvmsg: fd:%d n:%z", c->fd, n); + + c->log->handler = handler; + } +#endif + + ngx_memzero(&buf, sizeof(ngx_buf_t)); + + buf.pos = buffer; + buf.last = buffer + n; + + rev = c->read; + + c->udp->buffer = &buf; + rev->ready = 1; + + rev->handler(rev); + + c->udp->buffer = NULL; + rev->ready = 0; + + goto next; + } + #if (NGX_STAT_STUB) (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1); #endif @@ -257,6 +309,7 @@ ngx_event_recvmsg(ngx_event_t *ev) *log = ls->log; + c->recv = ngx_udp_shared_recv; c->send = ngx_udp_send; c->send_chain = ngx_udp_send_chain; @@ -344,11 +397,18 @@ ngx_event_recvmsg(ngx_event_t *ev) } #endif + if (ngx_insert_udp_connection(c) != NGX_OK) { + ngx_close_accepted_udp_connection(c); + return; + } + log->data = NULL; log->handler = NULL; ls->handler(c); + next: + if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) { ev->available -= n; } @@ -373,4 +433,209 @@ ngx_close_accepted_udp_connection(ngx_co #endif } + +static ssize_t +ngx_udp_shared_recv(ngx_connection_t *c, u_char *buf, size_t size) +{ + ssize_t n; + ngx_buf_t *b; + + if (c->udp == NULL || c->udp->buffer == NULL) { + return NGX_AGAIN; + } + + b = c->udp->buffer; + + n = ngx_min(b->last - b->pos, (ssize_t) size); + + ngx_memcpy(buf, b->pos, n); + + c->udp->buffer = NULL; + c->read->ready = 0; + + return n; +} + + +void +ngx_udp_rbtree_insert_value(ngx_rbtree_node_t *temp, + ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel) +{ + ngx_int_t rc; + ngx_connection_t *c, *ct; + ngx_rbtree_node_t **p; + ngx_udp_connection_t *udp, *udpt; + + for ( ;; ) { + + if (node->key < temp->key) { + + p = &temp->left; + + } else if (node->key > temp->key) { + + p = &temp->right; + + } else { /* node->key == temp->key */ + + 
udp = (ngx_udp_connection_t *) node; + c = udp->connection; + + udpt = (ngx_udp_connection_t *) temp; + ct = udpt->connection; + + rc = ngx_cmp_sockaddr(c->sockaddr, c->socklen, + ct->sockaddr, ct->socklen, 1); + + if (rc == 0 && c->listening->wildcard) { + rc = ngx_cmp_sockaddr(c->local_sockaddr, c->local_socklen, + ct->local_sockaddr, ct->local_socklen, 1); + } + + p = (rc < 0) ? &temp->left : &temp->right; + } + + if (*p == sentinel) { + break; + } + + temp = *p; + } + + *p = node; + node->parent = temp; + node->left = sentinel; + node->right = sentinel; + ngx_rbt_red(node); +} + + +static ngx_int_t +ngx_insert_udp_connection(ngx_connection_t *c) +{ + uint32_t hash; + ngx_pool_cleanup_t *cln; + ngx_udp_connection_t *udp; + + if (c->udp) { + return NGX_OK; + } + + udp = ngx_pcalloc(c->pool, sizeof(ngx_udp_connection_t)); + if (udp == NULL) { + return NGX_ERROR; + } + + udp->connection = c; + + ngx_crc32_init(hash); + ngx_crc32_update(&hash, (u_char *) c->sockaddr, c->socklen); + + if (c->listening->wildcard) { + ngx_crc32_update(&hash, (u_char *) c->local_sockaddr, c->local_socklen); + } + + ngx_crc32_final(hash); + + udp->node.key = hash; + + cln = ngx_pool_cleanup_add(c->pool, 0); + if (cln == NULL) { + return NGX_ERROR; + } + + cln->data = c; + cln->handler = ngx_delete_udp_connection; + + ngx_rbtree_insert(&c->listening->rbtree, &udp->node); + + c->udp = udp; + + return NGX_OK; +} + + +static void +ngx_delete_udp_connection(void *data) +{ + ngx_connection_t *c = data; + + ngx_rbtree_delete(&c->listening->rbtree, &c->udp->node); +} + + +static ngx_connection_t * +ngx_lookup_udp_connection(ngx_listening_t *ls, struct sockaddr *sockaddr, + socklen_t socklen, struct sockaddr *local_sockaddr, socklen_t local_socklen) +{ + uint32_t hash; + ngx_int_t rc; + ngx_connection_t *c; + ngx_rbtree_node_t *node, *sentinel; + ngx_udp_connection_t *udp; + +#if (NGX_HAVE_UNIX_DOMAIN) + + if (sockaddr->sa_family == AF_UNIX) { + struct sockaddr_un *saun = (struct sockaddr_un *) sockaddr; + + if (socklen <= (socklen_t) offsetof(struct sockaddr_un, sun_path) + || saun->sun_path[0] == '\0') + { + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ngx_cycle->log, 0, + "unbound unix socket"); + return NULL; + } + } + #endif + + node = ls->rbtree.root; + sentinel = ls->rbtree.sentinel; + + ngx_crc32_init(hash); + ngx_crc32_update(&hash, (u_char *) sockaddr, socklen); + + if (ls->wildcard) { + ngx_crc32_update(&hash, (u_char *) local_sockaddr, local_socklen); + } + + ngx_crc32_final(hash); + + while (node != sentinel) { + + if (hash < node->key) { + node = node->left; + continue; + } + + if (hash > node->key) { + node = node->right; + continue; + } + + /* hash == node->key */ + + udp = (ngx_udp_connection_t *) node; + + c = udp->connection; + + rc = ngx_cmp_sockaddr(sockaddr, socklen, + c->sockaddr, c->socklen, 1); + + if (rc == 0 && ls->wildcard) { + rc = ngx_cmp_sockaddr(local_sockaddr, local_socklen, + c->local_sockaddr, c->local_socklen, 1); + } + + if (rc == 0) { + return c; + } + + node = (rc < 0) ? 
node->left : node->right; + } + + return NULL; +} + +#endif diff -r 88a624c9b491 -r d27aa9060c95 src/stream/ngx_stream_proxy_module.c --- a/src/stream/ngx_stream_proxy_module.c Fri Jun 01 16:55:49 2018 +0300 +++ b/src/stream/ngx_stream_proxy_module.c Mon Jun 04 19:50:00 2018 +0300 @@ -377,6 +377,8 @@ ngx_stream_proxy_handler(ngx_stream_sess s->log_handler = ngx_stream_proxy_log_error; + u->requests = 1; + u->peer.log = c->log; u->peer.log_error = NGX_ERROR_ERR; @@ -398,21 +400,19 @@ ngx_stream_proxy_handler(ngx_stream_sess return; } - if (c->type == SOCK_STREAM) { - p = ngx_pnalloc(c->pool, pscf->buffer_size); - if (p == NULL) { - ngx_stream_proxy_finalize(s, NGX_STREAM_INTERNAL_SERVER_ERROR); - return; - } - - u->downstream_buf.start = p; - u->downstream_buf.end = p + pscf->buffer_size; - u->downstream_buf.pos = p; - u->downstream_buf.last = p; - - if (c->read->ready) { - ngx_post_event(c->read, &ngx_posted_events); - } + p = ngx_pnalloc(c->pool, pscf->buffer_size); + if (p == NULL) { + ngx_stream_proxy_finalize(s, NGX_STREAM_INTERNAL_SERVER_ERROR); + return; + } + + u->downstream_buf.start = p; + u->downstream_buf.end = p + pscf->buffer_size; + u->downstream_buf.pos = p; + u->downstream_buf.last = p; + + if (c->read->ready) { + ngx_post_event(c->read, &ngx_posted_events); } if (pscf->upstream_value) { @@ -829,7 +829,6 @@ ngx_stream_proxy_init_upstream(ngx_strea cl->buf->tag = (ngx_buf_tag_t) &ngx_stream_proxy_module; cl->buf->flush = 1; - cl->buf->last_buf = (c->type == SOCK_DGRAM); cl->next = u->upstream_out; u->upstream_out = cl; @@ -871,17 +870,12 @@ ngx_stream_proxy_init_upstream(ngx_strea u->proxy_protocol = 0; } - if (c->type == SOCK_DGRAM && pscf->responses == 0) { - pc->read->ready = 0; - pc->read->eof = 1; - } - u->connected = 1; pc->read->handler = ngx_stream_proxy_upstream_handler; pc->write->handler = ngx_stream_proxy_upstream_handler; - if (pc->read->ready || pc->read->eof) { + if (pc->read->ready) { ngx_post_event(pc->read, &ngx_posted_events); } @@ -1280,6 +1274,7 @@ static void ngx_stream_proxy_process_connection(ngx_event_t *ev, ngx_uint_t from_upstream) { ngx_connection_t *c, *pc; + ngx_log_handler_pt handler; ngx_stream_session_t *s; ngx_stream_upstream_t *u; ngx_stream_proxy_srv_conf_t *pscf; @@ -1328,25 +1323,37 @@ ngx_stream_proxy_process_connection(ngx_ * with unspecified number of responses */ - pc->read->ready = 0; - pc->read->eof = 1; - - ngx_stream_proxy_process(s, 1, 0); + handler = c->log->handler; + c->log->handler = NULL; + + ngx_log_error(NGX_LOG_INFO, c->log, 0, + "udp timed out" + ", packets from/to client:%ui/%ui" + ", bytes from/to client:%O/%O" + ", bytes from/to upstream:%O/%O", + u->requests, u->responses, + s->received, c->sent, u->received, + pc ? 
pc->sent : 0); + + c->log->handler = handler; + + ngx_stream_proxy_finalize(s, NGX_STREAM_OK); return; } ngx_connection_error(pc, NGX_ETIMEDOUT, "upstream timed out"); - if (u->received == 0) { - ngx_stream_proxy_next_upstream(s); - return; - } - - } else { - ngx_connection_error(c, NGX_ETIMEDOUT, "connection timed out"); + pc->read->error = 1; + + ngx_stream_proxy_finalize(s, NGX_STREAM_BAD_GATEWAY); + + return; } + ngx_connection_error(c, NGX_ETIMEDOUT, "connection timed out"); + ngx_stream_proxy_finalize(s, NGX_STREAM_OK); + return; } @@ -1453,7 +1460,7 @@ ngx_stream_proxy_process(ngx_stream_sess ssize_t n; ngx_buf_t *b; ngx_int_t rc; - ngx_uint_t flags; + ngx_uint_t flags, *packets; ngx_msec_t delay; ngx_chain_t *cl, **ll, **out, **busy; ngx_connection_t *c, *pc, *src, *dst; @@ -1489,6 +1496,7 @@ ngx_stream_proxy_process(ngx_stream_sess b = &u->upstream_buf; limit_rate = pscf->download_rate; received = &u->received; + packets = &u->responses; out = &u->downstream_out; busy = &u->downstream_busy; recv_action = "proxying and reading from upstream"; @@ -1500,6 +1508,7 @@ ngx_stream_proxy_process(ngx_stream_sess b = &u->downstream_buf; limit_rate = pscf->upload_rate; received = &s->received; + packets = &u->requests; out = &u->upstream_out; busy = &u->upstream_busy; recv_action = "proxying and reading from client"; @@ -1516,11 +1525,6 @@ ngx_stream_proxy_process(ngx_stream_sess rc = ngx_stream_top_filter(s, *out, from_upstream); if (rc == NGX_ERROR) { - if (c->type == SOCK_DGRAM && !from_upstream) { - ngx_stream_proxy_next_upstream(s); - return; - } - ngx_stream_proxy_finalize(s, NGX_STREAM_OK); return; } @@ -1565,11 +1569,6 @@ ngx_stream_proxy_process(ngx_stream_sess } if (n == NGX_ERROR) { - if (c->type == SOCK_DGRAM && u->received == 0) { - ngx_stream_proxy_next_upstream(s); - return; - } - src->read->eof = 1; n = 0; } @@ -1591,12 +1590,6 @@ ngx_stream_proxy_process(ngx_stream_sess } } - if (c->type == SOCK_DGRAM && ++u->responses == pscf->responses) - { - src->read->ready = 0; - src->read->eof = 1; - } - for (ll = out; *ll; ll = &(*ll)->next) { /* void */ } cl = ngx_chain_get_free_buf(c->pool, &u->free); @@ -1616,6 +1609,7 @@ ngx_stream_proxy_process(ngx_stream_sess cl->buf->last_buf = src->read->eof; cl->buf->flush = 1; + (*packets)++; *received += n; b->last += n; do_write = 1; @@ -1629,15 +1623,38 @@ ngx_stream_proxy_process(ngx_stream_sess c->log->action = "proxying connection"; - if (src->read->eof && dst && (dst->read->eof || !dst->buffered)) { + if (c->type == SOCK_DGRAM + && pscf->responses != NGX_MAX_INT32_VALUE + && u->responses >= pscf->responses * u->requests + && !src->buffered && dst && !dst->buffered) + { handler = c->log->handler; c->log->handler = NULL; ngx_log_error(NGX_LOG_INFO, c->log, 0, - "%s%s disconnected" + "udp done" + ", packets from/to client:%ui/%ui" ", bytes from/to client:%O/%O" ", bytes from/to upstream:%O/%O", - src->type == SOCK_DGRAM ? "udp " : "", + u->requests, u->responses, + s->received, c->sent, u->received, pc ? pc->sent : 0); + + c->log->handler = handler; + + ngx_stream_proxy_finalize(s, NGX_STREAM_OK); + return; + } + + if (c->type == SOCK_STREAM + && src->read->eof && dst && (dst->read->eof || !dst->buffered)) + { + handler = c->log->handler; + c->log->handler = NULL; + + ngx_log_error(NGX_LOG_INFO, c->log, 0, + "%s disconnected" + ", bytes from/to client:%O/%O" + ", bytes from/to upstream:%O/%O", from_upstream ? "upstream" : "client", s->received, c->sent, u->received, pc ? 
pc->sent : 0); @@ -1739,6 +1756,7 @@ ngx_stream_proxy_next_upstream(ngx_strea static void ngx_stream_proxy_finalize(ngx_stream_session_t *s, ngx_uint_t rc) { + ngx_uint_t state; ngx_connection_t *pc; ngx_stream_upstream_t *u; @@ -1768,7 +1786,15 @@ ngx_stream_proxy_finalize(ngx_stream_ses } if (u->peer.free && u->peer.sockaddr) { - u->peer.free(&u->peer, u->peer.data, 0); + state = 0; + + if (pc && pc->type == SOCK_DGRAM + && (pc->read->error || pc->write->error)) + { + state = NGX_PEER_FAILED; + } + + u->peer.free(&u->peer, u->peer.data, state); u->peer.sockaddr = NULL; } diff -r 88a624c9b491 -r d27aa9060c95 src/stream/ngx_stream_upstream.h --- a/src/stream/ngx_stream_upstream.h Fri Jun 01 16:55:49 2018 +0300 +++ b/src/stream/ngx_stream_upstream.h Mon Jun 04 19:50:00 2018 +0300 @@ -128,6 +128,7 @@ typedef struct { off_t received; time_t start_sec; + ngx_uint_t requests; ngx_uint_t responses; ngx_str_t ssl_name; From xeioex at nginx.com Tue Jun 5 12:21:38 2018 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Tue, 05 Jun 2018 12:21:38 +0000 Subject: [njs] Using njs_vm_error() where appropriate. Message-ID: details: http://hg.nginx.org/njs/rev/bc3f64aab9f9 branches: changeset: 533:bc3f64aab9f9 user: Dmitry Volyntsev date: Tue Jun 05 15:21:20 2018 +0300 description: Using njs_vm_error() where appropriate. diffstat: nginx/ngx_http_js_module.c | 95 ++++++++++++++++----------------------------- 1 files changed, 35 insertions(+), 60 deletions(-) diffs (249 lines): diff -r c56687caf3a5 -r bc3f64aab9f9 nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Thu May 31 18:55:35 2018 +0300 +++ b/nginx/ngx_http_js_module.c Tue Jun 05 15:21:20 2018 +0300 @@ -1305,28 +1305,27 @@ ngx_http_js_ext_return(njs_vm_t *vm, njs { nxt_str_t text; ngx_int_t status; - const char *description; njs_value_t *value; ngx_http_js_ctx_t *ctx; ngx_http_request_t *r; ngx_http_complex_value_t cv; if (nargs < 2) { - description = "too few arguments"; - goto exception; + njs_vm_error(vm, "too few arguments"); + return NJS_ERROR; } value = njs_argument(args, 1); if (!njs_value_is_valid_number(value)) { - description = "code is not a number"; - goto exception; + njs_vm_error(vm, "code is not a number"); + return NJS_ERROR; } status = njs_value_number(value); if (status < 0 || status > 999) { - description = "code is out of range"; - goto exception; + njs_vm_error(vm, "code is out of range"); + return NJS_ERROR; } if (nargs < 3) { @@ -1337,8 +1336,8 @@ ngx_http_js_ext_return(njs_vm_t *vm, njs if (njs_vm_value_to_ext_string(vm, &text, njs_argument(args, 2), 0) == NJS_ERROR) { - description = "failed to convert text"; - goto exception; + njs_vm_error(vm, "failed to convert text"); + return NJS_ERROR; } } @@ -1355,8 +1354,8 @@ ngx_http_js_ext_return(njs_vm_t *vm, njs ctx->status = ngx_http_send_response(r, status, NULL, &cv); if (ctx->status == NGX_ERROR) { - description = "failed to send response"; - goto exception; + njs_vm_error(vm, "failed to send response"); + return NJS_ERROR; } } else { @@ -1364,12 +1363,6 @@ ngx_http_js_ext_return(njs_vm_t *vm, njs } return NJS_OK; - -exception: - - njs_value_error_set(vm, njs_vm_retval(vm), description, NULL); - - return NJS_ERROR; } @@ -1708,7 +1701,6 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, ngx_int_t rc; nxt_str_t uri_arg, args_arg, method_name, body_arg; ngx_uint_t cb_index, method, n, has_body; - const char *description; njs_value_t *arg2, *options, *value; njs_function_t *callback; ngx_http_request_t *r, *sr; @@ -1740,8 +1732,8 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, 
static const nxt_str_t body_key = nxt_string("body"); if (nargs < 2) { - description = "too few arguments"; - goto exception; + njs_vm_error(vm, "too few arguments"); + return NJS_ERROR; } r = njs_value_data(njs_argument(args, 0)); @@ -1749,8 +1741,8 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, if (njs_vm_value_to_ext_string(vm, &uri_arg, njs_argument(args, 1), 0) == NJS_ERROR) { - description = "failed to convert uri arg"; - goto exception; + njs_vm_error(vm, "failed to convert uri arg"); + return NJS_ERROR; } options = NULL; @@ -1770,13 +1762,13 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, if (njs_vm_value_to_ext_string(vm, &args_arg, arg2, 0) == NJS_ERROR) { - description = "failed to convert args"; - goto exception; + njs_vm_error(vm, "failed to convert args"); + return NJS_ERROR; } } else { - description = "failed to convert args"; - goto exception; + njs_vm_error(vm, "failed to convert args"); + return NJS_ERROR; } cb_index = 3; @@ -1791,8 +1783,8 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, if (njs_vm_value_to_ext_string(vm, &args_arg, value, 0) == NJS_ERROR) { - description = "failed to convert options.args"; - goto exception; + njs_vm_error(vm, "failed to convert options.args"); + return NJS_ERROR; } } @@ -1801,8 +1793,8 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, if (njs_vm_value_to_ext_string(vm, &method_name, value, 0) == NJS_ERROR) { - description = "failed to convert options.method"; - goto exception; + njs_vm_error(vm, "failed to convert options.method"); + return NJS_ERROR; } n = sizeof(methods) / sizeof(methods[0]); @@ -1820,11 +1812,8 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, } if (method == n) { - njs_value_error_set(vm, njs_vm_retval(vm), - "unknown method \"%.*s\"", - (int) method_name.length, - method_name.start); - + njs_vm_error(vm, "unknown method \"%.*s\"", + (int) method_name.length, method_name.start); return NJS_ERROR; } } @@ -1834,8 +1823,8 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, if (njs_vm_value_to_ext_string(vm, &body_arg, value, 0) == NJS_ERROR) { - description = "failed to convert options.body"; - goto exception; + njs_vm_error(vm, "failed to convert options.body"); + return NJS_ERROR; } has_body = 1; @@ -1846,8 +1835,8 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, if (cb_index < nargs) { if (!njs_value_is_function(njs_argument(args, cb_index))) { - description = "callback is not a function"; - goto exception; + njs_vm_error(vm, "callback is not a function"); + return NJS_ERROR; } else { callback = njs_value_function(njs_argument(args, cb_index)); @@ -1893,12 +1882,6 @@ ngx_http_js_ext_subrequest(njs_vm_t *vm, } return NJS_OK; - -exception: - - njs_value_error_set(vm, njs_vm_retval(vm), description, NULL); - - return NJS_ERROR; } @@ -1908,7 +1891,6 @@ ngx_http_js_subrequest(ngx_http_request_ { ngx_int_t flags; ngx_str_t uri, args; - const char *description; njs_vm_event_t vm_event; ngx_http_js_ctx_t *ctx; ngx_http_post_subrequest_t *ps; @@ -1920,14 +1902,14 @@ ngx_http_js_subrequest(ngx_http_request_ if (callback != NULL) { ps = ngx_palloc(r->pool, sizeof(ngx_http_post_subrequest_t)); if (ps == NULL) { - description = "internal error"; - goto exception; + njs_vm_error(ctx->vm, "internal error"); + return NJS_ERROR; } vm_event = njs_vm_add_event(ctx->vm, callback, NULL, NULL); if (vm_event == NULL) { - description = "internal error"; - goto exception; + njs_vm_error(ctx->vm, "internal error"); + return NJS_ERROR; } ps->handler = ngx_http_js_subrequest_done; @@ -1953,17 +1935,11 @@ ngx_http_js_subrequest(ngx_http_request_ njs_vm_del_event(ctx->vm, 
vm_event); } - description = "subrequest creation failed"; - goto exception; + njs_vm_error(ctx->vm, "subrequest creation failed"); + return NJS_ERROR; } return NJS_OK; - -exception: - - njs_value_error_set(ctx->vm, njs_vm_retval(ctx->vm), description, NULL); - - return NJS_ERROR; } @@ -2040,8 +2016,7 @@ ngx_http_js_ext_get_parent(njs_vm_t *vm, ctx = ngx_http_get_module_ctx(r->parent, ngx_http_js_module); if (ctx == NULL) { - njs_value_error_set(vm, njs_vm_retval(vm), - "failed to get the parent context", NULL); + njs_vm_error(vm, "failed to get the parent context"); return NJS_ERROR; } From mdounin at mdounin.ru Tue Jun 5 13:51:56 2018 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 05 Jun 2018 13:51:56 +0000 Subject: [nginx] nginx-1.15.0-RELEASE Message-ID: details: http://hg.nginx.org/nginx/rev/990b3e885636 branches: changeset: 7287:990b3e885636 user: Maxim Dounin date: Tue Jun 05 16:47:25 2018 +0300 description: nginx-1.15.0-RELEASE diffstat: docs/xml/nginx/changes.xml | 131 +++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 131 insertions(+), 0 deletions(-) diffs (141 lines): diff --git a/docs/xml/nginx/changes.xml b/docs/xml/nginx/changes.xml --- a/docs/xml/nginx/changes.xml +++ b/docs/xml/nginx/changes.xml @@ -5,6 +5,137 @@ + + + + +????????? "ssl" ?????? ????????? ??????????; +?????? ??? ??????? ???????????? ???????? ssl ????????? listen. + + +the "ssl" directive is deprecated; +the "ssl" parameter of the "listen" directive should be used instead. + + + + + +?????? ??? ????????????? ????????? listen ? ?????????? ssl +nginx ?????????? ?????????? SSL-???????????? ??? ???????????? ????????????. + + +now nginx detects missing SSL certificates during configuration testing +when using the "ssl" parameter of the "listen" directive. + + + + + +?????? ?????? stream ????? ???????????? +????????? ???????? UDP-??????? ?? ??????? ? ?????? ????? ??????. + + +now the stream module can handle +multiple incoming UDP datagrams from a client within a single session. + + + + + +? ????????? proxy_cache_valid +????? ???? ??????? ???????????? ??? ??????. + + +it was possible to specify an incorrect response code +in the "proxy_cache_valid" directive. + + + + + +nginx ?? ????????? gcc 8.1. + + +nginx could not be built by gcc 8.1. + + + + + +???????????? ? syslog ??????????????? ??? ????????? ?????????? IP-??????. + + +logging to syslog stopped on local IP address changes. + + + + + +nginx ?? ????????? ???????????? clang, ???? ??? ?????????? CUDA SDK; +?????? ????????? ? 1.13.8. + + +nginx could not be built by clang with CUDA SDK installed; +the bug had appeared in 1.13.8. + + + + + +??? ????????????? unix domain listen-??????? ?? FreeBSD +? ???????? ?????????? ???????????? ????? +? ???? ????? ?????????? ????????? "getsockopt(TCP_FASTOPEN) ... failed". + + +"getsockopt(TCP_FASTOPEN) ... failed" messages might appear in logs +during binary upgrade +when using unix domain listen sockets on FreeBSD. + + + + + +nginx ?? ????????? ?? Fedora 28 Linux. + + +nginx could not be built on Fedora 28 Linux. + + + + + +??? ????????????? ????????? limit_req +???????? ???????? ????????? ???????? ????? ?? ???????????. + + +request processing rate might exceed configured rate +when using the "limit_req" directive. + + + + + +? ????????? ??????? ???????? ??? ????????????? unix domain listen-??????? +??? ?????? ? ???????????? ?? Linux. + + +in handling of client addresses when using unix domain listen sockets +to work with datagrams on Linux. + + + + + +? ????????? ?????? ????????? ??????. 
+ + +in memory allocation error handling. + + + + + + From mdounin at mdounin.ru Tue Jun 5 13:51:58 2018 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 05 Jun 2018 13:51:58 +0000 Subject: [nginx] release-1.15.0 tag Message-ID: details: http://hg.nginx.org/nginx/rev/4cb3eca3755f branches: changeset: 7288:4cb3eca3755f user: Maxim Dounin date: Tue Jun 05 16:47:25 2018 +0300 description: release-1.15.0 tag diffstat: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diffs (8 lines): diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -425,3 +425,4 @@ fb1212c7eca4c5328fe17d6cd95b010c67336aac 31c929e16910c38492581ef474e72fa67c28f124 release-1.13.10 64179f242cb55fc206bca59de9bfdc4cf5ebcec7 release-1.13.11 051e5fa03b92b8a564f6b12debd483d267391e82 release-1.13.12 +990b3e885636d763b97ed02d0d2cfc161a4e0c09 release-1.15.0 From mdounin at mdounin.ru Tue Jun 5 16:24:22 2018 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 05 Jun 2018 16:24:22 +0000 Subject: [nginx] Version bump. Message-ID: details: http://hg.nginx.org/nginx/rev/cdf7a511197a branches: changeset: 7289:cdf7a511197a user: Maxim Dounin date: Tue Jun 05 17:13:17 2018 +0300 description: Version bump. diffstat: src/core/nginx.h | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diffs (14 lines): diff --git a/src/core/nginx.h b/src/core/nginx.h --- a/src/core/nginx.h +++ b/src/core/nginx.h @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1015000 -#define NGINX_VERSION "1.15.0" +#define nginx_version 1015001 +#define NGINX_VERSION "1.15.1" #define NGINX_VER "nginx/" NGINX_VERSION #ifdef NGX_BUILD From mdounin at mdounin.ru Tue Jun 5 16:24:24 2018 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 05 Jun 2018 16:24:24 +0000 Subject: [nginx] Added missing space after ngx_close_socket_n. Message-ID: details: http://hg.nginx.org/nginx/rev/91ea68dd1501 branches: changeset: 7290:91ea68dd1501 user: Maxim Dounin date: Tue Jun 05 17:41:34 2018 +0300 description: Added missing space after ngx_close_socket_n. 
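The root cause is plain C string-literal concatenation: ngx_close_socket_n expands to a string literal, so a literal written right after it is glued on with no separator unless the space is spelled out. A minimal self-contained demonstration, using a stand-in macro (the exact value of nginx's ngx_close_socket_n is not shown in this patch, so the literal below is illustrative only):

#include <stdio.h>

/* stand-in for ngx_close_socket_n, which is defined elsewhere in the nginx sources */
#define demo_close_socket_n  "close() socket"

int
main(void)
{
    /* adjacent string literals are concatenated by the compiler */
    printf("%s\n", demo_close_socket_n "failed");   /* close() socketfailed  */
    printf("%s\n", demo_close_socket_n " failed");  /* close() socket failed */
    return 0;
}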
diffstat: src/core/ngx_resolver.c | 4 ++-- src/event/ngx_event_connect.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diffs (33 lines): diff --git a/src/core/ngx_resolver.c b/src/core/ngx_resolver.c --- a/src/core/ngx_resolver.c +++ b/src/core/ngx_resolver.c @@ -4402,7 +4402,7 @@ ngx_udp_connect(ngx_resolver_connection_ if (c == NULL) { if (ngx_close_socket(s) == -1) { ngx_log_error(NGX_LOG_ALERT, &rec->log, ngx_socket_errno, - ngx_close_socket_n "failed"); + ngx_close_socket_n " failed"); } return NGX_ERROR; @@ -4488,7 +4488,7 @@ ngx_tcp_connect(ngx_resolver_connection_ if (c == NULL) { if (ngx_close_socket(s) == -1) { ngx_log_error(NGX_LOG_ALERT, &rec->log, ngx_socket_errno, - ngx_close_socket_n "failed"); + ngx_close_socket_n " failed"); } return NGX_ERROR; diff --git a/src/event/ngx_event_connect.c b/src/event/ngx_event_connect.c --- a/src/event/ngx_event_connect.c +++ b/src/event/ngx_event_connect.c @@ -55,7 +55,7 @@ ngx_event_connect_peer(ngx_peer_connecti if (c == NULL) { if (ngx_close_socket(s) == -1) { ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno, - ngx_close_socket_n "failed"); + ngx_close_socket_n " failed"); } return NGX_ERROR; From ngx_ut0mt8 at futomaki.net Tue Jun 5 19:41:53 2018 From: ngx_ut0mt8 at futomaki.net (Raphael Mazelier) Date: Tue, 5 Jun 2018 21:41:53 +0200 Subject: Problem using %V in snprintf Message-ID: <0c9509be-e9b6-c679-759f-85b4c71d998a@futomaki.net> Hello Ngx devs, I'm unsure if this is the right mailing list for posting questions about coding nginx modules. Anyway, here it is. So I'm hacking a bit on the nginx-rtmp-module (you can see my fork here: https://github.com/ut0mt8/nginx-rtmp-module/tree/cenc/dash/ ) A detail puzzles me. "wdv_data" is a config directive filled with base64 text: { ngx_string("dash_wdv_data"), NGX_RTMP_MAIN_CONF|NGX_RTMP_SRV_CONF|NGX_RTMP_APP_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_slot, NGX_RTMP_APP_CONF_OFFSET, offsetof(ngx_rtmp_dash_app_conf_t, wdv_data), NULL }, When I try to use it as is, and write it out using: p = buffer; p = ngx_slprintf(p, last, "- %V -", dacf->wdv_data); n = ngx_write_fd(fd, buffer, p - buffer); I got a segfault in ngx_vslprintf. #0 0x0000000000418430 in ngx_vslprintf (buf=0xd3cf2c " \n", last=0xe3cce0 "P0Y00M00DT0H00M04.000S", fmt=0x4e12f9 "V -", args=args at entry=0x7ffc07eef6f8) at src/core/ngx_string.c:237 237 len = ngx_min(((size_t) (last - buf)), v->len); It seems that v->len is not filled?! (gdb) print v->len Cannot access memory at address 0x90 Using the form: p = ngx_slprintf(p, last, "- %S -", dacf->wdv_data.data); works well. Is %V not supposed to be used with ngx_str_t? Best, -- Raphael Mazelier From eran.kornblau at kaltura.com Tue Jun 5 19:46:19 2018 From: eran.kornblau at kaltura.com (Eran Kornblau) Date: Tue, 5 Jun 2018 19:46:19 +0000 Subject: Problem using %V in snprintf In-Reply-To: <0c9509be-e9b6-c679-759f-85b4c71d998a@futomaki.net> References: <0c9509be-e9b6-c679-759f-85b4c71d998a@futomaki.net> Message-ID: > -----Original Message----- > From: nginx-devel [mailto:nginx-devel-bounces at nginx.org] On Behalf Of Raphael Mazelier > Sent: Tuesday, June 5, 2018 10:42 PM > To: nginx-devel at nginx.org > Subject: Problem using %V in snprintf > > Hello Ngx devs, > > I'm unsure if this is the right mailing list for posting questions about coding nginx modules. Anyway, here it is. > > So I'm hacking a bit on the nginx-rtmp-module (you can see my fork here: > https://github.com/ut0mt8/nginx-rtmp-module/tree/cenc/dash/ ) > > A detail puzzles me. 
> > "wdv_data" is config directive filled with base64 text: > > { ngx_string("dash_wdv_data"), > NGX_RTMP_MAIN_CONF|NGX_RTMP_SRV_CONF|NGX_RTMP_APP_CONF|NGX_CONF_TAKE1, > ngx_conf_set_str_slot, > NGX_RTMP_APP_CONF_OFFSET, > offsetof(ngx_rtmp_dash_app_conf_t, wdv_data), > NULL }, > > When I'm trying to use as it, and to write it using : > > p = buffer; > p = ngx_slprintf(p, last, "- %V -", dacf->wdv_data); n = ngx_write_fd(fd, buffer, p - buffer); > You are missing a '&', %V expects ngx_str_t* while you passed ngx_str_t Eran > I got a segfault in ngx_vslprintf. > #0 0x0000000000418430 in ngx_vslprintf (buf=0xd3cf2c " \n", last=0xe3cce0 "P0Y00M00DT0H00M04.000S", > fmt=0x4e12f9 "V -", args=args at entry=0x7ffc07eef6f8) at > src/core/ngx_string.c:237 > 237 len = ngx_min(((size_t) (last - buf)), v->len); > > It seems that v->len is not filled ?! > (gdb) print v->len > Cannot access memory at address 0x90 > > Using the form : > > p = ngx_slprintf(p, last, "- %S -", dacf->wdv_data.data); > > work well. > > Is %V not supposed to be used with ngx_str_t ? > > Best, > > -- > Raphael Mazelier > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > From pluknet at nginx.com Wed Jun 6 15:48:18 2018 From: pluknet at nginx.com (Sergey Kandaurov) Date: Wed, 06 Jun 2018 15:48:18 +0000 Subject: [nginx] SSL: removed extra prototype. Message-ID: details: http://hg.nginx.org/nginx/rev/3482c069e050 branches: changeset: 7291:3482c069e050 user: Sergey Kandaurov date: Wed Jun 06 13:31:05 2018 +0300 description: SSL: removed extra prototype. diffstat: src/event/ngx_event_openssl.c | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diffs (11 lines): diff -r 91ea68dd1501 -r 3482c069e050 src/event/ngx_event_openssl.c --- a/src/event/ngx_event_openssl.c Tue Jun 05 17:41:34 2018 +0300 +++ b/src/event/ngx_event_openssl.c Wed Jun 06 13:31:05 2018 +0300 @@ -35,7 +35,6 @@ static void ngx_ssl_clear_error(ngx_log_ static ngx_int_t ngx_ssl_session_id_context(ngx_ssl_t *ssl, ngx_str_t *sess_ctx); -ngx_int_t ngx_ssl_session_cache_init(ngx_shm_zone_t *shm_zone, void *data); static int ngx_ssl_new_session(ngx_ssl_conn_t *ssl_conn, ngx_ssl_session_t *sess); static ngx_ssl_session_t *ngx_ssl_get_cached_session(ngx_ssl_conn_t *ssl_conn, From ru at nginx.com Thu Jun 7 19:01:16 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 07 Jun 2018 19:01:16 +0000 Subject: [nginx] Allowed digits, '+', '-', and '.' in scheme names as per RFC 3986. Message-ID: details: http://hg.nginx.org/nginx/rev/f9661f56c717 branches: changeset: 7292:f9661f56c717 user: Ruslan Ermilov date: Thu May 24 12:06:35 2018 +0300 description: Allowed digits, '+', '-', and '.' in scheme names as per RFC 3986. diffstat: src/http/ngx_http_parse.c | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (15 lines): diff -r 3482c069e050 -r f9661f56c717 src/http/ngx_http_parse.c --- a/src/http/ngx_http_parse.c Wed Jun 06 13:31:05 2018 +0300 +++ b/src/http/ngx_http_parse.c Thu May 24 12:06:35 2018 +0300 @@ -307,6 +307,11 @@ ngx_http_parse_request_line(ngx_http_req break; } + if ((ch >= '0' && ch <= '9') || ch == '+' || ch == '-' || ch == '.') + { + break; + } + switch (ch) { case ':': r->schema_end = p; From ru at nginx.com Thu Jun 7 19:01:17 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 07 Jun 2018 19:01:17 +0000 Subject: [nginx] HTTP/2: validate client request scheme. 
Message-ID: details: http://hg.nginx.org/nginx/rev/d588987701f4 branches: changeset: 7293:d588987701f4 user: Ruslan Ermilov date: Thu Jun 07 11:47:10 2018 +0300 description: HTTP/2: validate client request scheme. The scheme is validated as per RFC 3986, Section 3.1. diffstat: src/http/v2/ngx_http_v2.c | 23 +++++++++++++++++++++++ 1 files changed, 23 insertions(+), 0 deletions(-) diffs (40 lines): diff -r f9661f56c717 -r d588987701f4 src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Thu May 24 12:06:35 2018 +0300 +++ b/src/http/v2/ngx_http_v2.c Thu Jun 07 11:47:10 2018 +0300 @@ -3474,6 +3474,9 @@ ngx_http_v2_parse_method(ngx_http_reques static ngx_int_t ngx_http_v2_parse_scheme(ngx_http_request_t *r, ngx_str_t *value) { + u_char c, ch; + ngx_uint_t i; + if (r->schema_start) { ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, "client sent duplicate :scheme header"); @@ -3488,6 +3491,26 @@ ngx_http_v2_parse_scheme(ngx_http_reques return NGX_DECLINED; } + for (i = 0; i < value->len; i++) { + ch = value->data[i]; + + c = (u_char) (ch | 0x20); + if (c >= 'a' && c <= 'z') { + continue; + } + + if (((ch >= '0' && ch <= '9') || ch == '+' || ch == '-' || ch == '.') + && i > 0) + { + continue; + } + + ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, + "client sent invalid :scheme header: \"%V\"", value); + + return NGX_DECLINED; + } + r->schema_start = value->data; r->schema_end = value->data + value->len; From ru at nginx.com Thu Jun 7 19:01:19 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 07 Jun 2018 19:01:19 +0000 Subject: [nginx] Removed extraneous check while processing request line. Message-ID: details: http://hg.nginx.org/nginx/rev/21ad2af3262c branches: changeset: 7294:21ad2af3262c user: Ruslan Ermilov date: Thu Jun 07 19:53:43 2018 +0300 description: Removed extraneous check while processing request line. diffstat: src/http/ngx_http_request.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r d588987701f4 -r 21ad2af3262c src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Thu Jun 07 11:47:10 2018 +0300 +++ b/src/http/ngx_http_request.c Thu Jun 07 19:53:43 2018 +0300 @@ -987,7 +987,7 @@ ngx_http_process_request_line(ngx_event_ return; } - if (r->host_start && r->host_end) { + if (r->host_end) { host.len = r->host_end - r->host_start; host.data = r->host_start; From ru at nginx.com Thu Jun 7 19:01:21 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 07 Jun 2018 19:01:21 +0000 Subject: [nginx] Added r->schema. Message-ID: details: http://hg.nginx.org/nginx/rev/89430899c72a branches: changeset: 7295:89430899c72a user: Ruslan Ermilov date: Thu Jun 07 20:01:41 2018 +0300 description: Added r->schema. For HTTP/1, it keeps scheme from the absolute form of URI. For HTTP/2, the :scheme request pseudo-header field value. 
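
Since the scheme is now kept on the request as a plain ngx_str_t, module code can branch on it directly instead of consulting the connection's SSL flag. A hedged sketch of such a check in a hypothetical handler; the comparison mirrors the push code changed in the next changeset:

    /* r is the ngx_http_request_t being processed */
    if (r->schema.len == 5
        && ngx_strncmp(r->schema.data, "https", 5) == 0)
    {
        /* the client request carried an https scheme
           (absolute-form URI in HTTP/1, :scheme in HTTP/2) */
    }

    /* r->schema.len stays 0 for ordinary HTTP/1 origin-form requests,
       which carry no scheme at all */
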
diffstat: src/http/ngx_http_core_module.c | 1 + src/http/ngx_http_request.c | 5 +++++ src/http/ngx_http_request.h | 1 + src/http/v2/ngx_http_v2.c | 15 ++++++--------- 4 files changed, 13 insertions(+), 9 deletions(-) diffs (95 lines): diff -r 21ad2af3262c -r 89430899c72a src/http/ngx_http_core_module.c --- a/src/http/ngx_http_core_module.c Thu Jun 07 19:53:43 2018 +0300 +++ b/src/http/ngx_http_core_module.c Thu Jun 07 20:01:41 2018 +0300 @@ -2318,6 +2318,7 @@ ngx_http_subrequest(ngx_http_request_t * sr->unparsed_uri = r->unparsed_uri; sr->method_name = ngx_http_core_get_method; sr->http_protocol = r->http_protocol; + sr->schema = r->schema; ngx_http_set_exten(sr); diff -r 21ad2af3262c -r 89430899c72a src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c Thu Jun 07 19:53:43 2018 +0300 +++ b/src/http/ngx_http_request.c Thu Jun 07 20:01:41 2018 +0300 @@ -987,6 +987,11 @@ ngx_http_process_request_line(ngx_event_ return; } + if (r->schema_end) { + r->schema.len = r->schema_end - r->schema_start; + r->schema.data = r->schema_start; + } + if (r->host_end) { host.len = r->host_end - r->host_start; diff -r 21ad2af3262c -r 89430899c72a src/http/ngx_http_request.h --- a/src/http/ngx_http_request.h Thu Jun 07 19:53:43 2018 +0300 +++ b/src/http/ngx_http_request.h Thu Jun 07 20:01:41 2018 +0300 @@ -412,6 +412,7 @@ struct ngx_http_request_s { ngx_str_t method_name; ngx_str_t http_protocol; + ngx_str_t schema; ngx_chain_t *out; ngx_http_request_t *main; diff -r 21ad2af3262c -r 89430899c72a src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Thu Jun 07 19:53:43 2018 +0300 +++ b/src/http/v2/ngx_http_v2.c Thu Jun 07 20:01:41 2018 +0300 @@ -2616,16 +2616,14 @@ ngx_http_v2_push_stream(ngx_http_v2_stre r->method_name = ngx_http_core_get_method; r->method = NGX_HTTP_GET; - r->schema_start = (u_char *) "https"; - #if (NGX_HTTP_SSL) if (fc->ssl) { - r->schema_end = r->schema_start + 5; + ngx_str_set(&r->schema, "https"); } else #endif { - r->schema_end = r->schema_start + 4; + ngx_str_set(&r->schema, "http"); } value.data = ngx_pstrdup(pool, path); @@ -3477,7 +3475,7 @@ ngx_http_v2_parse_scheme(ngx_http_reques u_char c, ch; ngx_uint_t i; - if (r->schema_start) { + if (r->schema.len) { ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, "client sent duplicate :scheme header"); @@ -3511,8 +3509,7 @@ ngx_http_v2_parse_scheme(ngx_http_reques return NGX_DECLINED; } - r->schema_start = value->data; - r->schema_end = value->data + value->len; + r->schema = *value; return NGX_OK; } @@ -3575,14 +3572,14 @@ ngx_http_v2_construct_request_line(ngx_h static const u_char ending[] = " HTTP/2.0"; if (r->method_name.len == 0 - || r->schema_start == NULL + || r->schema.len == 0 || r->unparsed_uri.len == 0) { if (r->method_name.len == 0) { ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, "client sent no :method header"); - } else if (r->schema_start == NULL) { + } else if (r->schema.len == 0) { ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, "client sent no :scheme header"); From ru at nginx.com Thu Jun 7 19:01:22 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 07 Jun 2018 19:01:22 +0000 Subject: [nginx] HTTP/2: use scheme from original request for pushes (closes #1549). Message-ID: details: http://hg.nginx.org/nginx/rev/8e6bb4e6045f branches: changeset: 7296:8e6bb4e6045f user: Ruslan Ermilov date: Thu Jun 07 20:04:22 2018 +0300 description: HTTP/2: use scheme from original request for pushes (closes #1549). Instead of the connection scheme, use scheme from the original request. 
This fixes pushes when SSL is terminated by a proxy server in front of nginx. diffstat: src/http/v2/ngx_http_v2.c | 15 ++++++--------- src/http/v2/ngx_http_v2_filter_module.c | 26 ++++++++++++++------------ 2 files changed, 20 insertions(+), 21 deletions(-) diffs (85 lines): diff -r 89430899c72a -r 8e6bb4e6045f src/http/v2/ngx_http_v2.c --- a/src/http/v2/ngx_http_v2.c Thu Jun 07 20:01:41 2018 +0300 +++ b/src/http/v2/ngx_http_v2.c Thu Jun 07 20:04:22 2018 +0300 @@ -2616,15 +2616,12 @@ ngx_http_v2_push_stream(ngx_http_v2_stre r->method_name = ngx_http_core_get_method; r->method = NGX_HTTP_GET; -#if (NGX_HTTP_SSL) - if (fc->ssl) { - ngx_str_set(&r->schema, "https"); - - } else -#endif - { - ngx_str_set(&r->schema, "http"); - } + r->schema.data = ngx_pstrdup(pool, &parent->request->schema); + if (r->schema.data == NULL) { + goto close; + } + + r->schema.len = parent->request->schema.len; value.data = ngx_pstrdup(pool, path); if (value.data == NULL) { diff -r 89430899c72a -r 8e6bb4e6045f src/http/v2/ngx_http_v2_filter_module.c --- a/src/http/v2/ngx_http_v2_filter_module.c Thu Jun 07 20:01:41 2018 +0300 +++ b/src/http/v2/ngx_http_v2_filter_module.c Thu Jun 07 20:04:22 2018 +0300 @@ -944,15 +944,15 @@ ngx_http_v2_push_resource(ngx_http_reque ph = ngx_http_v2_push_headers; + len = ngx_max(r->schema.len, path->len); + if (binary[0].len) { - tmp = ngx_palloc(r->pool, path->len); + tmp = ngx_palloc(r->pool, len); if (tmp == NULL) { return NGX_ERROR; } } else { - len = path->len; - for (i = 0; i < NGX_HTTP_V2_PUSH_HEADERS; i++) { h = (ngx_table_elt_t **) ((char *) &r->headers_in + ph[i].offset); @@ -994,7 +994,7 @@ ngx_http_v2_push_resource(ngx_http_reque len = (h2c->table_update ? 1 : 0) + 1 + 1 + NGX_HTTP_V2_INT_OCTETS + path->len - + 1; + + 1 + NGX_HTTP_V2_INT_OCTETS + r->schema.len; for (i = 0; i < NGX_HTTP_V2_PUSH_HEADERS; i++) { len += binary[i].len; @@ -1025,18 +1025,20 @@ ngx_http_v2_push_resource(ngx_http_reque *pos++ = ngx_http_v2_inc_indexed(NGX_HTTP_V2_PATH_INDEX); pos = ngx_http_v2_write_value(pos, path->data, path->len, tmp); -#if (NGX_HTTP_SSL) - if (fc->ssl) { - ngx_log_debug0(NGX_LOG_DEBUG_HTTP, fc->log, 0, - "http2 push header: \":scheme: https\""); + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, fc->log, 0, + "http2 push header: \":scheme: %V\"", &r->schema); + + if (r->schema.len == 5 && ngx_strncmp(r->schema.data, "https", 5) == 0) { *pos++ = ngx_http_v2_indexed(NGX_HTTP_V2_SCHEME_HTTPS_INDEX); - } else -#endif + } else if (r->schema.len == 4 + && ngx_strncmp(r->schema.data, "http", 4) == 0) { - ngx_log_debug0(NGX_LOG_DEBUG_HTTP, fc->log, 0, - "http2 push header: \":scheme: http\""); *pos++ = ngx_http_v2_indexed(NGX_HTTP_V2_SCHEME_HTTP_INDEX); + + } else { + *pos++ = ngx_http_v2_inc_indexed(NGX_HTTP_V2_SCHEME_HTTP_INDEX); + pos = ngx_http_v2_write_value(pos, r->schema.data, r->schema.len, tmp); } for (i = 0; i < NGX_HTTP_V2_PUSH_HEADERS; i++) { From hongzhidao at gmail.com Fri Jun 8 03:29:05 2018 From: hongzhidao at gmail.com (=?UTF-8?B?5rSq5b+X6YGT?=) Date: Fri, 8 Jun 2018 11:29:05 +0800 Subject: [nginx] Added r->schema. In-Reply-To: References: Message-ID: Hi. 
diff -r 8e6bb4e6045f src/http/ngx_http_variables.c --- a/src/http/ngx_http_variables.c Thu Jun 07 20:04:22 2018 +0300 +++ b/src/http/ngx_http_variables.c Thu Jun 07 12:20:05 2018 -0400 @@ -1420,25 +1420,11 @@ ngx_http_variable_scheme(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data) { -#if (NGX_HTTP_SSL) - - if (r->connection->ssl) { - v->len = sizeof("https") - 1; - v->valid = 1; - v->no_cacheable = 0; - v->not_found = 0; - v->data = (u_char *) "https"; - - return NGX_OK; - } - -#endif - - v->len = sizeof("http") - 1; v->valid = 1; v->no_cacheable = 0; v->not_found = 0; - v->data = (u_char *) "http"; + v->len = r->schema.len; + v->data = r->schema.data; return NGX_OK; } Thanks. On Fri, Jun 8, 2018 at 3:01 AM Ruslan Ermilov wrote: > details: http://hg.nginx.org/nginx/rev/89430899c72a > branches: > changeset: 7295:89430899c72a > user: Ruslan Ermilov > date: Thu Jun 07 20:01:41 2018 +0300 > description: > Added r->schema. > > For HTTP/1, it keeps scheme from the absolute form of URI. > For HTTP/2, the :scheme request pseudo-header field value. > > diffstat: > > src/http/ngx_http_core_module.c | 1 + > src/http/ngx_http_request.c | 5 +++++ > src/http/ngx_http_request.h | 1 + > src/http/v2/ngx_http_v2.c | 15 ++++++--------- > 4 files changed, 13 insertions(+), 9 deletions(-) > > diffs (95 lines): > > diff -r 21ad2af3262c -r 89430899c72a src/http/ngx_http_core_module.c > --- a/src/http/ngx_http_core_module.c Thu Jun 07 19:53:43 2018 +0300 > +++ b/src/http/ngx_http_core_module.c Thu Jun 07 20:01:41 2018 +0300 > @@ -2318,6 +2318,7 @@ ngx_http_subrequest(ngx_http_request_t * > sr->unparsed_uri = r->unparsed_uri; > sr->method_name = ngx_http_core_get_method; > sr->http_protocol = r->http_protocol; > + sr->schema = r->schema; > > ngx_http_set_exten(sr); > > diff -r 21ad2af3262c -r 89430899c72a src/http/ngx_http_request.c > --- a/src/http/ngx_http_request.c Thu Jun 07 19:53:43 2018 +0300 > +++ b/src/http/ngx_http_request.c Thu Jun 07 20:01:41 2018 +0300 > @@ -987,6 +987,11 @@ ngx_http_process_request_line(ngx_event_ > return; > } > > + if (r->schema_end) { > + r->schema.len = r->schema_end - r->schema_start; > + r->schema.data = r->schema_start; > + } > + > if (r->host_end) { > > host.len = r->host_end - r->host_start; > diff -r 21ad2af3262c -r 89430899c72a src/http/ngx_http_request.h > --- a/src/http/ngx_http_request.h Thu Jun 07 19:53:43 2018 +0300 > +++ b/src/http/ngx_http_request.h Thu Jun 07 20:01:41 2018 +0300 > @@ -412,6 +412,7 @@ struct ngx_http_request_s { > > ngx_str_t method_name; > ngx_str_t http_protocol; > + ngx_str_t schema; > > ngx_chain_t *out; > ngx_http_request_t *main; > diff -r 21ad2af3262c -r 89430899c72a src/http/v2/ngx_http_v2.c > --- a/src/http/v2/ngx_http_v2.c Thu Jun 07 19:53:43 2018 +0300 > +++ b/src/http/v2/ngx_http_v2.c Thu Jun 07 20:01:41 2018 +0300 > @@ -2616,16 +2616,14 @@ ngx_http_v2_push_stream(ngx_http_v2_stre > r->method_name = ngx_http_core_get_method; > r->method = NGX_HTTP_GET; > > - r->schema_start = (u_char *) "https"; > - > #if (NGX_HTTP_SSL) > if (fc->ssl) { > - r->schema_end = r->schema_start + 5; > + ngx_str_set(&r->schema, "https"); > > } else > #endif > { > - r->schema_end = r->schema_start + 4; > + ngx_str_set(&r->schema, "http"); > } > > value.data = ngx_pstrdup(pool, path); > @@ -3477,7 +3475,7 @@ ngx_http_v2_parse_scheme(ngx_http_reques > u_char c, ch; > ngx_uint_t i; > > - if (r->schema_start) { > + if (r->schema.len) { > ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, > "client sent duplicate :scheme header"); > > @@ -3511,8 
+3509,7 @@ ngx_http_v2_parse_scheme(ngx_http_reques > return NGX_DECLINED; > } > > - r->schema_start = value->data; > - r->schema_end = value->data + value->len; > + r->schema = *value; > > return NGX_OK; > } > @@ -3575,14 +3572,14 @@ ngx_http_v2_construct_request_line(ngx_h > static const u_char ending[] = " HTTP/2.0"; > > if (r->method_name.len == 0 > - || r->schema_start == NULL > + || r->schema.len == 0 > || r->unparsed_uri.len == 0) > { > if (r->method_name.len == 0) { > ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, > "client sent no :method header"); > > - } else if (r->schema_start == NULL) { > + } else if (r->schema.len == 0) { > ngx_log_error(NGX_LOG_INFO, r->connection->log, 0, > "client sent no :scheme header"); > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From ru at nginx.com Fri Jun 8 08:01:20 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Fri, 8 Jun 2018 11:01:20 +0300 Subject: [nginx] Added r->schema. In-Reply-To: Message-ID: <20180608080120.GA83061@lo0.su> On Fri, Jun 08, 2018 at 11:29:05AM +0800, ??? wrote: > Hi. > > diff -r 8e6bb4e6045f src/http/ngx_http_variables.c > --- a/src/http/ngx_http_variables.c Thu Jun 07 20:04:22 2018 +0300 > +++ b/src/http/ngx_http_variables.c Thu Jun 07 12:20:05 2018 -0400 > @@ -1420,25 +1420,11 @@ > ngx_http_variable_scheme(ngx_http_request_t *r, > ngx_http_variable_value_t *v, uintptr_t data) > { > -#if (NGX_HTTP_SSL) > - > - if (r->connection->ssl) { > - v->len = sizeof("https") - 1; > - v->valid = 1; > - v->no_cacheable = 0; > - v->not_found = 0; > - v->data = (u_char *) "https"; > - > - return NGX_OK; > - } > - > -#endif > - > - v->len = sizeof("http") - 1; > v->valid = 1; > v->no_cacheable = 0; > v->not_found = 0; > - v->data = (u_char *) "http"; > + v->len = r->schema.len; > + v->data = r->schema.data; > > return NGX_OK; > } > > Thanks. A similar patch has already been considered and rejected: https://trac.nginx.org/nginx/ticket/1549#comment:5 See also https://trac.nginx.org/nginx/ticket/711. From xeioex at nginx.com Wed Jun 13 11:13:21 2018 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 13 Jun 2018 11:13:21 +0000 Subject: [njs] Merged HTTP Response and Reply into Request. Message-ID: details: http://hg.nginx.org/njs/rev/bf3d32cc6716 branches: changeset: 534:bf3d32cc6716 user: Dmitry Volyntsev date: Wed Jun 13 14:11:58 2018 +0300 description: Merged HTTP Response and Reply into Request. Splitting HTTP functionality into 3 objects Request, Response and Reply introduced a lot of confusion as to which method should belong to which object. New members of Request: - req.status (res.status) - req.parent (reply.parent) - req.requestBody (req.body) - req.responseBody (reply.body) - req.headersIn (req.headers) - req.headersOut (res.headers) - req.sendHeader() (res.sendHeader()) - req.send() (res.send()) - req.finish() (res.finish()) - req.return() (res.return()) Deprecated members of Request: - req.body (use req.requestBody or req.responseBody) - req.headers (use req.headersIn or req.headersOut) - req.response Response is remained in place for backward compatibility and will be removed in the following releases. Reply is replaced with Request in the req.subrequest() callback. The deprecated properties will be removed in the following releases. 
diffstat: nginx/ngx_http_js_module.c | 245 +++++++++++++++++++++++++++----------------- 1 files changed, 151 insertions(+), 94 deletions(-) diffs (389 lines): diff -r bc3f64aab9f9 -r bf3d32cc6716 nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Tue Jun 05 15:21:20 2018 +0300 +++ b/nginx/ngx_http_js_module.c Wed Jun 13 14:11:58 2018 +0300 @@ -16,7 +16,6 @@ typedef struct { njs_vm_t *vm; const njs_extern_t *req_proto; const njs_extern_t *res_proto; - const njs_extern_t *rep_proto; } ngx_http_js_main_conf_t; @@ -106,6 +105,10 @@ static njs_ret_t ngx_http_js_ext_get_rem njs_value_t *value, void *obj, uintptr_t data); static njs_ret_t ngx_http_js_ext_get_request_body(njs_vm_t *vm, njs_value_t *value, void *obj, uintptr_t data); +static njs_ret_t ngx_http_js_ext_get_headers(njs_vm_t *vm, njs_value_t *value, + void *obj, uintptr_t data); +static njs_ret_t ngx_http_js_ext_foreach_headers(njs_vm_t *vm, void *obj, + void *next); /*FIXME*/ static njs_ret_t ngx_http_js_ext_get_header_in(njs_vm_t *vm, njs_value_t *value, void *obj, uintptr_t data); static njs_ret_t ngx_http_js_ext_foreach_header_in(njs_vm_t *vm, void *obj, @@ -359,10 +362,34 @@ static njs_external_t ngx_http_js_ext_r NULL, 0 }, + { nxt_string("parent"), + NJS_EXTERN_PROPERTY, + NULL, + 0, + ngx_http_js_ext_get_parent, + NULL, + NULL, + NULL, + NULL, + NULL, + 0 }, + { nxt_string("body"), NJS_EXTERN_PROPERTY, NULL, 0, + ngx_http_js_ext_get_reply_body, + NULL, + NULL, + NULL, + NULL, + NULL, + 0 }, + + { nxt_string("requestBody"), + NJS_EXTERN_PROPERTY, + NULL, + 0, ngx_http_js_ext_get_request_body, NULL, NULL, @@ -371,10 +398,34 @@ static njs_external_t ngx_http_js_ext_r NULL, 0 }, + { nxt_string("responseBody"), + NJS_EXTERN_PROPERTY, + NULL, + 0, + ngx_http_js_ext_get_reply_body, + NULL, + NULL, + NULL, + NULL, + NULL, + 0 }, + { nxt_string("headers"), NJS_EXTERN_OBJECT, NULL, 0, + ngx_http_js_ext_get_headers, + NULL, + NULL, + ngx_http_js_ext_foreach_headers, + ngx_http_js_ext_next_header, + NULL, + 0 }, + + { nxt_string("headersIn"), + NJS_EXTERN_OBJECT, + NULL, + 0, ngx_http_js_ext_get_header_in, NULL, NULL, @@ -407,6 +458,30 @@ static njs_external_t ngx_http_js_ext_r NULL, 0 }, + { nxt_string("status"), + NJS_EXTERN_PROPERTY, + NULL, + 0, + ngx_http_js_ext_get_status, + ngx_http_js_ext_set_status, + NULL, + NULL, + NULL, + NULL, + offsetof(ngx_http_request_t, headers_out.status) }, + + { nxt_string("headersOut"), + NJS_EXTERN_OBJECT, + NULL, + 0, + ngx_http_js_ext_get_header_out, + ngx_http_js_ext_set_header_out, + NULL, + ngx_http_js_ext_foreach_header_out, + ngx_http_js_ext_next_header, + NULL, + 0 }, + { nxt_string("response"), NJS_EXTERN_PROPERTY, NULL, @@ -466,105 +541,53 @@ static njs_external_t ngx_http_js_ext_r NULL, ngx_http_js_ext_error, 0 }, -}; - - -static njs_external_t ngx_http_js_ext_reply[] = { - - { nxt_string("headers"), - NJS_EXTERN_OBJECT, + + { nxt_string("sendHeader"), + NJS_EXTERN_METHOD, NULL, 0, - ngx_http_js_ext_get_header_out, - NULL, - NULL, - ngx_http_js_ext_foreach_header_out, - ngx_http_js_ext_next_header, - NULL, - 0 }, - - { nxt_string("status"), - NJS_EXTERN_PROPERTY, - NULL, - 0, - ngx_http_js_ext_get_status, NULL, NULL, NULL, NULL, NULL, - offsetof(ngx_http_request_t, headers_out.status) }, - - { nxt_string("uri"), - NJS_EXTERN_PROPERTY, + ngx_http_js_ext_send_header, + 0 }, + + { nxt_string("send"), + NJS_EXTERN_METHOD, NULL, 0, - ngx_http_js_ext_get_string, NULL, NULL, NULL, NULL, NULL, - offsetof(ngx_http_request_t, uri) }, - - { nxt_string("method"), - NJS_EXTERN_PROPERTY, + 
ngx_http_js_ext_send, + 0 }, + + { nxt_string("finish"), + NJS_EXTERN_METHOD, NULL, 0, - ngx_http_js_ext_get_string, - NULL, - NULL, - NULL, - NULL, - NULL, - offsetof(ngx_http_request_t, method_name) }, - - { nxt_string("contentType"), - NJS_EXTERN_PROPERTY, - NULL, - 0, - ngx_http_js_ext_get_string, NULL, NULL, NULL, NULL, NULL, - offsetof(ngx_http_request_t, headers_out.content_type) }, - - { nxt_string("contentLength"), - NJS_EXTERN_PROPERTY, + ngx_http_js_ext_finish, + 0 }, + + { nxt_string("return"), + NJS_EXTERN_METHOD, NULL, 0, - ngx_http_js_ext_get_content_length, NULL, NULL, NULL, NULL, NULL, - 0 }, - - { nxt_string("parent"), - NJS_EXTERN_PROPERTY, - NULL, - 0, - ngx_http_js_ext_get_parent, - NULL, - NULL, - NULL, - NULL, - NULL, - 0 }, - - { nxt_string("body"), - NJS_EXTERN_PROPERTY, - NULL, - 0, - ngx_http_js_ext_get_reply_body, - NULL, - NULL, - NULL, - NULL, - NULL, + ngx_http_js_ext_return, 0 }, }; @@ -594,18 +617,6 @@ static njs_external_t ngx_http_js_exter NULL, NULL, 0 }, - - { nxt_string("reply"), - NJS_EXTERN_OBJECT, - ngx_http_js_ext_reply, - nxt_nitems(ngx_http_js_ext_reply), - NULL, - NULL, - NULL, - NULL, - NULL, - NULL, - 0 }, }; @@ -1073,6 +1084,7 @@ ngx_http_js_ext_set_header_out(njs_vm_t nxt_str_t *value) { u_char *p; + ngx_int_t n; nxt_str_t *v; ngx_table_elt_t *h; ngx_http_request_t *r; @@ -1101,7 +1113,6 @@ ngx_http_js_ext_set_header_out(njs_vm_t h->hash = 1; } - p = ngx_pnalloc(r->pool, value->length); if (p == NULL) { return NJS_ERROR; @@ -1112,6 +1123,19 @@ ngx_http_js_ext_set_header_out(njs_vm_t h->value.data = p; h->value.len = value->length; + if (h->key.len == sizeof("Content-Length") - 1 + && ngx_strncasecmp(h->key.data, (u_char *) "Content-Length", + sizeof("Content-Length") - 1) == 0) + { + n = ngx_atoi(value->start, value->length); + if (n == NGX_ERROR) { + return NJS_ERROR; + } + + r->headers_out.content_length_n = n; + r->headers_out.content_length = h; + } + return NJS_OK; } @@ -1541,6 +1565,27 @@ done: static njs_ret_t +ngx_http_js_ext_get_headers(njs_vm_t *vm, njs_value_t *value, + void *obj, uintptr_t data) +{ + ngx_http_js_ctx_t *ctx; + ngx_http_request_t *r; + + r = (ngx_http_request_t *) obj; + + ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); + + if (ctx->done) { + /* simulate Reply.headers behavior */ + + return ngx_http_js_ext_get_header_out(vm, value, obj, data); + } + + return ngx_http_js_ext_get_header_in(vm, value, obj, data); +} + + +static njs_ret_t ngx_http_js_ext_get_header_in(njs_vm_t *vm, njs_value_t *value, void *obj, uintptr_t data) { @@ -1562,6 +1607,26 @@ ngx_http_js_ext_get_header_in(njs_vm_t * static njs_ret_t +ngx_http_js_ext_foreach_headers(njs_vm_t *vm, void *obj, void *next) +{ + ngx_http_js_ctx_t *ctx; + ngx_http_request_t *r; + + r = (ngx_http_request_t *) obj; + + ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); + + if (ctx->done) { + /* simulate Reply.headers behavior */ + + return ngx_http_js_ext_foreach_header_out(vm, obj, next); + } + + return ngx_http_js_ext_foreach_header_in(vm, obj, next); +} + + +static njs_ret_t ngx_http_js_ext_foreach_header_in(njs_vm_t *vm, void *obj, void *next) { return ngx_http_js_ext_foreach_header(vm, obj, next, @@ -1990,7 +2055,7 @@ ngx_http_js_subrequest_done(ngx_http_req } ret = njs_vm_external_create(ctx->vm, njs_value_arg(&reply), - jmcf->rep_proto, r); + jmcf->req_proto, r); if (ret != NXT_OK) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "js subrequest reply creation failed"); @@ -2256,13 +2321,6 @@ ngx_http_js_include(ngx_conf_t *cf, ngx_ return 
NGX_CONF_ERROR; } - jmcf->rep_proto = njs_vm_external_prototype(jmcf->vm, - &ngx_http_js_externals[2]); - if (jmcf->rep_proto == NULL) { - ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "failed to add reply proto"); - return NGX_CONF_ERROR; - } - rc = njs_vm_compile(jmcf->vm, &start, end); if (rc != NJS_OK) { @@ -2359,7 +2417,6 @@ ngx_http_js_create_main_conf(ngx_conf_t * conf->vm = NULL; * conf->req_proto = NULL; * conf->res_proto = NULL; - * conf->rep_proto = NULL; */ return conf; From mdounin at mdounin.ru Wed Jun 13 12:28:45 2018 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 13 Jun 2018 12:28:45 +0000 Subject: [nginx] Upstream: disable body cleanup with preserve_output (ticket #1565). Message-ID: details: http://hg.nginx.org/nginx/rev/a10e5fe44762 branches: changeset: 7297:a10e5fe44762 user: Maxim Dounin date: Wed Jun 13 15:28:11 2018 +0300 description: Upstream: disable body cleanup with preserve_output (ticket #1565). With u->conf->preserve_output set the request body file might be used after the response header is sent, so avoid cleaning it. (Normally this is not a problem as u->conf->preserve_output is only set with r->request_body_no_buffering, but the request body might be already written to a file in a different context.) diffstat: src/http/ngx_http_upstream.c | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diffs (13 lines): diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -2901,7 +2901,8 @@ ngx_http_upstream_send_response(ngx_http } if (r->request_body && r->request_body->temp_file - && r == r->main && !r->preserve_body) + && r == r->main && !r->preserve_body + && !u->conf->preserve_output) { ngx_pool_run_cleanup_file(r->pool, r->request_body->temp_file->file.fd); r->request_body->temp_file->file.fd = NGX_INVALID_FILE; From xeioex at nginx.com Wed Jun 13 12:30:23 2018 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Wed, 13 Jun 2018 12:30:23 +0000 Subject: [njs] http internalRedirect() method. Message-ID: details: http://hg.nginx.org/njs/rev/c939541c37bc branches: changeset: 535:c939541c37bc user: Dmitry Volyntsev date: Wed Jun 13 14:15:43 2018 +0300 description: http internalRedirect() method. Performs internal redirect to the specified uri. req.internalRedirect(): uri - string. If uri starts with '@' it is considered as a named location. 
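
The '@' convention maps onto the two core redirect calls: named locations are entered with ngx_http_named_location(), everything else goes through ngx_http_internal_redirect() after the query string is split off. A condensed sketch of that dispatch, matching what the patch below does in its finalize handler (r is the request, uri the redirect target):

    ngx_str_t  args;

    if (uri.data[0] == '@') {
        /* named location: the URI is not rewritten and takes no arguments */
        ngx_http_named_location(r, &uri);

    } else {
        /* split "path?query" into uri and args, then restart request processing */
        ngx_http_split_args(r, &uri, &args);
        ngx_http_internal_redirect(r, &uri, &args);
    }
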
diffstat: nginx/ngx_http_js_module.c | 92 +++++++++++++++++++++++++++++++++++++++++----- 1 files changed, 82 insertions(+), 10 deletions(-) diffs (158 lines): diff -r bf3d32cc6716 -r c939541c37bc nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Wed Jun 13 14:11:58 2018 +0300 +++ b/nginx/ngx_http_js_module.c Wed Jun 13 14:15:43 2018 +0300 @@ -31,6 +31,7 @@ typedef struct { ngx_uint_t done; ngx_int_t status; njs_opaque_value_t request_body; + ngx_str_t redirect_uri; } ngx_http_js_ctx_t; @@ -51,6 +52,8 @@ typedef struct { static ngx_int_t ngx_http_js_content_handler(ngx_http_request_t *r); static void ngx_http_js_content_event_handler(ngx_http_request_t *r); static void ngx_http_js_content_write_event_handler(ngx_http_request_t *r); +static void ngx_http_js_content_finalize(ngx_http_request_t *r, + ngx_http_js_ctx_t *ctx); static ngx_int_t ngx_http_js_variable(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_js_init_vm(ngx_http_request_t *r); @@ -89,6 +92,8 @@ static njs_ret_t ngx_http_js_ext_finish( nxt_uint_t nargs, njs_index_t unused); static njs_ret_t ngx_http_js_ext_return(njs_vm_t *vm, njs_value_t *args, nxt_uint_t nargs, njs_index_t unused); +static njs_ret_t ngx_http_js_ext_internal_redirect(njs_vm_t *vm, + njs_value_t *args, nxt_uint_t nargs, njs_index_t unused); static njs_ret_t ngx_http_js_ext_log(njs_vm_t *vm, njs_value_t *args, nxt_uint_t nargs, njs_index_t unused); @@ -589,6 +594,18 @@ static njs_external_t ngx_http_js_ext_r NULL, ngx_http_js_ext_return, 0 }, + + { nxt_string("internalRedirect"), + NJS_EXTERN_METHOD, + NULL, + 0, + NULL, + NULL, + NULL, + NULL, + NULL, + ngx_http_js_ext_internal_redirect, + 0 }, }; @@ -683,8 +700,9 @@ ngx_http_js_content_event_handler(ngx_ht } /* - * status is expected to be overriden by finish() or return() methods, - * otherwise the content handler is considered invalid. + * status is expected to be overriden by finish(), return() or + * internalRedirect() methods, otherwise the content handler is + * considered invalid. 
*/ ctx->status = NGX_HTTP_INTERNAL_SERVER_ERROR; @@ -704,10 +722,7 @@ ngx_http_js_content_event_handler(ngx_ht return; } - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "http js content rc: %i", ctx->status); - - ngx_http_finalize_request(r, ctx->status); + ngx_http_js_content_finalize(r, ctx); } @@ -725,10 +740,7 @@ ngx_http_js_content_write_event_handler( ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); if (!njs_vm_pending(ctx->vm)) { - ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, - "http js content rc: %i", ctx->status); - - ngx_http_finalize_request(r, ctx->status); + ngx_http_js_content_finalize(r, ctx); return; } @@ -764,6 +776,28 @@ ngx_http_js_content_write_event_handler( } +static void +ngx_http_js_content_finalize(ngx_http_request_t *r, ngx_http_js_ctx_t *ctx) +{ + ngx_str_t args; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http js content rc: %i", ctx->status); + + if (ctx->redirect_uri.len) { + if (ctx->redirect_uri.data[0] == '@') { + ngx_http_named_location(r, &ctx->redirect_uri); + + } else { + ngx_http_split_args(r, &ctx->redirect_uri, &args); + ngx_http_internal_redirect(r, &ctx->redirect_uri, &args); + } + } + + ngx_http_finalize_request(r, ctx->status); +} + + static ngx_int_t ngx_http_js_variable(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data) @@ -1391,6 +1425,44 @@ ngx_http_js_ext_return(njs_vm_t *vm, njs static njs_ret_t +ngx_http_js_ext_internal_redirect(njs_vm_t *vm, njs_value_t *args, + nxt_uint_t nargs, njs_index_t unused) +{ + nxt_str_t uri; + ngx_http_js_ctx_t *ctx; + ngx_http_request_t *r; + + if (nargs < 2) { + njs_vm_error(vm, "too few arguments"); + return NJS_ERROR; + } + + r = njs_value_data(njs_argument(args, 0)); + + ctx = ngx_http_get_module_ctx(r, ngx_http_js_module); + + if (njs_vm_value_to_ext_string(vm, &uri, njs_argument(args, 1), 0) + == NJS_ERROR) + { + njs_vm_error(vm, "failed to convert uri arg"); + return NJS_ERROR; + } + + if (uri.length == 0) { + njs_vm_error(vm, "uri is empty"); + return NJS_ERROR; + } + + ctx->redirect_uri.data = uri.start; + ctx->redirect_uri.len = uri.length; + + ctx->status = NGX_DONE; + + return NJS_OK; +} + + +static njs_ret_t ngx_http_js_ext_log(njs_vm_t *vm, njs_value_t *args, nxt_uint_t nargs, njs_index_t unused) { From i at lvht.net Wed Jun 13 23:39:57 2018 From: i at lvht.net (=?utf-8?B?5ZCV5rW35rab?=) Date: Thu, 14 Jun 2018 07:39:57 +0800 Subject: [PATCH] HTTP/2: make http2 server support http1 In-Reply-To: <27CC686B-F25D-40D3-BFC0-D55CAB8BB834@lvht.net> References: <20180305191459.GR89840@mdounin.ru> <9195D529-BCBA-4D20-8CB0-28683145D1E3@lvht.net> <1583421.Ry823iCY5n@vbart-workstation> <27CC686B-F25D-40D3-BFC0-D55CAB8BB834@lvht.net> Message-ID: <5C9FF2F0-D41F-491E-AB81-90727F6D650C@lvht.net> hello? ???? iPhone > ? 2018?4?2??08:28?Haitao Lv ??? > > Any body is here? > >> On Mar 21, 2018, at 11:36, Haitao Lv wrote: >> >> Thank you for reviewing. >> >> And here is the patch that fix the breaking PROXY protocol functionality. >> >> Sorry for disturbing. 
>> >> diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c >> index 2db7a627..9f1b8544 100644 >> --- a/src/http/ngx_http_request.c >> +++ b/src/http/ngx_http_request.c >> @@ -17,6 +17,10 @@ static ssize_t ngx_http_read_request_header(ngx_http_request_t *r); >> static ngx_int_t ngx_http_alloc_large_header_buffer(ngx_http_request_t *r, >> ngx_uint_t request_line); >> >> +#if (NGX_HTTP_V2) >> +static void ngx_http_wait_v2_preface_handler(ngx_event_t *rev); >> +#endif >> + >> static ngx_int_t ngx_http_process_header_line(ngx_http_request_t *r, >> ngx_table_elt_t *h, ngx_uint_t offset); >> static ngx_int_t ngx_http_process_unique_header_line(ngx_http_request_t *r, >> @@ -325,7 +329,7 @@ ngx_http_init_connection(ngx_connection_t *c) >> >> #if (NGX_HTTP_V2) >> if (hc->addr_conf->http2) { >> - rev->handler = ngx_http_v2_init; >> + rev->handler = ngx_http_wait_v2_preface_handler; >> } >> #endif >> >> @@ -381,6 +385,131 @@ ngx_http_init_connection(ngx_connection_t *c) >> } >> >> >> +#if (NGX_HTTP_V2) >> +static void >> +ngx_http_wait_v2_preface_handler(ngx_event_t *rev) >> +{ >> + size_t size; >> + ssize_t n; >> + u_char *p; >> + ngx_buf_t *b; >> + ngx_connection_t *c; >> + ngx_http_connection_t *hc; >> + static const u_char preface[] = "PRI"; >> + >> + c = rev->data; >> + hc = c->data; >> + >> + size = sizeof(preface) - 1; >> + >> + if (hc->proxy_protocol) { >> + size += NGX_PROXY_PROTOCOL_MAX_HEADER; >> + } >> + >> + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, >> + "http wait h2 preface handler"); >> + >> + if (rev->timedout) { >> + ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out"); >> + ngx_http_close_connection(c); >> + return; >> + } >> + >> + if (c->close) { >> + ngx_http_close_connection(c); >> + return; >> + } >> + >> + b = c->buffer; >> + >> + if (b == NULL) { >> + b = ngx_create_temp_buf(c->pool, size); >> + if (b == NULL) { >> + ngx_http_close_connection(c); >> + return; >> + } >> + >> + c->buffer = b; >> + >> + } else if (b->start == NULL) { >> + >> + b->start = ngx_palloc(c->pool, size); >> + if (b->start == NULL) { >> + ngx_http_close_connection(c); >> + return; >> + } >> + >> + b->pos = b->start; >> + b->last = b->start; >> + b->end = b->last + size; >> + } >> + >> + n = c->recv(c, b->last, b->end - b->last); >> + >> + if (n == NGX_AGAIN) { >> + >> + if (!rev->timer_set) { >> + ngx_add_timer(rev, c->listening->post_accept_timeout); >> + ngx_reusable_connection(c, 1); >> + } >> + >> + if (ngx_handle_read_event(rev, 0) != NGX_OK) { >> + ngx_http_close_connection(c); >> + return; >> + } >> + >> + /* >> + * We are trying to not hold c->buffer's memory for an idle connection. 
>> + */ >> + >> + if (ngx_pfree(c->pool, b->start) == NGX_OK) { >> + b->start = NULL; >> + } >> + >> + return; >> + } >> + >> + if (n == NGX_ERROR) { >> + ngx_http_close_connection(c); >> + return; >> + } >> + >> + if (n == 0) { >> + ngx_log_error(NGX_LOG_INFO, c->log, 0, >> + "client closed connection"); >> + ngx_http_close_connection(c); >> + return; >> + } >> + >> + b->last += n; >> + >> + if (hc->proxy_protocol) { >> + hc->proxy_protocol = 0; >> + >> + p = ngx_proxy_protocol_read(c, b->pos, b->last); >> + >> + if (p == NULL) { >> + ngx_http_close_connection(c); >> + return; >> + } >> + >> + b->pos = p; >> + } >> + >> + if (b->last >= b->pos + sizeof(preface) - 1) { >> + /* b will be freed in ngx_http_v2_init/ngx_http_wait_request_handler */ >> + >> + if (ngx_strncmp(b->pos, preface, sizeof(preface) - 1) == 0) { >> + ngx_http_v2_init(rev); >> + } else { >> + rev->handler = ngx_http_wait_request_handler; >> + ngx_http_wait_request_handler(rev); >> + } >> + } >> +} >> +#endif >> + >> + >> static void >> ngx_http_wait_request_handler(ngx_event_t *rev) >> { >> @@ -393,6 +522,7 @@ ngx_http_wait_request_handler(ngx_event_t *rev) >> ngx_http_core_srv_conf_t *cscf; >> >> c = rev->data; >> + n = NGX_AGAIN; >> >> ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http wait request handler"); >> >> @@ -434,9 +564,27 @@ ngx_http_wait_request_handler(ngx_event_t *rev) >> b->pos = b->start; >> b->last = b->start; >> b->end = b->last + size; >> + } else { >> + >> + p = ngx_palloc(c->pool, size); >> + if (p == NULL) { >> + ngx_http_close_connection(c); >> + return; >> + } >> + >> + n = b->last - b->pos; >> + ngx_memcpy(p, b->pos, n); >> + ngx_pfree(c->pool, b->start); >> + >> + b->start = p; >> + b->pos = b->start; >> + b->last = b->start + n; >> + b->end = b->last + size; >> } >> >> - n = c->recv(c, b->last, size); >> + if (n == NGX_AGAIN) { >> + n = c->recv(c, b->last, size); >> + } >> >> if (n == NGX_AGAIN) { >> >> diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c >> index 77ebb847..6724b662 100644 >> --- a/src/http/v2/ngx_http_v2.c >> +++ b/src/http/v2/ngx_http_v2.c >> @@ -229,6 +229,8 @@ static ngx_http_v2_parse_header_t ngx_http_v2_parse_headers[] = { >> void >> ngx_http_v2_init(ngx_event_t *rev) >> { >> + size_t size; >> + ngx_buf_t *b; >> ngx_connection_t *c; >> ngx_pool_cleanup_t *cln; >> ngx_http_connection_t *hc; >> @@ -260,6 +262,23 @@ ngx_http_v2_init(ngx_event_t *rev) >> return; >> } >> >> + b = c->buffer; >> + >> + if (b != NULL) { >> + size = b->last - b->pos; >> + >> + if (size > h2mcf->recv_buffer_size) { >> + size = h2mcf->recv_buffer_size; >> + } >> + >> + ngx_memcpy(h2mcf->recv_buffer, b->pos, size); >> + h2c->state.buffer_used = size; >> + >> + ngx_pfree(c->pool, b->start); >> + ngx_pfree(c->pool, b); >> + c->buffer = NULL; >> + } >> + >> h2c->connection = c; >> h2c->http_connection = hc; >> >> @@ -379,13 +398,15 @@ ngx_http_v2_read_handler(ngx_event_t *rev) >> h2mcf = ngx_http_get_module_main_conf(h2c->http_connection->conf_ctx, >> ngx_http_v2_module); >> >> - available = h2mcf->recv_buffer_size - 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE; >> + available = h2mcf->recv_buffer_size - h2c->state.buffer_used - 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE; >> >> do { >> p = h2mcf->recv_buffer; >> >> - ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE); >> end = p + h2c->state.buffer_used; >> + if (h2c->state.buffer_used == 0) { >> + ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE); >> + } >> >> n = c->recv(c, end, available); >> >> >> >>> On Mar 21, 2018, at 
00:02, Valentin V. Bartenev wrote: >>> >>> On Thursday 08 March 2018 08:42:27 Haitao Lv wrote: >>>> Sorry for disturbing. But I have to fix a buffer overflow bug. >>>> Here is the latest patch. >>>> >>>> Sorry. But please make your comments. Thank you. >>> [..] >>> >>> There's no way for this patch to be accepted as it breaks PROXY protocol >>> functionality. >>> >>> wbr, Valentin V. Bartenev >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel From ru at nginx.com Thu Jun 14 03:29:40 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 14 Jun 2018 03:29:40 +0000 Subject: [nginx] Resolver: allocate resolver from configuration pool. Message-ID: details: http://hg.nginx.org/nginx/rev/f37d8fb25de5 branches: changeset: 7298:f37d8fb25de5 user: Ruslan Ermilov date: Wed Jun 13 22:37:42 2018 +0300 description: Resolver: allocate resolver from configuration pool. Before 4a8c9139e579, ngx_resolver_create() didn't use configuration pool, and allocations were done using malloc(). In 016352c19049, when resolver gained support of several servers, new allocations were done from the pool. diffstat: src/core/ngx_resolver.c | 87 +++++++++++++++++++++--------------------------- 1 files changed, 38 insertions(+), 49 deletions(-) diffs (120 lines): diff -r a10e5fe44762 -r f37d8fb25de5 src/core/ngx_resolver.c --- a/src/core/ngx_resolver.c Wed Jun 13 15:28:11 2018 +0300 +++ b/src/core/ngx_resolver.c Wed Jun 13 22:37:42 2018 +0300 @@ -141,25 +141,24 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ ngx_pool_cleanup_t *cln; ngx_resolver_connection_t *rec; + r = ngx_pcalloc(cf->pool, sizeof(ngx_resolver_t)); + if (r == NULL) { + return NULL; + } + + r->event = ngx_pcalloc(cf->pool, sizeof(ngx_event_t)); + if (r->event == NULL) { + return NULL; + } + cln = ngx_pool_cleanup_add(cf->pool, 0); if (cln == NULL) { return NULL; } cln->handler = ngx_resolver_cleanup; - - r = ngx_calloc(sizeof(ngx_resolver_t), cf->log); - if (r == NULL) { - return NULL; - } - cln->data = r; - r->event = ngx_calloc(sizeof(ngx_event_t), cf->log); - if (r->event == NULL) { - return NULL; - } - ngx_rbtree_init(&r->name_rbtree, &r->name_sentinel, ngx_resolver_rbtree_insert_value); @@ -288,52 +287,42 @@ ngx_resolver_cleanup(void *data) ngx_uint_t i; ngx_resolver_connection_t *rec; - if (r) { - ngx_log_debug0(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0, - "cleanup resolver"); - - ngx_resolver_cleanup_tree(r, &r->name_rbtree); - - ngx_resolver_cleanup_tree(r, &r->srv_rbtree); - - ngx_resolver_cleanup_tree(r, &r->addr_rbtree); + ngx_log_debug0(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0, "cleanup resolver"); + + ngx_resolver_cleanup_tree(r, &r->name_rbtree); + + ngx_resolver_cleanup_tree(r, &r->srv_rbtree); + + ngx_resolver_cleanup_tree(r, &r->addr_rbtree); #if (NGX_HAVE_INET6) - ngx_resolver_cleanup_tree(r, &r->addr6_rbtree); + ngx_resolver_cleanup_tree(r, &r->addr6_rbtree); #endif - if (r->event) { - if (r->event->timer_set) { - ngx_del_timer(r->event); - } - - ngx_free(r->event); + if (r->event->timer_set) { + ngx_del_timer(r->event); + } + + rec = r->connections.elts; + + for (i = 0; i < r->connections.nelts; i++) { + if (rec[i].udp) { + ngx_close_connection(rec[i].udp); } - - rec = r->connections.elts; - - for (i = 0; i < r->connections.nelts; i++) { - if (rec[i].udp) { - 
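
The practical effect is ownership: memory taken from cf->pool is released together with the configuration, so the cleanup handler no longer frees the resolver and its event explicitly and is left with closing connections and deleting timers. The allocation change itself, in short (taken from the diff below):

    /* before: manual allocation, freed by hand in ngx_resolver_cleanup() */
    r = ngx_calloc(sizeof(ngx_resolver_t), cf->log);

    /* after: tied to the configuration pool, released when the pool is destroyed */
    r = ngx_pcalloc(cf->pool, sizeof(ngx_resolver_t));
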
ngx_close_connection(rec[i].udp); - } - - if (rec[i].tcp) { - ngx_close_connection(rec[i].tcp); - } - - if (rec[i].read_buf) { - ngx_resolver_free(r, rec[i].read_buf->start); - ngx_resolver_free(r, rec[i].read_buf); - } - - if (rec[i].write_buf) { - ngx_resolver_free(r, rec[i].write_buf->start); - ngx_resolver_free(r, rec[i].write_buf); - } + if (rec[i].tcp) { + ngx_close_connection(rec[i].tcp); } - ngx_free(r); + if (rec[i].read_buf) { + ngx_resolver_free(r, rec[i].read_buf->start); + ngx_resolver_free(r, rec[i].read_buf); + } + + if (rec[i].write_buf) { + ngx_resolver_free(r, rec[i].write_buf->start); + ngx_resolver_free(r, rec[i].write_buf); + } } } From ru at nginx.com Thu Jun 14 03:29:42 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 14 Jun 2018 03:29:42 +0000 Subject: [nginx] Resolver: require name servers. Message-ID: details: http://hg.nginx.org/nginx/rev/faf14dc9ab4d branches: changeset: 7299:faf14dc9ab4d user: Ruslan Ermilov date: Wed Jun 13 22:37:49 2018 +0300 description: Resolver: require name servers. diffstat: src/core/ngx_resolver.c | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (15 lines): diff -r f37d8fb25de5 -r faf14dc9ab4d src/core/ngx_resolver.c --- a/src/core/ngx_resolver.c Wed Jun 13 22:37:42 2018 +0300 +++ b/src/core/ngx_resolver.c Wed Jun 13 22:37:49 2018 +0300 @@ -275,6 +275,11 @@ ngx_resolver_create(ngx_conf_t *cf, ngx_ } } + if (n && r->connections.nelts == 0) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "no name servers defined"); + return NULL; + } + return r; } From ru at nginx.com Fri Jun 15 03:41:07 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Fri, 15 Jun 2018 03:41:07 +0000 Subject: [nginx] Upstream: improved peer selection concurrency for hash and ip_hash. Message-ID: details: http://hg.nginx.org/nginx/rev/ed599ea6c1f1 branches: changeset: 7300:ed599ea6c1f1 user: Ruslan Ermilov date: Thu Jun 14 07:03:50 2018 +0300 description: Upstream: improved peer selection concurrency for hash and ip_hash. 
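
The gain comes from downgrading the peers-wide lock during selection: choosing a peer only reads the shared list, so a read lock is sufficient, and the per-peer lock is taken just around the fields that are actually written (fails, checked, conns). A condensed sketch of the pattern the patch introduces; candidate selection and retry handling are omitted:

    ngx_http_upstream_rr_peers_rlock(peers);         /* read lock; was a write lock */

    /* ... hash the key and pick a candidate peer ... */

    ngx_http_upstream_rr_peer_lock(peers, peer);     /* guards per-peer counters */

    if (peer->down
        || (peer->max_conns && peer->conns >= peer->max_conns))
    {
        ngx_http_upstream_rr_peer_unlock(peers, peer);
        /* try the next candidate */
    }

    /* per-peer bookkeeping is done while both locks are held */

    ngx_http_upstream_rr_peer_unlock(peers, peer);
    ngx_http_upstream_rr_peers_unlock(peers);
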
diffstat: src/http/modules/ngx_http_upstream_hash_module.c | 8 +++++++- src/http/modules/ngx_http_upstream_ip_hash_module.c | 8 +++++++- src/stream/ngx_stream_upstream_hash_module.c | 8 +++++++- 3 files changed, 21 insertions(+), 3 deletions(-) diffs (138 lines): diff -r faf14dc9ab4d -r ed599ea6c1f1 src/http/modules/ngx_http_upstream_hash_module.c --- a/src/http/modules/ngx_http_upstream_hash_module.c Wed Jun 13 22:37:49 2018 +0300 +++ b/src/http/modules/ngx_http_upstream_hash_module.c Thu Jun 14 07:03:50 2018 +0300 @@ -176,7 +176,7 @@ ngx_http_upstream_get_hash_peer(ngx_peer ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, "get hash peer, try: %ui", pc->tries); - ngx_http_upstream_rr_peers_wlock(hp->rrp.peers); + ngx_http_upstream_rr_peers_rlock(hp->rrp.peers); if (hp->tries > 20 || hp->rrp.peers->single) { ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); @@ -228,10 +228,13 @@ ngx_http_upstream_get_hash_peer(ngx_peer goto next; } + ngx_http_upstream_rr_peer_lock(hp->rrp.peers, peer); + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, pc->log, 0, "get hash peer, value:%uD, peer:%ui", hp->hash, p); if (peer->down) { + ngx_http_upstream_rr_peer_unlock(hp->rrp.peers, peer); goto next; } @@ -239,10 +242,12 @@ ngx_http_upstream_get_hash_peer(ngx_peer && peer->fails >= peer->max_fails && now - peer->checked <= peer->fail_timeout) { + ngx_http_upstream_rr_peer_unlock(hp->rrp.peers, peer); goto next; } if (peer->max_conns && peer->conns >= peer->max_conns) { + ngx_http_upstream_rr_peer_unlock(hp->rrp.peers, peer); goto next; } @@ -268,6 +273,7 @@ ngx_http_upstream_get_hash_peer(ngx_peer peer->checked = now; } + ngx_http_upstream_rr_peer_unlock(hp->rrp.peers, peer); ngx_http_upstream_rr_peers_unlock(hp->rrp.peers); hp->rrp.tried[n] |= m; diff -r faf14dc9ab4d -r ed599ea6c1f1 src/http/modules/ngx_http_upstream_ip_hash_module.c --- a/src/http/modules/ngx_http_upstream_ip_hash_module.c Wed Jun 13 22:37:49 2018 +0300 +++ b/src/http/modules/ngx_http_upstream_ip_hash_module.c Thu Jun 14 07:03:50 2018 +0300 @@ -161,7 +161,7 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p /* TODO: cached */ - ngx_http_upstream_rr_peers_wlock(iphp->rrp.peers); + ngx_http_upstream_rr_peers_rlock(iphp->rrp.peers); if (iphp->tries > 20 || iphp->rrp.peers->single) { ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); @@ -201,7 +201,10 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p ngx_log_debug2(NGX_LOG_DEBUG_HTTP, pc->log, 0, "get ip hash peer, hash: %ui %04XL", p, (uint64_t) m); + ngx_http_upstream_rr_peer_lock(iphp->rrp.peers, peer); + if (peer->down) { + ngx_http_upstream_rr_peer_unlock(iphp->rrp.peers, peer); goto next; } @@ -209,10 +212,12 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p && peer->fails >= peer->max_fails && now - peer->checked <= peer->fail_timeout) { + ngx_http_upstream_rr_peer_unlock(iphp->rrp.peers, peer); goto next; } if (peer->max_conns && peer->conns >= peer->max_conns) { + ngx_http_upstream_rr_peer_unlock(iphp->rrp.peers, peer); goto next; } @@ -238,6 +243,7 @@ ngx_http_upstream_get_ip_hash_peer(ngx_p peer->checked = now; } + ngx_http_upstream_rr_peer_unlock(iphp->rrp.peers, peer); ngx_http_upstream_rr_peers_unlock(iphp->rrp.peers); iphp->rrp.tried[n] |= m; diff -r faf14dc9ab4d -r ed599ea6c1f1 src/stream/ngx_stream_upstream_hash_module.c --- a/src/stream/ngx_stream_upstream_hash_module.c Wed Jun 13 22:37:49 2018 +0300 +++ b/src/stream/ngx_stream_upstream_hash_module.c Thu Jun 14 07:03:50 2018 +0300 @@ -176,7 +176,7 @@ ngx_stream_upstream_get_hash_peer(ngx_pe ngx_log_debug1(NGX_LOG_DEBUG_STREAM, pc->log, 0, "get hash peer, 
try: %ui", pc->tries); - ngx_stream_upstream_rr_peers_wlock(hp->rrp.peers); + ngx_stream_upstream_rr_peers_rlock(hp->rrp.peers); if (hp->tries > 20 || hp->rrp.peers->single) { ngx_stream_upstream_rr_peers_unlock(hp->rrp.peers); @@ -227,10 +227,13 @@ ngx_stream_upstream_get_hash_peer(ngx_pe goto next; } + ngx_stream_upstream_rr_peer_lock(hp->rrp.peers, peer); + ngx_log_debug2(NGX_LOG_DEBUG_STREAM, pc->log, 0, "get hash peer, value:%uD, peer:%ui", hp->hash, p); if (peer->down) { + ngx_stream_upstream_rr_peer_unlock(hp->rrp.peers, peer); goto next; } @@ -238,10 +241,12 @@ ngx_stream_upstream_get_hash_peer(ngx_pe && peer->fails >= peer->max_fails && now - peer->checked <= peer->fail_timeout) { + ngx_stream_upstream_rr_peer_unlock(hp->rrp.peers, peer); goto next; } if (peer->max_conns && peer->conns >= peer->max_conns) { + ngx_stream_upstream_rr_peer_unlock(hp->rrp.peers, peer); goto next; } @@ -267,6 +272,7 @@ ngx_stream_upstream_get_hash_peer(ngx_pe peer->checked = now; } + ngx_stream_upstream_rr_peer_unlock(hp->rrp.peers, peer); ngx_stream_upstream_rr_peers_unlock(hp->rrp.peers); hp->rrp.tried[n] |= m; From ondrej.oprala at showmax.com Fri Jun 15 09:00:18 2018 From: ondrej.oprala at showmax.com (Ondrej Oprala) Date: Fri, 15 Jun 2018 11:00:18 +0200 Subject: Status of RFC8297 ( Early hints ) Message-ID: Hello, I was trying to find any hints of support for the Early Hints RFC [1] - handling the 103 HTTP response code in nginx and possibly using the headers in it for "http2_push_preload". The newest Rails (5.2.0) are able to generate a proper 103 response, but unfortunately Nginx doesn't seem to be able to handle it. I couldn't find anything in nginx's tickets or on google. Are there any plans to support this RFC? Thank you, ?O. Oprala [1] https://tools.ietf.org/html/rfc8297 From vl at nginx.com Fri Jun 15 10:53:52 2018 From: vl at nginx.com (Vladimir Homutov) Date: Fri, 15 Jun 2018 10:53:52 +0000 Subject: [nginx] Upstream: ngx_http_upstream_random module. Message-ID: details: http://hg.nginx.org/nginx/rev/f2396ecf608b branches: changeset: 7301:f2396ecf608b user: Vladimir Homutov date: Fri Jun 15 11:46:14 2018 +0300 description: Upstream: ngx_http_upstream_random module. The module implements random load-balancing algorithm with optional second choice. In the latter case, the best of two servers is chosen, accounting number of connections and server weight. Example: upstream u { random [two [least_conn]]; server 127.0.0.1:8080; server 127.0.0.1:8081; server 127.0.0.1:8082; server 127.0.0.1:8083; } diffstat: auto/modules | 21 + auto/options | 10 + src/http/modules/ngx_http_upstream_random_module.c | 502 +++++++++++++++++++++ src/stream/ngx_stream_upstream_random_module.c | 502 +++++++++++++++++++++ 4 files changed, 1035 insertions(+), 0 deletions(-) diffs (truncated from 1105 to 1000 lines): diff -r ed599ea6c1f1 -r f2396ecf608b auto/modules --- a/auto/modules Thu Jun 14 07:03:50 2018 +0300 +++ b/auto/modules Fri Jun 15 11:46:14 2018 +0300 @@ -878,6 +878,17 @@ if [ $HTTP = YES ]; then . auto/module fi + if [ $HTTP_UPSTREAM_RANDOM = YES ]; then + ngx_module_name=ngx_http_upstream_random_module + ngx_module_incs= + ngx_module_deps= + ngx_module_srcs=src/http/modules/ngx_http_upstream_random_module.c + ngx_module_libs= + ngx_module_link=$HTTP_UPSTREAM_RANDOM + + . auto/module + fi + if [ $HTTP_UPSTREAM_KEEPALIVE = YES ]; then ngx_module_name=ngx_http_upstream_keepalive_module ngx_module_incs= @@ -1143,6 +1154,16 @@ if [ $STREAM != NO ]; then . 
auto/module fi + if [ $STREAM_UPSTREAM_RANDOM = YES ]; then + ngx_module_name=ngx_stream_upstream_random_module + ngx_module_deps= + ngx_module_srcs=src/stream/ngx_stream_upstream_random_module.c + ngx_module_libs= + ngx_module_link=$STREAM_UPSTREAM_RANDOM + + . auto/module + fi + if [ $STREAM_UPSTREAM_ZONE = YES ]; then have=NGX_STREAM_UPSTREAM_ZONE . auto/have diff -r ed599ea6c1f1 -r f2396ecf608b auto/options --- a/auto/options Thu Jun 14 07:03:50 2018 +0300 +++ b/auto/options Fri Jun 15 11:46:14 2018 +0300 @@ -102,6 +102,7 @@ HTTP_GZIP_STATIC=NO HTTP_UPSTREAM_HASH=YES HTTP_UPSTREAM_IP_HASH=YES HTTP_UPSTREAM_LEAST_CONN=YES +HTTP_UPSTREAM_RANDOM=YES HTTP_UPSTREAM_KEEPALIVE=YES HTTP_UPSTREAM_ZONE=YES @@ -126,6 +127,7 @@ STREAM_SPLIT_CLIENTS=YES STREAM_RETURN=YES STREAM_UPSTREAM_HASH=YES STREAM_UPSTREAM_LEAST_CONN=YES +STREAM_UPSTREAM_RANDOM=YES STREAM_UPSTREAM_ZONE=YES STREAM_SSL_PREREAD=NO @@ -273,6 +275,8 @@ do --without-http_upstream_ip_hash_module) HTTP_UPSTREAM_IP_HASH=NO ;; --without-http_upstream_least_conn_module) HTTP_UPSTREAM_LEAST_CONN=NO ;; + --without-http_upstream_random_module) + HTTP_UPSTREAM_RANDOM=NO ;; --without-http_upstream_keepalive_module) HTTP_UPSTREAM_KEEPALIVE=NO ;; --without-http_upstream_zone_module) HTTP_UPSTREAM_ZONE=NO ;; @@ -325,6 +329,8 @@ use the \"--with-mail_ssl_module\" optio STREAM_UPSTREAM_HASH=NO ;; --without-stream_upstream_least_conn_module) STREAM_UPSTREAM_LEAST_CONN=NO ;; + --without-stream_upstream_random_module) + STREAM_UPSTREAM_RANDOM=NO ;; --without-stream_upstream_zone_module) STREAM_UPSTREAM_ZONE=NO ;; @@ -485,6 +491,8 @@ cat << END disable ngx_http_upstream_ip_hash_module --without-http_upstream_least_conn_module disable ngx_http_upstream_least_conn_module + --without-http_upstream_random_module + disable ngx_http_upstream_random_module --without-http_upstream_keepalive_module disable ngx_http_upstream_keepalive_module --without-http_upstream_zone_module @@ -535,6 +543,8 @@ cat << END disable ngx_stream_upstream_hash_module --without-stream_upstream_least_conn_module disable ngx_stream_upstream_least_conn_module + --without-stream_upstream_random_module + disable ngx_stream_upstream_random_module --without-stream_upstream_zone_module disable ngx_stream_upstream_zone_module diff -r ed599ea6c1f1 -r f2396ecf608b src/http/modules/ngx_http_upstream_random_module.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/http/modules/ngx_http_upstream_random_module.c Fri Jun 15 11:46:14 2018 +0300 @@ -0,0 +1,502 @@ + +/* + * Copyright (C) Nginx, Inc. 
+ */ + + +#include +#include +#include + + +typedef struct { + ngx_http_upstream_rr_peer_t *peer; + ngx_uint_t range; +} ngx_http_upstream_random_range_t; + + +typedef struct { + ngx_uint_t two; + ngx_http_upstream_random_range_t *ranges; +} ngx_http_upstream_random_srv_conf_t; + + +typedef struct { + /* the round robin data must be first */ + ngx_http_upstream_rr_peer_data_t rrp; + + ngx_http_upstream_random_srv_conf_t *conf; + u_char tries; +} ngx_http_upstream_random_peer_data_t; + + +static ngx_int_t ngx_http_upstream_init_random(ngx_conf_t *cf, + ngx_http_upstream_srv_conf_t *us); +static ngx_int_t ngx_http_upstream_update_random(ngx_pool_t *pool, + ngx_http_upstream_srv_conf_t *us); + +static ngx_int_t ngx_http_upstream_init_random_peer(ngx_http_request_t *r, + ngx_http_upstream_srv_conf_t *us); +static ngx_int_t ngx_http_upstream_get_random_peer(ngx_peer_connection_t *pc, + void *data); +static ngx_int_t ngx_http_upstream_get_random2_peer(ngx_peer_connection_t *pc, + void *data); +static ngx_uint_t ngx_http_upstream_peek_random_peer( + ngx_http_upstream_rr_peers_t *peers, + ngx_http_upstream_random_peer_data_t *rp); +static void *ngx_http_upstream_random_create_conf(ngx_conf_t *cf); +static char *ngx_http_upstream_random(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); + + +static ngx_command_t ngx_http_upstream_random_commands[] = { + + { ngx_string("random"), + NGX_HTTP_UPS_CONF|NGX_CONF_NOARGS|NGX_CONF_TAKE12, + ngx_http_upstream_random, + NGX_HTTP_SRV_CONF_OFFSET, + 0, + NULL }, + + ngx_null_command +}; + + +static ngx_http_module_t ngx_http_upstream_random_module_ctx = { + NULL, /* preconfiguration */ + NULL, /* postconfiguration */ + + NULL, /* create main configuration */ + NULL, /* init main configuration */ + + ngx_http_upstream_random_create_conf, /* create server configuration */ + NULL, /* merge server configuration */ + + NULL, /* create location configuration */ + NULL /* merge location configuration */ +}; + + +ngx_module_t ngx_http_upstream_random_module = { + NGX_MODULE_V1, + &ngx_http_upstream_random_module_ctx, /* module context */ + ngx_http_upstream_random_commands, /* module directives */ + NGX_HTTP_MODULE, /* module type */ + NULL, /* init master */ + NULL, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + + +static ngx_int_t +ngx_http_upstream_init_random(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us) +{ + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, cf->log, 0, "init random"); + + if (ngx_http_upstream_init_round_robin(cf, us) != NGX_OK) { + return NGX_ERROR; + } + + us->peer.init = ngx_http_upstream_init_random_peer; + +#if (NGX_HTTP_UPSTREAM_ZONE) + if (us->shm_zone) { + return NGX_OK; + } +#endif + + return ngx_http_upstream_update_random(cf->pool, us); +} + + +static ngx_int_t +ngx_http_upstream_update_random(ngx_pool_t *pool, + ngx_http_upstream_srv_conf_t *us) +{ + size_t size; + ngx_uint_t i, total_weight; + ngx_http_upstream_rr_peer_t *peer; + ngx_http_upstream_rr_peers_t *peers; + ngx_http_upstream_random_range_t *ranges; + ngx_http_upstream_random_srv_conf_t *rcf; + + rcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_random_module); + + peers = us->peer.data; + + size = peers->number * sizeof(ngx_http_upstream_random_range_t); + + ranges = pool ? 
ngx_palloc(pool, size) : ngx_alloc(size, ngx_cycle->log); + if (ranges == NULL) { + return NGX_ERROR; + } + + total_weight = 0; + + for (peer = peers->peer, i = 0; peer; peer = peer->next, i++) { + ranges[i].peer = peer; + ranges[i].range = total_weight; + total_weight += peer->weight; + } + + rcf->ranges = ranges; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_upstream_init_random_peer(ngx_http_request_t *r, + ngx_http_upstream_srv_conf_t *us) +{ + ngx_http_upstream_random_srv_conf_t *rcf; + ngx_http_upstream_random_peer_data_t *rp; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "init random peer"); + + rcf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_random_module); + + rp = ngx_palloc(r->pool, sizeof(ngx_http_upstream_random_peer_data_t)); + if (rp == NULL) { + return NGX_ERROR; + } + + r->upstream->peer.data = &rp->rrp; + + if (ngx_http_upstream_init_round_robin_peer(r, us) != NGX_OK) { + return NGX_ERROR; + } + + if (rcf->two) { + r->upstream->peer.get = ngx_http_upstream_get_random2_peer; + + } else { + r->upstream->peer.get = ngx_http_upstream_get_random_peer; + } + + rp->conf = rcf; + rp->tries = 0; + + ngx_http_upstream_rr_peers_rlock(rp->rrp.peers); + +#if (NGX_HTTP_UPSTREAM_ZONE) + if (rp->rrp.peers->shpool && rcf->ranges == NULL) { + if (ngx_http_upstream_update_random(NULL, us) != NGX_OK) { + ngx_http_upstream_rr_peers_unlock(rp->rrp.peers); + return NGX_ERROR; + } + } +#endif + + ngx_http_upstream_rr_peers_unlock(rp->rrp.peers); + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_upstream_get_random_peer(ngx_peer_connection_t *pc, void *data) +{ + ngx_http_upstream_random_peer_data_t *rp = data; + + time_t now; + uintptr_t m; + ngx_uint_t i, n; + ngx_http_upstream_rr_peer_t *peer; + ngx_http_upstream_rr_peers_t *peers; + ngx_http_upstream_rr_peer_data_t *rrp; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "get random peer, try: %ui", pc->tries); + + rrp = &rp->rrp; + peers = rrp->peers; + + ngx_http_upstream_rr_peers_rlock(peers); + + if (rp->tries > 20 || peers->single) { + ngx_http_upstream_rr_peers_unlock(peers); + return ngx_http_upstream_get_round_robin_peer(pc, rrp); + } + + pc->cached = 0; + pc->connection = NULL; + + now = ngx_time(); + + for ( ;; ) { + + i = ngx_http_upstream_peek_random_peer(peers, rp); + + peer = rp->conf->ranges[i].peer; + + n = i / (8 * sizeof(uintptr_t)); + m = (uintptr_t) 1 << i % (8 * sizeof(uintptr_t)); + + if (rrp->tried[n] & m) { + goto next; + } + + ngx_http_upstream_rr_peer_lock(peers, peer); + + if (peer->down) { + ngx_http_upstream_rr_peer_unlock(peers, peer); + goto next; + } + + if (peer->max_fails + && peer->fails >= peer->max_fails + && now - peer->checked <= peer->fail_timeout) + { + ngx_http_upstream_rr_peer_unlock(peers, peer); + goto next; + } + + if (peer->max_conns && peer->conns >= peer->max_conns) { + ngx_http_upstream_rr_peer_unlock(peers, peer); + goto next; + } + + break; + + next: + + if (++rp->tries > 20) { + ngx_http_upstream_rr_peers_unlock(peers); + return ngx_http_upstream_get_round_robin_peer(pc, rrp); + } + } + + rrp->current = peer; + + if (now - peer->checked > peer->fail_timeout) { + peer->checked = now; + } + + pc->sockaddr = peer->sockaddr; + pc->socklen = peer->socklen; + pc->name = &peer->name; + + peer->conns++; + + ngx_http_upstream_rr_peer_unlock(peers, peer); + ngx_http_upstream_rr_peers_unlock(peers); + + rrp->tried[n] |= m; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_upstream_get_random2_peer(ngx_peer_connection_t *pc, void *data) +{ + 
ngx_http_upstream_random_peer_data_t *rp = data; + + time_t now; + uintptr_t m; + ngx_uint_t i, n, p; + ngx_http_upstream_rr_peer_t *peer, *prev; + ngx_http_upstream_rr_peers_t *peers; + ngx_http_upstream_rr_peer_data_t *rrp; + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "get random2 peer, try: %ui", pc->tries); + + rrp = &rp->rrp; + peers = rrp->peers; + + ngx_http_upstream_rr_peers_wlock(peers); + + if (rp->tries > 20 || peers->single) { + ngx_http_upstream_rr_peers_unlock(peers); + return ngx_http_upstream_get_round_robin_peer(pc, rrp); + } + + pc->cached = 0; + pc->connection = NULL; + + now = ngx_time(); + + prev = NULL; + +#if (NGX_SUPPRESS_WARN) + p = 0; +#endif + + for ( ;; ) { + + i = ngx_http_upstream_peek_random_peer(peers, rp); + + peer = rp->conf->ranges[i].peer; + + if (peer == prev) { + goto next; + } + + n = i / (8 * sizeof(uintptr_t)); + m = (uintptr_t) 1 << i % (8 * sizeof(uintptr_t)); + + if (rrp->tried[n] & m) { + goto next; + } + + if (peer->down) { + goto next; + } + + if (peer->max_fails + && peer->fails >= peer->max_fails + && now - peer->checked <= peer->fail_timeout) + { + goto next; + } + + if (peer->max_conns && peer->conns >= peer->max_conns) { + goto next; + } + + if (prev) { + if (peer->conns * prev->weight > prev->conns * peer->weight) { + peer = prev; + n = p / (8 * sizeof(uintptr_t)); + m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); + } + + break; + } + + prev = peer; + p = i; + + next: + + if (++rp->tries > 20) { + ngx_http_upstream_rr_peers_unlock(peers); + return ngx_http_upstream_get_round_robin_peer(pc, rrp); + } + } + + rrp->current = peer; + + if (now - peer->checked > peer->fail_timeout) { + peer->checked = now; + } + + pc->sockaddr = peer->sockaddr; + pc->socklen = peer->socklen; + pc->name = &peer->name; + + peer->conns++; + + ngx_http_upstream_rr_peers_unlock(peers); + + rrp->tried[n] |= m; + + return NGX_OK; +} + + +static ngx_uint_t +ngx_http_upstream_peek_random_peer(ngx_http_upstream_rr_peers_t *peers, + ngx_http_upstream_random_peer_data_t *rp) +{ + ngx_uint_t i, j, k, x; + + x = ngx_random() % peers->total_weight; + + i = 0; + j = peers->number; + + while (j - i > 1) { + k = (i + j) / 2; + + if (x < rp->conf->ranges[k].range) { + j = k; + + } else { + i = k; + } + } + + return i; +} + + +static void * +ngx_http_upstream_random_create_conf(ngx_conf_t *cf) +{ + ngx_http_upstream_random_srv_conf_t *conf; + + conf = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_random_srv_conf_t)); + if (conf == NULL) { + return NULL; + } + + /* + * set by ngx_pcalloc(): + * + * conf->two = 0; + */ + + return conf; +} + + +static char * +ngx_http_upstream_random(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_http_upstream_random_srv_conf_t *rcf = conf; + + ngx_str_t *value; + ngx_http_upstream_srv_conf_t *uscf; + + uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); + + if (uscf->peer.init_upstream) { + ngx_conf_log_error(NGX_LOG_WARN, cf, 0, + "load balancing method redefined"); + } + + uscf->peer.init_upstream = ngx_http_upstream_init_random; + + uscf->flags = NGX_HTTP_UPSTREAM_CREATE + |NGX_HTTP_UPSTREAM_WEIGHT + |NGX_HTTP_UPSTREAM_MAX_CONNS + |NGX_HTTP_UPSTREAM_MAX_FAILS + |NGX_HTTP_UPSTREAM_FAIL_TIMEOUT + |NGX_HTTP_UPSTREAM_DOWN; + + if (cf->args->nelts == 1) { + return NGX_CONF_OK; + } + + value = cf->args->elts; + + if (ngx_strcmp(value[1].data, "two") == 0) { + rcf->two = 1; + + } else { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid parameter \"%V\"", &value[1]); + return NGX_CONF_ERROR; + } + + if 
(cf->args->nelts == 2) { + return NGX_CONF_OK; + } + + if (ngx_strcmp(value[2].data, "least_conn") != 0) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid parameter \"%V\"", &value[2]); + return NGX_CONF_ERROR; + } + + return NGX_CONF_OK; +} diff -r ed599ea6c1f1 -r f2396ecf608b src/stream/ngx_stream_upstream_random_module.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/stream/ngx_stream_upstream_random_module.c Fri Jun 15 11:46:14 2018 +0300 @@ -0,0 +1,502 @@ + +/* + * Copyright (C) Nginx, Inc. + */ + + +#include +#include +#include + + +typedef struct { + ngx_stream_upstream_rr_peer_t *peer; + ngx_uint_t range; +} ngx_stream_upstream_random_range_t; + + +typedef struct { + ngx_uint_t two; + ngx_stream_upstream_random_range_t *ranges; +} ngx_stream_upstream_random_srv_conf_t; + + +typedef struct { + /* the round robin data must be first */ + ngx_stream_upstream_rr_peer_data_t rrp; + + ngx_stream_upstream_random_srv_conf_t *conf; + u_char tries; +} ngx_stream_upstream_random_peer_data_t; + + +static ngx_int_t ngx_stream_upstream_init_random(ngx_conf_t *cf, + ngx_stream_upstream_srv_conf_t *us); +static ngx_int_t ngx_stream_upstream_update_random(ngx_pool_t *pool, + ngx_stream_upstream_srv_conf_t *us); + +static ngx_int_t ngx_stream_upstream_init_random_peer(ngx_stream_session_t *s, + ngx_stream_upstream_srv_conf_t *us); +static ngx_int_t ngx_stream_upstream_get_random_peer(ngx_peer_connection_t *pc, + void *data); +static ngx_int_t ngx_stream_upstream_get_random2_peer(ngx_peer_connection_t *pc, + void *data); +static ngx_uint_t ngx_stream_upstream_peek_random_peer( + ngx_stream_upstream_rr_peers_t *peers, + ngx_stream_upstream_random_peer_data_t *rp); +static void *ngx_stream_upstream_random_create_conf(ngx_conf_t *cf); +static char *ngx_stream_upstream_random(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); + + +static ngx_command_t ngx_stream_upstream_random_commands[] = { + + { ngx_string("random"), + NGX_STREAM_UPS_CONF|NGX_CONF_NOARGS|NGX_CONF_TAKE12, + ngx_stream_upstream_random, + NGX_STREAM_SRV_CONF_OFFSET, + 0, + NULL }, + + ngx_null_command +}; + + +static ngx_stream_module_t ngx_stream_upstream_random_module_ctx = { + NULL, /* preconfiguration */ + NULL, /* postconfiguration */ + + NULL, /* create main configuration */ + NULL, /* init main configuration */ + + ngx_stream_upstream_random_create_conf, /* create server configuration */ + NULL /* merge server configuration */ +}; + + +ngx_module_t ngx_stream_upstream_random_module = { + NGX_MODULE_V1, + &ngx_stream_upstream_random_module_ctx, /* module context */ + ngx_stream_upstream_random_commands, /* module directives */ + NGX_STREAM_MODULE, /* module type */ + NULL, /* init master */ + NULL, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + + +static ngx_int_t +ngx_stream_upstream_init_random(ngx_conf_t *cf, + ngx_stream_upstream_srv_conf_t *us) +{ + ngx_log_debug0(NGX_LOG_DEBUG_STREAM, cf->log, 0, "init random"); + + if (ngx_stream_upstream_init_round_robin(cf, us) != NGX_OK) { + return NGX_ERROR; + } + + us->peer.init = ngx_stream_upstream_init_random_peer; + +#if (NGX_STREAM_UPSTREAM_ZONE) + if (us->shm_zone) { + return NGX_OK; + } +#endif + + return ngx_stream_upstream_update_random(cf->pool, us); +} + + +static ngx_int_t +ngx_stream_upstream_update_random(ngx_pool_t *pool, + ngx_stream_upstream_srv_conf_t *us) +{ + size_t size; + ngx_uint_t i, total_weight; + 
ngx_stream_upstream_rr_peer_t *peer; + ngx_stream_upstream_rr_peers_t *peers; + ngx_stream_upstream_random_range_t *ranges; + ngx_stream_upstream_random_srv_conf_t *rcf; + + rcf = ngx_stream_conf_upstream_srv_conf(us, + ngx_stream_upstream_random_module); + peers = us->peer.data; + + size = peers->number * sizeof(ngx_stream_upstream_random_range_t); + + ranges = pool ? ngx_palloc(pool, size) : ngx_alloc(size, ngx_cycle->log); + if (ranges == NULL) { + return NGX_ERROR; + } + + total_weight = 0; + + for (peer = peers->peer, i = 0; peer; peer = peer->next, i++) { + ranges[i].peer = peer; + ranges[i].range = total_weight; + total_weight += peer->weight; + } + + rcf->ranges = ranges; + + return NGX_OK; +} + + +static ngx_int_t +ngx_stream_upstream_init_random_peer(ngx_stream_session_t *s, + ngx_stream_upstream_srv_conf_t *us) +{ + ngx_stream_upstream_random_srv_conf_t *rcf; + ngx_stream_upstream_random_peer_data_t *rp; + + ngx_log_debug0(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, + "init random peer"); + + rcf = ngx_stream_conf_upstream_srv_conf(us, + ngx_stream_upstream_random_module); + + rp = ngx_palloc(s->connection->pool, + sizeof(ngx_stream_upstream_random_peer_data_t)); + if (rp == NULL) { + return NGX_ERROR; + } + + s->upstream->peer.data = &rp->rrp; + + if (ngx_stream_upstream_init_round_robin_peer(s, us) != NGX_OK) { + return NGX_ERROR; + } + + if (rcf->two) { + s->upstream->peer.get = ngx_stream_upstream_get_random2_peer; + + } else { + s->upstream->peer.get = ngx_stream_upstream_get_random_peer; + } + + rp->conf = rcf; + rp->tries = 0; + + ngx_stream_upstream_rr_peers_rlock(rp->rrp.peers); + +#if (NGX_STREAM_UPSTREAM_ZONE) + if (rp->rrp.peers->shpool && rcf->ranges == NULL) { + if (ngx_stream_upstream_update_random(NULL, us) != NGX_OK) { + ngx_stream_upstream_rr_peers_unlock(rp->rrp.peers); + return NGX_ERROR; + } + } +#endif + + ngx_stream_upstream_rr_peers_unlock(rp->rrp.peers); + + return NGX_OK; +} + + +static ngx_int_t +ngx_stream_upstream_get_random_peer(ngx_peer_connection_t *pc, void *data) +{ + ngx_stream_upstream_random_peer_data_t *rp = data; + + time_t now; + uintptr_t m; + ngx_uint_t i, n; + ngx_stream_upstream_rr_peer_t *peer; + ngx_stream_upstream_rr_peers_t *peers; + ngx_stream_upstream_rr_peer_data_t *rrp; + + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, pc->log, 0, + "get random peer, try: %ui", pc->tries); + + rrp = &rp->rrp; + peers = rrp->peers; + + ngx_stream_upstream_rr_peers_rlock(peers); + + if (rp->tries > 20 || peers->single) { + ngx_stream_upstream_rr_peers_unlock(peers); + return ngx_stream_upstream_get_round_robin_peer(pc, rrp); + } + + pc->cached = 0; + pc->connection = NULL; + + now = ngx_time(); + + for ( ;; ) { + + i = ngx_stream_upstream_peek_random_peer(peers, rp); + + peer = rp->conf->ranges[i].peer; + + n = i / (8 * sizeof(uintptr_t)); + m = (uintptr_t) 1 << i % (8 * sizeof(uintptr_t)); + + if (rrp->tried[n] & m) { + goto next; + } + + ngx_stream_upstream_rr_peer_lock(peers, peer); + + if (peer->down) { + ngx_stream_upstream_rr_peer_unlock(peers, peer); + goto next; + } + + if (peer->max_fails + && peer->fails >= peer->max_fails + && now - peer->checked <= peer->fail_timeout) + { + ngx_stream_upstream_rr_peer_unlock(peers, peer); + goto next; + } + + if (peer->max_conns && peer->conns >= peer->max_conns) { + ngx_stream_upstream_rr_peer_unlock(peers, peer); + goto next; + } + + break; + + next: + + if (++rp->tries > 20) { + ngx_stream_upstream_rr_peers_unlock(peers); + return ngx_stream_upstream_get_round_robin_peer(pc, rrp); + } + } + + rrp->current 
= peer; + + if (now - peer->checked > peer->fail_timeout) { + peer->checked = now; + } + + pc->sockaddr = peer->sockaddr; + pc->socklen = peer->socklen; + pc->name = &peer->name; + + peer->conns++; + + ngx_stream_upstream_rr_peer_unlock(peers, peer); + ngx_stream_upstream_rr_peers_unlock(peers); + + rrp->tried[n] |= m; + + return NGX_OK; +} + + +static ngx_int_t +ngx_stream_upstream_get_random2_peer(ngx_peer_connection_t *pc, void *data) +{ + ngx_stream_upstream_random_peer_data_t *rp = data; + + time_t now; + uintptr_t m; + ngx_uint_t i, n, p; + ngx_stream_upstream_rr_peer_t *peer, *prev; + ngx_stream_upstream_rr_peers_t *peers; + ngx_stream_upstream_rr_peer_data_t *rrp; + + ngx_log_debug1(NGX_LOG_DEBUG_STREAM, pc->log, 0, + "get random2 peer, try: %ui", pc->tries); + + rrp = &rp->rrp; + peers = rrp->peers; + + ngx_stream_upstream_rr_peers_wlock(peers); + + if (rp->tries > 20 || peers->single) { + ngx_stream_upstream_rr_peers_unlock(peers); + return ngx_stream_upstream_get_round_robin_peer(pc, rrp); + } + + pc->cached = 0; + pc->connection = NULL; + + now = ngx_time(); + + prev = NULL; + +#if (NGX_SUPPRESS_WARN) + p = 0; +#endif + + for ( ;; ) { + + i = ngx_stream_upstream_peek_random_peer(peers, rp); + + peer = rp->conf->ranges[i].peer; + + if (peer == prev) { + goto next; + } + + n = i / (8 * sizeof(uintptr_t)); + m = (uintptr_t) 1 << i % (8 * sizeof(uintptr_t)); + + if (rrp->tried[n] & m) { + goto next; + } + + if (peer->down) { + goto next; + } + + if (peer->max_fails + && peer->fails >= peer->max_fails + && now - peer->checked <= peer->fail_timeout) + { + goto next; + } + + if (peer->max_conns && peer->conns >= peer->max_conns) { + goto next; + } + + if (prev) { + if (peer->conns * prev->weight > prev->conns * peer->weight) { + peer = prev; + n = p / (8 * sizeof(uintptr_t)); + m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t)); + } + + break; + } + + prev = peer; + p = i; + + next: + + if (++rp->tries > 20) { + ngx_stream_upstream_rr_peers_unlock(peers); + return ngx_stream_upstream_get_round_robin_peer(pc, rrp); + } + } + + rrp->current = peer; + + if (now - peer->checked > peer->fail_timeout) { + peer->checked = now; + } + + pc->sockaddr = peer->sockaddr; + pc->socklen = peer->socklen; + pc->name = &peer->name; + + peer->conns++; + From xeioex at nginx.com Fri Jun 15 14:05:59 2018 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Fri, 15 Jun 2018 14:05:59 +0000 Subject: [njs] Fixed heap-buffer-overflow in crypto.createHmac(). Message-ID: details: http://hg.nginx.org/njs/rev/e99e0a7f4fae branches: changeset: 536:e99e0a7f4fae user: Dmitry Volyntsev date: Wed Jun 13 19:38:47 2018 +0300 description: Fixed heap-buffer-overflow in crypto.createHmac(). 
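For context, the bug is in HMAC key normalization: a key longer than the hash
block must be digested down first, while a shorter key is copied and
zero-padded, so no more than key.length bytes may ever be read from the
caller's buffer. A minimal sketch of that rule, assuming a 64-byte block and a
generic digest callback rather than the actual njs structures:

    #include <string.h>

    #define HMAC_BLOCK_SIZE  64    /* block size assumed by this sketch */

    /* digest() stands in for any hash; out must hold digest_len bytes */
    typedef void (*digest_pt)(unsigned char *out, const unsigned char *data,
        size_t len);

    static void
    hmac_normalize_key(unsigned char block[HMAC_BLOCK_SIZE],
        const unsigned char *key, size_t key_len, digest_pt digest,
        size_t digest_len)
    {
        if (key_len > HMAC_BLOCK_SIZE) {
            /* K' = H(K): long keys are hashed, never copied verbatim */
            digest(block, key, key_len);
            memset(block + digest_len, 0, HMAC_BLOCK_SIZE - digest_len);

        } else {
            /* copy only key_len bytes; reading more is the reported over-read */
            memcpy(block, key, key_len);
            memset(block + key_len, 0, HMAC_BLOCK_SIZE - key_len);
        }
    }

The diff below does exactly this: the length check is tied to sizeof(key_buf)
and the memcpy() is bounded by key.length instead of the destination size.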
diffstat:

 njs/njs_crypto.c |  4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diffs (21 lines):

diff -r c939541c37bc -r e99e0a7f4fae njs/njs_crypto.c
--- a/njs/njs_crypto.c	Wed Jun 13 14:15:43 2018 +0300
+++ b/njs/njs_crypto.c	Wed Jun 13 19:38:47 2018 +0300
@@ -417,7 +417,7 @@ njs_crypto_create_hmac(njs_vm_t *vm, njs
 
     ctx->alg = alg;
 
-    if (key.length > 64) {
+    if (key.length > sizeof(key_buf)) {
         alg->init(&ctx->u);
         alg->update(&ctx->u, key.start, key.length);
         alg->final(digest, &ctx->u);
@@ -426,7 +426,7 @@ njs_crypto_create_hmac(njs_vm_t *vm, njs
         memset(key_buf + alg->size, 0, sizeof(key_buf) - alg->size);
 
     } else {
-        memcpy(key_buf, key.start, sizeof(key_buf));
+        memcpy(key_buf, key.start, key.length);
         memset(key_buf + key.length, 0, sizeof(key_buf) - key.length);
     }
 

From vbart at nginx.com  Fri Jun 15 15:24:37 2018
From: vbart at nginx.com (Valentin Bartenev)
Date: Fri, 15 Jun 2018 15:24:37 +0000
Subject: [nginx] MIME: changed type for woff to font/woff (ticket #1243).
Message-ID: 

details:   http://hg.nginx.org/nginx/rev/bf1a7b363598
branches:  
changeset: 7302:bf1a7b363598
user:      Valentin Bartenev
date:      Fri Jun 15 17:29:55 2018 +0300
description:
MIME: changed type for woff to font/woff (ticket #1243).

According to RFC 8081 the previously used application/font-woff
type is deprecated.

diffstat:

 conf/mime.types |  3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

diffs (13 lines):

diff -r f2396ecf608b -r bf1a7b363598 conf/mime.types
--- a/conf/mime.types	Fri Jun 15 11:46:14 2018 +0300
+++ b/conf/mime.types	Fri Jun 15 17:29:55 2018 +0300
@@ -24,7 +24,8 @@ types {
     image/x-jng                           jng;
     image/x-ms-bmp                        bmp;
 
-    application/font-woff                 woff;
+    font/woff                             woff;
+
     application/java-archive              jar war ear;
     application/json                      json;
     application/mac-binhex40              hqx;

From vbart at nginx.com  Fri Jun 15 15:24:39 2018
From: vbart at nginx.com (Valentin Bartenev)
Date: Fri, 15 Jun 2018 15:24:39 +0000
Subject: [nginx] MIME: added font/woff2 type (ticket #1243).
Message-ID: 

details:   http://hg.nginx.org/nginx/rev/118885f7a577
branches:  
changeset: 7303:118885f7a577
user:      Valentin Bartenev
date:      Fri Jun 15 17:29:55 2018 +0300
description:
MIME: added font/woff2 type (ticket #1243).

diffstat:

 conf/mime.types |  1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diffs (11 lines):

diff -r bf1a7b363598 -r 118885f7a577 conf/mime.types
--- a/conf/mime.types	Fri Jun 15 17:29:55 2018 +0300
+++ b/conf/mime.types	Fri Jun 15 17:29:55 2018 +0300
@@ -25,6 +25,7 @@ types {
     image/x-ms-bmp                        bmp;
 
     font/woff                             woff;
+    font/woff2                            woff2;
 
     application/java-archive              jar war ear;
     application/json                      json;

From tomas.hulata at netbox.cz  Mon Jun 18 16:08:03 2018
From: tomas.hulata at netbox.cz (Tomas Hulata)
Date: Mon, 18 Jun 2018 18:08:03 +0200
Subject: syslog with timezone
Message-ID: <2101c4c6-5bb3-f4d6-8570-85448783ffbc@netbox.cz>

According rfc5424 there could or should be time zone specified in syslog
message. There are problems for example with graylog, so my patch is
adding optional switch to enable different datetime formatting with
timezone support to syslog module.
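For readers comparing the two formats: nginx's cached RFC 3164 timestamp has
the shape "Sep 28 12:00:00" with no zone, while the tz variant proposed below
is an ISO 8601 string such as "1970-09-28T12:00:00+06:00" (both shapes appear
literally in the sizeof() buffers in the patch). A small standalone
illustration using plain strftime() rather than the nginx cached-time
machinery; note that %z prints the offset without the colon RFC 5424 expects,
which is why the patch assembles it from gmtoff by hand:

    #include <stdio.h>
    #include <time.h>

    int
    main(void)
    {
        char       rfc3164[32], rfc5424[40];
        time_t     now = time(NULL);
        struct tm  tm = *localtime(&now);

        /* "Sep 28 12:00:00" - the format nginx caches today, no zone */
        strftime(rfc3164, sizeof(rfc3164), "%b %e %H:%M:%S", &tm);

        /* "2018-06-12T23:21:00+0200" - ISO 8601 with offset (no colon from %z) */
        strftime(rfc5424, sizeof(rfc5424), "%Y-%m-%dT%H:%M:%S%z", &tm);

        printf("RFC 3164 style: %s\nRFC 5424 style: %s\n", rfc3164, rfc5424);
        return 0;
    }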
# HG changeset patch
# User Tomas Hulata
# Date 1528838460 -7200
#      Tue Jun 12 23:21:00 2018 +0200
# Node ID 309407bf691d77df2d095282b3050c3d9ad3c4ae
# Parent  8e6bb4e6045f7197923717831d2ddf414aa0f443
new option for timezone in syslog message

diff -r 8e6bb4e6045f -r 309407bf691d src/core/ngx_syslog.c
--- a/src/core/ngx_syslog.c	Thu Jun 07 20:04:22 2018 +0300
+++ b/src/core/ngx_syslog.c	Tue Jun 12 23:21:00 2018 +0200
@@ -211,6 +211,9 @@
         } else if (len == 10 && ngx_strncmp(p, "nohostname", 10) == 0) {
             peer->nohostname = 1;
 
+        } else if (len == 2 && ngx_strncmp(p, "tz", 2) == 0) {
+            peer->tz = 1;
+
         } else {
             ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                "unknown syslog parameter \"%s\"", p);
@@ -238,11 +241,13 @@
     pri = peer->facility * 8 + peer->severity;
 
     if (peer->nohostname) {
-        return ngx_sprintf(buf, "<%ui>%V %V: ", pri, &ngx_cached_syslog_time,
+        return ngx_sprintf(buf, "<%ui>%V %V: ", pri,
+                           peer->tz ? &ngx_cached_syslog_time_tz : &ngx_cached_syslog_time,
                            &peer->tag);
     }
 
-    return ngx_sprintf(buf, "<%ui>%V %V %V: ", pri, &ngx_cached_syslog_time,
+    return ngx_sprintf(buf, "<%ui>%V %V %V: ", pri,
+                       peer->tz ? &ngx_cached_syslog_time_tz : &ngx_cached_syslog_time,
                        &ngx_cycle->hostname, &peer->tag);
 }
 
diff -r 8e6bb4e6045f -r 309407bf691d src/core/ngx_syslog.h
--- a/src/core/ngx_syslog.h	Thu Jun 07 20:04:22 2018 +0300
+++ b/src/core/ngx_syslog.h	Tue Jun 12 23:21:00 2018 +0200
@@ -17,6 +17,7 @@
     ngx_connection_t  conn;
     unsigned          busy:1;
     unsigned          nohostname:1;
+    unsigned          tz:1;
 } ngx_syslog_peer_t;
 
diff -r 8e6bb4e6045f -r 309407bf691d src/core/ngx_times.c
--- a/src/core/ngx_times.c	Thu Jun 07 20:04:22 2018 +0300
+++ b/src/core/ngx_times.c	Tue Jun 12 23:21:00 2018 +0200
@@ -33,6 +33,7 @@
 volatile ngx_str_t       ngx_cached_http_log_time;
 volatile ngx_str_t       ngx_cached_http_log_iso8601;
 volatile ngx_str_t       ngx_cached_syslog_time;
+volatile ngx_str_t       ngx_cached_syslog_time_tz;
 
 #if !(NGX_WIN32)
 
@@ -56,6 +57,9 @@
                                     [sizeof("1970-09-28T12:00:00+06:00")];
 static u_char            cached_syslog_time[NGX_TIME_SLOTS]
                                     [sizeof("Sep 28 12:00:00")];
+static u_char            cached_syslog_time_tz[NGX_TIME_SLOTS]
+                                    [sizeof("1970-09-28T12:00:00+06:00")];
+
 
 static char  *week[] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
 
@@ -70,6 +74,7 @@
     ngx_cached_http_log_time.len = sizeof("28/Sep/1970:12:00:00 +0600") - 1;
     ngx_cached_http_log_iso8601.len = sizeof("1970-09-28T12:00:00+06:00") - 1;
     ngx_cached_syslog_time.len = sizeof("Sep 28 12:00:00") - 1;
+    ngx_cached_syslog_time_tz.len = sizeof("1970-09-28T12:00:00+06:00") - 1;
 
     ngx_cached_time = &cached_time[0];
 
@@ -80,7 +85,7 @@
 void
 ngx_time_update(void)
 {
-    u_char          *p0, *p1, *p2, *p3, *p4;
+    u_char          *p0, *p1, *p2, *p3, *p4, *p5;
     ngx_tm_t         tm, gmt;
     time_t           sec;
     ngx_uint_t       msec;
@@ -179,6 +184,15 @@
                        months[tm.ngx_tm_mon - 1], tm.ngx_tm_mday,
                        tm.ngx_tm_hour, tm.ngx_tm_min, tm.ngx_tm_sec);
 
+    p5 = &cached_syslog_time_tz[slot][0];
+
+    (void) ngx_sprintf(p5, "%4d-%02d-%02dT%02d:%02d:%02d%c%02i:%02i",
+                       tm.ngx_tm_year, tm.ngx_tm_mon,
+                       tm.ngx_tm_mday, tm.ngx_tm_hour,
+                       tm.ngx_tm_min, tm.ngx_tm_sec,
+                       tp->gmtoff < 0 ? '-' : '+',
+                       ngx_abs(tp->gmtoff / 60), ngx_abs(tp->gmtoff % 60));
+
     ngx_memory_barrier();
 
     ngx_cached_time = tp;
@@ -187,6 +201,7 @@
     ngx_cached_http_log_time.data = p2;
     ngx_cached_http_log_iso8601.data = p3;
     ngx_cached_syslog_time.data = p4;
+    ngx_cached_syslog_time_tz.data = p5;
 
     ngx_unlock(&ngx_time_lock);
 }
 
@@ -222,7 +237,7 @@
 void
 ngx_time_sigsafe_update(void)
 {
-    u_char          *p, *p2;
+    u_char          *p, *p2, *p3;
     ngx_tm_t         tm;
     time_t           sec;
     ngx_time_t      *tp;
@@ -268,10 +283,20 @@
                        months[tm.ngx_tm_mon - 1], tm.ngx_tm_mday,
                        tm.ngx_tm_hour, tm.ngx_tm_min, tm.ngx_tm_sec);
 
+    p3 = &cached_syslog_time_tz[slot][0];
+
+    (void) ngx_sprintf(p3, "%4d-%02d-%02dT%02d:%02d:%02d%c%02i:%02i",
+                       tm.ngx_tm_year, tm.ngx_tm_mon,
+                       tm.ngx_tm_mday, tm.ngx_tm_hour,
+                       tm.ngx_tm_min, tm.ngx_tm_sec,
+                       tp->gmtoff < 0 ? '-' : '+',
+                       ngx_abs(tp->gmtoff / 60), ngx_abs(tp->gmtoff % 60));
+
     ngx_memory_barrier();
 
     ngx_cached_err_log_time.data = p;
     ngx_cached_syslog_time.data = p2;
+    ngx_cached_syslog_time_tz.data = p3;
 
     ngx_unlock(&ngx_time_lock);
 }
 
diff -r 8e6bb4e6045f -r 309407bf691d src/core/ngx_times.h
--- a/src/core/ngx_times.h	Thu Jun 07 20:04:22 2018 +0300
+++ b/src/core/ngx_times.h	Tue Jun 12 23:21:00 2018 +0200
@@ -41,6 +41,7 @@
 extern volatile ngx_str_t    ngx_cached_http_log_time;
 extern volatile ngx_str_t    ngx_cached_http_log_iso8601;
 extern volatile ngx_str_t    ngx_cached_syslog_time;
+extern volatile ngx_str_t    ngx_cached_syslog_time_tz;
 
 /*
  * milliseconds elapsed since some unspecified point in the past

-- 
Best regards,

Tomas Hulata /Head of the Systems Engineering Department/
tel: +420 733 163 238
NETBOX SMART Comp. a.s., Kubíčkova 8, 635 00, Brno
Tel./fax: +420 544 423 411, www.netbox.cz

From mdounin at mdounin.ru  Mon Jun 18 17:36:52 2018
From: mdounin at mdounin.ru (Maxim Dounin)
Date: Mon, 18 Jun 2018 20:36:52 +0300
Subject: syslog with timezone
In-Reply-To: <2101c4c6-5bb3-f4d6-8570-85448783ffbc@netbox.cz>
References: <2101c4c6-5bb3-f4d6-8570-85448783ffbc@netbox.cz>
Message-ID: <20180618173652.GM32137@mdounin.ru>

Hello!

On Mon, Jun 18, 2018 at 06:08:03PM +0200, Tomas Hulata wrote:

> According rfc5424 there could or should be time zone specified in syslog
> message. There are problems for example with graylog, so my patch is
> adding optional switch to enable different datetime formatting with
> timezone support to syslog module.

Logging to syslog follows RFC 3164, not RFC 5424.  Trying to change
it to a different standard via optional switches looks like a bad
idea.

-- 
Maxim Dounin
http://mdounin.ru/

From tomas.hulata at netbox.cz  Mon Jun 18 22:25:07 2018
From: tomas.hulata at netbox.cz (Tomas Hulata)
Date: Tue, 19 Jun 2018 00:25:07 +0200
Subject: syslog with timezone
In-Reply-To: <20180618173652.GM32137@mdounin.ru>
References: <2101c4c6-5bb3-f4d6-8570-85448783ffbc@netbox.cz> <20180618173652.GM32137@mdounin.ru>
Message-ID: 

Hello,
thanks for reply. RFC 5424 obsoletes RFC 3164, so what is the reason to
follow just old/obsolete one?

On 06/18/2018 07:36 PM, Maxim Dounin wrote:
> Hello!
>
> On Mon, Jun 18, 2018 at 06:08:03PM +0200, Tomas Hulata wrote:
>
>> According rfc5424 there could or should be time zone specified in syslog
>> message.
>> There are problems for example with graylog, so my patch is
>> adding optional switch to enable different datetime formatting with
>> timezone support to syslog module.
>
> Logging to syslog follows RFC 3164, not RFC 5424.  Trying to change
> it to a different standard via optional switches looks like a bad
> idea.
>

-- 
Best regards,

Tomas Hulata /Head of the Systems Engineering Department/
tel: +420 733 163 238
NETBOX SMART Comp. a.s., Kubíčkova 8, 635 00, Brno
Tel./fax: +420 544 423 411, www.netbox.cz

From xeioex at nginx.com  Tue Jun 19 11:06:18 2018
From: xeioex at nginx.com (Dmitry Volyntsev)
Date: Tue, 19 Jun 2018 11:06:18 +0000
Subject: [njs] Version 0.2.2.
Message-ID: 

details:   http://hg.nginx.org/njs/rev/4adbb035caa3
branches:  
changeset: 537:4adbb035caa3
user:      Dmitry Volyntsev
date:      Tue Jun 19 14:04:59 2018 +0300
description:
Version 0.2.2.

diffstat:

 CHANGES |  33 ++++++++++++++++++++++++++++++++-
 1 files changed, 32 insertions(+), 1 deletions(-)

diffs (43 lines):

diff -r e99e0a7f4fae -r 4adbb035caa3 CHANGES
--- a/CHANGES	Wed Jun 13 19:38:47 2018 +0300
+++ b/CHANGES	Tue Jun 19 14:04:59 2018 +0300
@@ -1,7 +1,38 @@
+
+Changes with njs 0.2.2                                       19 Jun 2018
+
+    nginx modules:
+
+    *) Change: merged HTTP Response and Reply into Request.
+       New members of Request:
+         req.status         (res.status)
+         req.parent         (reply.parent)
+         req.requestBody    (req.body)
+         req.responseBody   (reply.body)
+         req.headersIn      (req.headers)
+         req.headersOut     (res.headers)
+         req.sendHeader()   (res.sendHeader())
+         req.send()         (res.send())
+         req.finish()       (res.finish())
+         req.return()       (res.return())
+
+       Deprecated members of Request:
+         req.body      (use req.requestBody or req.responseBody)
+         req.headers   (use req.headersIn or req.headersOut)
+         req.response
+
+       The deprecated properties will be removed in the following
+       releases.
+
+    *) Feature: HTTP internalRedirect() method.
+
+    Core:
+
+    *) Bugfix: fixed heap-buffer-overflow in crypto.createHmac().
 
 Changes with njs 0.2.1                                       31 May 2018
 
-    Nginx modules:
+    nginx modules:
 
     *) Feature: HTTP request body getter.
 

From xeioex at nginx.com  Tue Jun 19 11:06:18 2018
From: xeioex at nginx.com (Dmitry Volyntsev)
Date: Tue, 19 Jun 2018 11:06:18 +0000
Subject: [njs] Added tag 0.2.2 for changeset 4adbb035caa3
Message-ID: 

details:   http://hg.nginx.org/njs/rev/25aa7c24a446
branches:  
changeset: 538:25aa7c24a446
user:      Dmitry Volyntsev
date:      Tue Jun 19 14:05:18 2018 +0300
description:
Added tag 0.2.2 for changeset 4adbb035caa3

diffstat:

 .hgtags |  1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diffs (8 lines):

diff -r 4adbb035caa3 -r 25aa7c24a446 .hgtags
--- a/.hgtags	Tue Jun 19 14:04:59 2018 +0300
+++ b/.hgtags	Tue Jun 19 14:05:18 2018 +0300
@@ -16,3 +16,4 @@ d89d06dc638e78f8635c0bfbcd02469ac1a08748
 215ca47b9167d513fd58ac88de97659377e45275 0.1.15
 ddd1b2c9c64b2d459e9c399554dfaadcaabcc364 0.2.0
 2a0a59728b5f197379ca62a334a516fabd4ea392 0.2.1
+4adbb035caa39dae58611a061d78bc974652231e 0.2.2

From xeioex at nginx.com  Tue Jun 19 11:09:00 2018
From: xeioex at nginx.com (Dmitry Volyntsev)
Date: Tue, 19 Jun 2018 11:09:00 +0000
Subject: [njs] Version bump.
Message-ID: 

details:   http://hg.nginx.org/njs/rev/711079f55d46
branches:  
changeset: 539:711079f55d46
user:      Dmitry Volyntsev
date:      Tue Jun 19 14:07:22 2018 +0300
description:
Version bump.
diffstat: njs/njs.h | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diffs (12 lines): diff -r 25aa7c24a446 -r 711079f55d46 njs/njs.h --- a/njs/njs.h Tue Jun 19 14:05:18 2018 +0300 +++ b/njs/njs.h Tue Jun 19 14:07:22 2018 +0300 @@ -11,7 +11,7 @@ #include -#define NJS_VERSION "0.2.2" +#define NJS_VERSION "0.2.3" #include From mdounin at mdounin.ru Wed Jun 20 12:45:42 2018 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 20 Jun 2018 15:45:42 +0300 Subject: syslog with timezone In-Reply-To: References: <2101c4c6-5bb3-f4d6-8570-85448783ffbc@netbox.cz> <20180618173652.GM32137@mdounin.ru> Message-ID: <20180620124542.GO32137@mdounin.ru> Hello! On Tue, Jun 19, 2018 at 12:25:07AM +0200, Tomas Hulata wrote: > thanks for reply. RFC 5424 obsoletes RFC 3164, so what is the reason to > follow just old/obsolete one? RFC 3164 and RFC 5424 are different protocols. And RFC 3164 is much more common. -- Maxim Dounin http://mdounin.ru/ From igudger at google.com Wed Jun 20 17:58:11 2018 From: igudger at google.com (Ian Gudger) Date: Wed, 20 Jun 2018 10:58:11 -0700 Subject: [nginx] Core: remove unused FIOASYNC. Message-ID: # HG changeset patch # User Ian Gudger # Date 1529449008 25200 # Tue Jun 19 15:56:48 2018 -0700 # Node ID 9427538acbc50142afbe91a11a1d4f907a00d257 # Parent 118885f7a5774962f1145693d9c26a4c199ca6ea Core: remove unused FIOASYNC. FIOASYNC, F_SETOWN and SIGIO seem to no longer serve any function. diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c --- a/src/os/unix/ngx_process.c +++ b/src/os/unix/ngx_process.c @@ -71,8 +71,6 @@ ngx_signal_t signals[] = { { SIGINT, "SIGINT", "", ngx_signal_handler }, - { SIGIO, "SIGIO", "", ngx_signal_handler }, - { SIGCHLD, "SIGCHLD", "", ngx_signal_handler }, { SIGSYS, "SIGSYS, SIG_IGN", "", NULL }, @@ -87,7 +85,6 @@ ngx_pid_t ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, char *name, ngx_int_t respawn) { - u_long on; ngx_pid_t pid; ngx_int_t s; @@ -142,21 +139,6 @@ ngx_spawn_process(ngx_cycle_t *cycle, ng return NGX_INVALID_PID; } - on = 1; - if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "ioctl(FIOASYNC) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; - } - - if (fcntl(ngx_processes[s].channel[0], F_SETOWN, ngx_pid) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "fcntl(F_SETOWN) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; :...skipping... # HG changeset patch # User Ian Gudger # Date 1529449008 25200 # Tue Jun 19 15:56:48 2018 -0700 # Node ID 9427538acbc50142afbe91a11a1d4f907a00d257 # Parent 118885f7a5774962f1145693d9c26a4c199ca6ea Core: remove unused FIOASYNC. FIOASYNC, F_SETOWN and SIGIO seem to no longer serve any function. 
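For context, the calls being removed are the classic asynchronous-notification
setup: FIOASYNC asks the kernel to raise SIGIO when a descriptor becomes ready,
and F_SETOWN names the process that should receive the signal. A minimal sketch
mirroring the removed channel setup (the descriptor and owner pid here are
placeholders, and error handling is reduced to the bare minimum):

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <fcntl.h>

    static int
    enable_sigio(int fd, pid_t owner)
    {
        u_long  on = 1;

        /* request SIGIO whenever "fd" becomes readable or writable */
        if (ioctl(fd, FIOASYNC, &on) == -1) {
            return -1;
        }

        /* deliver that SIGIO to "owner" rather than to nobody */
        if (fcntl(fd, F_SETOWN, owner) == -1) {
            return -1;
        }

        return 0;
    }

Whether that notification still has any effect in the master process is
exactly what the review discussion below turns on.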
diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c --- a/src/os/unix/ngx_process.c +++ b/src/os/unix/ngx_process.c @@ -71,8 +71,6 @@ ngx_signal_t signals[] = { { SIGINT, "SIGINT", "", ngx_signal_handler }, - { SIGIO, "SIGIO", "", ngx_signal_handler }, - { SIGCHLD, "SIGCHLD", "", ngx_signal_handler }, { SIGSYS, "SIGSYS, SIG_IGN", "", NULL }, @@ -87,7 +85,6 @@ ngx_pid_t ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, char *name, ngx_int_t respawn) { - u_long on; ngx_pid_t pid; ngx_int_t s; @@ -142,21 +139,6 @@ ngx_spawn_process(ngx_cycle_t *cycle, ng return NGX_INVALID_PID; } - on = 1; - if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "ioctl(FIOASYNC) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; - } - - if (fcntl(ngx_processes[s].channel[0], F_SETOWN, ngx_pid) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "fcntl(F_SETOWN) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; - } - if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", @@ -394,10 +376,6 @@ ngx_signal_handler(int signo, siginfo_t ngx_sigalrm = 1; break; - case SIGIO: - ngx_sigio = 1; - break; - case SIGCHLD: ngx_reap = 1; break; @@ -433,8 +411,6 @@ ngx_signal_handler(int signo, siginfo_t case ngx_signal_value(NGX_RECONFIGURE_SIGNAL): case ngx_signal_value(NGX_CHANGEBIN_SIGNAL): - case SIGIO: - action = ", ignoring"; break; } diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c +++ b/src/os/unix/ngx_process_cycle.c @@ -34,7 +34,6 @@ ngx_pid_t ngx_pid; ngx_pid_t ngx_parent; sig_atomic_t ngx_reap; -sig_atomic_t ngx_sigio; sig_atomic_t ngx_sigalrm; sig_atomic_t ngx_terminate; sig_atomic_t ngx_quit; @@ -88,7 +87,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy sigemptyset(&set); sigaddset(&set, SIGCHLD); sigaddset(&set, SIGALRM); - sigaddset(&set, SIGIO); sigaddset(&set, SIGINT); sigaddset(&set, ngx_signal_value(NGX_RECONFIGURE_SIGNAL)); sigaddset(&set, ngx_signal_value(NGX_REOPEN_SIGNAL)); diff --git a/src/os/unix/ngx_process_cycle.h b/src/os/unix/ngx_process_cycle.h --- a/src/os/unix/ngx_process_cycle.h +++ b/src/os/unix/ngx_process_cycle.h @@ -47,7 +47,6 @@ extern ngx_uint_t ngx_daemonized; extern ngx_uint_t ngx_exiting; extern sig_atomic_t ngx_reap; -extern sig_atomic_t ngx_sigio; extern sig_atomic_t ngx_sigalrm; extern sig_atomic_t ngx_quit; extern sig_atomic_t ngx_debug_quit; From tomas.hulata at netbox.cz Wed Jun 20 22:56:01 2018 From: tomas.hulata at netbox.cz (Tomas Hulata) Date: Thu, 21 Jun 2018 00:56:01 +0200 Subject: syslog with timezone In-Reply-To: <20180620124542.GO32137@mdounin.ru> References: <2101c4c6-5bb3-f4d6-8570-85448783ffbc@netbox.cz> <20180618173652.GM32137@mdounin.ru> <20180620124542.GO32137@mdounin.ru> Message-ID: Hi, there is one serious argument against the patch, that I didn't provide full rfc implementation...but take a look at your syslog nohostname option, is it rfc3164 compliant? On 06/20/2018 02:45 PM, Maxim Dounin wrote: > Hello! > > On Tue, Jun 19, 2018 at 12:25:07AM +0200, Tomas Hulata wrote: > >> thanks for reply. RFC 5424 obsoletes RFC 3164, so what is the reason to >> follow just old/obsolete one? > RFC 3164 and RFC 5424 are different protocols. 
And RFC 3164 is > much more common. > -- S pozdravom Tomas Hulata /vedouc? ?seku systemov?ch in?en?r?/ tel: +420 733 163 238 NETBOX SMART Comp. a.s., Kub??kova 8, 635 00, Brno Tel./fax: +420 544 423 411, www.netbox.cz -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 4185 bytes Desc: S/MIME Cryptographic Signature URL: From scott.oaks at oracle.com Mon Jun 25 17:42:59 2018 From: scott.oaks at oracle.com (scott.oaks at oracle.com) Date: Mon, 25 Jun 2018 13:42:59 -0400 Subject: How does nginx handle incomplete upstream writes Message-ID: I have a situation where when using nginx as a proxy, large POST requests to my upstream server sometimes fail -- for whatever reason (network congestion, upstream server paused for java GC. etc.), the initial writes fill up the socket buffers and so ngx_writev gets a EAGAIN. So that's pretty normal; I'd expect nginx to handle that, poll the fd, and retry the I/O when the socket is ready to write again. [In my case, that's always within a few seconds; if the socket isn't available within proxy_send_timeout, then I'd expect the request to be aborted.] Within the low-level parts of the nginx code, I see the framework for this all in place: ngx_writev() returns NGX_AGAIN to ngx_linux_sendfile_chain. That calls ngx_chain_update_sent to adjust the buffer for the amount written and marks the ngx event holder as not ready. Later, I do see nginx poll the fd; and the event holder gets marked as ready, but by that time the partially-written data has been lost and so is never written. Before that time, http_finalize_request has been called with a status of NGX_DONE on the call path back up from ngx_writev(), and the pending data seems also have been discarded. Interestingly, the call stack and path here seem to be the same whether proxy_request_buffering is on or off. Have I missed something ( must have, right?), or is the partial write situation just not handled properly at all? If it is the case that the state required to keep track of the buffer is not propagated though the code and it would be a big thing to fix, then the simpler way to fix it is an option that ngx_writev() wait on a temporary selector until the data can be written (or until the proxy_send_timeout). That blocks the worker for that time, but the worker can really only handle one request at a time anyway, correct? So it is pretty much indistinguishable from a slightly-faster but still pretty slow upstream server. -Scott From igudger at google.com Mon Jun 25 18:16:12 2018 From: igudger at google.com (Ian Gudger) Date: Mon, 25 Jun 2018 11:16:12 -0700 Subject: [PATCH] Core: remove unused FIOASYNC. Message-ID: # HG changeset patch # User Ian Gudger # Date 1529449008 25200 # Tue Jun 19 15:56:48 2018 -0700 # Node ID 9427538acbc50142afbe91a11a1d4f907a00d257 # Parent 118885f7a5774962f1145693d9c26a4c199ca6ea Core: remove unused FIOASYNC. FIOASYNC, F_SETOWN and SIGIO seem to no longer serve any function. 
diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c --- a/src/os/unix/ngx_process.c +++ b/src/os/unix/ngx_process.c @@ -71,8 +71,6 @@ ngx_signal_t signals[] = { { SIGINT, "SIGINT", "", ngx_signal_handler }, - { SIGIO, "SIGIO", "", ngx_signal_handler }, - { SIGCHLD, "SIGCHLD", "", ngx_signal_handler }, { SIGSYS, "SIGSYS, SIG_IGN", "", NULL }, @@ -87,7 +85,6 @@ ngx_pid_t ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, char *name, ngx_int_t respawn) { - u_long on; ngx_pid_t pid; ngx_int_t s; @@ -142,21 +139,6 @@ ngx_spawn_process(ngx_cycle_t *cycle, ng return NGX_INVALID_PID; } - on = 1; - if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "ioctl(FIOASYNC) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; - } - - if (fcntl(ngx_processes[s].channel[0], F_SETOWN, ngx_pid) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "fcntl(F_SETOWN) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; - } - if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", @@ -394,10 +376,6 @@ ngx_signal_handler(int signo, siginfo_t ngx_sigalrm = 1; break; - case SIGIO: - ngx_sigio = 1; - break; - case SIGCHLD: ngx_reap = 1; break; @@ -433,8 +411,6 @@ ngx_signal_handler(int signo, siginfo_t case ngx_signal_value(NGX_RECONFIGURE_SIGNAL): case ngx_signal_value(NGX_CHANGEBIN_SIGNAL): - case SIGIO: - action = ", ignoring"; break; } diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c +++ b/src/os/unix/ngx_process_cycle.c @@ -34,7 +34,6 @@ ngx_pid_t ngx_pid; ngx_pid_t ngx_parent; sig_atomic_t ngx_reap; -sig_atomic_t ngx_sigio; sig_atomic_t ngx_sigalrm; sig_atomic_t ngx_terminate; sig_atomic_t ngx_quit; @@ -88,7 +87,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy sigemptyset(&set); sigaddset(&set, SIGCHLD); sigaddset(&set, SIGALRM); - sigaddset(&set, SIGIO); sigaddset(&set, SIGINT); sigaddset(&set, ngx_signal_value(NGX_RECONFIGURE_SIGNAL)); sigaddset(&set, ngx_signal_value(NGX_REOPEN_SIGNAL)); diff --git a/src/os/unix/ngx_process_cycle.h b/src/os/unix/ngx_process_cycle.h --- a/src/os/unix/ngx_process_cycle.h +++ b/src/os/unix/ngx_process_cycle.h @@ -47,7 +47,6 @@ extern ngx_uint_t ngx_daemonized; extern ngx_uint_t ngx_exiting; extern sig_atomic_t ngx_reap; -extern sig_atomic_t ngx_sigio; extern sig_atomic_t ngx_sigalrm; extern sig_atomic_t ngx_quit; extern sig_atomic_t ngx_debug_quit; From vbart at nginx.com Tue Jun 26 15:26:59 2018 From: vbart at nginx.com (Valentin Bartenev) Date: Tue, 26 Jun 2018 15:26:59 +0000 Subject: [njs] Introduced nxt_length() macro. Message-ID: details: http://hg.nginx.org/njs/rev/255c79369f59 branches: changeset: 540:255c79369f59 user: Valentin Bartenev date: Tue Jun 26 18:25:22 2018 +0300 description: Introduced nxt_length() macro. 
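The macro being introduced is a thin wrapper over the usual sizeof(s) - 1
idiom for string literals. A small usage sketch (the printed values are just
two literals taken from the diff below); note that it is only meaningful for
literals and arrays, since sizeof applied to a pointer yields the pointer size
rather than the string length:

    #include <stdio.h>

    #define nxt_length(s)  (sizeof(s) - 1)

    int
    main(void)
    {
        /* compile-time lengths of string literals, excluding the trailing '\0' */
        printf("%zu\n", nxt_length("Content-Length"));   /* 14 */
        printf("%zu\n", nxt_length("InternalError: "));  /* 15 */

        return 0;
    }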
diffstat: nginx/ngx_http_js_module.c | 4 ++-- njs/njs.c | 2 +- njs/njs_extern.c | 2 +- njs/njs_json.c | 10 +++++----- njs/njs_parser.c | 2 +- njs/njs_string.c | 2 +- njs/njs_vm.h | 8 ++++---- nxt/nxt_string.h | 3 ++- nxt/test/random_unit_test.c | 2 +- nxt/test/utf8_unit_test.c | 4 ++-- 10 files changed, 20 insertions(+), 19 deletions(-) diffs (157 lines): diff -r 711079f55d46 -r 255c79369f59 nginx/ngx_http_js_module.c --- a/nginx/ngx_http_js_module.c Tue Jun 19 14:07:22 2018 +0300 +++ b/nginx/ngx_http_js_module.c Tue Jun 26 18:25:22 2018 +0300 @@ -1157,9 +1157,9 @@ ngx_http_js_ext_set_header_out(njs_vm_t h->value.data = p; h->value.len = value->length; - if (h->key.len == sizeof("Content-Length") - 1 + if (h->key.len == nxt_length("Content-Length") && ngx_strncasecmp(h->key.data, (u_char *) "Content-Length", - sizeof("Content-Length") - 1) == 0) + nxt_length("Content-Length")) == 0) { n = ngx_atoi(value->start, value->length); if (n == NGX_ERROR) { diff -r 711079f55d46 -r 255c79369f59 njs/njs.c --- a/njs/njs.c Tue Jun 19 14:07:22 2018 +0300 +++ b/njs/njs.c Tue Jun 26 18:25:22 2018 +0300 @@ -131,7 +131,7 @@ njs_vm_create(njs_vm_opt_t *options) nxt_lvlhsh_init(&vm->shared->values_hash); pattern = njs_regexp_pattern_create(vm, (u_char *) "(?:)", - sizeof("(?:)") - 1, 0); + nxt_length("(?:)"), 0); if (nxt_slow_path(pattern == NULL)) { return NULL; } diff -r 711079f55d46 -r 255c79369f59 njs/njs_extern.c --- a/njs/njs_extern.c Tue Jun 19 14:07:22 2018 +0300 +++ b/njs/njs_extern.c Tue Jun 26 18:25:22 2018 +0300 @@ -273,7 +273,7 @@ found: len = 0; for (pr = head; pr != NULL; pr = pr->next) { - len += pr->str.length + sizeof(".") - 1; + len += pr->str.length + nxt_length("."); } buf = nxt_mem_cache_zalloc(vm->mem_cache_pool, len); diff -r 711079f55d46 -r 255c79369f59 njs/njs_json.c --- a/njs/njs_json.c Tue Jun 19 14:07:22 2018 +0300 +++ b/njs/njs_json.c Tue Jun 26 18:25:22 2018 +0300 @@ -1400,19 +1400,19 @@ done: * The value to stringify is wrapped as '{"": value}'. * An empty object means empty result. */ - if (str.length <= sizeof("{\n\n}") - 1) { + if (str.length <= nxt_length("{\n\n}")) { vm->retval = njs_value_void; return NXT_OK; } /* Stripping the wrapper's data. 
*/ - str.start += sizeof("{\"\":") - 1; - str.length -= sizeof("{\"\":}") - 1; + str.start += nxt_length("{\"\":"); + str.length -= nxt_length("{\"\":}"); if (stringify->space.length != 0) { - str.start += sizeof("\n ") - 1; - str.length -= sizeof("\n \n") - 1; + str.start += nxt_length("\n "); + str.length -= nxt_length("\n \n"); } length = nxt_utf8_length(str.start, str.length); diff -r 711079f55d46 -r 255c79369f59 njs/njs_parser.c --- a/njs/njs_parser.c Tue Jun 19 14:07:22 2018 +0300 +++ b/njs/njs_parser.c Tue Jun 26 18:25:22 2018 +0300 @@ -2589,7 +2589,7 @@ njs_parser_trace_handler(nxt_trace_t *tr size_t size; njs_vm_t *vm; - size = sizeof("InternalError: ") - 1; + size = nxt_length("InternalError: "); memcpy(start, "InternalError: ", size); p = start + size; diff -r 711079f55d46 -r 255c79369f59 njs/njs_string.c --- a/njs/njs_string.c Tue Jun 19 14:07:22 2018 +0300 +++ b/njs/njs_string.c Tue Jun 26 18:25:22 2018 +0300 @@ -3165,7 +3165,7 @@ njs_string_to_number(const njs_value_t * nxt_bool_t minus; const u_char *p, *start, *end; - const size_t infinity = sizeof("Infinity") - 1; + const size_t infinity = nxt_length("Infinity"); size = value->short_string.size; diff -r 711079f55d46 -r 255c79369f59 njs/njs_vm.h --- a/njs/njs_vm.h Tue Jun 19 14:07:22 2018 +0300 +++ b/njs/njs_vm.h Tue Jun 26 18:25:22 2018 +0300 @@ -365,8 +365,8 @@ typedef struct { #define njs_string(s) { \ .short_string = { \ .type = NJS_STRING, \ - .size = sizeof(s) - 1, \ - .length = sizeof(s) - 1, \ + .size = nxt_length(s), \ + .length = nxt_length(s), \ .start = s, \ } \ } @@ -378,10 +378,10 @@ typedef struct { .long_string = { \ .type = NJS_STRING, \ .truth = (NJS_STRING_LONG << 4) | NJS_STRING_LONG, \ - .size = sizeof(s) - 1, \ + .size = nxt_length(s), \ .data = & (njs_string_t) { \ .start = (u_char *) s, \ - .length = sizeof(s) - 1, \ + .length = nxt_length(s), \ } \ } \ } diff -r 711079f55d46 -r 255c79369f59 nxt/nxt_string.h --- a/nxt/nxt_string.h Tue Jun 19 14:07:22 2018 +0300 +++ b/nxt/nxt_string.h Tue Jun 26 18:25:22 2018 +0300 @@ -21,7 +21,8 @@ typedef struct { * So a separate nxt_string_value() macro is intended to use in assignment. */ -#define nxt_string(s) { sizeof(s) - 1, (u_char *) s } +#define nxt_length(s) (sizeof(s) - 1) +#define nxt_string(s) { nxt_length(s), (u_char *) s } #define nxt_null_string { 0, NULL } #define nxt_string_value(s) (nxt_str_t) nxt_string(s) diff -r 711079f55d46 -r 255c79369f59 nxt/test/random_unit_test.c --- a/nxt/test/random_unit_test.c Tue Jun 19 14:07:22 2018 +0300 +++ b/nxt/test/random_unit_test.c Tue Jun 26 18:25:22 2018 +0300 @@ -24,7 +24,7 @@ random_unit_test(void) r.count = 400000; - nxt_random_add(&r, (u_char *) "arc4random", sizeof("arc4random") - 1); + nxt_random_add(&r, (u_char *) "arc4random", nxt_length("arc4random")); /* * Test arc4random() numbers. diff -r 711079f55d46 -r 255c79369f59 nxt/test/utf8_unit_test.c --- a/nxt/test/utf8_unit_test.c Tue Jun 19 14:07:22 2018 +0300 +++ b/nxt/test/utf8_unit_test.c Tue Jun 26 18:25:22 2018 +0300 @@ -177,8 +177,8 @@ utf8_unit_test(nxt_uint_t start) n = nxt_utf8_casecmp((u_char *) "ABC ??? ???", (u_char *) "abc ??? ???", - sizeof("ABC ??? ???") - 1, - sizeof("abc ??? ???") - 1); + nxt_length("ABC ??? ???"), + nxt_length("abc ??? 
???")); if (n != 0) { printf("nxt_utf8_casecmp() failed\n"); From mdounin at mdounin.ru Wed Jun 27 03:57:03 2018 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 27 Jun 2018 06:57:03 +0300 Subject: How does nginx handle incomplete upstream writes In-Reply-To: References: Message-ID: <20180627035703.GD35731@mdounin.ru> Hello! On Mon, Jun 25, 2018 at 01:42:59PM -0400, scott.oaks at oracle.com wrote: > I have a situation where when using nginx as a proxy, large POST > requests to my upstream server sometimes fail -- for whatever reason > (network congestion, upstream server paused for java GC. etc.), the > initial writes fill up the socket buffers and so ngx_writev gets a > EAGAIN. So that's pretty normal; I'd expect nginx to handle that, poll > the fd, and retry the I/O when the socket is ready to write again. [In > my case, that's always within a few seconds; if the socket isn't > available within proxy_send_timeout, then I'd expect the request to be > aborted.] > > Within the low-level parts of the nginx code, I see the framework for > this all in place: ngx_writev() returns NGX_AGAIN to > ngx_linux_sendfile_chain. That calls ngx_chain_update_sent to adjust the > buffer for the amount written and marks the ngx event holder as not > ready. Later, I do see nginx poll the fd; and the event holder gets > marked as ready, but by that time the partially-written data has been > lost and so is never written. Before that time, http_finalize_request > has been called with a status of NGX_DONE on the call path back up from > ngx_writev(), and the pending data seems also have been discarded. > Interestingly, the call stack and path here seem to be the same whether > proxy_request_buffering is on or off. > > Have I missed something ( must have, right?), or is the partial write > situation just not handled properly at all? It is properly handled. Buffers not yet written to the socket are referenced by ngx_chain_writer() in ctx->out and nginx will try to sent them again once the socket is ready for writing. [...] > proxy_send_timeout). That blocks the worker for that time, but the > worker can really only handle one request at a time anyway, correct? No. Each worker can handle thousands of active requests by switching between them as sockets are ready for reading or writing. Blocking nginx worker is a bad idea, as this block all requests. -- Maxim Dounin http://mdounin.ru/ From ru at nginx.com Wed Jun 27 12:57:05 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Wed, 27 Jun 2018 15:57:05 +0300 Subject: [PATCH] Core: remove unused FIOASYNC. In-Reply-To: References: Message-ID: <20180627125705.GE62373@lo0.su> On Mon, Jun 25, 2018 at 11:16:12AM -0700, Ian Gudger via nginx-devel wrote: > # HG changeset patch > # User Ian Gudger > # Date 1529449008 25200 > # Tue Jun 19 15:56:48 2018 -0700 > # Node ID 9427538acbc50142afbe91a11a1d4f907a00d257 > # Parent 118885f7a5774962f1145693d9c26a4c199ca6ea > Core: remove unused FIOASYNC. > > FIOASYNC, F_SETOWN and SIGIO seem to no longer serve any function. Can you decode your "seem to no longer server any function", please? 
> diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c > --- a/src/os/unix/ngx_process.c > +++ b/src/os/unix/ngx_process.c > @@ -71,8 +71,6 @@ ngx_signal_t signals[] = { > > { SIGINT, "SIGINT", "", ngx_signal_handler }, > > - { SIGIO, "SIGIO", "", ngx_signal_handler }, > - > { SIGCHLD, "SIGCHLD", "", ngx_signal_handler }, > > { SIGSYS, "SIGSYS, SIG_IGN", "", NULL }, > @@ -87,7 +85,6 @@ ngx_pid_t > ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, > char *name, ngx_int_t respawn) > { > - u_long on; > ngx_pid_t pid; > ngx_int_t s; > > @@ -142,21 +139,6 @@ ngx_spawn_process(ngx_cycle_t *cycle, ng > return NGX_INVALID_PID; > } > > - on = 1; > - if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { > - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > - "ioctl(FIOASYNC) failed while spawning > \"%s\"", name); > - ngx_close_channel(ngx_processes[s].channel, cycle->log); > - return NGX_INVALID_PID; > - } > - > - if (fcntl(ngx_processes[s].channel[0], F_SETOWN, ngx_pid) == -1) { > - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > - "fcntl(F_SETOWN) failed while spawning > \"%s\"", name); > - ngx_close_channel(ngx_processes[s].channel, cycle->log); > - return NGX_INVALID_PID; > - } > - > if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { > ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", > @@ -394,10 +376,6 @@ ngx_signal_handler(int signo, siginfo_t > ngx_sigalrm = 1; > break; > > - case SIGIO: > - ngx_sigio = 1; > - break; > - > case SIGCHLD: > ngx_reap = 1; > break; > @@ -433,8 +411,6 @@ ngx_signal_handler(int signo, siginfo_t > > case ngx_signal_value(NGX_RECONFIGURE_SIGNAL): > case ngx_signal_value(NGX_CHANGEBIN_SIGNAL): > - case SIGIO: > - action = ", ignoring"; Removing setting of an "action" variable looks like an error. > break; > } > > diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c > --- a/src/os/unix/ngx_process_cycle.c > +++ b/src/os/unix/ngx_process_cycle.c > @@ -34,7 +34,6 @@ ngx_pid_t ngx_pid; > ngx_pid_t ngx_parent; > > sig_atomic_t ngx_reap; > -sig_atomic_t ngx_sigio; > sig_atomic_t ngx_sigalrm; > sig_atomic_t ngx_terminate; > sig_atomic_t ngx_quit; > @@ -88,7 +87,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy > sigemptyset(&set); > sigaddset(&set, SIGCHLD); > sigaddset(&set, SIGALRM); > - sigaddset(&set, SIGIO); > sigaddset(&set, SIGINT); > sigaddset(&set, ngx_signal_value(NGX_RECONFIGURE_SIGNAL)); > sigaddset(&set, ngx_signal_value(NGX_REOPEN_SIGNAL)); > diff --git a/src/os/unix/ngx_process_cycle.h b/src/os/unix/ngx_process_cycle.h > --- a/src/os/unix/ngx_process_cycle.h > +++ b/src/os/unix/ngx_process_cycle.h > @@ -47,7 +47,6 @@ extern ngx_uint_t ngx_daemonized; > extern ngx_uint_t ngx_exiting; > > extern sig_atomic_t ngx_reap; > -extern sig_atomic_t ngx_sigio; > extern sig_atomic_t ngx_sigalrm; > extern sig_atomic_t ngx_quit; > extern sig_atomic_t ngx_debug_quit; There's also a SIGIO related code in ngx_master_process_cycle(), added in 8abb88374c6c. From igudger at google.com Wed Jun 27 17:09:47 2018 From: igudger at google.com (Ian Gudger) Date: Wed, 27 Jun 2018 10:09:47 -0700 Subject: [PATCH] Core: remove unused FIOASYNC. In-Reply-To: <20180627125705.GE62373@lo0.su> References: <20180627125705.GE62373@lo0.su> Message-ID: Actually, as far as I can tell, it never did anything other than cause signals to be delivered that were promptly ignored. It appears to have been added in eaf1f651cf86. 
There are two things in ngx_master_process_cycle() with names related to SIGIO. One is adding SIGIO to the set. That is removed in this patch. The other is a variable named sigio, added in 8abb88374c6c. This variable does not appear to have anything to do with SIGIO despite the name. I ran the tests with this patch and they all passed. Receiving signals isn't free, so this patch may improve performance. On Wed, Jun 27, 2018 at 5:57 AM Ruslan Ermilov wrote: > > On Mon, Jun 25, 2018 at 11:16:12AM -0700, Ian Gudger via nginx-devel wrote: > > # HG changeset patch > > # User Ian Gudger > > # Date 1529449008 25200 > > # Tue Jun 19 15:56:48 2018 -0700 > > # Node ID 9427538acbc50142afbe91a11a1d4f907a00d257 > > # Parent 118885f7a5774962f1145693d9c26a4c199ca6ea > > Core: remove unused FIOASYNC. > > > > FIOASYNC, F_SETOWN and SIGIO seem to no longer serve any function. > > Can you decode your "seem to no longer server any function", please? > > > diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c > > --- a/src/os/unix/ngx_process.c > > +++ b/src/os/unix/ngx_process.c > > @@ -71,8 +71,6 @@ ngx_signal_t signals[] = { > > > > { SIGINT, "SIGINT", "", ngx_signal_handler }, > > > > - { SIGIO, "SIGIO", "", ngx_signal_handler }, > > - > > { SIGCHLD, "SIGCHLD", "", ngx_signal_handler }, > > > > { SIGSYS, "SIGSYS, SIG_IGN", "", NULL }, > > @@ -87,7 +85,6 @@ ngx_pid_t > > ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, > > char *name, ngx_int_t respawn) > > { > > - u_long on; > > ngx_pid_t pid; > > ngx_int_t s; > > > > @@ -142,21 +139,6 @@ ngx_spawn_process(ngx_cycle_t *cycle, ng > > return NGX_INVALID_PID; > > } > > > > - on = 1; > > - if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { > > - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > > - "ioctl(FIOASYNC) failed while spawning > > \"%s\"", name); > > - ngx_close_channel(ngx_processes[s].channel, cycle->log); > > - return NGX_INVALID_PID; > > - } > > - > > - if (fcntl(ngx_processes[s].channel[0], F_SETOWN, ngx_pid) == -1) { > > - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > > - "fcntl(F_SETOWN) failed while spawning > > \"%s\"", name); > > - ngx_close_channel(ngx_processes[s].channel, cycle->log); > > - return NGX_INVALID_PID; > > - } > > - > > if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { > > ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > > "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", > > @@ -394,10 +376,6 @@ ngx_signal_handler(int signo, siginfo_t > > ngx_sigalrm = 1; > > break; > > > > - case SIGIO: > > - ngx_sigio = 1; > > - break; > > - > > case SIGCHLD: > > ngx_reap = 1; > > break; > > @@ -433,8 +411,6 @@ ngx_signal_handler(int signo, siginfo_t > > > > case ngx_signal_value(NGX_RECONFIGURE_SIGNAL): > > case ngx_signal_value(NGX_CHANGEBIN_SIGNAL): > > - case SIGIO: > > - action = ", ignoring"; > > Removing setting of an "action" variable looks like an error. 
> > > break; > > } > > > > diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c > > --- a/src/os/unix/ngx_process_cycle.c > > +++ b/src/os/unix/ngx_process_cycle.c > > @@ -34,7 +34,6 @@ ngx_pid_t ngx_pid; > > ngx_pid_t ngx_parent; > > > > sig_atomic_t ngx_reap; > > -sig_atomic_t ngx_sigio; > > sig_atomic_t ngx_sigalrm; > > sig_atomic_t ngx_terminate; > > sig_atomic_t ngx_quit; > > @@ -88,7 +87,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy > > sigemptyset(&set); > > sigaddset(&set, SIGCHLD); > > sigaddset(&set, SIGALRM); > > - sigaddset(&set, SIGIO); > > sigaddset(&set, SIGINT); > > sigaddset(&set, ngx_signal_value(NGX_RECONFIGURE_SIGNAL)); > > sigaddset(&set, ngx_signal_value(NGX_REOPEN_SIGNAL)); > > diff --git a/src/os/unix/ngx_process_cycle.h b/src/os/unix/ngx_process_cycle.h > > --- a/src/os/unix/ngx_process_cycle.h > > +++ b/src/os/unix/ngx_process_cycle.h > > @@ -47,7 +47,6 @@ extern ngx_uint_t ngx_daemonized; > > extern ngx_uint_t ngx_exiting; > > > > extern sig_atomic_t ngx_reap; > > -extern sig_atomic_t ngx_sigio; > > extern sig_atomic_t ngx_sigalrm; > > extern sig_atomic_t ngx_quit; > > extern sig_atomic_t ngx_debug_quit; > > There's also a SIGIO related code in ngx_master_process_cycle(), > added in 8abb88374c6c. From ru at nginx.com Wed Jun 27 18:31:39 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Wed, 27 Jun 2018 21:31:39 +0300 Subject: [PATCH] Core: remove unused FIOASYNC. In-Reply-To: References: <20180627125705.GE62373@lo0.su> Message-ID: <20180627183139.GL62373@lo0.su> On Wed, Jun 27, 2018 at 10:09:47AM -0700, Ian Gudger wrote: > Actually, as far as I can tell, it never did anything other than cause > signals to be delivered that were promptly ignored. It appears to have > been added in eaf1f651cf86. I came to the same conclusion, but I'll double check with Igor before proceeding with removing this. > There are two things in ngx_master_process_cycle() with names related > to SIGIO. One is adding SIGIO to the set. That is removed in this > patch. The other is a variable named sigio, added in 8abb88374c6c. > > This variable does not appear to have anything to do with SIGIO > despite the name. It is indeed related, please see the explanation in the above mentioned commit here: http://hg.nginx.org/nginx/rev/8abb88374c6c What I was trying to say is that your patch needs to revert this change as well: diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c +++ b/src/os/unix/ngx_process_cycle.c @@ -77,7 +77,7 @@ ngx_master_process_cycle(ngx_cycle_t *cy u_char *p; size_t size; ngx_int_t i; - ngx_uint_t n, sigio; + ngx_uint_t n; sigset_t set; struct itimerval itv; ngx_uint_t live; @@ -134,13 +134,11 @@ ngx_master_process_cycle(ngx_cycle_t *cy ngx_new_binary = 0; delay = 0; - sigio = 0; live = 1; for ( ;; ) { if (delay) { if (ngx_sigalrm) { - sigio = 0; delay *= 2; ngx_sigalrm = 0; } @@ -165,8 +163,7 @@ ngx_master_process_cycle(ngx_cycle_t *cy ngx_time_update(); - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, - "wake up, sigio %i", sigio); + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "wake up"); if (ngx_reap) { ngx_reap = 0; @@ -184,13 +181,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy delay = 50; } - if (sigio) { - sigio--; - continue; - } - - sigio = ccf->worker_processes + 2 /* cache processes */; - if (delay > 1000) { ngx_signal_worker_processes(cycle, SIGKILL); } else { > I ran the tests with this patch and they all passed. 
Receiving signals > isn't free, so this patch may improve performance. Highly unlikely in this particular case. If you want, you can update your patch. > On Wed, Jun 27, 2018 at 5:57 AM Ruslan Ermilov wrote: > > > > On Mon, Jun 25, 2018 at 11:16:12AM -0700, Ian Gudger via nginx-devel wrote: > > > # HG changeset patch > > > # User Ian Gudger > > > # Date 1529449008 25200 > > > # Tue Jun 19 15:56:48 2018 -0700 > > > # Node ID 9427538acbc50142afbe91a11a1d4f907a00d257 > > > # Parent 118885f7a5774962f1145693d9c26a4c199ca6ea > > > Core: remove unused FIOASYNC. > > > > > > FIOASYNC, F_SETOWN and SIGIO seem to no longer serve any function. > > > > Can you decode your "seem to no longer server any function", please? > > > > > diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c > > > --- a/src/os/unix/ngx_process.c > > > +++ b/src/os/unix/ngx_process.c > > > @@ -71,8 +71,6 @@ ngx_signal_t signals[] = { > > > > > > { SIGINT, "SIGINT", "", ngx_signal_handler }, > > > > > > - { SIGIO, "SIGIO", "", ngx_signal_handler }, > > > - > > > { SIGCHLD, "SIGCHLD", "", ngx_signal_handler }, > > > > > > { SIGSYS, "SIGSYS, SIG_IGN", "", NULL }, > > > @@ -87,7 +85,6 @@ ngx_pid_t > > > ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, > > > char *name, ngx_int_t respawn) > > > { > > > - u_long on; > > > ngx_pid_t pid; > > > ngx_int_t s; > > > > > > @@ -142,21 +139,6 @@ ngx_spawn_process(ngx_cycle_t *cycle, ng > > > return NGX_INVALID_PID; > > > } > > > > > > - on = 1; > > > - if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { > > > - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > > > - "ioctl(FIOASYNC) failed while spawning > > > \"%s\"", name); > > > - ngx_close_channel(ngx_processes[s].channel, cycle->log); > > > - return NGX_INVALID_PID; > > > - } > > > - > > > - if (fcntl(ngx_processes[s].channel[0], F_SETOWN, ngx_pid) == -1) { > > > - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > > > - "fcntl(F_SETOWN) failed while spawning > > > \"%s\"", name); > > > - ngx_close_channel(ngx_processes[s].channel, cycle->log); > > > - return NGX_INVALID_PID; > > > - } > > > - > > > if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { > > > ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > > > "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", > > > @@ -394,10 +376,6 @@ ngx_signal_handler(int signo, siginfo_t > > > ngx_sigalrm = 1; > > > break; > > > > > > - case SIGIO: > > > - ngx_sigio = 1; > > > - break; > > > - > > > case SIGCHLD: > > > ngx_reap = 1; > > > break; > > > @@ -433,8 +411,6 @@ ngx_signal_handler(int signo, siginfo_t > > > > > > case ngx_signal_value(NGX_RECONFIGURE_SIGNAL): > > > case ngx_signal_value(NGX_CHANGEBIN_SIGNAL): > > > - case SIGIO: > > > - action = ", ignoring"; > > > > Removing setting of an "action" variable looks like an error. 
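For readers following the reference to 8abb88374c6c: the sigio variable restored by the revert hunks above appears to act as a wakeup budget during shutdown, letting the master skip re-signalling workers for roughly worker_processes + 2 wakeups so that each exiting worker does not immediately trigger another round of signals (the commit log linked above is the authoritative explanation). What is left after removing it is the plain timed escalation, roughly modelled by the following simulation (hedged; the itimer/sigsuspend machinery is replaced by a loop counter, and none of this is nginx code):

#include <signal.h>
#include <stdio.h>

static void
signal_workers(int signo)
{
    printf("  -> signalling workers with %s\n",
           signo == SIGKILL ? "SIGKILL" : "SIGTERM");
}

int
main(void)
{
    unsigned  delay = 0;
    int       wakeup;

    for (wakeup = 1; wakeup <= 7; wakeup++) {

        /* in nginx a SIGALRM from an itimer marks the end of each wait;
         * here every iteration stands in for one such expired wait */
        if (delay) {
            delay *= 2;                 /* back off: 50, 100, 200, 400, 800, 1600 ms */
        } else {
            delay = 50;                 /* first escalation step */
        }

        printf("wakeup %d, delay now %u ms\n", wakeup, delay);

        if (delay > 1000) {
            signal_workers(SIGKILL);    /* workers ignored SIGTERM long enough */
        } else {
            signal_workers(SIGTERM);
        }
    }

    return 0;
}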
> > > > > break; > > > } > > > > > > diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c > > > --- a/src/os/unix/ngx_process_cycle.c > > > +++ b/src/os/unix/ngx_process_cycle.c > > > @@ -34,7 +34,6 @@ ngx_pid_t ngx_pid; > > > ngx_pid_t ngx_parent; > > > > > > sig_atomic_t ngx_reap; > > > -sig_atomic_t ngx_sigio; > > > sig_atomic_t ngx_sigalrm; > > > sig_atomic_t ngx_terminate; > > > sig_atomic_t ngx_quit; > > > @@ -88,7 +87,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy > > > sigemptyset(&set); > > > sigaddset(&set, SIGCHLD); > > > sigaddset(&set, SIGALRM); > > > - sigaddset(&set, SIGIO); > > > sigaddset(&set, SIGINT); > > > sigaddset(&set, ngx_signal_value(NGX_RECONFIGURE_SIGNAL)); > > > sigaddset(&set, ngx_signal_value(NGX_REOPEN_SIGNAL)); > > > diff --git a/src/os/unix/ngx_process_cycle.h b/src/os/unix/ngx_process_cycle.h > > > --- a/src/os/unix/ngx_process_cycle.h > > > +++ b/src/os/unix/ngx_process_cycle.h > > > @@ -47,7 +47,6 @@ extern ngx_uint_t ngx_daemonized; > > > extern ngx_uint_t ngx_exiting; > > > > > > extern sig_atomic_t ngx_reap; > > > -extern sig_atomic_t ngx_sigio; > > > extern sig_atomic_t ngx_sigalrm; > > > extern sig_atomic_t ngx_quit; > > > extern sig_atomic_t ngx_debug_quit; > > > > There's also a SIGIO related code in ngx_master_process_cycle(), > > added in 8abb88374c6c. > -- Ruslan Ermilov Assume stupidity not malice From igudger at google.com Wed Jun 27 20:40:26 2018 From: igudger at google.com (Ian Gudger) Date: Wed, 27 Jun 2018 13:40:26 -0700 Subject: [PATCH] Core: remove unused FIOASYNC. In-Reply-To: <20180627183139.GL62373@lo0.su> References: <20180627125705.GE62373@lo0.su> <20180627183139.GL62373@lo0.su> Message-ID: Sorry, I understand now. Here is a new patch which removes that too: # HG changeset patch # User Ian Gudger # Date 1529449008 25200 # Tue Jun 19 15:56:48 2018 -0700 # Node ID 8fd0b85081a1cb91fa4495258bb5f9d3a6ef5785 # Parent 118885f7a5774962f1145693d9c26a4c199ca6ea Core: remove FIOASYNC as the SIGIOs it generated were ignored. FIOASYNC and F_SETOWN cause a pid or pgid to receive signals when a file is ready for IO. When using master process mode, this was setup, but the SIGIO signals were ignored. This has been the case since use of FIOASYNC was first added in eaf1f651cf86. Logic ignore the SIGIOs in a case where they unintentionally did something was added in 8abb88374c6c. 
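An aside before the diff: one hedged way to double-check the claim that the generated SIGIOs were simply ignored is to look at a process's SIGIO disposition. The snippet below prints the disposition for its own process and is only a test aid, not part of the patch; for an already running master, the SigCgt bitmask in /proc/<pid>/status gives the same information on Linux.

#include <signal.h>
#include <stdio.h>

int
main(void)
{
    struct sigaction  sa;

    /* query the current disposition without changing it */
    if (sigaction(SIGIO, NULL, &sa) == -1) {
        perror("sigaction");
        return 1;
    }

    if (sa.sa_handler == SIG_DFL) {
        printf("SIGIO: default disposition (no handler installed)\n");
    } else if (sa.sa_handler == SIG_IGN) {
        printf("SIGIO: ignored\n");
    } else {
        printf("SIGIO: custom handler installed\n");
    }

    return 0;
}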
diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c --- a/src/os/unix/ngx_process.c +++ b/src/os/unix/ngx_process.c @@ -71,8 +71,6 @@ ngx_signal_t signals[] = { { SIGINT, "SIGINT", "", ngx_signal_handler }, - { SIGIO, "SIGIO", "", ngx_signal_handler }, - { SIGCHLD, "SIGCHLD", "", ngx_signal_handler }, { SIGSYS, "SIGSYS, SIG_IGN", "", NULL }, @@ -87,7 +85,6 @@ ngx_pid_t ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, char *name, ngx_int_t respawn) { - u_long on; ngx_pid_t pid; ngx_int_t s; @@ -142,21 +139,6 @@ ngx_spawn_process(ngx_cycle_t *cycle, ng return NGX_INVALID_PID; } - on = 1; - if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "ioctl(FIOASYNC) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; - } - - if (fcntl(ngx_processes[s].channel[0], F_SETOWN, ngx_pid) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "fcntl(F_SETOWN) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; - } - if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", @@ -394,10 +376,6 @@ ngx_signal_handler(int signo, siginfo_t ngx_sigalrm = 1; break; - case SIGIO: - ngx_sigio = 1; - break; - case SIGCHLD: ngx_reap = 1; break; @@ -433,8 +411,6 @@ ngx_signal_handler(int signo, siginfo_t case ngx_signal_value(NGX_RECONFIGURE_SIGNAL): case ngx_signal_value(NGX_CHANGEBIN_SIGNAL): - case SIGIO: - action = ", ignoring"; break; } diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c +++ b/src/os/unix/ngx_process_cycle.c @@ -34,7 +34,6 @@ ngx_pid_t ngx_pid; ngx_pid_t ngx_parent; sig_atomic_t ngx_reap; -sig_atomic_t ngx_sigio; sig_atomic_t ngx_sigalrm; sig_atomic_t ngx_terminate; sig_atomic_t ngx_quit; @@ -77,7 +76,7 @@ ngx_master_process_cycle(ngx_cycle_t *cy u_char *p; size_t size; ngx_int_t i; - ngx_uint_t n, sigio; + ngx_uint_t n; sigset_t set; struct itimerval itv; ngx_uint_t live; @@ -88,7 +87,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy sigemptyset(&set); sigaddset(&set, SIGCHLD); sigaddset(&set, SIGALRM); - sigaddset(&set, SIGIO); sigaddset(&set, SIGINT); sigaddset(&set, ngx_signal_value(NGX_RECONFIGURE_SIGNAL)); sigaddset(&set, ngx_signal_value(NGX_REOPEN_SIGNAL)); @@ -134,13 +132,11 @@ ngx_master_process_cycle(ngx_cycle_t *cy ngx_new_binary = 0; delay = 0; - sigio = 0; live = 1; for ( ;; ) { if (delay) { if (ngx_sigalrm) { - sigio = 0; delay *= 2; ngx_sigalrm = 0; } @@ -165,9 +161,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy ngx_time_update(); - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, - "wake up, sigio %i", sigio); - if (ngx_reap) { ngx_reap = 0; ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "reap children"); @@ -184,13 +177,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy delay = 50; } - if (sigio) { - sigio--; - continue; - } - - sigio = ccf->worker_processes + 2 /* cache processes */; - if (delay > 1000) { ngx_signal_worker_processes(cycle, SIGKILL); } else { diff --git a/src/os/unix/ngx_process_cycle.h b/src/os/unix/ngx_process_cycle.h --- a/src/os/unix/ngx_process_cycle.h +++ b/src/os/unix/ngx_process_cycle.h @@ -47,7 +47,6 @@ extern ngx_uint_t ngx_daemonized; extern ngx_uint_t ngx_exiting; extern sig_atomic_t ngx_reap; -extern sig_atomic_t ngx_sigio; 
extern sig_atomic_t ngx_sigalrm; extern sig_atomic_t ngx_quit; extern sig_atomic_t ngx_debug_quit; On Wed, Jun 27, 2018 at 11:31 AM Ruslan Ermilov wrote: > > On Wed, Jun 27, 2018 at 10:09:47AM -0700, Ian Gudger wrote: > > Actually, as far as I can tell, it never did anything other than cause > > signals to be delivered that were promptly ignored. It appears to have > > been added in eaf1f651cf86. > > I came to the same conclusion, but I'll double check with Igor > before proceeding with removing this. > > > There are two things in ngx_master_process_cycle() with names related > > to SIGIO. One is adding SIGIO to the set. That is removed in this > > patch. The other is a variable named sigio, added in 8abb88374c6c. > > > > This variable does not appear to have anything to do with SIGIO > > despite the name. > > It is indeed related, please see the explanation in the above > mentioned commit here: http://hg.nginx.org/nginx/rev/8abb88374c6c > > What I was trying to say is that your patch needs to revert this > change as well: > > diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c > --- a/src/os/unix/ngx_process_cycle.c > +++ b/src/os/unix/ngx_process_cycle.c > @@ -77,7 +77,7 @@ ngx_master_process_cycle(ngx_cycle_t *cy > u_char *p; > size_t size; > ngx_int_t i; > - ngx_uint_t n, sigio; > + ngx_uint_t n; > sigset_t set; > struct itimerval itv; > ngx_uint_t live; > @@ -134,13 +134,11 @@ ngx_master_process_cycle(ngx_cycle_t *cy > > ngx_new_binary = 0; > delay = 0; > - sigio = 0; > live = 1; > > for ( ;; ) { > if (delay) { > if (ngx_sigalrm) { > - sigio = 0; > delay *= 2; > ngx_sigalrm = 0; > } > @@ -165,8 +163,7 @@ ngx_master_process_cycle(ngx_cycle_t *cy > > ngx_time_update(); > > - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, > - "wake up, sigio %i", sigio); > + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "wake up"); > > if (ngx_reap) { > ngx_reap = 0; > @@ -184,13 +181,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy > delay = 50; > } > > - if (sigio) { > - sigio--; > - continue; > - } > - > - sigio = ccf->worker_processes + 2 /* cache processes */; > - > if (delay > 1000) { > ngx_signal_worker_processes(cycle, SIGKILL); > } else { > > > I ran the tests with this patch and they all passed. Receiving signals > > isn't free, so this patch may improve performance. > > Highly unlikely in this particular case. > > If you want, you can update your patch. > > > On Wed, Jun 27, 2018 at 5:57 AM Ruslan Ermilov wrote: > > > > > > On Mon, Jun 25, 2018 at 11:16:12AM -0700, Ian Gudger via nginx-devel wrote: > > > > # HG changeset patch > > > > # User Ian Gudger > > > > # Date 1529449008 25200 > > > > # Tue Jun 19 15:56:48 2018 -0700 > > > > # Node ID 9427538acbc50142afbe91a11a1d4f907a00d257 > > > > # Parent 118885f7a5774962f1145693d9c26a4c199ca6ea > > > > Core: remove unused FIOASYNC. > > > > > > > > FIOASYNC, F_SETOWN and SIGIO seem to no longer serve any function. > > > > > > Can you decode your "seem to no longer server any function", please? 
> > > > > > > diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c > > > > --- a/src/os/unix/ngx_process.c > > > > +++ b/src/os/unix/ngx_process.c > > > > @@ -71,8 +71,6 @@ ngx_signal_t signals[] = { > > > > > > > > { SIGINT, "SIGINT", "", ngx_signal_handler }, > > > > > > > > - { SIGIO, "SIGIO", "", ngx_signal_handler }, > > > > - > > > > { SIGCHLD, "SIGCHLD", "", ngx_signal_handler }, > > > > > > > > { SIGSYS, "SIGSYS, SIG_IGN", "", NULL }, > > > > @@ -87,7 +85,6 @@ ngx_pid_t > > > > ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, > > > > char *name, ngx_int_t respawn) > > > > { > > > > - u_long on; > > > > ngx_pid_t pid; > > > > ngx_int_t s; > > > > > > > > @@ -142,21 +139,6 @@ ngx_spawn_process(ngx_cycle_t *cycle, ng > > > > return NGX_INVALID_PID; > > > > } > > > > > > > > - on = 1; > > > > - if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { > > > > - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > > > > - "ioctl(FIOASYNC) failed while spawning > > > > \"%s\"", name); > > > > - ngx_close_channel(ngx_processes[s].channel, cycle->log); > > > > - return NGX_INVALID_PID; > > > > - } > > > > - > > > > - if (fcntl(ngx_processes[s].channel[0], F_SETOWN, ngx_pid) == -1) { > > > > - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > > > > - "fcntl(F_SETOWN) failed while spawning > > > > \"%s\"", name); > > > > - ngx_close_channel(ngx_processes[s].channel, cycle->log); > > > > - return NGX_INVALID_PID; > > > > - } > > > > - > > > > if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { > > > > ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, > > > > "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", > > > > @@ -394,10 +376,6 @@ ngx_signal_handler(int signo, siginfo_t > > > > ngx_sigalrm = 1; > > > > break; > > > > > > > > - case SIGIO: > > > > - ngx_sigio = 1; > > > > - break; > > > > - > > > > case SIGCHLD: > > > > ngx_reap = 1; > > > > break; > > > > @@ -433,8 +411,6 @@ ngx_signal_handler(int signo, siginfo_t > > > > > > > > case ngx_signal_value(NGX_RECONFIGURE_SIGNAL): > > > > case ngx_signal_value(NGX_CHANGEBIN_SIGNAL): > > > > - case SIGIO: > > > > - action = ", ignoring"; > > > > > > Removing setting of an "action" variable looks like an error. 
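One more piece of context for the sigaddset() hunks that keep reappearing in the quotes: the master blocks the signals it handles and then waits for them atomically, so a signal whose handler is removed from signals[] should also be dropped from this blocked set. A condensed, hedged sketch of that pattern (not the actual ngx_master_process_cycle() code; flag processing is elided):

#include <signal.h>

static void
master_wait_for_signals(void)
{
    sigset_t  set;

    sigemptyset(&set);
    sigaddset(&set, SIGCHLD);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGINT);
    /* SIGIO used to be added here as well; with its handler gone from
     * signals[], there is nothing left for the master to catch */

    /* block these so they are only delivered while waiting below */
    if (sigprocmask(SIG_BLOCK, &set, NULL) == -1) {
        return;
    }

    sigemptyset(&set);

    /* atomically unblock everything and sleep until some handler has
     * run; the real loop then inspects the flags (ngx_reap,
     * ngx_terminate, ...) that the handler set */
    sigsuspend(&set);
}

int
main(void)
{
    /* not called by default: with no handlers installed this sketch
     * would sleep until an uncaught signal terminates it */
    (void) master_wait_for_signals;
    return 0;
}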
> > > > > > > break; > > > > } > > > > > > > > diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c > > > > --- a/src/os/unix/ngx_process_cycle.c > > > > +++ b/src/os/unix/ngx_process_cycle.c > > > > @@ -34,7 +34,6 @@ ngx_pid_t ngx_pid; > > > > ngx_pid_t ngx_parent; > > > > > > > > sig_atomic_t ngx_reap; > > > > -sig_atomic_t ngx_sigio; > > > > sig_atomic_t ngx_sigalrm; > > > > sig_atomic_t ngx_terminate; > > > > sig_atomic_t ngx_quit; > > > > @@ -88,7 +87,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy > > > > sigemptyset(&set); > > > > sigaddset(&set, SIGCHLD); > > > > sigaddset(&set, SIGALRM); > > > > - sigaddset(&set, SIGIO); > > > > sigaddset(&set, SIGINT); > > > > sigaddset(&set, ngx_signal_value(NGX_RECONFIGURE_SIGNAL)); > > > > sigaddset(&set, ngx_signal_value(NGX_REOPEN_SIGNAL)); > > > > diff --git a/src/os/unix/ngx_process_cycle.h b/src/os/unix/ngx_process_cycle.h > > > > --- a/src/os/unix/ngx_process_cycle.h > > > > +++ b/src/os/unix/ngx_process_cycle.h > > > > @@ -47,7 +47,6 @@ extern ngx_uint_t ngx_daemonized; > > > > extern ngx_uint_t ngx_exiting; > > > > > > > > extern sig_atomic_t ngx_reap; > > > > -extern sig_atomic_t ngx_sigio; > > > > extern sig_atomic_t ngx_sigalrm; > > > > extern sig_atomic_t ngx_quit; > > > > extern sig_atomic_t ngx_debug_quit; > > > > > > There's also a SIGIO related code in ngx_master_process_cycle(), > > > added in 8abb88374c6c. > > > > -- > Ruslan Ermilov > Assume stupidity not malice From ru at nginx.com Thu Jun 28 09:27:40 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 28 Jun 2018 12:27:40 +0300 Subject: [PATCH] Core: remove unused FIOASYNC. In-Reply-To: References: <20180627125705.GE62373@lo0.su> <20180627183139.GL62373@lo0.su> Message-ID: <20180628092740.GA4858@lo0.su> On Wed, Jun 27, 2018 at 01:40:26PM -0700, Ian Gudger wrote: > Sorry, I understand now. > > Here is a new patch which removes that too: > > # HG changeset patch > # User Ian Gudger > # Date 1529449008 25200 > # Tue Jun 19 15:56:48 2018 -0700 > # Node ID 8fd0b85081a1cb91fa4495258bb5f9d3a6ef5785 > # Parent 118885f7a5774962f1145693d9c26a4c199ca6ea > Core: remove FIOASYNC as the SIGIOs it generated were ignored. > > FIOASYNC and F_SETOWN cause a pid or pgid to receive signals when a file is > ready for IO. When using master process mode, this was setup, but the SIGIO > signals were ignored. This has been the case since use of FIOASYNC was first > added in eaf1f651cf86. Logic ignore the SIGIOs in a case where they > unintentionally did something was added in 8abb88374c6c. > > diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c [...] > @@ -433,8 +411,6 @@ ngx_signal_handler(int signo, siginfo_t > > case ngx_signal_value(NGX_RECONFIGURE_SIGNAL): > case ngx_signal_value(NGX_CHANGEBIN_SIGNAL): > - case SIGIO: > - action = ", ignoring"; > break; > } On Wed, Jun 27, 2018 at 03:57:05PM +0300, Ruslan Ermilov wrote: > Removing setting of an "action" variable looks like an error. No need to resend the patch. From ru at nginx.com Thu Jun 28 11:47:22 2018 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 28 Jun 2018 14:47:22 +0300 Subject: [PATCH] Core: remove unused FIOASYNC. In-Reply-To: <20180628092740.GA4858@lo0.su> References: <20180627125705.GE62373@lo0.su> <20180627183139.GL62373@lo0.su> <20180628092740.GA4858@lo0.su> Message-ID: <20180628114722.GB4858@lo0.su> On Thu, Jun 28, 2018 at 12:27:40PM +0300, Ruslan Ermilov wrote: > On Wed, Jun 27, 2018 at 01:40:26PM -0700, Ian Gudger wrote: > > Sorry, I understand now. 
> >
> > Here is a new patch which removes that too:
> >
> > # HG changeset patch
> > # User Ian Gudger
> > # Date 1529449008 25200
> > #      Tue Jun 19 15:56:48 2018 -0700
> > # Node ID 8fd0b85081a1cb91fa4495258bb5f9d3a6ef5785
> > # Parent  118885f7a5774962f1145693d9c26a4c199ca6ea
> > Core: remove FIOASYNC as the SIGIOs it generated were ignored.
> >
> > FIOASYNC and F_SETOWN cause a pid or pgid to receive signals when a file is
> > ready for IO. When using master process mode, this was setup, but the SIGIO
> > signals were ignored. This has been the case since use of FIOASYNC was first
> > added in eaf1f651cf86. Logic ignore the SIGIOs in a case where they
> > unintentionally did something was added in 8abb88374c6c.
> >
> > diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c
> [...]
> > @@ -433,8 +411,6 @@ ngx_signal_handler(int signo, siginfo_t
> >
> >      case ngx_signal_value(NGX_RECONFIGURE_SIGNAL):
> >      case ngx_signal_value(NGX_CHANGEBIN_SIGNAL):
> > -    case SIGIO:
> > -        action = ", ignoring";
> >          break;
> >      }
>
> On Wed, Jun 27, 2018 at 03:57:05PM +0300, Ruslan Ermilov wrote:
> > Removing setting of an "action" variable looks like an error.
>
> No need to resend the patch.

Here's a slightly cleaned up patch and commit log:

# HG changeset patch
# User Ian Gudger
# Date 1529449008 25200
#      Tue Jun 19 15:56:48 2018 -0700
# Node ID 9d24aafa6626f2915176e80e5279704af6f6d575
# Parent  f2396ecf608bab9acc0545e3e53e36cc2cb9b2e6
Core: removed FIOASYNC as the SIGIOs it generated were ignored.

FIOASYNC and F_SETOWN cause a pid or pgid to receive signals when a file is
ready for I/O. When using master process mode, this was set up, but the SIGIO
signals were ignored. This has been the case since use of FIOASYNC was first
added in eaf1f651cf86. Logic to ignore the SIGIOs in a case where they
unintentionally did something was added in 8abb88374c6c.
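Before the diff itself, a note on why deleting one row of signals[] is the whole story on the registration side: nginx installs its handlers by walking that table once at startup in ngx_init_signals(), roughly as in the hedged sketch below. The struct fields follow the table shown in the diffs; everything else is simplified illustration rather than the real nginx code.

#include <signal.h>
#include <stddef.h>
#include <string.h>

typedef struct {
    int      signo;
    char    *signame;
    char    *name;
    void   (*handler)(int signo, siginfo_t *siginfo, void *ucontext);
} sketch_signal_t;

static void
sketch_handler(int signo, siginfo_t *siginfo, void *ucontext)
{
    (void) signo;
    (void) siginfo;
    (void) ucontext;
}

static sketch_signal_t  sketch_signals[] = {
    { SIGINT,  "SIGINT",  "", sketch_handler },
    { SIGCHLD, "SIGCHLD", "", sketch_handler },
    /* { SIGIO, "SIGIO", "", sketch_handler },   <- the row being removed */
    { 0, NULL, NULL, NULL }
};

static int
sketch_init_signals(void)
{
    sketch_signal_t   *sig;
    struct sigaction   sa;

    /* one sigaction() per table entry: no entry, no handler */
    for (sig = sketch_signals; sig->signo != 0; sig++) {
        memset(&sa, 0, sizeof(struct sigaction));

        if (sig->handler) {
            sa.sa_sigaction = sig->handler;
            sa.sa_flags = SA_SIGINFO;

        } else {
            sa.sa_handler = SIG_IGN;
        }

        sigemptyset(&sa.sa_mask);

        if (sigaction(sig->signo, &sa, NULL) == -1) {
            return -1;
        }
    }

    return 0;
}

int
main(void)
{
    return sketch_init_signals() == 0 ? 0 : 1;
}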
diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c --- a/src/os/unix/ngx_process.c +++ b/src/os/unix/ngx_process.c @@ -71,8 +71,6 @@ ngx_signal_t signals[] = { { SIGINT, "SIGINT", "", ngx_signal_handler }, - { SIGIO, "SIGIO", "", ngx_signal_handler }, - { SIGCHLD, "SIGCHLD", "", ngx_signal_handler }, { SIGSYS, "SIGSYS, SIG_IGN", "", NULL }, @@ -87,7 +85,6 @@ ngx_pid_t ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, char *name, ngx_int_t respawn) { - u_long on; ngx_pid_t pid; ngx_int_t s; @@ -142,21 +139,6 @@ ngx_spawn_process(ngx_cycle_t *cycle, ng return NGX_INVALID_PID; } - on = 1; - if (ioctl(ngx_processes[s].channel[0], FIOASYNC, &on) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "ioctl(FIOASYNC) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; - } - - if (fcntl(ngx_processes[s].channel[0], F_SETOWN, ngx_pid) == -1) { - ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "fcntl(F_SETOWN) failed while spawning \"%s\"", name); - ngx_close_channel(ngx_processes[s].channel, cycle->log); - return NGX_INVALID_PID; - } - if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", @@ -394,10 +376,6 @@ ngx_signal_handler(int signo, siginfo_t ngx_sigalrm = 1; break; - case SIGIO: - ngx_sigio = 1; - break; - case SIGCHLD: ngx_reap = 1; break; @@ -433,7 +411,6 @@ ngx_signal_handler(int signo, siginfo_t case ngx_signal_value(NGX_RECONFIGURE_SIGNAL): case ngx_signal_value(NGX_CHANGEBIN_SIGNAL): - case SIGIO: action = ", ignoring"; break; } diff --git a/src/os/unix/ngx_process_cycle.c b/src/os/unix/ngx_process_cycle.c --- a/src/os/unix/ngx_process_cycle.c +++ b/src/os/unix/ngx_process_cycle.c @@ -34,7 +34,6 @@ ngx_pid_t ngx_pid; ngx_pid_t ngx_parent; sig_atomic_t ngx_reap; -sig_atomic_t ngx_sigio; sig_atomic_t ngx_sigalrm; sig_atomic_t ngx_terminate; sig_atomic_t ngx_quit; @@ -77,7 +76,7 @@ ngx_master_process_cycle(ngx_cycle_t *cy u_char *p; size_t size; ngx_int_t i; - ngx_uint_t n, sigio; + ngx_uint_t n; sigset_t set; struct itimerval itv; ngx_uint_t live; @@ -88,7 +87,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy sigemptyset(&set); sigaddset(&set, SIGCHLD); sigaddset(&set, SIGALRM); - sigaddset(&set, SIGIO); sigaddset(&set, SIGINT); sigaddset(&set, ngx_signal_value(NGX_RECONFIGURE_SIGNAL)); sigaddset(&set, ngx_signal_value(NGX_REOPEN_SIGNAL)); @@ -134,13 +132,11 @@ ngx_master_process_cycle(ngx_cycle_t *cy ngx_new_binary = 0; delay = 0; - sigio = 0; live = 1; for ( ;; ) { if (delay) { if (ngx_sigalrm) { - sigio = 0; delay *= 2; ngx_sigalrm = 0; } @@ -165,8 +161,7 @@ ngx_master_process_cycle(ngx_cycle_t *cy ngx_time_update(); - ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, - "wake up, sigio %i", sigio); + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "wake up"); if (ngx_reap) { ngx_reap = 0; @@ -184,13 +179,6 @@ ngx_master_process_cycle(ngx_cycle_t *cy delay = 50; } - if (sigio) { - sigio--; - continue; - } - - sigio = ccf->worker_processes + 2 /* cache processes */; - if (delay > 1000) { ngx_signal_worker_processes(cycle, SIGKILL); } else { diff --git a/src/os/unix/ngx_process_cycle.h b/src/os/unix/ngx_process_cycle.h --- a/src/os/unix/ngx_process_cycle.h +++ b/src/os/unix/ngx_process_cycle.h @@ -47,7 +47,6 @@ extern ngx_uint_t ngx_daemonized; extern ngx_uint_t ngx_exiting; extern sig_atomic_t ngx_reap; -extern sig_atomic_t ngx_sigio; extern 
sig_atomic_t ngx_sigalrm; extern sig_atomic_t ngx_quit; extern sig_atomic_t ngx_debug_quit; From pluknet at nginx.com Thu Jun 28 11:55:17 2018 From: pluknet at nginx.com (Sergey Kandaurov) Date: Thu, 28 Jun 2018 11:55:17 +0000 Subject: [njs] Fixed building unit tests after nxt_length() expansion. Message-ID: details: http://hg.nginx.org/njs/rev/4a02df907ed9 branches: changeset: 541:4a02df907ed9 user: Sergey Kandaurov date: Thu Jun 28 14:37:25 2018 +0300 description: Fixed building unit tests after nxt_length() expansion. diffstat: nxt/test/random_unit_test.c | 1 + nxt/test/utf8_unit_test.c | 1 + 2 files changed, 2 insertions(+), 0 deletions(-) diffs (22 lines): diff -r 255c79369f59 -r 4a02df907ed9 nxt/test/random_unit_test.c --- a/nxt/test/random_unit_test.c Tue Jun 26 18:25:22 2018 +0300 +++ b/nxt/test/random_unit_test.c Thu Jun 28 14:37:25 2018 +0300 @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff -r 255c79369f59 -r 4a02df907ed9 nxt/test/utf8_unit_test.c --- a/nxt/test/utf8_unit_test.c Tue Jun 26 18:25:22 2018 +0300 +++ b/nxt/test/utf8_unit_test.c Thu Jun 28 14:37:25 2018 +0300 @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include From xeioex at nginx.com Thu Jun 28 14:04:41 2018 From: xeioex at nginx.com (Dmitry Volyntsev) Date: Thu, 28 Jun 2018 14:04:41 +0000 Subject: [njs] Adding lib_test to generic test target. Message-ID: details: http://hg.nginx.org/njs/rev/0307e2740df7 branches: changeset: 542:0307e2740df7 user: Dmitry Volyntsev date: Thu Jun 28 17:04:18 2018 +0300 description: Adding lib_test to generic test target. diffstat: Makefile | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diffs (13 lines): diff -r 4a02df907ed9 -r 0307e2740df7 Makefile --- a/Makefile Thu Jun 28 14:37:25 2018 +0300 +++ b/Makefile Thu Jun 28 17:04:18 2018 +0300 @@ -99,7 +99,8 @@ libnjs: $(NXT_BUILDDIR)/libnjs.a njs_interactive_test: njs_expect_test $(NXT_BUILDDIR)/njs_interactive_test $(NXT_BUILDDIR)/njs_interactive_test -test: njs_interactive_test \ +test: lib_test \ + njs_interactive_test \ $(NXT_BUILDDIR)/njs_unit_test \ $(NXT_BUILDDIR)/njs_benchmark \ From vbart at nginx.com Sat Jun 30 17:51:57 2018 From: vbart at nginx.com (Valentin Bartenev) Date: Sat, 30 Jun 2018 17:51:57 +0000 Subject: [njs] Fixed error handling of setting non-numeric Array.length. Message-ID: details: http://hg.nginx.org/njs/rev/a361553ce219 branches: changeset: 543:a361553ce219 user: Valentin Bartenev date: Sat Jun 30 20:39:22 2018 +0300 description: Fixed error handling of setting non-numeric Array.length. diffstat: njs/njs_array.c | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diffs (15 lines): diff -r 0307e2740df7 -r a361553ce219 njs/njs_array.c --- a/njs/njs_array.c Thu Jun 28 17:04:18 2018 +0300 +++ b/njs/njs_array.c Sat Jun 30 20:39:22 2018 +0300 @@ -375,6 +375,11 @@ njs_array_prototype_length(njs_vm_t *vm, array = value->data.u.array; if (setval != NULL) { + if (!njs_is_number(setval)) { + njs_range_error(vm, "Invalid array length"); + return NJS_ERROR; + } + num = setval->data.u.number; length = (uint32_t) num;