Hello,
I have written a module to implement sFlow in nginx (nginx-sflow-module.googlecode.com). I'm simulating a 1-second timer-tick by assuming that the request handler will be called at least once per second. That's probably a safe assumption for any server that would care about sFlow monitoring, but I expect there's a better way...
I tried asking for a timer callback like this:
ngx_event_t *ev = ngx_pcalloc(pool, sizeof(ngx_event_t));
ev->handler = ngx_http_sflow_tick_event_handler;
ngx_add_timer(ev, 1000);
but (like most russian girls) the event never called me back. It looks like I might have to hang this on a file-descriptor somehow, but that's where I'm getting lost. Any pointers would be most appreciated.
Neil
openbsd has a setsockopt option called SO_BINDANY that allows a
process to bind to any ip address, even if it is not local to the
system. the patch below uses it to allow nginx to connect to a
backend server using the ip of the client making the request.
my main goal here is to allow the backend server to know the ip of
the client actually making the request without having to look at
extra HTTP headers
i thought id throw this out there to get some help since this is
my first attempt at tweaking nginx. there are a few issues with
this implementation:
1. it is completely specific to openbsd.
2. it needs root privileges to use the SO_BINDANY sockopt.
3. im not sure if connections to backends are cached. if so then
it is probable that a different client will reuse a previous clients
proxy connection, so it will appear that the same client made both
requests to the backend.
to use this you just configure nginx to run as root and add
"proxy_transparent on" to the sections you want this feature enabled
on. you will need to add appropriate "pass out proto tcp divert-reply"
rules to pf for the SO_BINDANY sockopt to work too.
if anyone has some tips on how to handle problems 2 and 3 i would
be grateful.
cheers,
dlg
--- src/event/ngx_event_connect.c.orig Thu Nov 26 04:03:59 2009
+++ src/event/ngx_event_connect.c Thu Oct 28 23:22:37 2010
@@ -11,7 +11,7 @@
ngx_int_t
-ngx_event_connect_peer(ngx_peer_connection_t *pc)
+ngx_event_connect_peer(ngx_peer_connection_t *pc, ngx_connection_t *cc)
{
int rc;
ngx_int_t event;
@@ -20,6 +20,7 @@ ngx_event_connect_peer(ngx_peer_connection_t *pc)
ngx_socket_t s;
ngx_event_t *rev, *wev;
ngx_connection_t *c;
+ int bindany;
rc = pc->get(pc, pc->data);
if (rc != NGX_OK) {
@@ -46,6 +47,40 @@ ngx_event_connect_peer(ngx_peer_connection_t *pc)
}
return NGX_ERROR;
+ }
+
+ if (cc != NULL) {
+ bindany = 1;
+ if (setsockopt(s, SOL_SOCKET, SO_BINDANY,
+ &bindany, sizeof(bindany)) == -1)
+ {
+ ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
+ "setsockopt(SO_BINDANY) failed");
+
+ ngx_free_connection(c);
+
+ if (ngx_close_socket(s) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
+ ngx_close_socket_n " failed");
+ }
+
+ return NGX_ERROR;
+ }
+
+ if (bind(s, cc->sockaddr, cc->socklen) == -1)
+ {
+ ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
+ "bind() failed");
+
+ ngx_free_connection(c);
+
+ if (ngx_close_socket(s) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
+ ngx_close_socket_n " failed");
+ }
+
+ return NGX_ERROR;
+ }
}
if (pc->rcvbuf) {
--- src/event/ngx_event_connect.h.orig Tue Nov 3 01:24:02 2009
+++ src/event/ngx_event_connect.h Thu Oct 28 23:22:37 2010
@@ -68,7 +68,8 @@ struct ngx_peer_connection_s {
};
-ngx_int_t ngx_event_connect_peer(ngx_peer_connection_t *pc);
+ngx_int_t ngx_event_connect_peer(ngx_peer_connection_t *pc,
+ ngx_connection_t *cc);
ngx_int_t ngx_event_get_peer(ngx_peer_connection_t *pc, void *data);
--- src/http/modules/ngx_http_proxy_module.c.orig Mon May 24 21:01:05 2010
+++ src/http/modules/ngx_http_proxy_module.c Thu Oct 28 23:42:10 2010
@@ -71,6 +71,7 @@ typedef struct {
ngx_http_proxy_vars_t vars;
ngx_flag_t redirect;
+ ngx_flag_t transparent;
ngx_uint_t headers_hash_max_size;
ngx_uint_t headers_hash_bucket_size;
@@ -196,6 +197,13 @@ static ngx_command_t ngx_http_proxy_commands[] = {
0,
NULL },
+ { ngx_string("proxy_transparent"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_flag_slot,
+ NGX_HTTP_LOC_CONF_OFFSET,
+ offsetof(ngx_http_proxy_loc_conf_t, transparent),
+ NULL },
+
{ ngx_string("proxy_store"),
NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
ngx_http_proxy_store,
@@ -626,6 +634,7 @@ ngx_http_proxy_handler(ngx_http_request_t *r)
u->abort_request = ngx_http_proxy_abort_request;
u->finalize_request = ngx_http_proxy_finalize_request;
r->state = 0;
+ r->transparent = (plcf->transparent == 1);
if (plcf->redirects) {
u->rewrite_redirect = ngx_http_proxy_rewrite_redirect;
@@ -1940,6 +1949,7 @@ ngx_http_proxy_create_loc_conf(ngx_conf_t *cf)
conf->upstream.cyclic_temp_file = 0;
conf->redirect = NGX_CONF_UNSET;
+ conf->transparent = NGX_CONF_UNSET;
conf->upstream.change_buffering = 1;
conf->headers_hash_max_size = NGX_CONF_UNSET_UINT;
@@ -2214,6 +2224,8 @@ ngx_http_proxy_merge_loc_conf(ngx_conf_t *cf, void *pa
}
}
}
+
+ ngx_conf_merge_value(conf->transparent, prev->transparent, 0);
/* STUB */
if (prev->proxy_lengths) {
--- src/http/ngx_http_request.h.orig Mon May 24 22:35:10 2010
+++ src/http/ngx_http_request.h Thu Oct 28 23:22:37 2010
@@ -511,6 +511,8 @@ struct ngx_http_request_s {
unsigned stat_writing:1;
#endif
+ unsigned transparent:1;
+
/* used to parse HTTP headers */
ngx_uint_t state;
--- src/http/ngx_http_upstream.c.orig Mon May 24 22:35:10 2010
+++ src/http/ngx_http_upstream.c Thu Oct 28 23:22:37 2010
@@ -1066,7 +1066,8 @@ ngx_http_upstream_connect(ngx_http_request_t *r, ngx_h
u->state->response_sec = tp->sec;
u->state->response_msec = tp->msec;
- rc = ngx_event_connect_peer(&u->peer);
+ rc = ngx_event_connect_peer(&u->peer, r->transparent ?
+ r->connection : NULL);
ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
"http upstream connect: %i", rc);
--- src/mail/ngx_mail_auth_http_module.c.orig Fri May 14 19:56:37 2010
+++ src/mail/ngx_mail_auth_http_module.c Thu Oct 28 23:22:37 2010
@@ -191,7 +191,7 @@ ngx_mail_auth_http_init(ngx_mail_session_t *s)
ctx->peer.log = s->connection->log;
ctx->peer.log_error = NGX_ERROR_ERR;
- rc = ngx_event_connect_peer(&ctx->peer);
+ rc = ngx_event_connect_peer(&ctx->peer, NULL);
if (rc == NGX_ERROR || rc == NGX_BUSY || rc == NGX_DECLINED) {
if (ctx->peer.connection) {
--- src/mail/ngx_mail_proxy_module.c.orig Thu Oct 28 23:32:15 2010
+++ src/mail/ngx_mail_proxy_module.c Thu Oct 28 23:30:53 2010
@@ -147,7 +147,7 @@ ngx_mail_proxy_init(ngx_mail_session_t *s, ngx_addr_t
p->upstream.log = s->connection->log;
p->upstream.log_error = NGX_ERROR_ERR;
- rc = ngx_event_connect_peer(&p->upstream);
+ rc = ngx_event_connect_peer(&p->upstream, NULL);
if (rc == NGX_ERROR || rc == NGX_BUSY || rc == NGX_DECLINED) {
ngx_mail_proxy_internal_server_error(s);
On November 25, 2010 04:42AM Weibin Yao wrote:
> Hi everyone,
>
> I think the feature of OCSP stapling[1] is very useful for the
> browser blocked by the OCSP request. And the feature has been supported since
> OpenSSL 0.9.8g. Apache's mod_ssl has also added this patch in the
> development branch[2].
>
> Does anyone have the plan to develop this feature?
Hi. The CAs and Browsers represented in the CA/Browser Forum
(http://cabforum.org/forum.html) are growing increasingly interested in
encouraging wider adoption of OCSP Stapling.
Since nobody else has replied to this thread, I presume that OCSP Stapling is
not currently a priority for the core nginx developers. So, I've started
having a go at writing a patch. I'm basing it heavily on Dr Steve Henson's
OCSP Stapling code that was first included in Apache httpd 2.3.3 [3]. I'd like
to ask a few questions before I proceed any further:
1. If I am able to complete my patch, are you likely to review/commit it?
Or is OCSP Stapling the sort of feature that you'd prefer to only let a core
nginx developer work on?
2. I was under the impression that nginx started life as a fork of Apache
httpd, but I don't see any messages along the lines of "This product includes
software developed by the Apache Group..." in the source code. Is nginx 100%
*not* a derivative work of Apache httpd?
3. Steve Henson's code is presumably licensed under ASL 2.0 [4], which
presumably means that my patch would be classed as a "Derivative Work" subject
to various conditions (see the "4. Redistribution" section in ASL 2.0). Would
this prevent you from accepting it?
(Since ASL 2.0 says "nothing herein shall supersede or modify the terms of any
separate license agreement you may have executed with Licensor regarding such
Contributions", perhaps I should ask Steve Henson if he would be willing to
contribute the same code to nginx under a different licence).
Thanks for your help.
[3]. http://svn.apache.org/viewvc?view=revision&revision=829619
[4]. http://www.apache.org/licenses/LICENSE-2.0
> Thanks.
>
> [1]. http://en.wikipedia.org/wiki/OCSP_Stapling
> [2]. https://issues.apache.org/bugzilla/show_bug.cgi?id=43822
>
> --
> Weibin Yao
Rob Stradling
Senior Research & Development Scientist
COMODO - Creating Trust Online
Hi Igor,
attached patch fixes ./configure script for less error-forgiving compilers
(like LLVM/clang) when they are used with -Werror option.
auto/types/sizeof:
- printf() is defined in missing <stdio.h> header,
- sizeof() returns size_t type, which has different signedness on different
operating systems (hence %z).
auto/unix:
- setproctitle() is defined in <stdlib.h> on both: NetBSD and OpenBSD.
Best regards,
Piotr Sikora < piotr.sikora(a)frickle.com >
Hi all, I would like to contribute a patch to provide the cached file
age as a variable $proxy_cache_age , which can then be used together
with add_header Age $proxy_cache_age.
The age calculation is not RFC2616 compliant (13.2.3) as it doesn't take
into consideration the cached origin Age header and Date. Is it feasible
to extract this from the cached headers?
--
regards,
wK
Hi all,
I was looking at the code in ngx_http_upstream_ip_hash_module.c
And I'm not sure where the hashing algorithm for IPs is coming from,
especially those lines :
iphp->hash = 89;
hash = (hash * 113 + iphp->addr[i]) % 6271;
Just wondering if those constants are arbitrary chosen, or if there is
something there to guarantee a good distribution ?
If you have some links explaining this algorithm, it would be greatly
appreciated!
Also, how would you get a good distribution on IPv6. Maybe it would make
sense to use murmur ?
Thank you,
Matthieu.
Hello!
Attached patch (against 1.0.5) introduces upstream keepalive
support for memcached, fastcgi and http. Note the patch is
experimental and may have problems (though it passes basic smoke
tests here). Testing is appreciated.
Major changes include:
1. Content length now parsed into u->headers_in.content_length_n
instead of directly to r->headers_out.content_length_n.
Note this may break 3rd party protocol modules.
2. Use off_t for r->upstream->length.
Note this may break 3rd party protocol modules.
3. In buffered mode u->pipe->length was introduced to indicate the minimal amount
of data which must be passed to the input filter. This allows us to not rely on
connection close to indicate response end while still efficiently using
buffers in most cases. Defaults to -1 (that is, wait for
connection close) if not set by protocol-specific handlers.
4. In buffered mode u->input_filter_init() now called if it's set.
This is used to set initial value of u->pipe->length (see above).
5. Proxy module now able to talk HTTP/1.1, in particular it understands
chunked encoding in responses. Requests are sent using HTTP/1.1
if proxy_http_version directive is set to 1.1.
6. Introduced u->keepalive flag to indicate connection to upstream is in
correct state and may be kept alive. Memcached, fastcgi and proxy
modules are updated to set it.
Patch is expected to be used with upstream keepalive module[1] compiled with
NGX_UPSTREAM_KEEPALIVE_PATCHED defined, i.e use something like this:
./configure --with-cc-opt="-D NGX_UPSTREAM_KEEPALIVE_PATCHED" \
--add-module=/path/to/ngx_http_upstream_keepalive
And use something like this in config:
upstream memcached_backend {
server 127.0.0.1:11211;
keepalive 1;
}
upstream http_backend {
server 127.0.0.1:8080;
keepalive 1;
}
upstream fastcgi_backend {
server 127.0.0.1:9000;
keepalive 1;
}
server {
...
location /memcached/ {
set $memcached_key $uri;
memcached_pass memcached_backend;
}
location /fastcgi/ {
fastcgi_pass fastcgi_backend;
...
}
location /http/ {
proxy_pass http://http_backend;
proxy_http_version 1.1;
proxy_set_header Connection "";
...
}
}
[1] http://mdounin.ru/hg/ngx_http_upstream_keepalive/
Maxim Dounin
src/http/modules/ngx_http_proxy_module.c:645 in ngx_http_proxy_eval
For some service the proxy url is specified by user from arguments. after
ngx_http_script_run, proxy.len may be smaller than "http://" without the
terminating '\0'. For example, {len = 4, data = "http://abcdefg"}. It passes
the
schema check, but url.url.len = proxy.len - 7 becomes a very big number.
the process will core during later memcpy.
simple fix:
diff -ruN nginx-1.0.5/src/http/modules/ngx_http_proxy_module.c
nginx-1.0.5_zls/src/http/modules/ngx_http_proxy_module.c
--- nginx-1.0.5/src/http/modules/ngx_http_proxy_module.c 2011-02-17
19:54:35.000000000 +0800
+++ nginx-1.0.5_zls/src/http/modules/ngx_http_proxy_module.c 2011-07-28
09:57:06.568333685 +0800
@@ -642,14 +642,14 @@
return NGX_ERROR;
}
- if (ngx_strncasecmp(proxy.data, (u_char *) "http://", 7) == 0) {
+ if (proxy.len > 7 && ngx_strncasecmp(proxy.data, (u_char *) "http://",
7) == 0) {
add = 7;
port = 80;
#if (NGX_HTTP_SSL)
- } else if (ngx_strncasecmp(proxy.data, (u_char *) "https://", 8) == 0)
{
+ } else if (proxy.len > 8 && ngx_strncasecmp(proxy.data, (u_char *)
"https://", 8) == 0) {
add = 8;
port = 443;