OpenBSD has a setsockopt(2) option called SO_BINDANY that allows a
process to bind to any IP address, even one that is not local to the
system. The patch below uses it to let nginx connect to a backend
server using the IP of the client making the request.
My main goal here is to let the backend server know the IP of the
client actually making the request without having to look at extra
HTTP headers.
I thought I'd throw this out there to get some help, since this is
my first attempt at tweaking nginx. There are a few issues with
this implementation:
1. It is completely specific to OpenBSD.
2. It needs root privileges to use the SO_BINDANY sockopt.
3. I'm not sure whether connections to backends are cached. If they
are, it is probable that a different client will reuse a previous
client's proxy connection, so it will appear that the same client made
both requests to the backend.
To use this you just configure nginx to run as root and add
"proxy_transparent on" to the sections you want this feature enabled
in. You will also need appropriate "pass out proto tcp divert-reply"
rules in pf for the SO_BINDANY sockopt to work, as sketched below.
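For example, a minimal setup might look like this (a sketch only: the
backend address and interface are made up, and you should check the
exact pf syntax for your release):

    # nginx.conf -- nginx must run as root for SO_BINDANY
    location / {
        proxy_pass http://10.0.0.2:8080;   # hypothetical backend
        proxy_transparent on;              # new directive from this patch
    }

    # pf.conf -- let the spoofed connections out, divert replies back
    pass out on em0 proto tcp to 10.0.0.2 port 8080 divert-reply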
If anyone has tips on how to handle problems 2 and 3, I would be
grateful.
cheers,
dlg
--- src/event/ngx_event_connect.c.orig Thu Nov 26 04:03:59 2009
+++ src/event/ngx_event_connect.c Thu Oct 28 23:22:37 2010
@@ -11,7 +11,7 @@
ngx_int_t
-ngx_event_connect_peer(ngx_peer_connection_t *pc)
+ngx_event_connect_peer(ngx_peer_connection_t *pc, ngx_connection_t *cc)
{
int rc;
ngx_int_t event;
@@ -20,6 +20,7 @@ ngx_event_connect_peer(ngx_peer_connection_t *pc)
ngx_socket_t s;
ngx_event_t *rev, *wev;
ngx_connection_t *c;
+ int bindany;
rc = pc->get(pc, pc->data);
if (rc != NGX_OK) {
@@ -46,6 +47,40 @@ ngx_event_connect_peer(ngx_peer_connection_t *pc)
}
return NGX_ERROR;
+ }
+
+ if (cc != NULL) {
+ bindany = 1;
+ if (setsockopt(s, SOL_SOCKET, SO_BINDANY,
+ &bindany, sizeof(bindany)) == -1)
+ {
+ ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
+ "setsockopt(SO_BINDANY) failed");
+
+ ngx_free_connection(c);
+
+ if (ngx_close_socket(s) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
+ ngx_close_socket_n " failed");
+ }
+
+ return NGX_ERROR;
+ }
+
+ if (bind(s, cc->sockaddr, cc->socklen) == -1)
+ {
+ ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
+ "bind() failed");
+
+ ngx_free_connection(c);
+
+ if (ngx_close_socket(s) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
+ ngx_close_socket_n " failed");
+ }
+
+ return NGX_ERROR;
+ }
}
if (pc->rcvbuf) {
--- src/event/ngx_event_connect.h.orig Tue Nov 3 01:24:02 2009
+++ src/event/ngx_event_connect.h Thu Oct 28 23:22:37 2010
@@ -68,7 +68,8 @@ struct ngx_peer_connection_s {
};
-ngx_int_t ngx_event_connect_peer(ngx_peer_connection_t *pc);
+ngx_int_t ngx_event_connect_peer(ngx_peer_connection_t *pc,
+ ngx_connection_t *cc);
ngx_int_t ngx_event_get_peer(ngx_peer_connection_t *pc, void *data);
--- src/http/modules/ngx_http_proxy_module.c.orig Mon May 24 21:01:05 2010
+++ src/http/modules/ngx_http_proxy_module.c Thu Oct 28 23:42:10 2010
@@ -71,6 +71,7 @@ typedef struct {
ngx_http_proxy_vars_t vars;
ngx_flag_t redirect;
+ ngx_flag_t transparent;
ngx_uint_t headers_hash_max_size;
ngx_uint_t headers_hash_bucket_size;
@@ -196,6 +197,13 @@ static ngx_command_t ngx_http_proxy_commands[] = {
0,
NULL },
+ { ngx_string("proxy_transparent"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_flag_slot,
+ NGX_HTTP_LOC_CONF_OFFSET,
+ offsetof(ngx_http_proxy_loc_conf_t, transparent),
+ NULL },
+
{ ngx_string("proxy_store"),
NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
ngx_http_proxy_store,
@@ -626,6 +634,7 @@ ngx_http_proxy_handler(ngx_http_request_t *r)
u->abort_request = ngx_http_proxy_abort_request;
u->finalize_request = ngx_http_proxy_finalize_request;
r->state = 0;
+ r->transparent = (plcf->transparent == 1);
if (plcf->redirects) {
u->rewrite_redirect = ngx_http_proxy_rewrite_redirect;
@@ -1940,6 +1949,7 @@ ngx_http_proxy_create_loc_conf(ngx_conf_t *cf)
conf->upstream.cyclic_temp_file = 0;
conf->redirect = NGX_CONF_UNSET;
+ conf->transparent = NGX_CONF_UNSET;
conf->upstream.change_buffering = 1;
conf->headers_hash_max_size = NGX_CONF_UNSET_UINT;
@@ -2214,6 +2224,8 @@ ngx_http_proxy_merge_loc_conf(ngx_conf_t *cf, void *pa
}
}
}
+
+ ngx_conf_merge_value(conf->transparent, prev->transparent, 0);
/* STUB */
if (prev->proxy_lengths) {
--- src/http/ngx_http_request.h.orig Mon May 24 22:35:10 2010
+++ src/http/ngx_http_request.h Thu Oct 28 23:22:37 2010
@@ -511,6 +511,8 @@ struct ngx_http_request_s {
unsigned stat_writing:1;
#endif
+ unsigned transparent:1;
+
/* used to parse HTTP headers */
ngx_uint_t state;
--- src/http/ngx_http_upstream.c.orig Mon May 24 22:35:10 2010
+++ src/http/ngx_http_upstream.c Thu Oct 28 23:22:37 2010
@@ -1066,7 +1066,8 @@ ngx_http_upstream_connect(ngx_http_request_t *r, ngx_h
u->state->response_sec = tp->sec;
u->state->response_msec = tp->msec;
- rc = ngx_event_connect_peer(&u->peer);
+ rc = ngx_event_connect_peer(&u->peer, r->transparent ?
+ r->connection : NULL);
ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
"http upstream connect: %i", rc);
--- src/mail/ngx_mail_auth_http_module.c.orig Fri May 14 19:56:37 2010
+++ src/mail/ngx_mail_auth_http_module.c Thu Oct 28 23:22:37 2010
@@ -191,7 +191,7 @@ ngx_mail_auth_http_init(ngx_mail_session_t *s)
ctx->peer.log = s->connection->log;
ctx->peer.log_error = NGX_ERROR_ERR;
- rc = ngx_event_connect_peer(&ctx->peer);
+ rc = ngx_event_connect_peer(&ctx->peer, NULL);
if (rc == NGX_ERROR || rc == NGX_BUSY || rc == NGX_DECLINED) {
if (ctx->peer.connection) {
--- src/mail/ngx_mail_proxy_module.c.orig Thu Oct 28 23:32:15 2010
+++ src/mail/ngx_mail_proxy_module.c Thu Oct 28 23:30:53 2010
@@ -147,7 +147,7 @@ ngx_mail_proxy_init(ngx_mail_session_t *s, ngx_addr_t
p->upstream.log = s->connection->log;
p->upstream.log_error = NGX_ERROR_ERR;
- rc = ngx_event_connect_peer(&p->upstream);
+ rc = ngx_event_connect_peer(&p->upstream, NULL);
if (rc == NGX_ERROR || rc == NGX_BUSY || rc == NGX_DECLINED) {
ngx_mail_proxy_internal_server_error(s);
Hi Igor,
The attached patch fixes the ./configure script for less error-forgiving
compilers (like LLVM/clang) when they are used with the -Werror option.
auto/types/sizeof:
- printf() is declared in <stdio.h>, which was missing,
- sizeof() returns size_t, whose underlying type differs across
operating systems, hence the %z length modifier (see the sketch below).
auto/unix:
- setproctitle() is declared in <stdlib.h> on both NetBSD and OpenBSD.
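For reference, the kind of test program ./configure ends up compiling
would then look roughly like this (a sketch, not the exact
auto/types/sizeof template):

    #include <stdio.h>
    #include <sys/types.h>

    int main(void) {
        /* size_t's underlying type varies, so use the %z modifier */
        printf("%zu", sizeof(off_t));
        return 0;
    }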
Best regards,
Piotr Sikora < piotr.sikora(a)frickle.com >
Hi all, I would like to contribute a patch that provides the cached
file age as a variable, $proxy_cache_age, which can then be used
together with "add_header Age $proxy_cache_age;" (example below).
The age calculation is not RFC 2616 compliant (section 13.2.3), as it
doesn't take the cached origin Age header and Date into consideration.
Is it feasible to extract these from the cached headers?
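Usage would look something like this (hypothetical configuration; the
upstream and cache zone names are made up):

    location / {
        proxy_pass http://backend;          # assumed upstream
        proxy_cache my_zone;                # assumed cache zone
        add_header Age $proxy_cache_age;    # expose the cached file age
    }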
--
regards,
wK
Hi, folks!
Here's the first draft of our nginx module development plan for the
next year or so:
http://agentzh.org/misc/nginx/roadmap.html
We'll keep this roadmap document up to date.
Comments and suggestions will be highly appreciated as usual :)
Cheers,
-agentzh
Hi,
I plan to use the NGINX HTTP server on Linux, where it uses the epoll
event loop.
I want to develop an NGINX proxy module that uses file descriptors of
my own (not TCP sockets) to forward requests to a back-end service, and
have NGINX add those FDs to its epoll FD set to handle the responses.
What are the steps to achieve this goal? (I suppose it is much more
work than a simple proxy module, maybe involving upstream and event
module development?)
Should I give up hacking the NGINX event loop and instead manage my
file descriptors with my own event loop in a back-end process?
(Actually, this is the way I am going for now.)
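What I have understood so far is that a raw fd can be wrapped in a
connection object, roughly like this (a sketch only: my_fd and
my_read_handler are placeholder names, error handling is trimmed, and I
am not sure this is the right approach):

    #include <ngx_config.h>
    #include <ngx_core.h>
    #include <ngx_event.h>

    static void
    my_read_handler(ngx_event_t *rev)
    {
        ngx_connection_t  *c = rev->data;

        /* read the back-end response from c->fd here */
    }

    static ngx_int_t
    my_watch_fd(ngx_socket_t my_fd, ngx_log_t *log)
    {
        ngx_connection_t  *c;

        c = ngx_get_connection(my_fd, log);   /* wrap the raw fd */
        if (c == NULL) {
            return NGX_ERROR;
        }

        c->read->handler = my_read_handler;
        c->read->log = log;

        /* register with the configured event mechanism (epoll here) */
        if (ngx_add_event(c->read, NGX_READ_EVENT, 0) != NGX_OK) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }

        return NGX_OK;
    }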
Philippe Razavet
Hi there,
I have authentication code that gets IBM Lotus Domino users
authenticated via Domino LDAP and then uses Nginx as a mail proxy, and
I would like to contribute it to the community.
According to an article posted on Nginx's forum [1], contributed code
should be posted to this mailing list.
I see this is not the usual way to contribute code to a FOSS project,
so I would like to have confirmation first.
Can anyone tell me how I can contribute my code or, at least, where
I can read the instructions?
Thank you very much.
[1] http://forum.nginx.org/read.php?2,175271
Kind regards,
Tuan
Hi all,
I just found some weird behavior in nginx, which looks like a race
condition.
I'm using an unmodified nginx 0.9.6 with no options on the ./configure
line, and the config is pretty much the default except for these 2
directives in the http section:
client_max_body_size 20M;
client_body_in_single_buffer on;
I have the "location ~ \.php$" enabled and a simple script called
print_post.php which contains :
<?php print_r($_POST); ?>
Let's curl it to see if this works:
curl 'http://localhost/print_post.php' -F 'file=@/path/to/15M_file' -F
'foo=bar'
It shows what you would expect:
Array
(
[foo] => bar
)
Now let's use HTTP 1.0 (with the -0 option for curl):
curl 'http://localhost/print_post.php' -F 'file=@/path/to/15M_file' -F
'foo=bar' -0
Sometimes I get the expected result, and other times just an empty
array:
Array
(
)
Strangely, this doesn't seem to occur with HTTP 1.1, and it seems to go
away if I don't use "client_body_in_single_buffer on;".
Can anybody reproduce this?
Thank you,
Matthieu.
Hi,
Yesterday, error messages similar to this one started filling up the
logs:
Mar 25 01:58:22 192.168.1.136 [nginx_error] 3m2: 2011/03/25 01:58:21 [crit]
26278#0: ngx_slab_alloc() failed: no memory in limit_zone "limit_inbound"
And, more importantly, nginx started returning "503 Service Temporarily
Unavailable" for every request.
The solution was simple: increase the memory assigned to the limit_zone
rbtree.
Instead of returning NGX_HTTP_SERVICE_UNAVAILABLE when the allocation
fails, I was thinking that maybe NGX_DECLINED should be returned, so
that we keep accepting requests when there is a problem with
ngx_http_limit_zone_module, rather than blocking by default.
I attached a diff for the proposed modification below.
Thank you,
Matthieu.
diff --git a/src/http/modules/ngx_http_limit_zone_module.c b/src/http/modules/ngx_http_limit_zone_module.c
index 36da49c..5a20d8c 100644
--- a/src/http/modules/ngx_http_limit_zone_module.c
+++ b/src/http/modules/ngx_http_limit_zone_module.c
@@ -235,7 +235,7 @@ ngx_http_limit_zone_handler(ngx_http_request_t *r)
node = ngx_slab_alloc_locked(shpool, n);
if (node == NULL) {
ngx_shmtx_unlock(&shpool->mutex);
- return NGX_HTTP_SERVICE_UNAVAILABLE;
+ return NGX_DECLINED;
}
lz = (ngx_http_limit_zone_node_t *) &node->color;
Hi agentzh,
I managed to migrate all my tests from my original Python approach to
using Test::Nginx. I guess this is good news. However, I must say some
of it was a bit painful.
The main thing is probably the lack of documentation/examples on the
data sections accepted by Test::Nginx, especially for people who are
not familiar with Test::Base (like me). I understand you don't want to
duplicate the work done by the guys at Test::Base, but at least
pointers to some useful tricks like filters and --- ONLY would help
beginners (see the example below).
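For instance, something along these lines (a sketch in the usual
Test::Nginx data-section style; --- ONLY tells prove to run just this
one block, and empty_gif is just a stock directive used as a
placeholder):

    === TEST 1: debug a single block
    --- config
    location /foo {
        empty_gif;
    }
    --- request
    GET /foo
    --- ONLY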
As a side note, I don't see any benefit in having the "request_eval"
section. To me (at least in the tests I wrote), "request_eval" can be
replaced by "request eval" (that is, applying the eval filter to the
data). Maybe you should get rid of the _eval versions, or maybe I'm
missing something.
I actually wrote a few posts on the migration to Test::Nginx:
* http://www.nginx-discovery.com/2011/03/day-32-moving-to-testnginx.html
* http://www.nginx-discovery.com/2011/03/day-33-testnginx-pipelinedrequests.h…
Another thing that annoyed me is that shuffle defaults to "on". I find
it more misleading than anything else (especially on your first runs).
After going through this exercise (and learning quite a few things in
the process), the things that I really think should be improved are:
* Being able to share one config amongst multiple tests.
* Being able to run multiple requests in one test. The
pipelined_requests section uses the same connection, which might not be
what I want. I was thinking of something more natural, like: send
request 1, wait for response 1, check response 1, send request 2, wait
for response 2, check response 2, etc.
Of course, I am willing to help with these improvements, but I do not
want to start running all over the place without discussing it with
you, as I'm likely to miss something really big.
Antoine.
--
Antoine Bonavita.
Follow my progress with nginx at: http://www.nginx-discovery.com
On Tue, Mar 15, 2011 at 3:43 PM, Antoine BONAVITA
<antoine_bonavita(a)yahoo.com> wrote:
>> From: Antoine BONAVITA <antoine_bonavita(a)yahoo.com>
>> To: agentzh <agentzh(a)gmail.com>
>> Cc: nginx-devel(a)nginx.org
>> Sent: Thu, March 3, 2011 2:57:44 PM
>> Subject: Re: Use Test::Nginx with etcproxy and/or valgrind (Was Re:
>>Test::Nginx::LWP vs. Test::Nginx::Socket)
>>
>> Agentzh,
>>
>> Thanks a lot, again. I'm going on a ski trip for a week or so. I'll try that
>> when I come back.
>>
>> Antoine.
>>
>>
>>
>>
>> ----- Original Message ----
>> > From: agentzh <agentzh(a)gmail.com>
>> > To: Antoine BONAVITA <antoine_bonavita(a)yahoo.com>
>> > Cc: nginx-devel(a)nginx.org
>> > Sent: Thu, March 3, 2011 4:49:17 AM
>> > Subject: Use Test::Nginx with etcproxy and/or valgrind (Was Re:
>> >Test::Nginx::LWP vs. Test::Nginx::Socket)
>> >
>> > On Thu, Mar 3, 2011 at 12:37 AM, Antoine BONAVITA
>> > <antoine_bonavita(a)yahoo.com> wrote:
>> > > Following agentzh's tips, I'm moving the test cases for my module
>> > > to Test::Nginx (instead of using my Python unit tests).
>> > >
>> >
>> > There are a lot of undocumented features in Test::Nginx::Socket. I'm
>> > sorry. I'd like to document a bit how to integrate it with etcproxy
>> > and/or valgrind here because it's so useful ;)
>> >
>> > Use Test::Nginx::Socket with etcproxy
>> > ============================
>> >
>> > Test::Nginx automatically starts an nginx instance (from the PATH env)
>> > rooted at t/servroot/ and the default config template makes this nginx
>> > instance listen on the port 1984 by default.
>> >
>> > The default settings in etcproxy [1] makes this small TCP proxy split
>> > the TCP packets into bytes and introduce 1ms latency among them.
>> >
>> > There are usually various TCP chains that we can put etcproxy into,
>> > for example
>> >
>> > Test::Nginx <=> nginx
>> > ----------------------------
>> >
>> > $ ./etcproxy 1234 1984
>> >
>> > Here we tell etcproxy to listen on port 1234 and to delegate all the
>> > TCP traffic to port 1984, the default port that Test::Nginx makes
>> > nginx listen on.
>> >
>> > And then we tell Test::Nginx to test against port 1234, where
>> > etcproxy listens, rather than port 1984, which nginx listens on
>> > directly:
>> >
>> > $ TEST_NGINX_CLIENT_PORT=1234 prove -r t/
>> >
>> > Then the TCP chain now looks like this:
>> >
>> > Test::Nginx <=> etcproxy (1234) <=> nginx (1984)
>> >
>> > So etcproxy can effectively emulate extreme network conditions and
>> > exercise "unusual" code paths in your nginx server by your tests.
>> >
>> > In practice, *tons* of weird bugs can be captured by this setting.
>> > Even we ourselves didn't expect this simple approach to be so
>> > effective.
>> >
>> > nginx <=> memcached
>> > -----------------------------
>> >
>> > We first start the memcached server daemon on port 11211:
>> >
>> > memcached -p 11211 -vv
>> >
>> > and then we start another etcproxy instance listening on port 11984
>> > like this:
>> >
>> > $ ./etcproxy 11984 11211
>> >
>> > Then we tell our t/foo.t test script to connect to 11984 rather
>> > than 11211:
>> >
>> > # foo.t
>> > use Test::Nginx::Socket;
>> > repeat_each(1);
>> > plan tests => 2 * repeat_each() * blocks();
>> > $ENV{TEST_NGINX_MEMCACHED_PORT} ||= 11211; # make this env take a default value
>> > run_tests();
>> >
>> > __DATA__
>> >
>> > === TEST 1: sanity
>> > --- config
>> > location /foo {
>> > set $memc_cmd set;
>> > set $memc_key foo;
>> > set $memc_value bar;
>> > memc_pass 127.0.0.1:$TEST_NGINX_MEMCACHED_PORT;
>> > }
>> > --- request
>> > GET /foo
>> > --- response_body_like: STORED
>> >
>> > The Test::Nginx library will automatically expand the special macro
>> > "$TEST_NGINX_MEMCACHED_PORT" to the environment variable of the same
>> > name. You can define your own $TEST_NGINX_BLAH_BLAH_PORT macros as
>> > long as the prefix is TEST_NGINX_ and the name is all in upper case.
>> >
>> > And now we can run your test script against the etcproxy port 11984:
>> >
>> > TEST_NGINX_MEMCACHED_PORT=11984 prove t/foo.t
>> >
>> > Then the TCP chains look like this:
>> >
>> > Test::Nginx <=> nginx (1984) <=> etcproxy (11984) <=> memcached (11211)
>> >
>> > If TEST_NGINX_MEMCACHED_PORT is not set, then it will take the default
>> > value 11211, which is what we want when there's no etcproxy
>> > configured:
>> >
>> > Test::Nginx <=> nginx (1984) <=> memcached (11211)
>> >
>> > This approach also works for proxied mysql and postgres traffic.
>> > Please see the live test suite of ngx_drizzle and ngx_postgres for
>> > more details.
>> >
>> > Usually we set both TEST_NGINX_CLIENT_PORT and
>> > TEST_NGINX_MEMCACHED_PORT (etc.) at the same time, effectively
>> > yielding the following chain:
>> >
>> > Test::Nginx <=> etcproxy (1234) <=> nginx (1984) <=> etcproxy
>> > (11984) <=> memcached (11211)
>> >
>> > as long as you run two separate etcproxy instances in two separate
>> > terminals.
>> >
>> > It's easy to verify whether the traffic actually goes through your
>> > etcproxy server: just check whether the terminal running etcproxy
>> > emits output. By default, etcproxy always dumps the incoming and
>> > outgoing data to stdout/stderr.
>> >
>> > Use Test::Nginx::Socket with valgrind memcheck
>> > ====================================
>> >
>> > Test::Nginx has integrated support for valgrind [2], even though by
>> > default it does not bother running it with the tests, because
>> > valgrind will significantly slow down the test suite.
>> >
>> > First ensure that your valgrind executable is visible in your PATH
>> > env. Then run your test suite with the TEST_NGINX_USE_VALGRIND env
>> > set to true:
>> >
>> > TEST_NGINX_USE_VALGRIND=1 prove -r t
>> >
>> > If you see false alarms, you do have a chance to skip them by defining
>> > a ./valgrind.suppress file at the root of your module source tree, as
>> > in
>> >
>> >
>> > https://github.com/chaoslawful/drizzle-nginx-module/blob/master/valgrind.suppress
>> >
>> >
>> > This is the suppression file for ngx_drizzle. Test::Nginx will
>> > automatically use it to start nginx with valgrind memcheck if this
>> > file exists at the expected location.
>> >
>> > If you do see a lot of "Connection refused" errors while running the
>> > tests this way, then you probably have a slow machine (or a very busy
>> > one) for which the default waiting time is not sufficient for
>> > valgrind to start. You can set the sleep time to a larger value via
>> > the TEST_NGINX_SLEEP env:
>> >
>> > TEST_NGINX_SLEEP=1 prove -r t
>> >
>> > The time unit used here is seconds. The default sleep setting just
>> > fits my ThinkPad (Core2Duo T9600).
>> >
>> > Applying the no-pool patch to your nginx core is recommended while
>> > running nginx with valgrind:
>> >
>> > https://github.com/shrimp/no-pool-nginx
>> >
>> > The nginx memory pool can prevent valgrind from spotting lots of
>> > invalid memory reads/writes as well as certain double-free errors. We
>> > did find a lot more memory issues in many of our modules when we first
>> > introduced the no-pool patch in practice ;)
>> >
>> > There are also more advanced features in Test::Nginx that have never
>> > been documented. I'd like to write more about them in the near future ;)
>> >
>> > Cheers,
>> > -agentzh
>> >
>> > References
>> >
>> > [1] etcproxy: https://github.com/chaoslawful/etcproxy
>> > [2] valgrind: http://valgrind.org/
>> >
Sorry, the diff file in my last mail was bad; please use this one.
This is a new implementation according to your comments.
Here are my comments on this patch:
1. ngx_inet_sock_addr is removed. You are right, ngx_parse_url can do
that job, so there is no need to change ngx_inet.h.
2. The old "gethostbyname" implementation is used if "--with-ipv6" is
not set; when "--with-ipv6" is set, "getaddrinfo" is used (see the
sketch after this list).
3. Now, when "ngx_inet_parse_url" finds that u->host might be a
hostname, it invokes the newly added function
"ngx_inet_resolve_hostname".
4. There seems to be a bug at line 829 of the old file: len =
last - port should apply only when uri is NULL.
5. "ngx_inet6_parse_url" now also ends up invoking the newly added
"ngx_inet6_resolve_host", just like "ngx_inet_parse_url" does. I think
this should be done, but the old code doesn't do it. Please confirm
this is valid.
6. I tried to remove duplicated code as much as possible, so
"ngx_inet_resolve_host" is almost entirely rewritten. It now first
handles u->host as an IP address; if that fails, it tries to resolve it
as a hostname.
7. "ngx_inet_parse_url" first assumes AF_INET and allocates the memory
accordingly. I think this is not suitable in the new implementation,
but I didn't touch that part of the code.
8. All the fixes were tested as before (with and without
"--with-ipv6"); all tests pass.
9. I based this implementation on 0.9.3. Comparing ngx_inet.c between
official 0.9.3 and 0.9.5, I found one difference:
- for (i = 0; i < u->naddrs; i++) {
+ for (i = 0; h->h_addr_list[i] != NULL; i++) {
Since my implementation uses "getaddrinfo", h_addr_list[i] is not
valid when "--with-ipv6" is set.
Thanks.