From cnst++ at freebsd.org Sun Sep 1 01:17:37 2019 From: cnst++ at freebsd.org (Constantine A. Murenin) Date: Sat, 31 Aug 2019 19:17:37 -0600 Subject: Patch: slash_redirect_temporary directive In-Reply-To: <73B604EF-7C7A-4D30-9E82-1168001CA07F@qzxj.net> References: <73B604EF-7C7A-4D30-9E82-1168001CA07F@qzxj.net> Message-ID: If you don't like 301 redirects because of permanent caching — can't blame you there, as I'm in the same boat — an easier way would be to simply use what I call the exception handling mechanism of nginx to change all 301 replies to 302, and you don't need any patches to perform such a change, as a simple nginx.conf snippet would suffice: error_page 301 =302 @200; location @200 { # https://serverfault.com/a/870911/110020 default_type ""; return 200; } If you don't like the `200` part in the config, and lack of a body in the response, then the following is also an option: error_page 301 = @302; location @302 { # https://serverfault.com/a/870722/110020 return 302 $sent_http_location; } I've tested both of the above for directory access sans `/`, and both do the job just fine. Cheers, Constantine.SU. http://cm.su/ On Sat, 31 Aug 2019 at 17:46, Blake Williams wrote: > Hello! > > We ran into an issue with the permanent redirects in > ngx_http_static_module.c that occur when you omit a slash when requesting a > folder, for example from "/foo" to the folder "/foo/". We changed some > things around in our site so that "/foo" was actually a file, not a folder, > but unfortunately, browsers aggressively cache 301 redirects so our clients > were trying to hit the new URL, the browser used its permanent cache, and > they'd be redirected to "/foo/" again, which no longer existed as it had > been changed to a file. 
> > This patch adds an extra configuration directive that allows you to > configure that redirect to issue a 302 instead: > > # HG changeset patch > # User Blake Williams > # Date 1567294381 -36000 > # Sun Sep 01 09:33:01 2019 +1000 > # Node ID 85c36c3f5c349a83b1b397a8aad2d11bf6a0875a > # Parent 9f1f9d6e056a4f85907957ef263f78a426ae4f9c > Add slash_redirect_temporary directive to core > > diff -r 9f1f9d6e056a -r 85c36c3f5c34 contrib/vim/syntax/nginx.vim > --- a/contrib/vim/syntax/nginx.vim Mon Aug 19 15:16:06 2019 +0300 > +++ b/contrib/vim/syntax/nginx.vim Sun Sep 01 09:33:01 2019 +1000 > @@ -571,6 +571,7 @@ > syn keyword ngxDirective contained session_log_format > syn keyword ngxDirective contained session_log_zone > syn keyword ngxDirective contained set_real_ip_from > +syn keyword ngxDirective contained slash_redirect_temporary > syn keyword ngxDirective contained slice > syn keyword ngxDirective contained smtp_auth > syn keyword ngxDirective contained smtp_capabilities > diff -r 9f1f9d6e056a -r 85c36c3f5c34 > src/http/modules/ngx_http_static_module.c > --- a/src/http/modules/ngx_http_static_module.c Mon Aug 19 15:16:06 2019 > +0300 > +++ b/src/http/modules/ngx_http_static_module.c Sun Sep 01 09:33:01 2019 > +1000 > @@ -188,7 +188,11 @@ > r->headers_out.location->value.len = len; > r->headers_out.location->value.data = location; > > - return NGX_HTTP_MOVED_PERMANENTLY; > + if (!clcf->slash_redirect_temporary) { > + return NGX_HTTP_MOVED_PERMANENTLY; > + } else { > + return NGX_HTTP_MOVED_TEMPORARILY; > + } > } > > #if !(NGX_WIN32) /* the not regular files are probably Unix specific */ > diff -r 9f1f9d6e056a -r 85c36c3f5c34 src/http/ngx_http_core_module.c > --- a/src/http/ngx_http_core_module.c Mon Aug 19 15:16:06 2019 +0300 > +++ b/src/http/ngx_http_core_module.c Sun Sep 01 09:33:01 2019 +1000 > @@ -520,6 +520,13 @@ > offsetof(ngx_http_core_loc_conf_t, satisfy), > &ngx_http_core_satisfy }, > > + { ngx_string("slash_redirect_temporary"), > + > 
NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, > + ngx_conf_set_flag_slot, > + NGX_HTTP_LOC_CONF_OFFSET, > + offsetof(ngx_http_core_loc_conf_t, slash_redirect_temporary), > + NULL }, > + > { ngx_string("internal"), > NGX_HTTP_LOC_CONF|NGX_CONF_NOARGS, > ngx_http_core_internal, > @@ -3443,6 +3450,8 @@ > clcf->open_file_cache_errors = NGX_CONF_UNSET; > clcf->open_file_cache_events = NGX_CONF_UNSET; > > + clcf->slash_redirect_temporary = NGX_CONF_UNSET; > + > #if (NGX_HTTP_GZIP) > clcf->gzip_vary = NGX_CONF_UNSET; > clcf->gzip_http_version = NGX_CONF_UNSET_UINT; > @@ -3727,6 +3736,9 @@ > > ngx_conf_merge_sec_value(conf->open_file_cache_events, > prev->open_file_cache_events, 0); > + > + ngx_conf_merge_value(conf->slash_redirect_temporary, > + prev->slash_redirect_temporary, 0); > #if (NGX_HTTP_GZIP) > > ngx_conf_merge_value(conf->gzip_vary, prev->gzip_vary, 0); > diff -r 9f1f9d6e056a -r 85c36c3f5c34 src/http/ngx_http_core_module.h > --- a/src/http/ngx_http_core_module.h Mon Aug 19 15:16:06 2019 +0300 > +++ b/src/http/ngx_http_core_module.h Sun Sep 01 09:33:01 2019 +1000 > @@ -433,6 +433,8 @@ > ngx_uint_t types_hash_max_size; > ngx_uint_t types_hash_bucket_size; > > + ngx_flag_t slash_redirect_temporary; > + > ngx_queue_t *locations; > > #if 0 > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx > -------------- next part -------------- An HTML attachment was scrubbed... URL: From nginx-forum at forum.nginx.org Mon Sep 2 08:29:02 2019 From: nginx-forum at forum.nginx.org (maximewimez) Date: Mon, 02 Sep 2019 04:29:02 -0400 Subject: How to fix : Received RST_STREAM with error code 2 when using nginx reverse proxy Message-ID: <6b23cdee5b906040ccbc662c24b7e1fa.NginxMailingListEnglish@forum.nginx.org> I'm currently using the dialogflow api on a raspberry. Everything works fine when calling StreamingDetectIntent method using grpc. 
I have to use multiples apis on my product and so, I'm trying to put a reverse proxy in front of them. Like that, I can call only one address I'm using nginx to reverse proxy my GRPC request to google api. I have no problem when calling simple method, but when calling a streaming method like StreamingDetectIntent, I got an error during the request. Dialogflow do not have problem to get the audio flux coming from my client, but I got problem to get the last part of the request, the downstream flux. Here is the error that my client give me : ``` grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with: status = StatusCode.INTERNAL details = "Received RST_STREAM with error code 2" debug_error_string = "{"created":"@1567173815.816362297","description":"Error received from peer ipv4:163.172.143.250:443","file":"src/core/lib/surface/call.cc","file_line":1041,"grpc_message":"Received RST_STREAM with error code 2","grpc_status":13}" > ``` and here the error I can see in Nginx log : ``` upstream sent frame for closed stream 1 while reading upstream, client: ..., server: exemple.com, request: "POST /google.cloud.dialogflow.v2beta1.Sessions/StreamingDetectIntent HTTP/2.0", upstream: "grpcs://...:443", host: "example.com:443" I've tried to increase grpc_buffer_size parameter to big value, but didn't worked.. 
``` Here is my current Nginx config : ``` user nginx; worker_processes 1; error_log /var/log/nginx/error.log debug; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main; sendfile on; keepalive_timeout 65; client_max_body_size 4000M; grpc_read_timeout 1d; grpc_send_timeout 1d; # this seems to fix it; but see comment in README.md grpc_buffer_size 100M; include /etc/nginx/conf.d/*.conf; server { # SSL configuration listen 443 ssl http2; access_log /var/log/nginx/access_grpc.log main; location / { grpc_pass grpcs://dialogflow.googleapis.com:443; } ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; ssl_certificate_key/etc/letsencrypt/live/exemple.com/privkey.pem; } server { if ($host = example.com) { return 301 https://$host$request_uri; } listen 80 ; listen [::]:80 ; return 404; # managed by Certbot } } ``` Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285506,285506#msg-285506 From nginx-forum at forum.nginx.org Mon Sep 2 08:50:59 2019 From: nginx-forum at forum.nginx.org (Georgisim) Date: Mon, 02 Sep 2019 04:50:59 -0400 Subject: $ssl_server_name Message-ID: <64597d7f1de666700b57eeb8c76fdef9.NginxMailingListEnglish@forum.nginx.org> Hi nginx team, Are there plans to support $ssl_server_name for ssl_stapling_file? Or some other way to do dynamic ssl with ocsp? Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285507,285507#msg-285507 From nginx-forum at forum.nginx.org Mon Sep 2 09:09:23 2019 From: nginx-forum at forum.nginx.org (milanleon) Date: Mon, 02 Sep 2019 05:09:23 -0400 Subject: How to add Multiple sites with ipv6 and SSL on Nginx ? 
In-Reply-To: <000d01d55f2b$2a039d10$7e0ad730$@roze.lv> References: <000d01d55f2b$2a039d10$7e0ad730$@roze.lv> Message-ID: Thank you so much for your answer. It seems that all things are working now. Thank you once again. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285450,285508#msg-285508 From mdounin at mdounin.ru Mon Sep 2 13:51:53 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 2 Sep 2019 16:51:53 +0300 Subject: $ssl_server_name In-Reply-To: <64597d7f1de666700b57eeb8c76fdef9.NginxMailingListEnglish@forum.nginx.org> References: <64597d7f1de666700b57eeb8c76fdef9.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190902135153.GR1877@mdounin.ru> Hello! On Mon, Sep 02, 2019 at 04:50:59AM -0400, Georgisim wrote: > Are there plans to support $ssl_server_name for ssl_stapling_file? Or some > other way to do dynamic ssl with ocsp? No, there are no such plans. If you want to use OCSP stapling, consider configuring SSL certificates explicitly. -- Maxim Dounin http://mdounin.ru/ From Keerthi.Jayarajan at Honeywell.com Mon Sep 2 17:45:59 2019 From: Keerthi.Jayarajan at Honeywell.com (Jayarajan, Keerthi (AT ASP RTC)) Date: Mon, 2 Sep 2019 17:45:59 +0000 Subject: Routing Http2 traffic without decrypting tls packets In-Reply-To: References: Message-ID: Hi, I'm working in blockchain project for Honeywell. We have blockchain nodes hosted in our cloud. These nodes should connect and talk to external node and vice versa. We are using Nginx as Reverse proxy server through which external node can connect to our nodes. We are using Hyperledger fabric blockchain framework which works on grpc protocol using http2. I found Nginx 1.15.2 supports http2. So I installed it and tested sample grpc application(without tls) successfully. Now, I need external node to talk to our node reverse proxied via Nginx. But our nodes are tls encrypted and client authentication is enabled. I want the external node to talk to internal node as if the Nginx was not there. 
It should forward everything based on the hostname without decrypting the packets. I tried this ssl_preread directive but It says not allowed for http2. Can you please technically tell how this can be accomplished. Looking forward for your support. If this is feasible, we would like to go with Nginx Plus for production. Thanks and Regards Keerthi -------------- next part -------------- An HTML attachment was scrubbed... URL: From francis at daoine.org Mon Sep 2 21:02:17 2019 From: francis at daoine.org (Francis Daly) Date: Mon, 2 Sep 2019 22:02:17 +0100 Subject: Allow internal redirect to URI x, but deny external request for x? In-Reply-To: <20190831215526.mmxekmk7scfoertl@mink.imca.aps.anl.gov> References: <20190830173317.klutzplfsurrpmrw@mink.imca.aps.anl.gov> <20190830182031.jrruhqrcyi5izmor@mink.imca.aps.anl.gov> <20190830183743.uzww2kwo2j4sutjb@mink.imca.aps.anl.gov> <20190830185823.6ohwsgpi6usmzsmz@mink.imca.aps.anl.gov> <20190830205440.2ztlmmzfy3j57gmu@daoine.org> <20190830215936.mdqfwbz3ouna5ove@mink.imca.aps.anl.gov> <20190830232140.yqspqr6rzyrenpdb@daoine.org> <20190831141009.ub2vibpfy63kd4fy@mink.imca.aps.anl.gov> <20190831205045.o647ydblhwau3fan@daoine.org> <20190831215526.mmxekmk7scfoertl@mink.imca.aps.anl.gov> Message-ID: <20190902210217.bafsxfjwl57ee7pd@daoine.org> On Sat, Aug 31, 2019 at 04:55:26PM -0500, J. Lewis Muir wrote: > On 08/31, Francis Daly wrote: > > On Sat, Aug 31, 2019 at 09:10:09AM -0500, J. Lewis Muir wrote: Hi there, > > Using "realpath" should not affect nginx at all. nginx invites the > > fastcgi server to use pathname2 instead of pathname1; so the fastcgi > > server is the only thing that should care. > > Hmm, I might not be understanding this. The rationale of using > $realpath_root instead of $document_root was to make it so that a > new version of the web app could be deployed atomically at any time > by changing the "current" symlink, ... 
> fastcgi_param DOCUMENT_ROOT $realpath_root; > fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name; > > So, does that make sense, or am I still not understanding this? I don't > know what you mean by "nginx invites the fastcgi server to use pathname2 > instead of pathname1." What are pathname1 and pathname2? nginx does not "do" php. nginx does not care what your fastcgi server will do with the key/value pairs that it sends. nginx cares that the fastcgi server gives a valid response to the request that nginx makes. Typically, your fastcgi server will use the value associated with SCRIPT_FILENAME as "the name of the file to execute". If your fastcgi server fails to find / read / execute that file, it will return its own error indication. (So your "if", or the more common "try_files", is just an early-out, to sometimes avoid involving the fastcgi server. It may happen that the file is present when nginx looks for it, but is absent when the fastcgi server looks for it -- so that case does have to be handled anyway.) In this case, if $document_root is /srv/www/my-app/current/ and $realpath_root is /srv/www/my-app/releases/1.0.2/, and the script name is test.php, then with one config, nginx would send the string "/srv/www/my-app/current/test.php", and with the other config nginx would send the string "/srv/www/my-app/releases/1.0.2/test.php". (That is "pathname1" vs "pathname2".) So if "one request" involves the fastcgi server reading "/srv/www/my-app/current/test.php", and then reading a bunch of other files in the same directory -- then I guess that unfortunate timing could lead to it reading some files from releases/1.0.1 and some from releases/1.0.2. (Assuming that it opens the directory afresh each time -- which can't be ruled out.) 
But if "the app" involves a http request to part1.php and then a http request to part2.php (or: a second http request to part1.php), I don't think that the symlink+realpath thing will prevent those two requests going to different release versions. All the best, f -- Francis Daly francis at daoine.org From soon.hyouk at gmail.com Tue Sep 3 02:23:26 2019 From: soon.hyouk at gmail.com (Soon Hyouk Lee) Date: Mon, 2 Sep 2019 22:23:26 -0400 Subject: Reverse proxy 404 error help! In-Reply-To: <20190830195452.fmus3jzl6xhmtpi5@daoine.org> References: <20190830025433.oiugtwtabejfrywz@thinkarch.localdomain> <20190830195452.fmus3jzl6xhmtpi5@daoine.org> Message-ID: <45EE540C-6ED8-4E77-ACB5-5423B1943C02@gmail.com> Thank you! Can confirm that indeed reverse proxy at "/" location succeeds. I used subdomain (unifi.mydomain.com) to accomplish app-specific address but at the "/" location using a dedicated server block as instructed. Thank you! > On Aug 30, 2019, at 3:54 PM, Francis Daly wrote: > > On Thu, Aug 29, 2019 at 10:54:33PM -0400, Soon Hyouk Lee wrote: > > Hi there, > > some web services are not set up to be friendly to be reverse-proxied > at a different part of the local url hierarchy than they know about. > > Perhaps this is one of them. > > If you can configure the back-end server to believe that it is rooted > at /unifi/ instead of at /, then perhaps it can work. > > Otherwise, you may have more luck using a dedicated server{} block that > reverse proxies everything to the unifi service without changing the > local url. > > The other option, of trying to rewrite the content on-the-fly, is unlikely > to work reliably. 
> > f > -- > Francis Daly francis at daoine.org > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx From ru at nginx.com Tue Sep 3 14:39:38 2019 From: ru at nginx.com (Ruslan Ermilov) Date: Tue, 3 Sep 2019 17:39:38 +0300 Subject: Routing Http2 traffic without decrypting tls packets In-Reply-To: References: Message-ID: <20190903143938.GA34341@lo0.su> On Mon, Sep 02, 2019 at 05:45:59PM +0000, Jayarajan, Keerthi (AT ASP RTC) wrote: > Hi, > > I'm working in blockchain project for Honeywell. We have blockchain nodes > hosted in our cloud. These nodes should connect and talk to external node and > vice versa. We are using Nginx as Reverse proxy server through which external > node can connect to our nodes. We are using Hyperledger fabric blockchain > framework which works on grpc protocol using http2. I found Nginx 1.15.2 > supports http2. So I installed it and tested sample grpc application(without > tls) successfully. Now, I need external node to talk to our node reverse > proxied via Nginx. But our nodes are tls encrypted and client authentication > is enabled. I want the external node to talk to internal node as if the Nginx > was not there. It should forward everything based on the hostname without > decrypting the packets. I tried this ssl_preread directive but It says not > allowed for http2. Can you please technically tell how this can be > accomplished. Looking forward for your support. If this is feasible, we would > like to go with Nginx Plus for production. ngx_stream_ssl_preread_module that you mentioned above can route traffic based on either server name requested through SNI or protocols advertised in ALPN, or combination. The documentation for the module has complete examples: http://nginx.org/en/docs/stream/ngx_stream_ssl_preread_module.html#example You don't need "ssl http2" in the listen directive for it to work. 
From jlmuir at imca-cat.org Tue Sep 3 17:26:15 2019 From: jlmuir at imca-cat.org (J. Lewis Muir) Date: Tue, 3 Sep 2019 12:26:15 -0500 Subject: Allow internal redirect to URI x, but deny external request for x? In-Reply-To: <20190902210217.bafsxfjwl57ee7pd@daoine.org> References: <20190830182031.jrruhqrcyi5izmor@mink.imca.aps.anl.gov> <20190830183743.uzww2kwo2j4sutjb@mink.imca.aps.anl.gov> <20190830185823.6ohwsgpi6usmzsmz@mink.imca.aps.anl.gov> <20190830205440.2ztlmmzfy3j57gmu@daoine.org> <20190830215936.mdqfwbz3ouna5ove@mink.imca.aps.anl.gov> <20190830232140.yqspqr6rzyrenpdb@daoine.org> <20190831141009.ub2vibpfy63kd4fy@mink.imca.aps.anl.gov> <20190831205045.o647ydblhwau3fan@daoine.org> <20190831215526.mmxekmk7scfoertl@mink.imca.aps.anl.gov> <20190902210217.bafsxfjwl57ee7pd@daoine.org> Message-ID: <20190903172615.g4ffg6r2uwxn6v6x@mink.imca.aps.anl.gov> On 09/02, Francis Daly wrote: > nginx does not "do" php. nginx does not care what your fastcgi server > will do with the key/value pairs that it sends. nginx cares that the > fastcgi server gives a valid response to the request that nginx makes. > > Typically, your fastcgi server will use the value associated with > SCRIPT_FILENAME as "the name of the file to execute". If your fastcgi > server fails to find / read / execute that file, it will return its own > error indication. > > (So your "if", or the more common "try_files", is just an early-out, > to sometimes avoid involving the fastcgi server. It may happen that the > file is present when nginx looks for it, but is absent when the fastcgi > server looks for it -- so that case does have to be handled anyway.) > > In this case, if $document_root is /srv/www/my-app/current/ and > $realpath_root is /srv/www/my-app/releases/1.0.2/, and the script > name is test.php, then with one config, nginx would send the string > "/srv/www/my-app/current/test.php", and with the other config nginx > would send the string "/srv/www/my-app/releases/1.0.2/test.php". 
> > (That is "pathname1" vs "pathname2".) Understood. > So if "one request" involves the fastcgi server reading > "/srv/www/my-app/current/test.php", and then reading a bunch of other > files in the same directory -- then I guess that unfortunate timing > could lead to it reading some files from releases/1.0.1 and some from > releases/1.0.2. (Assuming that it opens the directory afresh each time -- > which can't be ruled out.) Right, that's what I was trying to avoid by using $realpath_root. I assumed that $realpath_root was set at the beginning of the location processing. That way, I could be guaranteed that it would not change for the duration of the request handling within nginx. And since nginx would give that value (i.e., the path with the symlinks resolved) to the FastCGI server, the FastCGI server would be using that same path for the whole request and wouldn't know anything about the "current" symlink that can change at any moment. But perhaps that's an invalid assumption, and the path is resolved every time $realpath_root is expanded as a variable? I hope not, but that would be really important to understand. > But if "the app" involves a http request to part1.php and then a http > request to part2.php (or: a second http request to part1.php), I don't > think that the symlink+realpath thing will prevent those two requests > going to different release versions. Hmm, good point. I'm not sure how to do a seamless web app update deploy, then. Maybe it's not possible without additional constraints. I'm assuming the app is hosted on a single nginx server. Although, I'd be curious how this is typically solved for a multiple-server case as well (e.g., a load balancer with multiple identical instances of the web app running on two or more servers). The idea is to have no downtime. I suppose I could encode the app version in the URI (e.g., /my-app/1.0.2) or in the request header. 
I've seen REST APIs versioned in the URI or in the request header, but I'm not sure web apps do that. Or I could try to ensure that my web app updates are *always* backward compatible if they are to keep using the same URI. I could do that for any web apps I write, but I can't control that for any that I don't write and am just deploying. Another idea I previously toyed with was to deploy the web app in a directory structure similar to the symlink+realpath approach but without the symlink; the path to the app root is versioned. In the nginx config, use the versioned path to the app root (e.g., /srv/www/my-app/releases/1.0.2). To deploy a new version of the app, install to a new versioned app root (e.g., /srv/www/my-app/releases/1.0.3), change the app root in the nginx config, and cause nginx to reload the config. Note that I'm intentionally keeping the file system path versioned so that the path changes when a new version is deployed to avoid the need to flush any caching that might be going on at the FastCGI server or elsewhere. Will there be downtime for a split second when the config is reloaded? What will happen? Will nginx refuse connections? Will it accept connections but just not respond while it's reloading the config? But this approach has the same issue that you pointed out for the symlink+realpath approach in that I don't see a way to prevent the case where the app involves an HTTP request to part1.php that goes to one release and then an HTTP request to part2.php that goes to the updated release if the deploy happens at just the right (wrong) time. What's the best strategy for deploying a new version of an app, then? Expect that all app updates are backward compatible? Expect that the app passes around a version identifier with each request that the app will use to detect when the version has changed and force the user to log in again or something? Version the URI? Thanks! 
Lewis From jlmuir at imca-cat.org Tue Sep 3 21:30:22 2019 From: jlmuir at imca-cat.org (J. Lewis Muir) Date: Tue, 3 Sep 2019 16:30:22 -0500 Subject: Allow internal redirect to URI x, but deny external request for x? In-Reply-To: <20190903172615.g4ffg6r2uwxn6v6x@mink.imca.aps.anl.gov> References: <20190830183743.uzww2kwo2j4sutjb@mink.imca.aps.anl.gov> <20190830185823.6ohwsgpi6usmzsmz@mink.imca.aps.anl.gov> <20190830205440.2ztlmmzfy3j57gmu@daoine.org> <20190830215936.mdqfwbz3ouna5ove@mink.imca.aps.anl.gov> <20190830232140.yqspqr6rzyrenpdb@daoine.org> <20190831141009.ub2vibpfy63kd4fy@mink.imca.aps.anl.gov> <20190831205045.o647ydblhwau3fan@daoine.org> <20190831215526.mmxekmk7scfoertl@mink.imca.aps.anl.gov> <20190902210217.bafsxfjwl57ee7pd@daoine.org> <20190903172615.g4ffg6r2uwxn6v6x@mink.imca.aps.anl.gov> Message-ID: <20190903213022.35b6jge7v5i7ydvn@mink.imca.aps.anl.gov> On 09/03, J. Lewis Muir wrote: > On 09/02, Francis Daly wrote: > > But if "the app" involves a http request to part1.php and then a http > > request to part2.php (or: a second http request to part1.php), I don't > > think that the symlink+realpath thing will prevent those two requests > > going to different release versions. > > Hmm, good point. > > I'm not sure how to do a seamless web app update deploy, then. Maybe > it's not possible without additional constraints. After searching the web and failing to find anything addressing this (maybe it's out there, but I couldn't find it), I'm inclined to believe that there are roughly two choices: either the web app maintains backward compatibility in its request API, or it doesn't. The web app that maintains backward compatibility in its request API will work with the symlink+realpath approach, assuming the FastCGI server either does no caching or caches based on the file path. (The path-based caching works because the path changes when an app update is deployed because the version is encoded in the path.) 
Note, however, that even for an app that maintains backward compatibility like this, rolling back a deploy to a previous release would not work unless it was a patch update (as defined in the Semantic Versioning scheme). For example, you could safely roll back from 1.0.3 to 1.0.2, but not from 1.1.0 to 1.0.3, and not from 2.0.0 to 1.2.3. The web app that does *not* maintain backward compatibility in its request API will *not* work with the symlink+realpath approach. It might work by chance depending on the timing of the deploy, the timing of the requests, and which requests were in flight at the time of the deploy. Or you could orchestrate the deploy to shut down the nginx server, wait for an amount of time deemed to be the maximum time that should ever elapse between the "part1.php" request and the "part2.php" request (which may be impossible to determine, or may be infinite) such that all "part2.php" requests will happen and fail because they couldn't connect to the nginx server, deploy the app update, and then start the nginx server again. This approach will never be 100% correct. I'd love to be enlightened on other choices, but this is my understanding as of now, and I think I'll proceed with the symlink+realpath approach under the expectation that the web apps I deploy maintain backward compatibility in their request API, or they might just break for some users when I deploy an update in which case I might choose my deploy time to be the time of least demand on average. Regards, Lewis From nginx-forum at forum.nginx.org Wed Sep 4 02:51:55 2019 From: nginx-forum at forum.nginx.org (j94305) Date: Tue, 03 Sep 2019 22:51:55 -0400 Subject: NGINX R19 Javascript bug with keyval maps Message-ID: <4eea61d10e76438468067f652c9cd0a7.NginxMailingListEnglish@forum.nginx.org> The new R19 introduces "type=ip" keyval maps. Posting IP addresses (e.g., 1.2.3.4) seems to work from both, the API 5 REST calls and from Javascript, except IPv6 addresses are not accepted. 
Posting CIDR blocks (e.g., 1.2.3.0/24) works fine via the API 5 REST calls but not via Javascript. CIDR entries will not show up in the map at all. I am trying to feed a banlist into a map which used to be a "geo" directive. This is done by a Javascript function because deleting the map and uploading it again would cause a time of the map being empty. Consistently, all CIDR ranges fail. My keyval map definition: keyval_zone zone=banned:32m timeout=7d type=ip sync state=/var/run/nginx/state/banned.json; keyval $custom_addr $map_bannedIP zone=banned; As the placement of regular IP addresses also failed after a while, I tested the function with this Javascript test handler set up as js_content handler for a location: function admin_mapBanned(r) { var v = r.variables; var here = "adminMapBanned"; var n = 0; for (var a = 1; a < 240; a++) { for (var b = 1; b < 255; b++) { for (var c = 1; c < 255; c++) { for (var d = 1; d < 255; d++) { n++; var addr = "" + a + "." + b + "." + c + "." + d; v.custom_addr = addr; v.map_bannedIP = "1"; r.log("bannedIP[" + n + "]: " + addr + " => " + v.map_bannedIP); if (!v.map_bannedIP) { r.return(200); return; } } } } } } The results with different keyval zone sizes: - 1m => 2964 entries - 2m => 6000 entries - 4m => 12032 entries - 8m => 24128 entries - 16m => 48304 entries - 32m => 96704 entries In consequence, this means a keyval map uses almost 350 bytes to store an IP address and the value of "1". Wow! I would have expected this to be much lower in memory consumption. Anyway, knowing this, I can at least reliably feed IPv4 addresses now. Please fix the Javascript issue with IPv6 and CIDR notations. Thanks! --j. 
Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285542,285542#msg-285542 From nginx-forum at forum.nginx.org Wed Sep 4 03:20:56 2019 From: nginx-forum at forum.nginx.org (j94305) Date: Tue, 03 Sep 2019 23:20:56 -0400 Subject: NGINX R19 Javascript bug with keyval maps In-Reply-To: <4eea61d10e76438468067f652c9cd0a7.NginxMailingListEnglish@forum.nginx.org> References: <4eea61d10e76438468067f652c9cd0a7.NginxMailingListEnglish@forum.nginx.org> Message-ID: <906c53c5463438400bb77c5f8f54b2c8.NginxMailingListEnglish@forum.nginx.org> A little correction to my earlier message: IPv6 addresses also seem to work. In my test, I was checking for a dot in the key, and that excluded IPv6 addresses. However, CIDR ranges still fail. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285542,285543#msg-285543 From jlmuir at imca-cat.org Wed Sep 4 03:29:50 2019 From: jlmuir at imca-cat.org (J. Lewis Muir) Date: Tue, 3 Sep 2019 22:29:50 -0500 Subject: Allow internal redirect to URI x, but deny external request for x? In-Reply-To: References: <20190830173317.klutzplfsurrpmrw@mink.imca.aps.anl.gov> Message-ID: <20190904032950.ftj3l6wvi6k3ghqf@mink.imca.aps.anl.gov> On 08/30, j94305 wrote: > I've been following this, and I would take a slightly different approach. > > 1. Serve all apps under /{app}/releases/{version}/{path} as you have them > organized in the deployment structure in the file system. > > 2. Forget about symbolic links and other makeshift versioning/defaulting in > the file system. > > 3. Use a keyval mapping to handle redirections (307) of > /{app}/current/{stuff} to /{app}/releases/{currentVersion}/{stuff}, where > the keyval mapping provides {app} => {currentVersion}. You can update an > manage this during deployment. Sorry, I forgot about your post! Thank you for your suggestions! Is this a keyval? 
https://nginx.org/en/docs/http/ngx_http_keyval_module.html > We usually include this in a CI/CD pipeline after deployment to dynamically > switch to the last version (using a curl request to the NGINX API). If you > can't use keyvals, use a static map and dynamically generate that "map" > directive's mapping. Restart NGINX to reflect changes. Keyvals let you do > this on the fly. Is this a static map? https://nginx.org/en/docs/http/ngx_http_map_module.html And by "dynamically generate" do you mean generate the map directive as a config file that would be included from the main config and then cause nginx to reload its config? > The major advantage of this approach is with updates. You are most likely > going to run into issues with browser or proxy caching if you provide > different versions of files/apps under the same path. By having a canonical > form that respects the version structure, you are avoiding this altogether. > Yet, you have the flexibility to run hotfixes (replace existing files in an > existing version without creating a new one), or experimental versions > (which won't update the "current" pointer). Interesting. What I was trying to do with $realpath_root, I thought was similar to what you're describing. However, when you mention browser or proxy caching, then I'm not sure. Are you suggesting serving from a different URI for each version of the app? If not, then I don't understand how your proposal behaves differently than the symlink+realpath idea. (But this may be because you wrote this on Aug 30, and the symlink+realpath idea had not been clearly stated yet.) > I would try to keep the complexity low. Agreed! However, changing a symlink (albeit with some nginx config changes to use $realpath_root and such) is pretty simple to me, so it's a little harder for me to see using a keyval or a static map as keeping the complexity low. 
But if I understand your proposal correctly, it would be more straightforward in terms of not needing to use symlinks at all and not needing to worry about $realpath_root vs. $document_root. Instead, you just use variables, and to update the variables, you just use the API if using a keyval, or cause nginx to reload its config if using the static map. Thank you for the suggestions! Regards, Lewis From juergen.wagner at devoteam.com Wed Sep 4 03:54:12 2019 From: juergen.wagner at devoteam.com (=?UTF-8?Q?J=c3=bcrgen_Wagner_=28DVT=29?=) Date: Wed, 4 Sep 2019 05:54:12 +0200 Subject: Allow internal redirect to URI x, but deny external request for x? In-Reply-To: <20190904032950.ftj3l6wvi6k3ghqf@mink.imca.aps.anl.gov> References: <20190830173317.klutzplfsurrpmrw@mink.imca.aps.anl.gov> <20190904032950.ftj3l6wvi6k3ghqf@mink.imca.aps.anl.gov> Message-ID: <0fcb9403-80a0-9669-3f51-407c2ca6636e@devoteam.com> Hi Lewis, ? the idea is to have a deployment process that places apps or whatever artifacts always in a certain distinct place that is determined once at deployment time. This will determine the address where you can reach the app in the namespace of NGINX. So, if the convention is to place an app in a directory {webroot}/{app}/releases/{version}/... served as https://{server}/{app}/releases/{version}/... you would have a single, official URL prefix for each app version to be served from. Now, you want to be able to say what is the "current" version and reflect this in the URL namespace as well. In the file system, that's a symbolic link. In the URL namespace of NGINX, that could be a redirection (status code 307). Both approaches would work. 
For the redirection you need a location /{app}/current which redirects any request for paths starting with this to the actual version you want to serve: /{app}/releases/{latestVersion} This can be achieved with a dynamically-generated stub you include in a "map" directive (requiring NGINX reload in case of changes) or a "keyval" map that can be changed via the NGINX API on the fly as you need it (not requiring reloads). The mapping will get the app name and determine the path of the latest version where the redirection should go to. The issue about browser and proxy caches: if over time you serve multiple versions of an app from the same URLs, browsers (or proxies) may consider their cached version of some files current enough not to feel motivated refetching them. In some cases, you would end up with some files loaded into the browser being of an old version, some already a newer one. This can be avoided entirely by giving each version of the app a distinct canonical prefix that will never be re-used. The "current" redirection is simply a pointer to the right location for the latest version, but as it is an external redirection, the browser will ultimately load the app from the official "releases" path with the version number in it. Cheers, --j. On 04.09.2019 05:29, J. Lewis Muir wrote: > On 08/30, j94305 wrote: >> I've been following this, and I would take a slightly different approach. >> >> 1. Serve all apps under /{app}/releases/{version}/{path} as you have them >> organized in the deployment structure in the file system. >> >> 2. Forget about symbolic links and other makeshift versioning/defaulting in >> the file system. >> >> 3. Use a keyval mapping to handle redirections (307) of >> /{app}/current/{stuff} to /{app}/releases/{currentVersion}/{stuff}, where >> the keyval mapping provides {app} => {currentVersion}. You can update an >> manage this during deployment. > Sorry, I forgot about your post! Thank you for your suggestions! > > Is this a keyval? 
> > https://nginx.org/en/docs/http/ngx_http_keyval_module.html > >> We usually include this in a CI/CD pipeline after deployment to dynamically >> switch to the last version (using a curl request to the NGINX API). If you >> can't use keyvals, use a static map and dynamically generate that "map" >> directive's mapping. Restart NGINX to reflect changes. Keyvals let you do >> this on the fly. > Is this a static map? > > https://nginx.org/en/docs/http/ngx_http_map_module.html > > And by "dynamically generate" do you mean generate the map directive as > a config file that would be included from the main config and then cause > nginx to reload its config? > >> The major advantage of this approach is with updates. You are most likely >> going to run into issues with browser or proxy caching if you provide >> different versions of files/apps under the same path. By having a canonical >> form that respects the version structure, you are avoiding this altogether. >> Yet, you have the flexibility to run hotfixes (replace existing files in an >> existing version without creating a new one), or experimental versions >> (which won't update the "current" pointer). > Interesting. What I was trying to do with $realpath_root, I thought > was similar to what you're describing. However, when you mention > browser or proxy caching, then I'm not sure. Are you suggesting > serving from a different URI for each version of the app? If not, > then I don't understand how your proposal behaves differently than the > symlink+realpath idea. (But this may be because you wrote this on Aug > 30, and the symlink+realpath idea had not been clearly stated yet.) > >> I would try to keep the complexity low. > Agreed! However, changing a symlink (albeit with some nginx config > changes to use $realpath_root and such) is pretty simple to me, so it's > a little harder for me to see using a keyval or a static map as keeping > the complexity low. 
But if I understand your proposal correctly, it > would be more straightforward in terms of not needing to use symlinks at > all and not needing to worry about $realpath_root vs. $document_root. > Instead, you just use variables, and to update the variables, you just > use the API if using a keyval, or cause nginx to reload its config if > using the static map. > > Thank you for the suggestions! > > Regards, > > Lewis > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: juergen_wagner.vcf Type: text/x-vcard Size: 398 bytes Desc: not available URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 3573 bytes Desc: S/MIME Cryptographic Signature URL: From maxim at nginx.com Wed Sep 4 08:03:50 2019 From: maxim at nginx.com (Maxim Konovalov) Date: Wed, 4 Sep 2019 11:03:50 +0300 Subject: NGINX R19 Javascript bug with keyval maps In-Reply-To: <906c53c5463438400bb77c5f8f54b2c8.NginxMailingListEnglish@forum.nginx.org> References: <4eea61d10e76438468067f652c9cd0a7.NginxMailingListEnglish@forum.nginx.org> <906c53c5463438400bb77c5f8f54b2c8.NginxMailingListEnglish@forum.nginx.org> Message-ID: <47879906-0e3d-87cb-98a2-6d0ef441623d@nginx.com> Hello. On 04/09/2019 06:20, j94305 wrote: > A little correction to my earlier message: IPv6 addresses also seem to work. > In my test, I was checking for a dot in the key, and that excluded IPv6 > addresses. > > However, CIDR ranges still fail. > Please approach nginx-plus support with this issue. -- Maxim Konovalov From jlmuir at imca-cat.org Wed Sep 4 15:28:43 2019 From: jlmuir at imca-cat.org (J. 
Lewis Muir) Date: Wed, 4 Sep 2019 10:28:43 -0500 Subject: Allow internal redirect to URI x, but deny external request for x? In-Reply-To: <0fcb9403-80a0-9669-3f51-407c2ca6636e@devoteam.com> References: <20190830173317.klutzplfsurrpmrw@mink.imca.aps.anl.gov> <20190904032950.ftj3l6wvi6k3ghqf@mink.imca.aps.anl.gov> <0fcb9403-80a0-9669-3f51-407c2ca6636e@devoteam.com> Message-ID: <20190904152843.zowtpfcjy7ngkqxu@mink.imca.aps.anl.gov> On 09/04, J?rgen Wagner (DVT) wrote: > Now, you want to be able to say what is the "current" version and reflect > this in the URL namespace as well. In the file system, that's a symbolic > link. In the URL namespace of NGINX, that could be a redirection (status > code 307). Both approaches would work. For the redirection you need a > location Got it! Thank you! So this approach versions the URI. > /{app}/current > > which redirects any request for paths starting with this to the actual > version you want to serve: > > /{app}/releases/{latestVersion} > > This can be achieved with a dynamically-generated stub you include in a > "map" directive (requiring NGINX reload in case of changes) or a "keyval" > map that can be changed via the NGINX API on the fly as you need it (not > requiring reloads). The mapping will get the app name and determine the path > of the latest version where the redirection should go to. Got it. > The issue about browser and proxy caches: if over time you serve multiple > versions of an app from the same URLs, browsers (or proxies) may consider > their cached version of some files current enough not to feel motivated > refetching them. In some cases, you would end up with some files loaded into > the browser being of an old version, some already a newer one. This can be > avoided entirely by giving each version of the app a distinct canonical > prefix that will never be re-used. 
The "current" redirection is simply a > pointer to the right location for the latest version, but as it is an > external redirection, the browser will ultimately load the app from the > official "releases" path with the version number in it. Wouldn't the 307 redirection mean that for *every* request, nginx has to issue a 307 and then the client has to request the versioned URI which nginx then has to serve; so a double-request for every resource? I agree that this approach solves the browser and proxy cache problem, though. How does this solve the request-chain problem where "part1.php" executes in one version of the app, but then "part2.php" executes in the updated version because the updated version was deployed in between? I presume it doesn't, which is OK, but I want to make sure I understand. Do you know of any mainstream web apps that are deployed this way (i.e., 307 redirect to versioned URI)? Thank you! Lewis From juergen.wagner at devoteam.com Wed Sep 4 15:43:34 2019 From: juergen.wagner at devoteam.com (=?UTF-8?Q?J=c3=bcrgen_Wagner_=28DVT=29?=) Date: Wed, 4 Sep 2019 17:43:34 +0200 Subject: Allow internal redirect to URI x, but deny external request for x? In-Reply-To: <20190904152843.zowtpfcjy7ngkqxu@mink.imca.aps.anl.gov> References: <20190830173317.klutzplfsurrpmrw@mink.imca.aps.anl.gov> <20190904032950.ftj3l6wvi6k3ghqf@mink.imca.aps.anl.gov> <0fcb9403-80a0-9669-3f51-407c2ca6636e@devoteam.com> <20190904152843.zowtpfcjy7ngkqxu@mink.imca.aps.anl.gov> Message-ID: <924c6a54-10e4-4abf-ba6f-f301cce16db5@devoteam.com> Hi Lewis, — no, that won't cause double requests. /myapp/current/blah.html 307 => /myapp/releases/1.2.0/blah.html and from thereon (as we did not redirect internally, but rather externally), any further accesses will happen under the true "releases" path (ideally, as relative URLs). That's only one redirection overhead in the beginning. 
The redirection will forward any path under "current", i.e., /myapp/current/index.html => /myapp/releases/1.2.0/index.html /myapp/current/images/icon.jpg => /myapp/releases/1.2.0/images/icon.jpg and so on. Only the first request will be a redirection. All subsequent requests would use the true path. We use this approach with a number of applications, e.g., multiple Jenkins or Gitlab installations behind one NGINX, but also with front-end components being deployed with a CI/CD pipeline in Amazon S3, that also switches the "current" link to the then respectively latest version of the artifact. The good thing is: if a user has loaded version 1.2.0, all links into the releases/1.2.0 path will continue to work, even if you upload a new version 1.2.1 and make that the "current" version. Any URLs with the "current" part in it will not be used as a reference except in the initial process of accessing the latest version of an app. From there, everything will always and only use the canonical form in the "private" releases path. That's the nature of a redirection. This is the effect you get by having the HTTP equivalent of a symbolic link in the NGINX (visible to the browser), not in the file system (which is opaque to users). The file system link will (over time) serve different contents under the same URL, so in fact, addressing changes with every deployment. The suggested approach keeps URL addressing constant and just changes the entry pointer on a new deployment. I agree that this is not the solution that first comes to ones mind, but it does solve a number of nasty versioning issues we have run into over time. Your mileage may vary :-) Good luck! --J?rgen On 04.09.2019 17:28, J. Lewis Muir wrote: > Wouldn't the 307 redirection mean that for*every* request, nginx has to > issue a 307 and then the client has to request the versioned URI which > nginx then has to server; so a double-request for every resource? 
-------------- next part -------------- A non-text attachment was scrubbed... Name: juergen_wagner.vcf Type: text/x-vcard Size: 398 bytes Desc: not available URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 3573 bytes Desc: S/MIME Cryptographic Signature URL: From jlmuir at imca-cat.org Wed Sep 4 16:30:52 2019 From: jlmuir at imca-cat.org (J. Lewis Muir) Date: Wed, 4 Sep 2019 11:30:52 -0500 Subject: Allow internal redirect to URI x, but deny external request for x? In-Reply-To: <924c6a54-10e4-4abf-ba6f-f301cce16db5@devoteam.com> References: <20190830173317.klutzplfsurrpmrw@mink.imca.aps.anl.gov> <20190904032950.ftj3l6wvi6k3ghqf@mink.imca.aps.anl.gov> <0fcb9403-80a0-9669-3f51-407c2ca6636e@devoteam.com> <20190904152843.zowtpfcjy7ngkqxu@mink.imca.aps.anl.gov> <924c6a54-10e4-4abf-ba6f-f301cce16db5@devoteam.com> Message-ID: <20190904163052.aozitahsa5k6iinu@mink.imca.aps.anl.gov> On 09/04, J?rgen Wagner (DVT) wrote: > This is the effect you get by having the HTTP equivalent of a symbolic link > in the NGINX (visible to the browser), not in the file system (which is > opaque to users). The file system link will (over time) serve different > contents under the same URL, so in fact, addressing changes with every > deployment. The suggested approach keeps URL addressing constant and just > changes the entry pointer on a new deployment. > > I agree that this is not the solution that first comes to ones mind, but it > does solve a number of nasty versioning issues we have run into over time. > Your mileage may vary :-) Thank you for the further explanation! Indeed it seems like a compelling solution! What about web search engine indexing; do you do anything to avoid search engines indexing the versioned URLs? I suppose that if you only publish the unversioned entry-point URLs, search engines will respect that? (Maybe wishful thinking.) 
Or will they follow a 307 redirect and index those URLs? For example, it would seem undesirable to do a web search for "my-app" and get a list of, say, the "index.php" for each version (e.g., "/my-app/releases/1.0.0/index.php", "/my-app/releases/1.0.1/index.php", "/my-app/releases/1.0.2/index.php", etc.). So, perhaps you use a "/robots.txt" to exclude "/my-app/releases/"? DuckDuckGo seems to respect "/robots.txt" for controlling what gets indexed https://help.duckduckgo.com/duckduckgo-help-pages/results/duckduckbot/ But Google says "/robots.txt" is not for keeping a web page out of their index https://support.google.com/webmasters/answer/6062608 and that you should use a "noindex" directive instead. So maybe you use both a "/robots.txt" and the robots meta tag with content="noindex" in the served resources or perhaps "X-Robots-Tag: noindex" in the HTTP header response? Regards, Lewis From postmaster at palvelin.fi Wed Sep 4 21:07:14 2019 From: postmaster at palvelin.fi (Palvelin Postmaster) Date: Wed, 4 Sep 2019 14:07:14 -0700 Subject: How to redirect to https when using load balancer in front of nginx Message-ID: <6C2D6B8A-DA68-478C-8910-D9FC9EAB0D0D@palvelin.fi> I have AWS ALB in front of an instance running nginx. I want to terminate https at the load balancer. I have setup ALB's http listener to redirect http to https and forward https to the instance?s port 80. I?m switching from using apache2 to nginx. My apache responds on a single port 80. In my apache config these directives are used to redirect traffic. RewriteCond %{HTTPS} off RewriteCond %{HTTP:X-Forwarded-Proto} !https RewriteRule ^(.*)$ https://%{HTTP_HOST}%{REQUEST_URI} [L,R=302] SetEnv HTTPS "on" SetEnv HTTP_X_FORWARDED_PROTO ?https? As simple as it may be, I can?t figure out how to match this setup with nginx. With the following simple config most requests work but apparently assets in some of my pages have the scheme hardcoded and they don?t get rewritten. 
server { listen 80; set_real_ip_from 172.31.0.0/16; real_ip_header X-Forwarded-For; server_name ?my.server.com"; root /var/www/; access_log /var/log/nginx/access-.log main_ext; error_log /var/log/nginx/error.log notice; } -- Hans Vallden VP of Business Development | Vincit California (949) 241-1151 vincit.com | vincitdevtalks.com proud member of codedinoc.com From postmaster at palvelin.fi Wed Sep 4 21:14:43 2019 From: postmaster at palvelin.fi (Palvelin Postmaster) Date: Wed, 4 Sep 2019 14:14:43 -0700 Subject: How to redirect to https when using load balancer in front of nginx Message-ID: <60ECE3F3-69A0-4B13-BC1E-CB29AEFB9DDD@palvelin.fi> I have AWS ALB in front of an instance running nginx. I want to terminate https at the load balancer. I have setup ALB's http listener to redirect http to https and forward https to the instance?s port 80. I?m switching from using apache to nginx. My apache currently responds on a single port 80. In my apache config these directives are used to redirect traffic. RewriteCond %{HTTPS} off RewriteCond %{HTTP:X-Forwarded-Proto} !https RewriteRule ^(.*)$ https://%{HTTP_HOST}%{REQUEST_URI} [L,R=302] SetEnv HTTPS "on" SetEnv HTTP_X_FORWARDED_PROTO ?https? As simple as it may be, I can?t figure out how to match this setup with nginx. With the following simple config most requests work but apparently assets in some of my pages have the scheme hardcoded and they don?t get rewritten. 
server { listen 80; set_real_ip_from 172.31.0.0/16; real_ip_header X-Forwarded-For; server_name ?my.server.com"; root /var/www/; access_log /var/log/nginx/access-.log main_ext; error_log /var/log/nginx/error.log notice; } -- Palvelin.fi Hostmaster postmaster at palvelin.fi From postmaster at palvelin.fi Wed Sep 4 22:59:41 2019 From: postmaster at palvelin.fi (Palvelin Postmaster) Date: Wed, 4 Sep 2019 15:59:41 -0700 Subject: Errors suggesting nginx isn't started as root In-Reply-To: <186455F3-05A5-4F82-94FB-C852E9582606@palvelin.fi> References: <186455F3-05A5-4F82-94FB-C852E9582606@palvelin.fi> Message-ID: <414BD8FA-A9EA-4F32-B6D5-8790C8CEC6BF@palvelin.fi> This is still a big mystery to me. Upgrading to nginx 1.16.1 didn?t help. As far as I can understand, the nginx master process IS running with root privileges. > On 19 Sep 2018, at 2.00, Palvelin Postmaster via nginx wrote: > > Why am I getting these log warn/emerg? Running Nginx 1.14.0 on Ubuntu 18.04. > > root at k2:~# whoami > root > > root at k2:~# service nginx restart > > root at k2:~# tail /var/log/nginx/error.log > 2018/09/19 11:38:47 [warn] 22399#22399: the "user" directive makes sense only if the master process runs with super-user privileges, ignored in /etc/nginx/nginx.conf:21 > 2018/09/19 11:38:47 [emerg] 22399#22399: SSL_CTX_use_PrivateKey_file("/etc/ssl/private/nginx-selfsigned.key") failed (SSL: error:0200100D:system library:fopen:Permission denied:fopen('/etc/ssl/private/nginx-selfsigned.key','r') error:20074002:BIO routines:file_ctrl:system lib error:140B0002:SSL routines:SSL_CTX_use_PrivateKey_file:system lib) > > root at k2:~# ls -lh /etc/ssl/private/ |grep nginx > -rw-r----- 1 root ssl-cert 1.7K Jul 8 17:12 nginx-selfsigned.key > > root at k2:~# cat /etc/nginx/nginx.conf |grep ^user > user www-data; > > root at k2:~# ps -auxw |grep nginx > root 22317 0.0 0.2 359680 9300 ? 
Ss 11:38 0:00 nginx: master process /usr/sbin/nginx -g daemon on; master_process on; > www-data 22322 0.0 0.3 361980 15356 ? S 11:38 0:00 nginx: worker process > www-data 22323 0.2 0.4 362244 18984 ? S 11:38 0:00 nginx: worker process > www-data 22326 0.0 0.3 361980 14760 ? S 11:38 0:00 nginx: cache manager process > www-data 22327 0.0 0.3 361980 14760 ? S 11:38 0:00 nginx: cache loader process From nginx-forum at forum.nginx.org Thu Sep 5 01:32:41 2019 From: nginx-forum at forum.nginx.org (j94305) Date: Wed, 04 Sep 2019 21:32:41 -0400 Subject: How to redirect to https when using load balancer in front of nginx In-Reply-To: <60ECE3F3-69A0-4B13-BC1E-CB29AEFB9DDD@palvelin.fi> References: <60ECE3F3-69A0-4B13-BC1E-CB29AEFB9DDD@palvelin.fi> Message-ID: <667b8c251854518b4e5a349276fa7b46.NginxMailingListEnglish@forum.nginx.org> In order to redirect http to https, you have to define a listener rule in the ALB that redirects all traffic on port 80 to port 443 (of the ALB) with the original path and query parameters. The status code should be a 301 (permanent redirection). That's the context between the client and the ALB. The certificate for the domain(s) will be installed in the ALB. The target group for the ALB would contain the http target (or be filled in by an auto-scaling group's members in case you have multiple targets from an auto-scaling group). Your application server would only see http requests coming from the ALB. You won't get any https requests. The important point about rewriting http requests (from the client perspective) to https requests (client perspective again) is to define that rule in the ALB, and make that rule redirect requests for all paths. On the other hand, why do you need the ALB if you have an NGINX in there, anyway? I would rather settle for a simple NLB and handle http/https redirections etc. in the NGINX itself. Cheers, --j. 
Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285559,285561#msg-285561 From nginx-forum at forum.nginx.org Thu Sep 5 02:48:59 2019 From: nginx-forum at forum.nginx.org (oah433) Date: Wed, 04 Sep 2019 22:48:59 -0400 Subject: What is the difference between nginx against mp4 modules Message-ID: <42feaabc445080ba2dc813e7da9bbafa.NginxMailingListEnglish@forum.nginx.org> Hi What is the difference between the mp4 module and the slice module for streaming mp4 videos in nginx? Both seem to work on streaming mp4 files but I can't really see the difference. On another aspect, the slice module seems to work nicely with the caching. Where slices from cache module are stored and served to the users. Can the same be done to the mp4 module i.e., to have responses from the mp4 module cached ? Attached is a code snippet for caching the output from the slice module. location / { slice 1m; proxy_cache cache; proxy_cache_key $uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_cache_valid 200 206 1h; proxy_pass http://localhost:8000; } thx Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285562,285562#msg-285562 From nginx-forum at forum.nginx.org Thu Sep 5 10:35:06 2019 From: nginx-forum at forum.nginx.org (drookie) Date: Thu, 05 Sep 2019 06:35:06 -0400 Subject: still seeing 413 error with client_max_body_size 0 Message-ID: Hello, I was getting a bunch of 413 statuses in the access log along with getting explicit error messages about client (logstash in my case, seems like it was trying to send bodies around 100 megabytes) trying to post body larger than the client_max_body_size. 
After I raised this setting to 128m, I stopped receiving messages in the error log, but not the access log 413 statuses: 10.3.51.214 - - [05/Sep/2019:15:21:27 +0500] elasticsearch.dev.alamics.ru "POST /_bulk HTTP/1.1" 413 0 "-" "Java/1.8.0_212" "-" "-" 82.609 192.168.57.23:9200 413 - 10.3.51.214 - - [05/Sep/2019:15:23:00 +0500] elasticsearch.dev.alamics.ru "POST /_bulk HTTP/1.1" 413 0 "-" "Java/1.8.0_212" "-" "-" 91.931 192.168.57.23:9200 413 - 10.3.51.214 - - [05/Sep/2019:15:24:24 +0500] elasticsearch.dev.alamics.ru "POST /_bulk HTTP/1.1" 413 0 "-" "Java/1.8.0_212" "-" "-" 83.679 192.168.57.23:9200 413 - 10.3.51.214 - - [05/Sep/2019:15:25:35 +0500] elasticsearch.dev.alamics.ru "POST /_bulk HTTP/1.1" 413 0 "-" "Java/1.8.0_212" "-" "-" 69.195 192.168.57.23:9200 413 - 10.3.51.214 - - [05/Sep/2019:15:27:01 +0500] elasticsearch.dev.alamics.ru "POST /_bulk HTTP/1.1" 413 0 "-" "Java/1.8.0_212" "-" "-" 85.953 192.168.57.23:9200 413 - I've even tried to set the client_max_body_size to 0, but I'm still getting these 413 like once per minute. As you can see, the request times are about 1.5 minutes, so it's not the case when I'm still seeing past failing requests for old setting. I'm pretty much stuck at this point. nginx/1.16.0 on FreeBSD 12-STABLE amd64 from ports. Thanks. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285564,285564#msg-285564 From nginx-forum at forum.nginx.org Thu Sep 5 10:40:55 2019 From: nginx-forum at forum.nginx.org (drookie) Date: Thu, 05 Sep 2019 06:40:55 -0400 Subject: still seeing 413 error with client_max_body_size 0 In-Reply-To: References: Message-ID: <8475750640aecf7004425685ed9097a3.NginxMailingListEnglish@forum.nginx.org> Oh, sorry. It's clear that the upstream is sending 413 errors, not the nginx himself. Should read the log more carefully. Sorry again. 
Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285564,285565#msg-285565 From nginx-forum at forum.nginx.org Thu Sep 5 13:03:24 2019 From: nginx-forum at forum.nginx.org (yunteam) Date: Thu, 05 Sep 2019 09:03:24 -0400 Subject: Feature Request proxy_cache_min_size and proxy_cache_max_size In-Reply-To: <4437674df22a0f98907e574aaa6a8c6b.NginxMailingListEnglish@forum.nginx.org> References: <4437674df22a0f98907e574aaa6a8c6b.NginxMailingListEnglish@forum.nginx.org> Message-ID: <8b0916a1a091259beffadd24691eb14d.NginxMailingListEnglish@forum.nginx.org> I also encountered the same problem, but after 4 years, this feature has not been added to the latest version. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,258604,285566#msg-285566 From nginx-forum at forum.nginx.org Thu Sep 5 16:53:06 2019 From: nginx-forum at forum.nginx.org (raimonbosch) Date: Thu, 05 Sep 2019 12:53:06 -0400 Subject: Some POST requests give 400 Bad Request when word 'select' is in the body Message-ID: <6c1a83a4de1c95e51831a1c4473a9358.NginxMailingListEnglish@forum.nginx.org> Hi all, we have seen that if you add the word 'SELECT' in some requests sometimes you get a 400 Bad Request. It is a use case where we save contents via POST to edit a item of our catalog. I suspect that nginx may interpret this request as a some kind of SQL injection. Do you know if it is a known bug? nginx version: nginx/1.15.4 Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285567,285567#msg-285567 From mdounin at mdounin.ru Thu Sep 5 18:52:31 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 5 Sep 2019 21:52:31 +0300 Subject: Errors suggesting nginx isn't started as root In-Reply-To: <414BD8FA-A9EA-4F32-B6D5-8790C8CEC6BF@palvelin.fi> References: <186455F3-05A5-4F82-94FB-C852E9582606@palvelin.fi> <414BD8FA-A9EA-4F32-B6D5-8790C8CEC6BF@palvelin.fi> Message-ID: <20190905185231.GJ1877@mdounin.ru> Hello! 
On Wed, Sep 04, 2019 at 03:59:41PM -0700, Palvelin Postmaster wrote: > This is still a big mystery to me. Upgrading to nginx 1.16.1 didn?t help. > > As far as I can understand, the nginx master process IS running with root privileges. The error is from process 22399, and no information available to find out the user started it. The errors suggest it wasn't root. > > 2018/09/19 11:38:47 [warn] 22399#22399: the "user" directive makes sense only if the master process runs with super-user privileges, ignored in /etc/nginx/nginx.conf:21 > > 2018/09/19 11:38:47 [emerg] 22399#22399: SSL_CTX_use_PrivateKey_file("/etc/ssl/private/nginx-selfsigned.key") failed (SSL: error:0200100D:system library:fopen:Permission denied:fopen('/etc/ssl/private/nginx-selfsigned.key','r') error:20074002:BIO routines:file_ctrl:system lib error:140B0002:SSL routines:SSL_CTX_use_PrivateKey_file:system lib) [...] > > root at k2:~# ps -auxw |grep nginx > > root 22317 0.0 0.2 359680 9300 ? Ss 11:38 0:00 nginx: master process /usr/sbin/nginx -g daemon on; master_process on; > > www-data 22322 0.0 0.3 361980 15356 ? S 11:38 0:00 nginx: worker process > > www-data 22323 0.2 0.4 362244 18984 ? S 11:38 0:00 nginx: worker process > > www-data 22326 0.0 0.3 361980 14760 ? S 11:38 0:00 nginx: cache manager process > > www-data 22327 0.0 0.3 361980 14760 ? S 11:38 0:00 nginx: cache loader process -- Maxim Dounin http://mdounin.ru/ From postmaster at palvelin.fi Thu Sep 5 19:45:11 2019 From: postmaster at palvelin.fi (Palvelin Postmaster) Date: Thu, 5 Sep 2019 12:45:11 -0700 Subject: Errors suggesting nginx isn't started as root In-Reply-To: <20190905185231.GJ1877@mdounin.ru> References: <186455F3-05A5-4F82-94FB-C852E9582606@palvelin.fi> <414BD8FA-A9EA-4F32-B6D5-8790C8CEC6BF@palvelin.fi> <20190905185231.GJ1877@mdounin.ru> Message-ID: <6BDE34BA-2B87-486D-A27D-8AA3BCF79DFE@palvelin.fi> Great catch, I hadn?t noticed that. Thanks Maxim! Now I need to figure out what that process is. 
The log suggests it gets started when I launch the service but doesn?t keep running. root at k2:/var/www# service nginx restart root at k2:/var/www# tail /var/log/nginx/error.log 2019/09/05 22:40:34 [warn] 10871#10871: the "user" directive makes sense only if the master process runs with super-user privileges, ignored in /etc/nginx/nginx.conf:21 2019/09/05 22:40:34 [emerg] 10871#10871: cannot load certificate key "/etc/ssl/private/nginx-selfsigned.key": BIO_new_file() failed (SSL: error:0200100D:system library:fopen:Permission denied:fopen('/etc/ssl/private/nginx-selfsigned.key','r') error:2006D002:BIO routines:BIO_new_file:system lib) root at k2:/var/www# ps -auxw |grep nginx root 10789 0.0 0.0 387164 4352 ? Ss 22:40 0:00 nginx: master process /usr/sbin/nginx -g daemon on; master_process on; www-data 10793 0.0 0.1 389708 12808 ? S 22:40 0:00 nginx: worker process www-data 10794 0.0 0.1 389464 9320 ? S 22:40 0:00 nginx: worker process www-data 10795 0.0 0.1 389464 9508 ? S 22:40 0:00 nginx: cache manager process www-data 10799 0.0 0.1 389464 9508 ? S 22:40 0:00 nginx: cache loader process root 10885 0.0 0.0 14660 1084 pts/0 R+ 22:40 0:00 grep --color=auto nginx > On 5 Sep 2019, at 11.52, Maxim Dounin wrote: > > Hello! > > On Wed, Sep 04, 2019 at 03:59:41PM -0700, Palvelin Postmaster wrote: > >> This is still a big mystery to me. Upgrading to nginx 1.16.1 didn?t help. >> >> As far as I can understand, the nginx master process IS running with root privileges. > > The error is from process 22399, and no information available to > find out the user started it. The errors suggest it wasn't root. 
> >>> 2018/09/19 11:38:47 [warn] 22399#22399: the "user" directive makes sense only if the master process runs with super-user privileges, ignored in /etc/nginx/nginx.conf:21 >>> 2018/09/19 11:38:47 [emerg] 22399#22399: SSL_CTX_use_PrivateKey_file("/etc/ssl/private/nginx-selfsigned.key") failed (SSL: error:0200100D:system library:fopen:Permission denied:fopen('/etc/ssl/private/nginx-selfsigned.key','r') error:20074002:BIO routines:file_ctrl:system lib error:140B0002:SSL routines:SSL_CTX_use_PrivateKey_file:system lib) > > [...] > >>> root at k2:~# ps -auxw |grep nginx >>> root 22317 0.0 0.2 359680 9300 ? Ss 11:38 0:00 nginx: master process /usr/sbin/nginx -g daemon on; master_process on; >>> www-data 22322 0.0 0.3 361980 15356 ? S 11:38 0:00 nginx: worker process >>> www-data 22323 0.2 0.4 362244 18984 ? S 11:38 0:00 nginx: worker process >>> www-data 22326 0.0 0.3 361980 14760 ? S 11:38 0:00 nginx: cache manager process >>> www-data 22327 0.0 0.3 361980 14760 ? S 11:38 0:00 nginx: cache loader process From postmaster at palvelin.fi Thu Sep 5 19:55:38 2019 From: postmaster at palvelin.fi (Palvelin Postmaster) Date: Thu, 5 Sep 2019 12:55:38 -0700 Subject: How to redirect to https when using load balancer in front of nginx In-Reply-To: <667b8c251854518b4e5a349276fa7b46.NginxMailingListEnglish@forum.nginx.org> References: <60ECE3F3-69A0-4B13-BC1E-CB29AEFB9DDD@palvelin.fi> <667b8c251854518b4e5a349276fa7b46.NginxMailingListEnglish@forum.nginx.org> Message-ID: > On 4 Sep 2019, at 18.32, j94305 wrote: > > In order to redirect http to https, you have to define a listener rule in > the ALB that redirects all traffic on port 80 to port 443 (of the ALB) with > the original path and query parameters. The status code should be a 301 > (permanent redirection). That's the context between the client and the ALB. > > The certificate for the domain(s) will be installed in the ALB. 
> > The target group for the ALB would contain the http target (or be filled in > by an auto-scaling group's members in case you have multiple targets from an > auto-scaling group). Your application server would only see http requests > coming from the ALB. You won't get any https requests. > > The important point about rewriting http requests (from the client > perspective) to https requests (client perspective again) is to define that > rule in the ALB, and make that rule redirect requests for all paths. Yes, I believe this I have setup in ALB, as I tried to explain (poorly, maybe): alb-my-server-com | HTTP:80 (1 rule) Redirect to: https://#{host}:443/#{path}?#{query} Status code: HTTP_301 The issue I outlined happens with nginx regardless. From nginx-forum at forum.nginx.org Fri Sep 6 05:06:25 2019 From: nginx-forum at forum.nginx.org (rambabuy) Date: Fri, 06 Sep 2019 01:06:25 -0400 Subject: Keepalive disable vs enable Issues Message-ID: Hi, To avoid some Issues I need to create new connections for POST (upload file) requests. But, If I disable keepalive under upstream I am getting the following error " 99: Cannot assign requested address) while connecting to upstream, " Is there a possibility to use idle connections for GET requests and new connection for POSTS requests? or is there a way to control TCP new connections or to control TIME_WAIT connections. 
TIME_WAIT connections causes "99: Cannot assign requested address) while connecting to upstream, " error Thanks Rambabu Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285572,285572#msg-285572 From nginx-forum at forum.nginx.org Fri Sep 6 10:13:19 2019 From: nginx-forum at forum.nginx.org (raimonbosch) Date: Fri, 06 Sep 2019 06:13:19 -0400 Subject: Some POST requests give 400 Bad Request when word 'select' is in the body In-Reply-To: <6c1a83a4de1c95e51831a1c4473a9358.NginxMailingListEnglish@forum.nginx.org> References: <6c1a83a4de1c95e51831a1c4473a9358.NginxMailingListEnglish@forum.nginx.org> Message-ID: Seems a bad config in our system. They are printing as if it were a native nginx error when it comes from another point of the application. Nevermind. Thanks anyway, Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285567,285573#msg-285573 From postmaster at palvelin.fi Fri Sep 6 19:12:00 2019 From: postmaster at palvelin.fi (Palvelin Postmaster) Date: Fri, 6 Sep 2019 12:12:00 -0700 Subject: How to redirect to https when using load balancer in front of nginx In-Reply-To: <667b8c251854518b4e5a349276fa7b46.NginxMailingListEnglish@forum.nginx.org> References: <60ECE3F3-69A0-4B13-BC1E-CB29AEFB9DDD@palvelin.fi> <667b8c251854518b4e5a349276fa7b46.NginxMailingListEnglish@forum.nginx.org> Message-ID: <762484A9-B797-40D2-95A5-AB78DF0FE637@palvelin.fi> > On 4 Sep 2019, at 18.32, j94305 wrote: > > On the other hand, why do you need the ALB if you have an NGINX in there, > anyway? I would rather settle for a simple NLB and handle http/https > redirections etc. in the NGINX itself. Is it possible to terminate SSL at NLB with multiple certificates for different domains in a single instance? From violaocosmico at gmail.com Sat Sep 7 13:28:46 2019 From: violaocosmico at gmail.com (Anand Arun) Date: Sat, 7 Sep 2019 10:28:46 -0300 Subject: Gzip not compressing Message-ID: Hi, guys I have an issue in nginx ubuntu 16.04 in digital ocean. 
It seems i configured everything fine in my nginx.conf, but still the compression does not happen. Here is the situation posted in DO community. https://www.digitalocean.com/community/questions/enabling-gzip-compression-guidance-needed Thanks for any help. Arun www.arun.com.br (61) 99636 1290 - WhatsApp Instagram: arunviolaocosmico -------------- next part -------------- An HTML attachment was scrubbed... URL: From r at roze.lv Sat Sep 7 14:09:47 2019 From: r at roze.lv (Reinis Rozitis) Date: Sat, 7 Sep 2019 17:09:47 +0300 Subject: Gzip not compressing In-Reply-To: References: Message-ID: <000a01d56585$e80c17a0$b82446e0$@roze.lv> > Here is the situation posted in DO community. > https://www.digitalocean.com/community/questions/enabling-gzip-compression-guidance-needed > Thanks for any help. Well you are testing in a wrong way. First of all: curl -H "Accept-Encoding: gzip" -I http://localhost/test.jpg HTTP/1.1 301 Moved Permanently Server: nginx/1.10.3 (Ubuntu) Date: Wed, 04 Sep 2019 18:12:38 GMT Content-Type: text/html Content-Length: 194 Location: https://radha.org.br/test.jpg You see that the request is actually redirect (301) and curl doesn't follow it (it's the same with your second attempt). Also even the response wasn't a redirect it's less than the configured minimal object size for compression (there is generally no sense compressing small objects as you waste more cpu than save traffic. I would suggest increasing it to at least 1Kb): gzip_min_length 256; At last while the content type of particular request is text/html if there was an actual jpg image returned then because of the: gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontob$fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon; .. directive nginx wouldn't compress the file (as there is no 'image/jpeg' included). 
Besides it doesn't make sense to gzip jpeg as the image format is already compressed. Hope that helps. rr From nginx-forum at forum.nginx.org Sat Sep 7 15:53:13 2019 From: nginx-forum at forum.nginx.org (arunkiji) Date: Sat, 07 Sep 2019 11:53:13 -0400 Subject: Gzip not compressing In-Reply-To: <000a01d56585$e80c17a0$b82446e0$@roze.lv> References: <000a01d56585$e80c17a0$b82446e0$@roze.lv> Message-ID: <96702c3f42a8b63d4963dafc3c0744ef.NginxMailingListEnglish@forum.nginx.org> Hi, Reinis I am trying to reduce transfer size in my website. Although i apparently have enabled Gzip compression, i does not show as enabled in GTmetrix testing. Would you, please, be so patient to give me precise instructions on how to set my Gzip compression properly, writing the code i am to paste in the config page or any other step i need to proceed to get this done? I thank you so much for your generosity and attention. Arun arun.com.br Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285576,285578#msg-285578 From r at roze.lv Sat Sep 7 16:57:02 2019 From: r at roze.lv (Reinis Rozitis) Date: Sat, 7 Sep 2019 19:57:02 +0300 Subject: Gzip not compressing In-Reply-To: <96702c3f42a8b63d4963dafc3c0744ef.NginxMailingListEnglish@forum.nginx.org> References: <000a01d56585$e80c17a0$b82446e0$@roze.lv> <96702c3f42a8b63d4963dafc3c0744ef.NginxMailingListEnglish@forum.nginx.org> Message-ID: <001001d5659d$45a9bb80$d0fd3280$@roze.lv> > I am trying to reduce transfer size in my website. Although i apparently have > enabled Gzip compression, i does not show as enabled in GTmetrix testing. For testing purposes just putting: gzip on; gzip_types text/html text/plain text/xml text/css application/javascript application/json; in http {} block (and reloading nginx) should be enough to enable gzip (obviously add whatever other types you need like svg / fonts). Other directives can be specified later for finetuning. > i does not show as enabled in GTmetrix testing. 
You should check which resources the testing tool expects to be compressed (maybe all of those come from external sites?). Also not all of the recommendations are (very) important. For example the testing tool for one of my site complains - manifest.json could save 92B (32% reduction). While the 32% percentage looks reasonable the actual gain of 92 bytes doesn't make much sense to compress the file on server side / decompress on client side. rr From nginx-forum at forum.nginx.org Sat Sep 7 20:20:33 2019 From: nginx-forum at forum.nginx.org (arunkiji) Date: Sat, 07 Sep 2019 16:20:33 -0400 Subject: Gzip not compressing In-Reply-To: <001001d5659d$45a9bb80$d0fd3280$@roze.lv> References: <001001d5659d$45a9bb80$d0fd3280$@roze.lv> Message-ID: Hi, RR Thank you so much for giving attention. My conf page gzip section looks like this " # Gzip Settings ## gzip on; gzip_disable "msie6"; gzip_vary on; gzip_proxied any; gzip_comp_level 6; gzip_buffers 16 8k; gzip_http_version 1.1; gzip_min_length 256; gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontob$fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon; " When i add your suggestion: " gzip on; gzip_types text/html text/plain text/xml text/css application/javascript application/json; " Should I delete any other line? Then you also mention: " in http {} block (and reloading nginx) should be enough to enable gzip (obviously add whatever other types you need like svg / fonts). " I am not sure what to do with this " http {} block. 
" I know that after making changes i should always give " sudo nginx -t sudo systemctl restart nginx " Thank you once again :) Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285576,285580#msg-285580 From nginx-forum at forum.nginx.org Mon Sep 9 08:17:17 2019 From: nginx-forum at forum.nginx.org (proxyuser) Date: Mon, 09 Sep 2019 04:17:17 -0400 Subject: Problems with PROXY_PROTOCOL in ngx_stream Message-ID: <0fdbda5607cab0d9de6f3f131f1cf33e.NginxMailingListEnglish@forum.nginx.org> I am using ngx_stream with ssl_preread_module. The $ssl_preread_server_name variable is used in the proxy_pass directive. Nginx is behind NLB in AWS and receives v2 of the protocol with Amazon's own extension - PP2_TYPE_AWS - https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html#proxy-protocol The problem I am running into is the fact nginx does not pass the original proxy_protocol header so the upstreams can not see the PP2_TYPE_AWS field from the original header. Any advice? Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285581,285581#msg-285581 From yichun at openresty.com Mon Sep 9 18:06:08 2019 From: yichun at openresty.com (Yichun Zhang) Date: Tue, 10 Sep 2019 02:06:08 +0800 Subject: [ANN] OpenResty 1.15.8.2 released Message-ID: Hi folks! OpenResty 1.15.8.2 is a patch release addressing security vulnerabilities in the HTTP/2 protocol which may cause excessive memory consumption and CPU usage (CVE-2019-9511, CVE-2019-9513, CVE-2019-9516). All previous NGINX cores supporting HTTP/2 are affected by this issue (1.9.5 to 1.16.1). If you are serving HTTP/2 traffic with *any* previous OpenResty release, upgrade to 1.15.8.2 or disable HTTP/2. 
Starting from this version, we provide more official binary Yum/Apt repositories for Red Hat Enterprise Linux (RHEL) 8 x86_64, OpenSUSE Leap 15.1 x86_64, Debian 10 amd64, Fedora 30 x86_64, Amazon Linux 2 x86_64, and CentOS 7 aarch64 (arm64): https://openresty.org/en/linux-packages.html We will keep adding more official binary package repositories for more Linux distributions in the future. However, we have discontinued the maintenance of the official Apt repositories for i386 Ubuntu systems due to the lack of interest from the community. We also upgrade the PCRE and OpenSSL in our official Win32 and Win64 binary packages to their latest versions, 8.43 and 1.1.0k, respectively. Download this version here: https://openresty.org/en/download.html The (portable) source code distribution, the Win32/Win64 binary distributions, and the pre-built binary Linux packages for Ubuntu, Debian, Fedora, CentOS, RHEL, OpenSUSE, Amazon Linux are provided on this Download page. This is the second OpenResty release based on the nginx 1.15.8 core. Acknowledgments We wish to thank the Netflix and Google security teams for their efforts in discovering these vulnerabilities, as well as the NGINX team for promptly patching them. Thanks Thibault Charbonnier for helping this release. Version highlights * bugfix: applied the nginx core patch for new HTTP/2 security advisories (CVE-2019-9511 CVE-2019-9513 CVE-2019-9516). Full Changelog Complete change logs since the last (formal) release, 1.15.8.1, can be browsed in the page Change Log for 1.15.8.x: https://openresty.org/en/changelog-1015008.html Testing We have run extensive testing on our Amazon EC2 test cluster and ensured that all the components (including the Nginx core) play well together. 
The latest test report can always be found here: https://qa.openresty.org/ We also always run our OpenResty Edge commercial software based on the latest open source version of OpenResty in our own global CDN network (dubbed "mini CDN") powering our openresty.org and openresty.com websites. See https://openresty.com/ for more details. Feedback Feedback on this release is more than welcome. Feel free to create new [GitHub issues](https://github.com/openresty/openresty/issues) or send emails to one of our mailing lists. The Next Release The next release will be based on a very recent nginx 1.17.x core and is already near the corner. We have been working hard on this next release for several months now. Stay tuned! Thanks! Best regards, Yichun From nginx-forum at forum.nginx.org Tue Sep 10 09:12:48 2019 From: nginx-forum at forum.nginx.org (everhardt) Date: Tue, 10 Sep 2019 05:12:48 -0400 Subject: How is the progress to support DTLS In-Reply-To: <7cb11a7f-6639-9725-4929-b3c40da26fdf@nginx.com> References: <7cb11a7f-6639-9725-4929-b3c40da26fdf@nginx.com> Message-ID: I have been using it for more than a year now for more than 500 IoT devices with a cellular connection that connect on average about 4 times per day. My experience has been very positive: easy to set up and no issues at all (both for the 1.13.0 and the 1.13.9 patch). As NGINX is at 1.17 already, I'd like to update as well. Are there any plans to either release a new patch, or preferably, integrate this into the main product? 
Posted at Nginx Forum: https://forum.nginx.org/read.php?2,278434,285590#msg-285590 From nginx-forum at forum.nginx.org Tue Sep 10 18:43:59 2019 From: nginx-forum at forum.nginx.org (j94305) Date: Tue, 10 Sep 2019 14:43:59 -0400 Subject: NGINX R19 Javascript bug with keyval maps In-Reply-To: <47879906-0e3d-87cb-98a2-6d0ef441623d@nginx.com> References: <47879906-0e3d-87cb-98a2-6d0ef441623d@nginx.com> Message-ID: <0a47066cc3ab24131cecbb86965a8c33.NginxMailingListEnglish@forum.nginx.org> I did. They said it works as designed as keyval maps with type=ip have no option to retrieve the status of entries other than by supplying IP addresses. Values cannot be retrieved anymore if the key needs to be a CIDR block. I am doing a workaround now. --j. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285542,285598#msg-285598 From nginx-forum at forum.nginx.org Tue Sep 10 18:46:54 2019 From: nginx-forum at forum.nginx.org (j94305) Date: Tue, 10 Sep 2019 14:46:54 -0400 Subject: Allow internal redirect to URI x, but deny external request for x? In-Reply-To: <20190904163052.aozitahsa5k6iinu@mink.imca.aps.anl.gov> References: <20190904163052.aozitahsa5k6iinu@mink.imca.aps.anl.gov> Message-ID: <4b299f4a98136fec8260e2e61c005213.NginxMailingListEnglish@forum.nginx.org> Robots exclusion is generally quite unreliable. Exclusions based on user agents are also not really reliable. You can try all of the options for robots exclusion and may still get undesired crawlers on your site. The only way you can keep robots out is to require authentication for those parts you don't want to have crawled. --j. 
Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285463,285599#msg-285599 From vl at nginx.com Wed Sep 11 00:43:11 2019 From: vl at nginx.com (Vladimir Homutov) Date: Wed, 11 Sep 2019 03:43:11 +0300 Subject: How is the progress to support DTLS In-Reply-To: References: <7cb11a7f-6639-9725-4929-b3c40da26fdf@nginx.com> Message-ID: <20190911004311.GA28633@gbox> On Tue, Sep 10, 2019 at 05:12:48AM -0400, everhardt wrote: > I have been using it for more than a year now for more than 500 IoT devices > with a cellular connection that connect on average about 4 times per day. My > experience has been very positive: easy to set up and no issues at all (both > for the 1.13.0 and the 1.13.9 patch). > > As NGINX is at 1.17 already, I'd like to update as well. Are there any plans > to either release a new patch, or preferably, integrate this into the main > product? Currently there are no such plans. What kind of functionality are you using? Do you terminate DTLS or proxy it ? For the latter, you don't need patches, as recent nginx version support UDP "sessions". From nginx-forum at forum.nginx.org Wed Sep 11 04:48:46 2019 From: nginx-forum at forum.nginx.org (everhardt) Date: Wed, 11 Sep 2019 00:48:46 -0400 Subject: How is the progress to support DTLS In-Reply-To: <20190911004311.GA28633@gbox> References: <20190911004311.GA28633@gbox> Message-ID: <9815b240ab2e25c7414245f1f0b9fab4.NginxMailingListEnglish@forum.nginx.org> I?m using it for termination. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,278434,285603#msg-285603 From meir.hazon at ev-box.com Wed Sep 11 14:45:21 2019 From: meir.hazon at ev-box.com (meir hazon) Date: Wed, 11 Sep 2019 16:45:21 +0200 Subject: Proxy Pass based on the first field in the URI Message-ID: Hello, I have tried to do the following but couldn't pull it out. Maybe you will have a good idea as I am not an expert. I would like to proxy pass to different proxies based on the first part of a URI. 
I would also like to pass the message to the proxy without the first part of the URI but use the rest of it. Please note that the length and the URI strings and number of fields might change example, incoming to nginx, https://yyy.com/bla/aa/er if $1 == bla proxy pass to http://xxx.com/aa/er if $1 == vv proxy pass to http://ccc.com/aa/er Thanks so much, Meir -------------- next part -------------- An HTML attachment was scrubbed... URL: From francis at daoine.org Wed Sep 11 15:46:22 2019 From: francis at daoine.org (Francis Daly) Date: Wed, 11 Sep 2019 16:46:22 +0100 Subject: Proxy Pass based on the first field in the URI In-Reply-To: References: Message-ID: <20190911154622.g42gsxlo3v5a2k32@daoine.org> On Wed, Sep 11, 2019 at 04:45:21PM +0200, meir hazon wrote: Hi there, > I would also like to pass the message to the proxy without the first part > of the URI but use the rest of it. http://nginx.org/r/proxy_pass includes the section: """ If the proxy_pass directive is specified with a URI, then when a request is passed to the server, the part of a normalized request URI matching the location is replaced by a URI specified in the directive: location /name/ { proxy_pass http://127.0.0.1/remote/; } """ With that sample config, you want two "location"s: > https://yyy.com/bla/aa/er > if $1 == bla proxy pass to http://xxx.com/aa/er > if $1 == vv proxy pass to http://ccc.com/aa/er "/name/" == "/bla/"; "/remote/" == "/". "/name/" == "/vv/"; "/remote/" == "/". Does that work for you? 
f -- Francis Daly francis at daoine.org From nginx-forum at forum.nginx.org Wed Sep 11 15:55:45 2019 From: nginx-forum at forum.nginx.org (j94305) Date: Wed, 11 Sep 2019 11:55:45 -0400 Subject: Proxy Pass based on the first field in the URI In-Reply-To: References: Message-ID: <75269176ae5621f4bcf25c7b13bb9c3d.NginxMailingListEnglish@forum.nginx.org> Try something like this: map $urlprefix $urlproxy { "foo" "https://foohost.foo.com"; "bar" "http://barhost.blah.com"; "fie" "https://fie.special.domain.com/blubb"; default "https://standard.com"; } [...] location ~ "^/(?[^/]+)(?/.*)$" { [...] proxy_pass "$urlproxy$urlsuffix$is_args$args"; [...] } You can dynamically determine the host (including a URL piece, if necessary) with the "map", and use the mapped proxy prefix then in the proxy_pass directive. If whatever lies beyond the NGINX requires URL arguments, don't forget the $is_args$args part. --j. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285607,285609#msg-285609 From meir.hazon at ev-box.com Wed Sep 11 18:41:15 2019 From: meir.hazon at ev-box.com (meir hazon) Date: Wed, 11 Sep 2019 20:41:15 +0200 Subject: Proxy Pass based on the first field in the URI In-Reply-To: <20190911154622.g42gsxlo3v5a2k32@daoine.org> References: <20190911154622.g42gsxlo3v5a2k32@daoine.org> Message-ID: Hi, I don't think so, it should always be the first field of the URI but it would be a random string. I have to proxy pass and rebuild the new URI based on location at the original URI only. Could you think of a way to do this? Thanks so much, Meir On Wed, Sep 11, 2019 at 5:46 PM Francis Daly wrote: > On Wed, Sep 11, 2019 at 04:45:21PM +0200, meir hazon wrote: > > Hi there, > > > I would also like to pass the message to the proxy without the first part > > of the URI but use the rest of it. 
> > http://nginx.org/r/proxy_pass > > includes the section: > > """ > If the proxy_pass directive is specified with a URI, then when a request > is passed to the server, the part of a normalized request URI matching > the location is replaced by a URI specified in the directive: > location /name/ { > proxy_pass http://127.0.0.1/remote/; > } > """ > > With that sample config, you want two "location"s: > > > https://yyy.com/bla/aa/er > > if $1 == bla proxy pass to http://xxx.com/aa/er > > if $1 == vv proxy pass to http://ccc.com/aa/er > > "/name/" == "/bla/"; "/remote/" == "/". > "/name/" == "/vv/"; "/remote/" == "/". > > Does that work for you? > > f > -- > Francis Daly francis at daoine.org > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx > -------------- next part -------------- An HTML attachment was scrubbed... URL: From meir.hazon at ev-box.com Wed Sep 11 18:42:21 2019 From: meir.hazon at ev-box.com (meir hazon) Date: Wed, 11 Sep 2019 20:42:21 +0200 Subject: Proxy Pass based on the first field in the URI In-Reply-To: <75269176ae5621f4bcf25c7b13bb9c3d.NginxMailingListEnglish@forum.nginx.org> References: <75269176ae5621f4bcf25c7b13bb9c3d.NginxMailingListEnglish@forum.nginx.org> Message-ID: Hi, It looks great, I will try it out and let you know. Thanks so much, Meir On Wed, Sep 11, 2019 at 5:56 PM j94305 wrote: > Try something like this: > > map $urlprefix $urlproxy { > "foo" "https://foohost.foo.com"; > "bar" "http://barhost.blah.com"; > "fie" "https://fie.special.domain.com/blubb"; > default "https://standard.com"; > } > > [...] > > location ~ "^/(?[^/]+)(?/.*)$" { > > [...] > proxy_pass "$urlproxy$urlsuffix$is_args$args"; > [...] > } > > You can dynamically determine the host (including a URL piece, if > necessary) > with the "map", and use the mapped proxy prefix then in the proxy_pass > directive. 
If whatever lies beyond the NGINX requires URL arguments, don't > forget the $is_args$args part. > > --j. > > Posted at Nginx Forum: > https://forum.nginx.org/read.php?2,285607,285609#msg-285609 > > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx > -------------- next part -------------- An HTML attachment was scrubbed... URL: From francis at daoine.org Wed Sep 11 21:48:38 2019 From: francis at daoine.org (Francis Daly) Date: Wed, 11 Sep 2019 22:48:38 +0100 Subject: Proxy Pass based on the first field in the URI In-Reply-To: References: <20190911154622.g42gsxlo3v5a2k32@daoine.org> Message-ID: <20190911214838.4k66hdm3f4pitfrb@daoine.org> On Wed, Sep 11, 2019 at 08:41:15PM +0200, meir hazon wrote: Hi there, > I don't think so, it should always be the first field of the URI but it > would be a random string. I have to proxy pass and rebuild the new URI > based on location at the original URI only. I don't understand what your requirements are. > > > https://yyy.com/bla/aa/er > > > if $1 == bla proxy pass to http://xxx.com/aa/er > > > if $1 == vv proxy pass to http://ccc.com/aa/er "bla" is not random; it is a thing that you know that maps to xxx.com. "vv" is not random; it is a thing that you know that maps to ccc.com. So, somewhere, you have a list of the uri prefix / upstream server mappings, no? Use that list to make the nginx config. > Could you think of a way to do this? Not without me understanding what "this" is. Good luck with it, f -- Francis Daly francis at daoine.org From zeeshanopel at gmail.com Thu Sep 12 06:08:15 2019 From: zeeshanopel at gmail.com (Zeeshan Opel) Date: Thu, 12 Sep 2019 11:08:15 +0500 Subject: Rewriting url in nginx Message-ID: I am trying to rewrite a url in nginx. 
When i am accessing mail.parco.net.pk in browser, it opens below link: http://mailsvr.parco.net.pk/mailsvr/mail/mailbox.nsf But in actual it should open http://mailsvr.parco.net.pk/mail/mailbox.nsf Below is my nginx conf file. worker_processes 1; worker_rlimit_nofile 30000; events { worker_connections 1024; } http { include mime.types; default_type application/octet-stream; sendfile on; keepalive_timeout 65; server { listen 80; return 301 http://mailsvr.parco.net.pk$request_uri; #rewrite ^/mailsvr/(.*) /$1 break; #rewrite ^/http://mailsvr.parco.net.pk(.*)$ http://mailsvr.parco.net.pk/mail=$1 permanent; #return 301 http://$host$request_uri; #server_name 192.168.17.53; client_max_body_size 100m; location /{ #proxy_pass http://172.17.5.157/; #proxy_set_header X-Real-IP $remote_addr; #proxy_set_header X-Forwarded-For $remote_addr; #proxy_set_header Host $host; #proxy_set_header X-Forwarded-Proto $scheme; proxy_read_timeout 240; proxy_buffering off; }#end location }#end server }#end http -- Zeeshan Qaiser Opel +92-301-8446630 -------------- next part -------------- An HTML attachment was scrubbed... URL: From meir.hazon at ev-box.com Thu Sep 12 07:34:20 2019 From: meir.hazon at ev-box.com (meir hazon) Date: Thu, 12 Sep 2019 09:34:20 +0200 Subject: Proxy Pass based on the first field in the URI In-Reply-To: <20190911214838.4k66hdm3f4pitfrb@daoine.org> References: <20190911154622.g42gsxlo3v5a2k32@daoine.org> <20190911214838.4k66hdm3f4pitfrb@daoine.org> Message-ID: Dear Francis and friends, I am sorry if I wasn't clear enough, I appreciate your help and advice dearly. I was thinking on creating environments dynamically, Meaning, 1. the source "host" of the URL is random 2. The first field of the URI includes a random string with the environment ID. 3. Based on this ID a proxy pass is added (I will add DNS records etc.) 4. 
The proxy pass points to a static "host" and the URI includes the original URI excluding the first filed (env ID field) Meaning that I will need a good rewrite and an "if" statement for each one of the environments (IDs) Could you please On Wed, Sep 11, 2019 at 11:48 PM Francis Daly wrote: > On Wed, Sep 11, 2019 at 08:41:15PM +0200, meir hazon wrote: > > Hi there, > > > I don't think so, it should always be the first field of the URI but it > > would be a random string. I have to proxy pass and rebuild the new URI > > based on location at the original URI only. > > I don't understand what your requirements are. > > > > > https://yyy.com/bla/aa/er > > > > if $1 == bla proxy pass to http://xxx.com/aa/er > > > > if $1 == vv proxy pass to http://ccc.com/aa/er > > "bla" is not random; it is a thing that you know that maps to xxx.com. > > "vv" is not random; it is a thing that you know that maps to ccc.com. > > So, somewhere, you have a list of the uri prefix / upstream server > mappings, no? > > Use that list to make the nginx config. > > > Could you think of a way to do this? > > Not without me understanding what "this" is. > > Good luck with it, > > f > -- > Francis Daly francis at daoine.org > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx > -------------- next part -------------- An HTML attachment was scrubbed... URL: From meir.hazon at ev-box.com Thu Sep 12 07:40:47 2019 From: meir.hazon at ev-box.com (meir hazon) Date: Thu, 12 Sep 2019 09:40:47 +0200 Subject: Proxy Pass based on the first field in the URI In-Reply-To: References: <20190911154622.g42gsxlo3v5a2k32@daoine.org> <20190911214838.4k66hdm3f4pitfrb@daoine.org> Message-ID: Sorry, the only random is the first part of the URI, Following is the right description, Dear Francis and friends, I am sorry if I wasn't clear enough, I appreciate your help and advice dearly. 
I was thinking on creating environments dynamically, Meaning, 1. the source "host" of the URL is static 2. The first field of the URI includes a random string with the environment ID. 3. Based on this ID a proxy pass is added to the nginx conf 4. The proxy pass points to a static NEW "host" and the URI includes the original URI excluding the first filed (env ID field) a DNS record would be added etc based on the env ID Meaning that I will need a good rewrite and an "if" statement for each one of the environments ( based on the env IDs ) Could you please share your thoughts with me? Thanks so much, Meir On Thu, Sep 12, 2019 at 9:34 AM meir hazon wrote: > Dear Francis and friends, > > I am sorry if I wasn't clear enough, I appreciate your help and advice > dearly. > > I was thinking on creating environments dynamically, Meaning, > > 1. the source "host" of the URL is random > > 2. The first field of the URI includes a random string with the > environment ID. > > 3. Based on this ID a proxy pass is added (I will add DNS records etc.) > > 4. The proxy pass points to a static "host" and the URI includes the > original URI excluding the first filed (env ID field) > > Meaning that I will need a good rewrite and an "if" statement for each one > of the environments (IDs) > > Could you please > > On Wed, Sep 11, 2019 at 11:48 PM Francis Daly wrote: > >> On Wed, Sep 11, 2019 at 08:41:15PM +0200, meir hazon wrote: >> >> Hi there, >> >> > I don't think so, it should always be the first field of the URI but it >> > would be a random string. I have to proxy pass and rebuild the new URI >> > based on location at the original URI only. >> >> I don't understand what your requirements are. >> >> > > > https://yyy.com/bla/aa/er >> > > > if $1 == bla proxy pass to http://xxx.com/aa/er >> > > > if $1 == vv proxy pass to http://ccc.com/aa/er >> >> "bla" is not random; it is a thing that you know that maps to xxx.com. 
>> >> "vv" is not random; it is a thing that you know that maps to ccc.com. >> >> So, somewhere, you have a list of the uri prefix / upstream server >> mappings, no? >> >> Use that list to make the nginx config. >> >> > Could you think of a way to do this? >> >> Not without me understanding what "this" is. >> >> Good luck with it, >> >> f >> -- >> Francis Daly francis at daoine.org >> _______________________________________________ >> nginx mailing list >> nginx at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx >> > -------------- next part -------------- An HTML attachment was scrubbed... URL: From zeeshanopel at gmail.com Thu Sep 12 10:51:58 2019 From: zeeshanopel at gmail.com (Zeeshan Opel) Date: Thu, 12 Sep 2019 15:51:58 +0500 Subject: Rewriting url in nginx In-Reply-To: References: Message-ID: Please help On Thu, Sep 12, 2019 at 11:08 AM Zeeshan Opel wrote: > I am trying to rewrite a url in nginx. > > When i am accessing mail.parco.net.pk in browser, it opens below link: > > http://mailsvr.parco.net.pk/mailsvr/mail/mailbox.nsf > But in actual it should open http://mailsvr.parco.net.pk/mail/mailbox.nsf > > Below is my nginx conf file. 
> > worker_processes 1; > worker_rlimit_nofile 30000; > events { > worker_connections 1024; > } > > > http { > include mime.types; > default_type application/octet-stream; > sendfile on; > keepalive_timeout 65; > server { > listen 80; > return 301 http://mailsvr.parco.net.pk$request_uri; > #rewrite ^/mailsvr/(.*) /$1 break; > #rewrite ^/http://mailsvr.parco.net.pk(.*)$ > http://mailsvr.parco.net.pk/mail=$1 permanent; > #return 301 http://$host$request_uri; > #server_name 192.168.17.53; > client_max_body_size 100m; > > location /{ > #proxy_pass http://172.17.5.157/; > > #proxy_set_header X-Real-IP $remote_addr; > #proxy_set_header X-Forwarded-For $remote_addr; > #proxy_set_header Host $host; > #proxy_set_header X-Forwarded-Proto $scheme; > > proxy_read_timeout 240; > proxy_buffering off; > }#end location > }#end server > > }#end http > > -- > Zeeshan Qaiser Opel > +92-301-8446630 > -- Zeeshan Qaiser Opel +92-301-8446630 -------------- next part -------------- An HTML attachment was scrubbed... URL: From al-nginx at none.at Thu Sep 12 12:51:14 2019 From: al-nginx at none.at (Aleksandar Lazic) Date: Thu, 12 Sep 2019 14:51:14 +0200 Subject: Rewriting url in nginx In-Reply-To: References: Message-ID: Am 12.09.2019 um 08:08 schrieb Zeeshan Opel: > I am trying to rewrite a url in nginx.? > > When i am accessing mail.parco.net.pk in browser, it > opens below link: > > http://mailsvr.parco.net.pk/mailsvr/mail/mailbox.nsf > But in actual it should open? http://mailsvr.parco.net.pk/mail/mailbox.nsf What does your backend returns? > Below is my nginx conf file. > > worker_processes ?1; > worker_rlimit_nofile ? ?30000; > events { > ? ? worker_connections ?1024; > } > > > http { > ? ? include ? ? ? ? ? mime.types; > ? ? default_type ? ? ? application/octet-stream; > ? ? sendfile ? ? ? ? on; > ? ? keepalive_timeout ? 65; ? ? > ? ? server { > ? ? ? ? listen ? ? ? ? ? ? 80; > ? ? ? ? return ? ?301 ? ? http://mailsvr.parco.net.pk$request_uri; > ? ? ? ? 
#rewrite ^/mailsvr/(.*) /$1 break; What's the debug output when you activate the line above and comment the `return 301 ...` in quote from the doc https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return ``` Stops processing and returns the specified code to a client. ``` The rewrite looks good to me. For debug log please take a look into this doc. https://nginx.org/en/docs/debugging_log.html > ? ? ? ? #rewrite ^/http://mailsvr.parco.net.pk(.*)$ > http://mailsvr.parco.net.pk/mail=$1 permanent; > ? ? ? ? #return ? ?301 ? ?http://$host$request_uri; ? ? ? ? > ? ? ? ? #server_name ? ? ? ?192.168.17.53; > ? ? ? ? client_max_body_size ? ?100m; > > ? ? ? ? location /{ ? ? ? ? > ? ? ? ? ? ? ? ? #proxy_pass ? ? ? ? ? ? ? ? ? ? http://172.17.5.157/; ? ? ? ? > ? ? ? ? ? ? ? ? #proxy_set_header ? ? X-Real-IP ? ? ? ?$remote_addr; > ? ? ? ? ? ? ? ? #proxy_set_header ? ? X-Forwarded-For ? ? ? ?$remote_addr; > ? ? ? ? ? ? ? ? #proxy_set_header ? ?Host ? ? ? ? ? ?$host; ? ? ? ? ? ? ? ? > ? ? ? ? ? ? ? ? #proxy_set_header ? ?X-Forwarded-Proto ? ?$scheme; ? ? ? ? ? ? > ? ? ? ? ? ? ? ? proxy_read_timeout ? ? ? ? ? ? ? ?240; > ? ? ? ? ? ? ? ? proxy_buffering ? ? ? ? ? ? ? ? ? ?off; > ? ? ? ? ? ? }#end location > ? ? }#end server > ? ? > }#end http > > -- > Zeeshan Qaiser Opel > +92-301-8446630 > > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx > From nginx-forum at forum.nginx.org Thu Sep 12 13:11:31 2019 From: nginx-forum at forum.nginx.org (arunkiji) Date: Thu, 12 Sep 2019 09:11:31 -0400 Subject: Gzip not compressing In-Reply-To: References: Message-ID: Hi, guys!! Resolved!! Thank you!! 
With this code from Hummingbird: # Enable Gzip compression gzip on; # Compression level (1-9) gzip_comp_level 5; # Don't compress anything under 256 bytes gzip_min_length 256; # Compress output of these MIME-types gzip_types application/atom+xml application/javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-font-opentype application/x-font-truetype application/x-javascript application/x-web-app-manifest+json application/xhtml+xml application/xml font/eot font/opentype font/otf image/svg+xml image/x-icon image/vnd.microsoft.icon text/css text/plain text/javascript text/x-component; # Disable gzip for bad browsers gzip_disable "MSIE [1-6]\.(?!.*SV1)"; Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285576,285620#msg-285620 From nginx-forum at forum.nginx.org Sat Sep 14 22:17:44 2019 From: nginx-forum at forum.nginx.org (j94305) Date: Sat, 14 Sep 2019 18:17:44 -0400 Subject: Proxy Pass based on the first field in the URI In-Reply-To: References: Message-ID: PS: If, like you mentioned in the other reply message, want to create environments dynamically, you could use the map directive with an include file that is dynamically updated by the deployment process of such an environment (and then do nginx -s reload), but even more elegant would be the replacement of the (static) map by a (dynamic) keyval map. The keyval mappings can be maintained via the NGINX API, so you won't need reloads of NGINX in the event of updates. The use of the keyval map is identical to the map directive, except the definition would use a keyval zone. Cheers, --j. 
Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285607,285623#msg-285623 From nginx-forum at forum.nginx.org Tue Sep 17 12:42:35 2019 From: nginx-forum at forum.nginx.org (rihad) Date: Tue, 17 Sep 2019 08:42:35 -0400 Subject: nginx error : cache file has too long header In-Reply-To: <20161207104122.GE4627@Romans-MacBook-Air.local> References: <20161207104122.GE4627@Romans-MacBook-Air.local> Message-ID: <855610d6084dbb53af06ba4239b80f3d.NginxMailingListEnglish@forum.nginx.org> Is time of last request also stored with the cached file on disk? If not, how does nginx track inactive timeout if it gets restarted? Posted at Nginx Forum: https://forum.nginx.org/read.php?2,271382,285638#msg-285638 From nginx-forum at forum.nginx.org Tue Sep 17 20:12:12 2019 From: nginx-forum at forum.nginx.org (salmaanp) Date: Tue, 17 Sep 2019 16:12:12 -0400 Subject: Making parallel subrequests in a module Message-ID: <79881cec43941e1ec66c116dab0d6240.NginxMailingListEnglish@forum.nginx.org> Hi, I'm trying to make parallel network subrequests in my module. They work perfectly fine when run sequentially as follows. ``` ACCESS PHASE return create_subrequest1; ---> (NGX_AGAIN) return create_subrequest2; ---> (NGX_AGAIN) ``` To make it parallel, I removed the returns and called NGX_AGAIN twice later on in the access phase when the subrequests should be done. By doing this the subrequests are created in parallel and the right handler is called as well. However, when the second subrequest is finalized, I face an issue where r!=c->data for the second subrequest and so get this line in the logs ``` http finalize non-active request: "//?" ``` So first subrequest is finalized, second subrequest handler is called but not finalized, parent request finshes and then second subrequest handler called again and finalized. I don't have the NGX_HTTP_SUBREQUEST_WAITED flag when calling the subrequest as well. Anybody who has run into this kind of issue? Any thoughts on how to proceed? Thanks! 
- Salmaan Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285639,285639#msg-285639 From vbart at nginx.com Thu Sep 19 19:08:32 2019 From: vbart at nginx.com (Valentin V. Bartenev) Date: Thu, 19 Sep 2019 22:08:32 +0300 Subject: Unit 1.11.0 release Message-ID: <1780216.2efUyg02iL@vbart-workstation> Hi, I'm glad to announce a new release of NGINX Unit. This release improves the stability of Go applications and introduces three major features: 1. Ability to Serve Static Media Assets With this feature, we're only at the beginning of a long road to transform Unit into a full-fledged web server, capable of acting as a building block for web services of any kind. In this release, the support for static files is very simple; you can only specify the document root directory for Unit to handle: { "share": "/data/www/example.com" } Also, you can fine-tune MIME types: { "mime_types": { "text/plain": [ "readme", ".c", ".h" ], "application/msword": ".doc" } } Use encoding to access object members with names that contain "/" characters directly by their URI: GET /config/settings/http/static/mime_types/text%2Fplain/ See the documentation for details: - https://unit.nginx.org/configuration/#static-files In the upcoming releases, we'll extend this area of functionality to handle more use cases in the most performant manner. Unfortunately, basic proxying support did not make it to this release, as tests have revealed that it needs more work. There are excellent chances that the feature will be included in the next release in a month or so. 2. Application Isolation This capability increases the security of running applications, allowing to run them in isolated environments based on Linux namespaces. This is very similar to how Docker containers work. 
The configuration is pretty straightforward: you can customize the isolation level and configure UID/GID mapping between the host and the container: { "namespaces": { "credential": true, "pid": true, "network": true, "mount": false, "uname": true, "cgroup": false }, "uidmap": [ { "container": 1000, "host": 812, "size": 1 } ], "gidmap": [ { "container": 1000, "host": 812, "size": 1 } ] } See the documentation for details: - https://unit.nginx.org/configuration/#process-isolation This feature was implemented by Tiago de Bem Natel de Moura, who has joined our team recently; he will continue working on security features hardening and container support of Unit. 3. WebSockets in Java Servlet Containers WebSocket connection offloading was first introduced in the previous release for Node.js only; now it's extended to JSC as well. We will continue advancing application language support further to provide equally broad opportunities, whichever language you may prefer. Changes with Unit 1.11.0 19 Sep 2019 *) Feature: basic support for serving static files. *) Feature: isolation of application processes with Linux namespaces. *) Feature: built-in WebSocket server implementation for Java Servlet Containers. *) Feature: direct addressing of API configuration options containing slashes "/" using URI encoding (%2F). *) Bugfix: segmentation fault might have occurred in Go applications under high load. *) Bugfix: WebSocket support was broken if Unit was built with some linkers other than GNU ld (e.g. gold or LLD). That's all for this release. Try, test, leave feedback, and stay tuned! wbr, Valentin V. 
Bartenev From MPALMA at largus.fr Fri Sep 20 07:59:15 2019 From: MPALMA at largus.fr (PALMA Mickael) Date: Fri, 20 Sep 2019 07:59:15 +0000 Subject: set directive not working when mirror is used Message-ID: <93E87A31-5EF2-403B-AE6B-9149F381FE36@largus.fr> Hi, Here is my location directive : server { # Start with an undefined API name, each API will update this value set $api_name -; [...] location = /_oauth { internal; set $api_name 'Oauth API'; # Policy configuration here (authentication, rate limiting, logging, more...) mirror /_mirrored_api; proxy_pass http://$upstream$request_uri; } } In my log I only get '-' as api_name but if I comment the mirror directive I get 'Oauth API'. Any idea how to avoid that. I always want the name to be 'Oauth API'. Regards, Mickaël Palma Responsable domaine & valorisation 11-13 rue des Petits Hôtels 75010 Paris Tel : 01.53.29.35.53 | Fax : 01.53.29.11.66 -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Sep 20 12:54:34 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 20 Sep 2019 15:54:34 +0300 Subject: set directive not working when mirror is used In-Reply-To: <93E87A31-5EF2-403B-AE6B-9149F381FE36@largus.fr> References: <93E87A31-5EF2-403B-AE6B-9149F381FE36@largus.fr> Message-ID: <20190920125433.GH1877@mdounin.ru> Hello! On Fri, Sep 20, 2019 at 07:59:15AM +0000, PALMA Mickael wrote: > Hi, > > Here is my location directive : > > server { > # Start with an undefined API name, each API will update this value > set $api_name -; > > [...] > > location = /_oauth { > internal; > set $api_name 'Oauth API'; > > # Policy configuration here (authentication, rate limiting, logging, more...) > mirror /_mirrored_api; > proxy_pass http://$upstream$request_uri; > } > } > > In my log I only get '-' as api_name but if I comment the mirror directive I get 'Oauth API'. > > Any idea how to avoid that. > I always want the name to be 'Oauth API'. 
The problem is that variables space is shared among all subrequests, including mirror one. As such, "set $api_name -;" in the mirror subrequest overrides the one previously set for the request. The only solution is to avoid changing variables you don't want to be changed. In this particular configuration - consider removing "set ..." from the server level. -- Maxim Dounin http://mdounin.ru/ From MPALMA at largus.fr Fri Sep 20 15:02:00 2019 From: MPALMA at largus.fr (PALMA Mickael) Date: Fri, 20 Sep 2019 15:02:00 +0000 Subject: set directive not working when mirror is used In-Reply-To: <20190920125433.GH1877@mdounin.ru> References: <93E87A31-5EF2-403B-AE6B-9149F381FE36@largus.fr> <20190920125433.GH1877@mdounin.ru> Message-ID: <313BB038-D89E-49E0-80B3-27F7CF3F21B6@largus.fr> Thank you Dimitry, I will remove the default value. It will be empty by default in stead of ?-? not a big loss ;-) Regards. On 20 Sep 2019, at 14:54, Maxim Dounin > wrote: Hello! On Fri, Sep 20, 2019 at 07:59:15AM +0000, PALMA Mickael wrote: Hi, Here is my location directive : server { # Start with an undefined API name, each API will update this value set $api_name -; [...] location = /_oauth { internal; set $api_name 'Oauth API'; # Policy configuration here (authentication, rate limiting, logging, more...) mirror /_mirrored_api; proxy_pass http://$upstream$request_uri; } } In my log I only get ?-? as api_name but if I comment the mirror directive I get 'Oauth API?. Any idea how to avoid that. I always want the name to be ''Oauth API?'. The problem is that variables space is shared among all subrequests, including mirror one. As such, "set $api_name -;" in the mirror subrequest overrides the one previously set for the request. The only solution is to avoid changing variables you don't want to be changed. In this particular configuration - consider removing "set ..." from the server level. 
-- Maxim Dounin http://mdounin.ru/ _______________________________________________ nginx mailing list nginx at nginx.org http://mailman.nginx.org/mailman/listinfo/nginx -------------- next part -------------- An HTML attachment was scrubbed... URL: From lagged at gmail.com Sat Sep 21 18:02:57 2019 From: lagged at gmail.com (Andrei) Date: Sat, 21 Sep 2019 13:02:57 -0500 Subject: Failing to cache requests Message-ID: Hello, I'm having a hard time understanding why these requests aren't being cached for 10 minutes? Is it the cookies? If so how can I avoid this and force cache? curl -I https://NNN.com/abc123/amp HTTP/1.1 200 OK Date: Sat, 21 Sep 2019 17:59:11 GMT Content-Type: text/html; charset=UTF-8 Connection: keep-alive Set-Cookie: __cfduid=d79327988a8ff43f308be68675eedfac31569088750; expires=Sun, 20-Sep-20 17:59:10 GMT; path=/; domain=.NNN.com; HttpOnly Alternate-Protocol: 443:npn-http/2 Cache-Control: max-age=600 Cf-Railgun: direct (starting new WAN connection) Expires: Sat, 21 Sep 2019 18:09:10 GMT Last-Modified: Thu, 01 Jan 1970 00:00:00 GMT Set-Cookie: pvc_visits[0]=1569175150b5472; expires=Sun, 22-Sep-2019 17:59:10 GMT; Max-Age=86400; path=/; secure; HttpOnly Set-Cookie: cookielawinfo-checkbox-necessary=yes; expires=Sat, 21-Sep-2019 18:59:10 GMT; Max-Age=3600; path=/ X-Cache-Status: MISS-0 X-Pingback: https://NNN.com/abc123/xmlrpc.php X-Request-Id: zbbdrnjacxubirpb Expect-CT: max-age=604800, report-uri=" https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct" Server: cloudflare CF-RAY: 519dee748b7e91da-EWR -------------- next part -------------- An HTML attachment was scrubbed... URL: From kartare48 at gmail.com Sat Sep 21 18:51:14 2019 From: kartare48 at gmail.com (Kathy Prosser) Date: Sat, 21 Sep 2019 18:51:14 +0000 Subject: Group text Pro Message-ID: Hi I have just bought this app but can't send a group ext as it just freezes before I can even right a message. Freezes as soon as I click on the pen icon. 
Not Happy Kathy -------------- next part -------------- An HTML attachment was scrubbed... URL: From marc at nemeto.fr Sun Sep 22 10:39:37 2019 From: marc at nemeto.fr (Marc) Date: Sun, 22 Sep 2019 12:39:37 +0200 Subject: WebDAV and anonymous+authenticated access In-Reply-To: <839dbfaa-00aa-b81e-f05b-993fafe6e77b@nemeto.fr> References: <839dbfaa-00aa-b81e-f05b-993fafe6e77b@nemeto.fr> Message-ID: <3c75c05e-e0b9-b1da-cbc6-b4e4f751f281@nemeto.fr> Hello, Is it possible to setup nginx so that it shares a directory via WebDAV which would have read+write access for authenticated users and simple read access for non-authenticated/guest/anonymous users ? At the moment my WebDAV setup works correctly but only for authenticated users. Authentication is always required if I try to browse to https://myserver/myshare via a web browser. How can I solve this ? Thanks. -- Marc PGP : E12177BF 24CC9C9B From francis at daoine.org Sun Sep 22 12:25:57 2019 From: francis at daoine.org (Francis Daly) Date: Sun, 22 Sep 2019 13:25:57 +0100 Subject: Failing to cache requests In-Reply-To: References: Message-ID: <20190922122557.dnoufrkob4ek72fv@daoine.org> On Sat, Sep 21, 2019 at 01:02:57PM -0500, Andrei wrote: Hi there, > I'm having a hard time understanding why these requests aren't being cached > for 10 minutes? Is it the cookies? Yes. http://nginx.org/r/proxy_cache_valid """ If the header includes the ?Set-Cookie? field, such a response will not be cached. """ > If so how can I avoid this and force cache? """ Processing of one or more of these response header fields can be disabled using the proxy_ignore_headers directive. 
f -- Francis Daly francis at daoine.org From francis at daoine.org Sun Sep 22 12:25:57 2019 From: francis at daoine.org (Francis Daly) Date: Sun, 22 Sep 2019 13:25:57 +0100 Subject: Failing to cache requests In-Reply-To: References: Message-ID: <20190922122557.dnoufrkob4ek72fv@daoine.org> On Sat, Sep 21, 2019 at 01:02:57PM -0500, Andrei wrote: Hi there, > I'm having a hard time understanding why these requests aren't being cached > for 10 minutes? Is it the cookies? Yes. http://nginx.org/r/proxy_cache_valid """ If the header includes the "Set-Cookie" field, such a response will not be cached. """ > If so how can I avoid this and force cache? """ Processing of one or more of these response header fields can be disabled using the proxy_ignore_headers directive. 
f -- Francis Daly francis at daoine.org From lagged at gmail.com Sun Sep 22 14:37:51 2019 From: lagged at gmail.com (Andrei) Date: Sun, 22 Sep 2019 09:37:51 -0500 Subject: Failing to cache requests In-Reply-To: <20190922122557.dnoufrkob4ek72fv@daoine.org> References: <20190922122557.dnoufrkob4ek72fv@daoine.org> Message-ID: Hello, Thanks for the confirmation! Is there a way to selectively ignore only specific "set-cookie"/cookies, versus all "set-cookie" headers to force cache? Or even better to remove/strip certain cookies with a regex match? I imagine this way I can strip the cookies that aren't relevant, it would cache on it's own, and if some new cookie appears it will avoid caching the request. I believe this might be doable using Lua, but I haven't had any luck :( On Sun, Sep 22, 2019 at 7:26 AM Francis Daly wrote: > On Sat, Sep 21, 2019 at 01:02:57PM -0500, Andrei wrote: > > Hi there, > > > I'm having a hard time understanding why these requests aren't being > cached > > for 10 minutes? Is it the cookies? > > Yes. > > http://nginx.org/r/proxy_cache_valid > > """ > If the header includes the ?Set-Cookie? field, such a response will not be > cached. > """ > > > If so how can I avoid this and force cache? > > """ > Processing of one or more of these response header fields can be disabled > using the proxy_ignore_headers directive. > """ > > f > -- > Francis Daly francis at daoine.org > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From francis at daoine.org Sun Sep 22 16:13:18 2019 From: francis at daoine.org (Francis Daly) Date: Sun, 22 Sep 2019 17:13:18 +0100 Subject: WebDAV and anonymous+authenticated access In-Reply-To: <20190922124236.kzun7lm7c4m3vsp2@daoine.org> References: <839dbfaa-00aa-b81e-f05b-993fafe6e77b@nemeto.fr> <3c75c05e-e0b9-b1da-cbc6-b4e4f751f281@nemeto.fr> <20190922124236.kzun7lm7c4m3vsp2@daoine.org> Message-ID: <20190922161318.phbkqg6yya2lv6lb@daoine.org> On Sun, Sep 22, 2019 at 01:42:36PM +0100, Francis Daly wrote: > On Sun, Sep 22, 2019 at 12:39:37PM +0200, Marc wrote: Hi there, > Untested suggestions... and one more, also untested... > > Is it possible to setup nginx so that it shares a directory via WebDAV > > which would have read+write access for authenticated users and simple > > read access for non-authenticated/guest/anonymous users ? http://nginx.org/en/docs/http/ngx_http_dav_module.html includes an example configuration that includes "limit_except GET {". Would something like that work for you? Require authorization for any method other than GET -- GET is probably read-only, and anything else is possibly read-write. (You may need to investigate whether OPTIONS needs to be allowed in the same way as GET.) f -- Francis Daly francis at daoine.org From francis at daoine.org Sun Sep 22 20:07:43 2019 From: francis at daoine.org (Francis Daly) Date: Sun, 22 Sep 2019 21:07:43 +0100 Subject: Failing to cache requests In-Reply-To: References: <20190922122557.dnoufrkob4ek72fv@daoine.org> Message-ID: <20190922200743.zite5zlcsussze4g@daoine.org> On Sun, Sep 22, 2019 at 09:37:51AM -0500, Andrei wrote: Hi there, > Thanks for the confirmation! Is there a way to selectively ignore only > specific "set-cookie"/cookies, versus all "set-cookie" headers to force > cache? Not that I am aware of, for proxy_cache. > Or even better to remove/strip certain cookies with a regex match? Not that I am aware of. 
If you do find a way, and it happens too late to affect proxy_cache, then you can possibly get around that by doing two proxy_pass'es -- one with caching to another, which strips the Set-Cookie headers that you don't want to see after proxy_pass'ing to the "real" upstream. But you may be better off configuring the upstream server not to send the Set-Cookie header that you don't want. That would presumably be more reliable, if it is doable. Good luck with it, f -- Francis Daly francis at daoine.org From nginx-forum at forum.nginx.org Sun Sep 22 23:01:16 2019 From: nginx-forum at forum.nginx.org (ptcell) Date: Sun, 22 Sep 2019 19:01:16 -0400 Subject: Any way to reset r->request_body to avoid conflicts between modules? Message-ID: <1bf048f2105b6e573ea6d0d8ab1275c9.NginxMailingListEnglish@forum.nginx.org> I have a preaccess filter in my module that loads in the request_body with `ngx_http_read_client_request_body` (it basically scans the buffer for security violations, no modifications). I don't discard the buffer and this module works fine when proxying POST requests or multiform data to a downstream reverse proxy. If I try using a lua module that attempts to read the request body, too (like `resty.upload`), the module complains that request->request_body already exists and it cannot proceed. Is there any way to reset request->request_body? Just for a laugh I tried setting it to 0 and of course that doesn't work. Barring that is there a way my module could read the request_body without causing a state change to the request struct? Thanks Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285672,285672#msg-285672 From lagged at gmail.com Mon Sep 23 07:25:31 2019 From: lagged at gmail.com (Andrei) Date: Mon, 23 Sep 2019 02:25:31 -0500 Subject: Failing to cache requests In-Reply-To: <20190922200743.zite5zlcsussze4g@daoine.org> References: <20190922122557.dnoufrkob4ek72fv@daoine.org> <20190922200743.zite5zlcsussze4g@daoine.org> Message-ID: Thanks for the input! 
On Sun, Sep 22, 2019 at 3:07 PM Francis Daly wrote: > On Sun, Sep 22, 2019 at 09:37:51AM -0500, Andrei wrote: > > Hi there, > > > Thanks for the confirmation! Is there a way to selectively ignore only > > specific "set-cookie"/cookies, versus all "set-cookie" headers to force > > cache? > > Not that I am aware of, for proxy_cache. > > > Or even better to remove/strip certain cookies with a regex match? > > Not that I am aware of. > > If you do find a way, and it happens too late to affect proxy_cache, > then you can possibly get around that by doing two proxy_pass'es -- > one with caching to another, which strips the Set-Cookie headers that > you don't want to see after proxy_pass'ing to the "real" upstream. > > > But you may be better off configuring the upstream server not to send > the Set-Cookie header that you don't want. > > That would presumably be more reliable, if it is doable. > > Good luck with it, > > f > -- > Francis Daly francis at daoine.org > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx > -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Mon Sep 23 12:49:02 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 23 Sep 2019 15:49:02 +0300 Subject: Any way to reset r->request_body to avoid conflicts between modules? In-Reply-To: <1bf048f2105b6e573ea6d0d8ab1275c9.NginxMailingListEnglish@forum.nginx.org> References: <1bf048f2105b6e573ea6d0d8ab1275c9.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190923124902.GN1877@mdounin.ru> Hello! On Sun, Sep 22, 2019 at 07:01:16PM -0400, ptcell wrote: > I have a preaccess filter in my module that loads in the request_body with > `ngx_http_read_client_request_body` (it basically scans the buffer for > security violations, no modifications). 
I don't discard the buffer and this > module works fine when proxying POST requests or multiform data to a > downstream reverse proxy. > > If I try using a lua module that attempts to read the request body, too > (like `resty.upload`), the module complains that request->request_body > already exists and it cannot proceed. > > Is there any way to reset request->request_body? Just for a laugh I tried > setting it to 0 and of course that doesn't work. Barring that is there a > way my module could read the request_body without causing a state change to > the request struct? You cannot "reset" a request body which is already read, as it is already read from the client, and there is no way to read it again. In general, it is expected that the request body is being read when a module is going to use it. And in most cases modules are able to use a body which is already read. This is, for example, naturally happens when an upstream server error occurs, and the request is redirected to a different location with the error_page directive. There are few exceptions though - in particular, it is not possible to use WebDAV module with body already read, as it uses body reading machinery to save the body to disk, and it is not possible to use non-buffered body reading more than once, as it doesn't save anything. In this particular case I would first check if the module you are facing problems with needs to be fixed instead. As outlined above, there are only a few valid reasons not to accept the body which is already read, and it is generally bad idea to introduce additional such cases. Also you may want to consider re-writing your module to use request body filters instead. This way your inspection code will be called only when ngx_http_read_client_request_body() is called elsewhere. This approach is expected to be compatible with all uses of the client request body. And it also make it possible to inspect buffers in memory even if they are going to be written to disk. 
Just in case, an example request body filter can be found here: http://mdounin.ru/hg/ngx_http_catch_body_filter_module/ -- Maxim Dounin http://mdounin.ru/ From wirelessduck at gmail.com Tue Sep 24 04:18:20 2019 From: wirelessduck at gmail.com (wirelessduck at gmail.com) Date: Tue, 24 Sep 2019 14:18:20 +1000 Subject: No subject Message-ID: Hi, I would like to know whether nginx supports mirroring of a gRPC proxy? I've got a backend server with two endpoints running and setup nginx with the following config: server { listen 50051 http2 ssl; ssl_certificate /etc/ssl/certs/ssl-cert-snakeoil.pem; ssl_certificate_key /etc/ssl/private/ssl-cert-snakeoil.key; ssl_session_cache shared:SSL:10m; ssl_session_timeout 5m; ssl_ciphers HIGH:!aNULL:!MD5; ssl_protocols TLSv1.2 TLSv1.3; location / { mirror /mirror; mirror_request_body off; grpc_pass grpcs://grpc-server.example.com:50051; } location /mirror { internal; grpc_pass grpcs://grpc-server.example.com:50052; } } Sending a request to the nginx proxy only results in the request going through to the endpoint on 50051, and nothing appears on the 50052 endpoint. If I remove "mirror_request_body" config, then the request results in a timeout and nothing appears on either endpoint. Is this supported, or does nginx not yet support mirroring of gRPC? Thanks --Tom From nginx-forum at forum.nginx.org Tue Sep 24 09:05:59 2019 From: nginx-forum at forum.nginx.org (astre) Date: Tue, 24 Sep 2019 05:05:59 -0400 Subject: c++ dynamic module fails to load Message-ID: <2fd48b6184a921abf21e9f045550b773.NginxMailingListEnglish@forum.nginx.org> Hi All, I have a C++ module developed for apache HTTP which now Im trying to port to Nginx. For this I started reading about creating dynamic modules and specifically c++. I already found ngx_cpp_test_module.cpp in the source and able to compile it. 
I extended it a little and wrote a hello world program which compiles fine but when I restart Nginx I get following error: [build at 8e269df38d20 nginx-1.16.1]$ sudo /usr/sbin/nginx -t nginx: [emerg] dlopen() "/etc/nginx/modules/ngx_http_cpp_hello_module.so" failed (/etc/nginx/modules/ngx_http_cpp_hello_module.so: undefined symbol: ngx_http_cpp_hello_module) in /etc/nginx/nginx.conf:2 I have configured it as follows: ./configure --with-cc-opt='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic -fPIC' --with-ld-opt='-lstdc++ -lrt -Wl,-z,relro -Wl,-z,now -fPIC' --prefix=/home/build --add-dynamic-module=/mnt/src/hello-world-cpp --with-threads The config: ngx_addon_name=ngx_http_cpp_hello_module if test -n "$ngx_module_link"; then ngx_module_type=HTTP ngx_module_name=$ngx_addon_name ngx_module_srcs="$ngx_addon_dir/ngx_http_cpp_hello_world_module.cpp" . auto/module else HTTP_MODULES="$HTTP_MODULES ngx_http_hello_module" NGX_ADDON_SRCS="$NGX_ADDON_SRCS $ngx_addon_dir/ngx_http_cpp_hello_world_module.cpp" fi As per the error "undefined symbol: ngx_http_cpp_hello_module" Im trying to understand why it is trying to find that function in the code. Is there something that I'm missing during configuring or compilation process? Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285679,285679#msg-285679 From nginx-forum at forum.nginx.org Tue Sep 24 10:35:10 2019 From: nginx-forum at forum.nginx.org (shivramg94) Date: Tue, 24 Sep 2019 06:35:10 -0400 Subject: Verification of proxied HTTPS server certificate Message-ID: Hi, According to the documentation (http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify) the directive "proxy_ssl_verify" is used to enable or disabled the verification of the proxied HTTPS server certificate. But it doesn't talk about what all different types of validations (like Host Name Verification, Certificate Expiry etc) it does. 
Could someone list out the validations Nginx performs on the obtained server certificate from the upstream server when the above said directive is set to "on"? Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285683,285683#msg-285683 From mdounin at mdounin.ru Tue Sep 24 11:31:51 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Sep 2019 14:31:51 +0300 Subject: your mail In-Reply-To: References: Message-ID: <20190924113151.GP1877@mdounin.ru> Hello! On Tue, Sep 24, 2019 at 02:18:20PM +1000, wirelessduck at gmail.com wrote: > Hi, > > I would like to know whether nginx supports mirroring of a gRPC proxy? > I've got a backend server with two endpoints running and setup nginx > with the following config: > > server { > listen 50051 http2 ssl; > > ssl_certificate /etc/ssl/certs/ssl-cert-snakeoil.pem; > ssl_certificate_key /etc/ssl/private/ssl-cert-snakeoil.key; > ssl_session_cache shared:SSL:10m; > ssl_session_timeout 5m; > ssl_ciphers HIGH:!aNULL:!MD5; > ssl_protocols TLSv1.2 TLSv1.3; > > location / { > mirror /mirror; > mirror_request_body off; > grpc_pass grpcs://grpc-server.example.com:50051; > } > > location /mirror { > internal; > grpc_pass grpcs://grpc-server.example.com:50052; > } > } > > Sending a request to the nginx proxy only results in the request going > through to the endpoint on 50051, and nothing appears on the 50052 > endpoint. If I remove "mirror_request_body" config, then the request > results in a timeout and nothing appears on either endpoint. > > Is this supported, or does nginx not yet support mirroring of gRPC? With "mirror_request_body off;" you are unlikely to get a meaningful result with gRPC, as gRPC requests are in the request body. With "mirror_request_body on;" things are expected to work, but might not - if the gRPC service expects streaming from the client. 
-- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Sep 24 12:17:04 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Sep 2019 15:17:04 +0300 Subject: c++ dynamic module fails to load In-Reply-To: <2fd48b6184a921abf21e9f045550b773.NginxMailingListEnglish@forum.nginx.org> References: <2fd48b6184a921abf21e9f045550b773.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190924121704.GQ1877@mdounin.ru> Hello! On Tue, Sep 24, 2019 at 05:05:59AM -0400, astre wrote: > Hi All, > > I have a C++ module developed for apache HTTP which now Im trying to port to > Nginx. For this I started reading about creating dynamic modules and > specifically c++. I already found ngx_cpp_test_module.cpp in the source and > able to compile it. I extended it a little and wrote a hello world program > which compiles fine but when I restart Nginx I get following error: > > [build at 8e269df38d20 nginx-1.16.1]$ sudo /usr/sbin/nginx -t > nginx: [emerg] dlopen() "/etc/nginx/modules/ngx_http_cpp_hello_module.so" > failed (/etc/nginx/modules/ngx_http_cpp_hello_module.so: undefined symbol: > ngx_http_cpp_hello_module) in /etc/nginx/nginx.conf:2 > > I have configured it as follows: > > ./configure --with-cc-opt='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 > -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 > -grecord-gcc-switches -m64 -mtune=generic -fPIC' --with-ld-opt='-lstdc++ > -lrt -Wl,-z,relro -Wl,-z,now -fPIC' --prefix=/home/build > --add-dynamic-module=/mnt/src/hello-world-cpp --with-threads > > The config: > > ngx_addon_name=ngx_http_cpp_hello_module > > if test -n "$ngx_module_link"; then > ngx_module_type=HTTP > ngx_module_name=$ngx_addon_name > ngx_module_srcs="$ngx_addon_dir/ngx_http_cpp_hello_world_module.cpp" > > . 
auto/module > else > HTTP_MODULES="$HTTP_MODULES ngx_http_hello_module" > NGX_ADDON_SRCS="$NGX_ADDON_SRCS > $ngx_addon_dir/ngx_http_cpp_hello_world_module.cpp" > fi > > As per the error "undefined symbol: ngx_http_cpp_hello_module" Im trying to > understand why it is trying to find that function in the code. > Is there something that I'm missing during configuring or compilation > process? That's the module name you've specified during build ("ngx_module_name=$ngx_addon_name"), and hence this is the module structure nginx tries to find during loading. Since there is no such structure, module loading fails. If you are curious where the structure in question is referenced, check auto/module and auto/make files. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Sep 24 14:03:48 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Sep 2019 17:03:48 +0300 Subject: Verification of proxied HTTPS server certificate In-Reply-To: References: Message-ID: <20190924140348.GR1877@mdounin.ru> Hello! On Tue, Sep 24, 2019 at 06:35:10AM -0400, shivramg94 wrote: > According to the documentation > (http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify) > the directive "proxy_ssl_verify" is used to enable or disabled the > verification of the proxied HTTPS server certificate. But it doesn't talk > about what all different types of validations (like Host Name Verification, > Certificate Expiry etc) it does. > > Could someone list out the validations Nginx performs on the obtained server > certificate from the upstream server when the above said directive is set to > "on"? It verifies that the certificate is valid, signed by a trusted CA, and matches the host name as used in the proxy_pass directive. Much like it normally happens with any HTTPS client as per RFC 2818. 
-- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Tue Sep 24 15:16:52 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 24 Sep 2019 18:16:52 +0300 Subject: nginx-1.17.4 Message-ID: <20190924151652.GT1877@mdounin.ru> Changes with nginx 1.17.4 24 Sep 2019 *) Change: better detection of incorrect client behavior in HTTP/2. *) Change: in handling of not fully read client request body when returning errors in HTTP/2. *) Bugfix: the "worker_shutdown_timeout" directive might not work when using HTTP/2. *) Bugfix: a segmentation fault might occur in a worker process when using HTTP/2 and the "proxy_request_buffering" directive. *) Bugfix: the ECONNABORTED error log level was "crit" instead of "error" on Windows when using SSL. *) Bugfix: nginx ignored extra data when using chunked transfer encoding. *) Bugfix: nginx always returned the 500 error if the "return" directive was used and an error occurred during reading client request body. *) Bugfix: in memory allocation error handling. -- Maxim Dounin http://nginx.org/ From kworthington at gmail.com Tue Sep 24 15:59:30 2019 From: kworthington at gmail.com (Kevin Worthington) Date: Tue, 24 Sep 2019 11:59:30 -0400 Subject: [nginx-announce] nginx-1.17.4 In-Reply-To: <20190924151657.GU1877@mdounin.ru> References: <20190924151657.GU1877@mdounin.ru> Message-ID: Hello Nginx users, Now available: Nginx 1.17.4 for Windows https://kevinworthington.com/nginxwin1174 (32-bit and 64-bit versions) These versions are to support legacy users who are already using Cygwin based builds of Nginx. Officially supported native Windows binaries are at nginx.org. 
Announcements are also available here: Twitter http://twitter.com/kworthington Thank you, Kevin -- Kevin Worthington kworthington *@* (gmail] [dot} {com) https://kevinworthington.com/ https://twitter.com/kworthington On Tue, Sep 24, 2019 at 11:17 AM Maxim Dounin wrote: > Changes with nginx 1.17.4 24 Sep > 2019 > > *) Change: better detection of incorrect client behavior in HTTP/2. > > *) Change: in handling of not fully read client request body when > returning errors in HTTP/2. > > *) Bugfix: the "worker_shutdown_timeout" directive might not work when > using HTTP/2. > > *) Bugfix: a segmentation fault might occur in a worker process when > using HTTP/2 and the "proxy_request_buffering" directive. > > *) Bugfix: the ECONNABORTED error log level was "crit" instead of > "error" on Windows when using SSL. > > *) Bugfix: nginx ignored extra data when using chunked transfer > encoding. > > *) Bugfix: nginx always returned the 500 error if the "return" > directive > was used and an error occurred during reading client request body. > > *) Bugfix: in memory allocation error handling. > > > -- > Maxim Dounin > http://nginx.org/ > _______________________________________________ > nginx-announce mailing list > nginx-announce at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-announce > -------------- next part -------------- An HTML attachment was scrubbed... URL: From wirelessduck at gmail.com Wed Sep 25 03:55:00 2019 From: wirelessduck at gmail.com (wirelessduck at gmail.com) Date: Wed, 25 Sep 2019 13:55:00 +1000 Subject: your mail In-Reply-To: <20190924113151.GP1877@mdounin.ru> References: <20190924113151.GP1877@mdounin.ru> Message-ID: > On 24 Sep 2019, at 21:32, Maxim Dounin wrote: > > With "mirror_request_body off;" you are unlikely to get a > meaningful result with gRPC, as gRPC requests are in the request > body. > > With "mirror_request_body on;" things are expected to work, but > might not - if the gRPC service expects streaming from the client. 
Thanks Maxim. I guess it won't work here in my configuration then. I had another attempt using Envoy and that seems to be working for me. —Tom From nginx-forum at forum.nginx.org Wed Sep 25 06:13:28 2019 From: nginx-forum at forum.nginx.org (astre) Date: Wed, 25 Sep 2019 02:13:28 -0400 Subject: c++ dynamic module fails to load In-Reply-To: <20190924121704.GQ1877@mdounin.ru> References: <20190924121704.GQ1877@mdounin.ru> Message-ID: <26a008630549d9f02728bd1edf8c1c56.NginxMailingListEnglish@forum.nginx.org> Thanks Maxim. I have updated my source to reflect the addon name. It is working. I'll get back once I get deep into auto/module and auto/make :) Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285679,285701#msg-285701 From nginx-forum at forum.nginx.org Wed Sep 25 10:06:16 2019 From: nginx-forum at forum.nginx.org (medievil19) Date: Wed, 25 Sep 2019 06:06:16 -0400 Subject: Nginx domain resolution 502 gateway Message-ID: <2c4c6cd921a82225bb2a283c35e2e5b1.NginxMailingListEnglish@forum.nginx.org> I have developed a website for a customer, it's currently sitting on a sub-domain on our server and is finished but i now need to carry out testing for eCommerce payments and that means the site needs to move from our sub-domain over to their live domain. For this, i've created a cPanel account with that domain but because it's live elsewhere, the best way for me to complete the migration before any DNS records are changed on the 3rd party hosting is to be able to access the site on my machine, i edited my local host files (windows) for that reason Before NGINX was installed on the CentOS server, modifying the local host file would work perfectly and i could access the site only on my machine to finish up the migration, then when finished i'll ask the 3rd party host to change NS to ours, meaning no downtime to their site and a nice migration. 
At the moment, even though the host file is changed and a local cmd ping brings up our server, i get a 502 gateway error nginx in the browser, checking nginx error logs i believe it's because nginx server is trying to resolve the 3rd party real host IP address but my machine is set to resolve the server ip version. Does that make sense? All other sites on the server are working fine through Apache + Nginx but i'm stuck with this problem. I could simply ask the 3rd party hosting company to change the A record to point to our server but it would mean the client would face some downtime while i finished up the migration. Any help is appreciated. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285702,285702#msg-285702 From nginx-forum at forum.nginx.org Thu Sep 26 10:22:41 2019 From: nginx-forum at forum.nginx.org (krishna) Date: Thu, 26 Sep 2019 06:22:41 -0400 Subject: Empty error and access log Message-ID: <2d73160224de1915677afc539d555b0d.NginxMailingListEnglish@forum.nginx.org> Dear Team, I have configured access_log and error_log at https block level and sometimes(like after log rotation), could see the logs are with 0 bytes (which means, nothing logged though it serves the requests and application is accessible). Any help would be appreciated to figure out the issue and why its occurring . Config: #Log file format log_format main '$remote_addr - $remote_user [$time_local] ' '"$request" $status $body_bytes_sent ' '"$http_referer" "$http_user_agent" "$gzip_ratio" '; access_log /var/logs/access.log main; error_log /var/logs/error.log error; Log Files: -rwxr-xr-x. 1 root root 0 Sep 25 03:13 access.log -rwxr-xr-x. 
1 root root 0 Sep 26 03:34 error.log Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285708,285708#msg-285708 From nginx-forum at forum.nginx.org Thu Sep 26 10:24:20 2019 From: nginx-forum at forum.nginx.org (krishna) Date: Thu, 26 Sep 2019 06:24:20 -0400 Subject: Empty error and access log In-Reply-To: <2d73160224de1915677afc539d555b0d.NginxMailingListEnglish@forum.nginx.org> References: <2d73160224de1915677afc539d555b0d.NginxMailingListEnglish@forum.nginx.org> Message-ID: <05653c7c275e362dcdd7a749f2bc5061.NginxMailingListEnglish@forum.nginx.org> Workaround: restarting nginx helps and logs getting generated. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285708,285709#msg-285709 From francis at daoine.org Thu Sep 26 11:09:16 2019 From: francis at daoine.org (Francis Daly) Date: Thu, 26 Sep 2019 12:09:16 +0100 Subject: Empty error and access log In-Reply-To: <2d73160224de1915677afc539d555b0d.NginxMailingListEnglish@forum.nginx.org> References: <2d73160224de1915677afc539d555b0d.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190926110916.jmwv6xwcywbqppq4@daoine.org> On Thu, Sep 26, 2019 at 06:22:41AM -0400, krishna wrote: Hi there, > I have configured access_log and error_log at https block level and > sometimes(like after log rotation), could see the logs are with 0 bytes > (which means, nothing logged though it serves the requests and application > is accessible). What, specifically, do you mean by "log rotation"? If it involves renaming or deleting a file without telling nginx that the filehandle should be closed and reopened, then it is possible that nginx is still happily logging to the old filehandle. 
f -- Francis Daly francis at daoine.org From nginx-forum at forum.nginx.org Thu Sep 26 11:25:45 2019 From: nginx-forum at forum.nginx.org (krishna) Date: Thu, 26 Sep 2019 07:25:45 -0400 Subject: Empty error and access log In-Reply-To: <20190926110916.jmwv6xwcywbqppq4@daoine.org> References: <20190926110916.jmwv6xwcywbqppq4@daoine.org> Message-ID: <3fe3d08ebc5deee71520d9b7b549ed4a.NginxMailingListEnglish@forum.nginx.org> Hi Francis, Thanks for the reply. logrotate has been used to archive the nginx log files on daily basis from the machine and below is the configuration. Once it has been executed, the files would get zipped in gzip format and new files would get created (as killing the nginx PID, further the nginx log will be generated). /var/nginx/logs/*log { daily rotate 20 missingok notifempty compress sharedscripts postrotate /usr/bin/kill -USR1 `cat /var/pid/nginx.pid 2>/dev/null` 2>/dev/null || true #PID file for nginx endscript } Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285708,285715#msg-285715 From nginx-forum at forum.nginx.org Thu Sep 26 13:13:20 2019 From: nginx-forum at forum.nginx.org (rick_pri) Date: Thu, 26 Sep 2019 09:13:20 -0400 Subject: Empty error and access log In-Reply-To: <3fe3d08ebc5deee71520d9b7b549ed4a.NginxMailingListEnglish@forum.nginx.org> References: <20190926110916.jmwv6xwcywbqppq4@daoine.org> <3fe3d08ebc5deee71520d9b7b549ed4a.NginxMailingListEnglish@forum.nginx.org> Message-ID: We recently noticed this on our servers and we were using USR1 for postrotation which wasn't working as expected. We changed the post rotation command to be HUP and this fixed the issues of rotation being borked. 
http://nginx.org/en/docs/control.html It says that USR1 should reopen logfiles after they have been renamed but we found that this didn't work Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285708,285717#msg-285717 From francis at daoine.org Thu Sep 26 14:59:54 2019 From: francis at daoine.org (Francis Daly) Date: Thu, 26 Sep 2019 15:59:54 +0100 Subject: Empty error and access log In-Reply-To: <3fe3d08ebc5deee71520d9b7b549ed4a.NginxMailingListEnglish@forum.nginx.org> References: <20190926110916.jmwv6xwcywbqppq4@daoine.org> <3fe3d08ebc5deee71520d9b7b549ed4a.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190926145954.ee65625ngswtqqs5@daoine.org> On Thu, Sep 26, 2019 at 07:25:45AM -0400, krishna wrote: Hi there, > logrotate has been used to archive the nginx log files on daily basis from > the machine and below is the configuration. > postrotate > /usr/bin/kill -USR1 `cat /var/pid/nginx.pid 2>/dev/null` 2>/dev/null || > true #PID file for nginx > endscript That looks like it should work; although the "2>/dev/null" parts will possibly throw away any useful status message. http://nginx.org/en/docs/control.html describes what should happen, on the nginx side. When I test here, I don't see anything obvious in the error_log that indicates that USR1 was received. So from here, it looks to me that if you can describe a repeatable recipe that causes this unwanted behaviour to happen, then there may be something within nginx that needs fixing. (If I do a straightforward mv logs/access.log logs/access.log.$(date +%s) then my next request is written to the moved file; and after kill -USR1 `cat logs/nginx.pid` then the next request is written to the new access.log. So a "simple" test here does not fail.) 
Cheers, f -- Francis Daly francis at daoine.org From mdounin at mdounin.ru Thu Sep 26 15:09:45 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 26 Sep 2019 18:09:45 +0300 Subject: Empty error and access log In-Reply-To: References: <20190926110916.jmwv6xwcywbqppq4@daoine.org> <3fe3d08ebc5deee71520d9b7b549ed4a.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190926150945.GE1877@mdounin.ru> Hello! On Thu, Sep 26, 2019 at 09:13:20AM -0400, rick_pri wrote: > We recently noticed this on our servers and we were using USR1 for > postrotation which wasn't working as expected. We changed the post rotation > command to be HUP and this fixed the issues of rotation being borked. > > http://nginx.org/en/docs/control.html > It says that USR1 should reopen logfiles after they have been renamed but we > found that this didn't work The USR1 is the right way to ask nginx to reopen log files, and it is enough for log rotation. While using HUP is also possible, it does full configuration reload, and using it for log rotation is wrong - for example, this may result in accidental use of a configuration being edited. If USR1 does not work for you, most likely it is because log files after log rotation cannot be opened for writing by worker processes. The fix is to assign correct access rights to the new files created during log rotation, and also make sure nginx worker processes are able to access the directory with the log files. -- Maxim Dounin http://mdounin.ru/ From mdounin at mdounin.ru Thu Sep 26 15:13:55 2019 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 26 Sep 2019 18:13:55 +0300 Subject: Empty error and access log In-Reply-To: <2d73160224de1915677afc539d555b0d.NginxMailingListEnglish@forum.nginx.org> References: <2d73160224de1915677afc539d555b0d.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190926151354.GF1877@mdounin.ru> Hello! 
On Thu, Sep 26, 2019 at 06:22:41AM -0400, krishna wrote: > Dear Team, > > I have configured access_log and error_log at https block level and > sometimes(like after log rotation), could see the logs are with 0 bytes > (which means, nothing logged though it serves the requests and application > is accessible). > > Any help would be appreciated to figure out the issue and why its occurring > . > > Config: > > #Log file format > log_format main '$remote_addr - $remote_user [$time_local] ' > '"$request" $status $body_bytes_sent ' > '"$http_referer" "$http_user_agent" "$gzip_ratio" '; > > access_log /var/logs/access.log main; > error_log /var/logs/error.log error; > > Log Files: > > -rwxr-xr-x. 1 root root 0 Sep 25 03:13 access.log > -rwxr-xr-x. 1 root root 0 Sep 26 03:34 error.log These files are only writable by root, hence nginx worker processes won't be able to open these for writing after log rotation. You have to fix your log rotation configuration to create files which are writable by nginx user. For example, nginx own packages as available from nginx.org use the following logrotate configuration (http://hg.nginx.org/pkg-oss/file/tip/debian/nginx.logrotate): /var/log/nginx/*.log { daily missingok rotate 52 compress delaycompress notifempty create 640 nginx adm sharedscripts postrotate if [ -f /var/run/nginx.pid ]; then kill -USR1 `cat /var/run/nginx.pid` fi endscript } Note the "create 640 nginx adm" line. -- Maxim Dounin http://mdounin.ru/ From nginx-forum at forum.nginx.org Thu Sep 26 18:40:55 2019 From: nginx-forum at forum.nginx.org (lvic4594) Date: Thu, 26 Sep 2019 14:40:55 -0400 Subject: proxy http requests to different port numbers? Message-ID: <4d2f3fc6136ab89a9ae9464926d672c0.NginxMailingListEnglish@forum.nginx.org> Hello, I have several http services in the host using different port numbers, for example one REST service listening on 8080, another on 8086. Presently VPN is filtering out all those requests, the only port number allowed for HTTP is 80. 
Looking for workaround: are there any available proxy modules ( specifically interested in ngnx) that would allow to map request urls to different port numbers? So that, for example: request for "http://myserver.com/rest1/api/index.html" would be forwarded to "http://myserver.com:8080/api/index.html"... Thank you Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285723,285723#msg-285723 From nginx-forum at forum.nginx.org Fri Sep 27 05:36:55 2019 From: nginx-forum at forum.nginx.org (krishna) Date: Fri, 27 Sep 2019 01:36:55 -0400 Subject: Empty error and access log In-Reply-To: <20190926151354.GF1877@mdounin.ru> References: <20190926151354.GF1877@mdounin.ru> Message-ID: Hello All, Thanks for the updates. Figured out that, nginx.pid file doesn't have the correct PID value(it contains 1) - which could be valid since nginx is running inside docker container and we are setting up the log rotation from the host machine where the log files getting rotated but new files not getting logged further by nginx worker process. As per the comments tried to edit the nginx.pid file(with Nginx host PID), then issued command "kill -USR1 `cat /var/run/nginx.pid`" which works fine and new log files getting created and logged with data. Had came across the blogs, they suggested to use nginx reload during post rotation as below with the logrotate, postrotate docker exec nginx bash -c "nginx -s reload 2>/dev/null" endscript Kindly let me know, if above is a valid approach can be followed (or) to get the actual Nginx PID at host level & initiate kill command with USR1. Thanks in advance. 
Regards, Krishna Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285708,285726#msg-285726 From francis at daoine.org Fri Sep 27 05:57:07 2019 From: francis at daoine.org (Francis Daly) Date: Fri, 27 Sep 2019 06:57:07 +0100 Subject: Empty error and access log In-Reply-To: <20190926151354.GF1877@mdounin.ru> References: <2d73160224de1915677afc539d555b0d.NginxMailingListEnglish@forum.nginx.org> <20190926151354.GF1877@mdounin.ru> Message-ID: <20190927055707.GA1914@daoine.org> On Thu, Sep 26, 2019 at 06:13:55PM +0300, Maxim Dounin wrote: > On Thu, Sep 26, 2019 at 06:22:41AM -0400, krishna wrote: Hi there, > > -rwxr-xr-x. 1 root root 0 Sep 25 03:13 access.log > > -rwxr-xr-x. 1 root root 0 Sep 26 03:34 error.log > > These files are only writable by root, hence nginx worker > processes won't be able to open these for writing after log > rotation. You have to fix your log rotation configuration to > create files which are writable by nginx user. Ah, that was the part that I had missed here too. Thanks for the extended explanation. Cheers, f -- Francis Daly francis at daoine.org From francis at daoine.org Fri Sep 27 07:07:51 2019 From: francis at daoine.org (Francis Daly) Date: Fri, 27 Sep 2019 08:07:51 +0100 Subject: Empty error and access log In-Reply-To: References: <20190926151354.GF1877@mdounin.ru> Message-ID: <20190927070751.GB1914@daoine.org> On Fri, Sep 27, 2019 at 01:36:55AM -0400, krishna wrote: Hi there, I do not have the answer for you. But... > Figured out that, nginx.pid file doesn't have the correct PID value(it > contains 1) - which could be valid since nginx is running inside docker > container and we are setting up the log rotation from the host machine where > the log files getting rotated but new files not getting logged further by > nginx worker process. 
> > As per the comments tried to edit the nginx.pid file(with Nginx host PID), > then issued command "kill -USR1 `cat /var/run/nginx.pid`" which works fine > and new log files getting created and logged with data. ...what nginx needs is for its master process to receive a "USR1" signal. It does not care how that is done. As I understand it, docker includes a "kill" subcommand with a "-s" option to send a specific signal to a container -- which should mean "to the one process that is running in the container", which in your case should be nginx. > Had came across the blogs, they suggested to use nginx reload during post > rotation as below with the logrotate, > postrotate > docker exec nginx bash -c "nginx -s reload 2>/dev/null" > endscript > > Kindly let me know, if above is a valid approach can be followed (or) to > get the actual Nginx PID at host level & initiate kill command with USR1. I think that there were good reasons explained not to send HUP when USR1 is all that is needed. nginx writes its pid file to a well-known place. If "something else" does virtualising or jailing or containerising or namespacing or any other translation between nginx's idea of its pid and the rest of the world's idea of the nginx pid, then it is that "something else's" job to untranslate as well. Which means: if you use docker to hide nginx from the system, you should use docker to expose nginx to the system. Ask docker for the "real" pid; or use docker to avoid having to find the pid. Good luck with it, f -- Francis Daly francis at daoine.org From nginx-forum at forum.nginx.org Fri Sep 27 09:44:09 2019 From: nginx-forum at forum.nginx.org (astre) Date: Fri, 27 Sep 2019 05:44:09 -0400 Subject: understanding ngx_str_t structure Message-ID: <936dad70cd9fb23fa9a32722efa59103.NginxMailingListEnglish@forum.nginx.org> Hi, I trying to print the requested URL path. For that I came across the "uri" member which is of type ngx_str_t) under ngx_http_request_t structure. 
When I print the "uri" using ngx_log_debug1 (ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "%s", r->uri.data)) I see some extra data printed. For eg. when http://localhost/test is requested, "/test HTTP/1.1 Host" is printed. In my understanding "uri.data" should have contained only "/test". The "uri.len" correctly shows the length as 5. The "data" member is unsigned char *data so while extracting the actual string do we need to consider the length in ngx_str_t ? Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285729,285729#msg-285729 From arut at nginx.com Fri Sep 27 10:14:14 2019 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 27 Sep 2019 13:14:14 +0300 Subject: understanding ngx_str_t structure In-Reply-To: <936dad70cd9fb23fa9a32722efa59103.NginxMailingListEnglish@forum.nginx.org> References: <936dad70cd9fb23fa9a32722efa59103.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190927101414.GV42493@Romans-MacBook-Air.local> Hi, On Fri, Sep 27, 2019 at 05:44:09AM -0400, astre wrote: > Hi, > > I trying to print the requested URL path. For that I came across the "uri" > member which is of type ngx_str_t) under ngx_http_request_t structure. When > I print the "uri" using ngx_log_debug1 (ngx_log_debug1(NGX_LOG_DEBUG_HTTP, > log, 0, "%s", r->uri.data)) I see some extra data printed. > > For eg. when http://localhost/test is requested, "/test HTTP/1.1 Host" is > printed. > > In my understanding "uri.data" should have contained only "/test". The > "uri.len" correctly shows the length as 5. The "data" member is unsigned > char *data so while extracting the actual string do we need to consider the > length in ngx_str_t ? The %s format suggests that the string is null-terminated, which is not the case for r->uri.data. 
There's a special format %V for printing ngx_str_t: ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0, "%V", &r->uri); For more information on ngx_str_t, see the dev guide: http://nginx.org/en/docs/dev/development_guide.html#strings > Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285729,285729#msg-285729 > > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx -- Roman Arutyunyan From nginx-forum at forum.nginx.org Fri Sep 27 10:51:12 2019 From: nginx-forum at forum.nginx.org (astre) Date: Fri, 27 Sep 2019 06:51:12 -0400 Subject: understanding ngx_str_t structure In-Reply-To: <20190927101414.GV42493@Romans-MacBook-Air.local> References: <20190927101414.GV42493@Romans-MacBook-Air.local> Message-ID: <6d21defd618fdde0ecbdbf1a0008f8ff.NginxMailingListEnglish@forum.nginx.org> Thanks Roman, it is working. Is there a way to directly print using std::cout instead of ngx_log_* functions ? P.S: I'm writing module in C++. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285729,285731#msg-285731 From arut at nginx.com Fri Sep 27 11:05:27 2019 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 27 Sep 2019 14:05:27 +0300 Subject: understanding ngx_str_t structure In-Reply-To: <6d21defd618fdde0ecbdbf1a0008f8ff.NginxMailingListEnglish@forum.nginx.org> References: <20190927101414.GV42493@Romans-MacBook-Air.local> <6d21defd618fdde0ecbdbf1a0008f8ff.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190927110527.GX42493@Romans-MacBook-Air.local> On Fri, Sep 27, 2019 at 06:51:12AM -0400, astre wrote: > Thanks Roman, it is working. Is there a way to directly print using > std::cout instead of ngx_log_* functions ? > P.S: I'm writing module in C++. You can use string length in the 'len' field to limit the number of characters printed. 
There are many ways to do that, for example: std::cout.write(r->uri.data, r->uri.len); > > Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285729,285731#msg-285731 > > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx PS: it is better to ask development questions in the nginx-devel mailing list: http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Roman Arutyunyan From nginx-forum at forum.nginx.org Fri Sep 27 11:17:59 2019 From: nginx-forum at forum.nginx.org (astre) Date: Fri, 27 Sep 2019 07:17:59 -0400 Subject: understanding ngx_str_t structure In-Reply-To: <20190927110527.GX42493@Romans-MacBook-Air.local> References: <20190927110527.GX42493@Romans-MacBook-Air.local> Message-ID: Right, actually the question is when I use std::cout nothing gets printed in error_log even when it is set to debug. Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285729,285733#msg-285733 From arut at nginx.com Fri Sep 27 11:30:21 2019 From: arut at nginx.com (Roman Arutyunyan) Date: Fri, 27 Sep 2019 14:30:21 +0300 Subject: understanding ngx_str_t structure In-Reply-To: References: <20190927110527.GX42493@Romans-MacBook-Air.local> Message-ID: <20190927113021.GY42493@Romans-MacBook-Air.local> On Fri, Sep 27, 2019 at 07:17:59AM -0400, astre wrote: > Right, actually the question is when I use std::cout nothing gets printed in > error_log even when it is set to debug. Logging in nginx is more complicated than just writing to fd #1. If you want to log a message to nginx error log, you should use nginx logging API. 
> Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285729,285733#msg-285733 > > _______________________________________________ > nginx mailing list > nginx at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx -- Roman Arutyunyan From nginx-forum at forum.nginx.org Fri Sep 27 11:53:22 2019 From: nginx-forum at forum.nginx.org (astre) Date: Fri, 27 Sep 2019 07:53:22 -0400 Subject: understanding ngx_str_t structure In-Reply-To: <20190927113021.GY42493@Romans-MacBook-Air.local> References: <20190927113021.GY42493@Romans-MacBook-Air.local> Message-ID: <8a1215d14fb5e6f2d61741a956c84c77.NginxMailingListEnglish@forum.nginx.org> ok thanks, then probably write to syslog using syslog API's and it seems nginx already supports syslog. Will that work? Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285729,285737#msg-285737 From nginx-forum at forum.nginx.org Fri Sep 27 21:52:46 2019 From: nginx-forum at forum.nginx.org (kenny2) Date: Fri, 27 Sep 2019 17:52:46 -0400 Subject: ssl setup please Message-ID: Hello, I'm old and have been trying for years (no exaggeration) to get full ssl to work using cloudflare. The problem appears to be in the nginx setup somewhere (or maybe in the linux system which I may or may not have altered throughout the past years). When I choose to go "full ssl" my server is dead regardless of whether I try to access it using :80 or :443 This is what my setup looks like: server { listen 80 default_server; listen 443 default_server; server_name springfield-ohio-post.com www.springfield-ohio-post.com any ideas? Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285740,285740#msg-285740 From lists at lazygranch.com Fri Sep 27 22:46:08 2019 From: lists at lazygranch.com (lists) Date: Fri, 27 Sep 2019 15:46:08 -0700 Subject: ssl setup please In-Reply-To: Message-ID: What shows up in the log files? Do you really need to use Cloudflare? Have you been DDoSed? I view Cloudflare as a man in the middle. 
I've been using Let's Encrypt for about a year with no drama. — Original Message — From: nginx-forum at forum.nginx.org Sent: September 27, 2019 2:53 PM To: nginx at nginx.org Reply-to: nginx at nginx.org Subject: ssl setup please Hello, I'm old and have been trying for years (no exaggeration) to get full ssl to work using cloudflare. The problem appears to be in the nginx setup somewhere (or maybe in the linux system which I may or may not have altered throughout the past years). When I choose to go "full ssl" my server is dead regardless of whether I try to access it using :80 or :443 This is what my setup looks like: server { listen 80 default_server; listen 443 default_server; server_name springfield-ohio-post.com www.springfield-ohio-post.com any ideas? Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285740,285740#msg-285740 _______________________________________________ nginx mailing list nginx at nginx.org http://mailman.nginx.org/mailman/listinfo/nginx From francis at daoine.org Sun Sep 29 08:24:35 2019 From: francis at daoine.org (Francis Daly) Date: Sun, 29 Sep 2019 09:24:35 +0100 Subject: proxy http requests to different port numbers? In-Reply-To: <4d2f3fc6136ab89a9ae9464926d672c0.NginxMailingListEnglish@forum.nginx.org> References: <4d2f3fc6136ab89a9ae9464926d672c0.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190929082435.GC1914@daoine.org> On Thu, Sep 26, 2019 at 02:40:55PM -0400, lvic4594 wrote: Hi there, > Looking for workaround: are there any available proxy modules ( > specifically interested in nginx) that would allow to map request urls to > different port numbers? So that, for example: request for > "http://myserver.com/rest1/api/index.html" would be forwarded to > "http://myserver.com:8080/api/index.html"... That sounds like what nginx's proxy_pass is for, no? http://nginx.org/r/proxy_pass == location /rest1/ { proxy_pass http://myserver.com:8080/; } == plus whatever extra configuration your service needs. 
(It does assume that the API is happy to be reverse-proxied.) f -- Francis Daly francis at daoine.org From francis at daoine.org Sun Sep 29 08:40:58 2019 From: francis at daoine.org (Francis Daly) Date: Sun, 29 Sep 2019 09:40:58 +0100 Subject: Nginx domain resolution 502 gateway In-Reply-To: <2c4c6cd921a82225bb2a283c35e2e5b1.NginxMailingListEnglish@forum.nginx.org> References: <2c4c6cd921a82225bb2a283c35e2e5b1.NginxMailingListEnglish@forum.nginx.org> Message-ID: <20190929084058.GD1914@daoine.org> On Wed, Sep 25, 2019 at 06:06:16AM -0400, medievil19 wrote: Hi there, > At the moment, even though the host file is changed and a local cmd ping > brings up our server, i get a 502 gateway error nginx in the browser, > checking nginx error logs i believe it's because nginx server is trying to > resolve the 3rd party real host IP address but my machine is set to resolve > the server ip version. If your nginx config file uses hostnames in the form proxy_pass http://$variable; then nginx needs to use a run-time resolver to find the IP address. That will probably not use the local hosts file, even indirectly. If your nginx config uses hostnames in the form proxy_pass http://www.example.com; then nginx will use the system resolver at startup to find the IP address. That will probably use the local hosts file. I confess that from your mail, I am not sure what machines are involved in your system, and how you want them to interact. So I can't say exactly what should be changed, where. But hopefully the above will help you work out what needs doing. Good luck with it, f -- Francis Daly francis at daoine.org From nginx-forum at forum.nginx.org Mon Sep 30 20:02:48 2019 From: nginx-forum at forum.nginx.org (syed) Date: Mon, 30 Sep 2019 16:02:48 -0400 Subject: redirected you too many times (ERR_TOO_MANY_REDIRECTS) Message-ID: <792fd578eaebf053e6388e3e7260d526.NginxMailingListEnglish@forum.nginx.org> Hello Techs, I have an issue setting up the load balancing for my Jboss application. 
I have issue with the website load and i need to set up another server which would take the load and serve the application from another server as well. I've set up an Upstream to redirect the traffic to other server but the issue I get is as below once i start application on both the servers. my.domain.com redirected you too many times. ERR_TOO_MANY_REDIRECTS My setup is as below for your reference. Your kind help is much appreciated at the earliest. # # The default server # # upstream COM { server localhost:8080; server Remote server:8080; keepalive 100; } server { listen 80; #listen [::]:80 default_server; server_name my.domain.com; client_max_body_size 500M; server_tokens off; # root /usr/share/nginx/html; # Load configuration files for the default server block. include /etc/nginx/default.d/*.conf; location / { if ($uri !~ "^/(EbreezCorp/.*)$"){ set $rule_0 1$rule_0; } if ($rule_0 = "1"){ rewrite ^/(.*)$ /EbreezCorp/corp/ta/FlyinTa$1 break; } proxy_pass http://COM/; proxy_connect_timeout 300; proxy_send_timeout 300; proxy_read_timeout 300; send_timeout 300; proxy_pass_header Server; proxy_pass_header 'FlyinCOM'; add_header X-Content-Type-Options "nosniff"; proxy_cookie_path / "/EbreezCorp; Secure; HTTPOnly;"; } error_page 404 /404.html; location = /40x.html { } error_page 500 502 503 504 /50x.html; location = /50x.html { } } #################################################################################### Now, if I remove the Upstream and use the configurations below, it just works fine with the single server which doesn't suffice my requirement. # # The default server # # server { listen 80; #listen [::]:80 default_server; server_name 192.168.x.x; client_max_body_size 500M; server_tokens off; # root /usr/share/nginx/html; # Load configuration files for the default server block. 
include /etc/nginx/default.d/*.conf; location / { if ($uri !~ "^/(EbreezCorp/.*)$"){ set $rule_0 1$rule_0; } if ($rule_0 = "1"){ rewrite ^/(.*)$ /EbreezCorp/corp/ta/FlyinTa$1 break; } proxy_pass http://192.168.x.x:8080; proxy_connect_timeout 300; proxy_send_timeout 300; proxy_read_timeout 300; send_timeout 300; proxy_pass_header Server; proxy_pass_header 'FlyinCOM'; add_header X-Content-Type-Options "nosniff"; proxy_cookie_path / "/EbreezCorp; Secure; HTTPOnly;"; } error_page 404 /404.html; location = /40x.html { } error_page 500 502 503 504 /50x.html; location = /50x.html { } } Posted at Nginx Forum: https://forum.nginx.org/read.php?2,285759,285759#msg-285759