reverse ssl proxy - speed & jitter
newnovice
nginx-forum at nginx.us
Wed Jul 23 18:00:36 UTC 2014
Full Config:
#user nobody;

# Worker count: at maximum, the number of CPU cores on your system
# (nginx doesn't benefit from more than one worker per CPU).
worker_processes 5;

# Give worker processes the priority (nice value) you need/wish; nginx
# calls setpriority() with this value.
worker_priority -10;

# Decrease the number of gettimeofday() syscalls. By default gettimeofday()
# is called after each return from kevent(), epoll, /dev/poll, select(), poll().
timer_resolution 200ms;

# Pin worker processes to CPUs (one bitmask per worker).
worker_cpu_affinity 10001 10010 10011 10100 10101;

# error_log LOGFILE [debug_core | debug_alloc | debug_mutex | debug_event |
#                    debug_http | debug_imap] | debug | crit | emerg;
error_log /dev/null emerg;

pid var/state/nginx.pid;

# Number of file descriptors used for nginx. The OS limit is set with
# 'ulimit -n 200000' or via /etc/security/limits.conf.
#worker_rlimit_nofile 60000;
# worker_connections * 2
events {
    use epoll;
    worker_connections 20000;
    # Accept as many connections as possible after nginx gets notification
    # about a new connection.
    # May flood worker_connections if that option is set too low.
    multi_accept on;
    accept_mutex off;
}
http {
    default_type application/octet-stream;

    # Access-log format: timing, client, request and TLS details.
    # (The original kept $proxy_add_x_forwarded_for_cip commented out here.)
    log_format main '[$time_local] - [$time_iso8601] - [$request_time] - '
                    '[$upstream_response_time] - $remote_addr '
                    '"$http_x_forwarded_for" - $remote_user - "$request" [$request_time] '
                    '$status - $request_length - $body_bytes_sent - "$http_referer" - '
                    '"$http_user_agent" - $uri - $request_method - '
                    '$ssl_protocol - $ssl_cipher';

    sendfile on;
    # tcp_nopush causes nginx to attempt to send its HTTP response head in
    # one packet, instead of using partial frames. This is useful for
    # prepending headers before calling sendfile, or for throughput optimization.
    tcp_nopush on;
    # The TCP_NODELAY option. The option is enabled only when a connection
    # is transitioned into the keep-alive state.
    tcp_nodelay on;

    # Caches information about open FDs, frequently accessed files.
    # Changing this setting, in my environment, brought performance up from
    # 560k req/sec to 904k req/sec. I recommend using some variant of these
    # options, though not the specific values listed below.
    open_file_cache max=200000 inactive=20s;
    open_file_cache_valid 30s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;
    open_log_file_cache max=100000 inactive=2m valid=10m min_uses=2;

    keepalive_timeout 5000;
    # Number of requests which can be made over a keep-alive connection.
    # Review and change it to a more suitable value if required.
    keepalive_requests 100000;

    # Allow the server to close the connection after a client stops
    # responding. Frees up socket-associated memory.
    reset_timedout_connection on;
    # Send the client a "request timed out" if the body is not loaded by
    # this time. Default 60.
    client_body_timeout 10;
    # If the client stops reading data, free up the stale client connection
    # after this much time. Default 60.
    send_timeout 2;

    # Compression. Reduces the amount of data that needs to be transferred
    # over the network.
    gzip on;
    gzip_min_length 10240;
    gzip_proxied expired no-cache no-store private auth;
    gzip_types text/plain text/css text/xml text/javascript
               application/x-javascript application/xml;
    gzip_disable "MSIE [1-6]\.";

    client_body_temp_path var/state/nginx/client_body_temp 1 2;
    proxy_temp_path var/state/nginx/proxy_temp 1 2;
    fastcgi_temp_path var/state/nginx/fastcgi_temp 1 2;
    uwsgi_temp_path var/state/nginx/uwsgi_temp 1 2;
    # NOTE(review): the original directory name was "scgi_temp_path",
    # inconsistent with the sibling *_temp directories — fixed to scgi_temp.
    scgi_temp_path var/state/nginx/scgi_temp 1 2;

    server_tokens off;
    postpone_output 0;

    upstream downstream_service {
        server 127.0.0.1:9999;
        keepalive 180;
    }

    # Turn off proxy buffering
    proxy_buffering off;
    proxy_buffer_size 128K;
    proxy_busy_buffers_size 128K;
    proxy_buffers 64 4K;
    client_body_buffer_size 512K;
    large_client_header_buffers 4 64k;
    limit_conn_zone $server_name zone=perserver1:32k;
    # Allow arbitrary size client posts
    client_max_body_size 0;

    # HTTPS server config
    server {
        listen 443 ssl;
        # sndbuf=128k;
        server_name test-domain.com;

        # Buffer log writes to speed up IO, or disable them altogether
        access_log off; # turned off for better performance

        ssl_certificate /dev/shm/test-domain.com/cert;
        ssl_certificate_key /dev/shm/test-domain.com/key;
        # Do not overflow the SSL send buffer (causes extra round trips)
        ssl_buffer_size 8k;
        ssl_session_timeout 10m;
        # SECURITY: the original allowed SSLv3 (broken by POODLE) and used
        # the RC4-MD5 cipher (RC4 is prohibited by RFC 7465). Use TLS only,
        # with a modern server-preferred cipher selection.
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_ciphers HIGH:!aNULL:!MD5:!RC4;
        ssl_prefer_server_ciphers on;
        ssl_session_cache shared:SSL:10m;

        # Forward the client-supplied Host header when present; fall back
        # to $host (the server_name) otherwise.
        set $host_header $host;
        if ($http_host != "") {
            set $host_header $http_host;
        }

        location / {
            proxy_pass http://downstream_service/;
            # HTTP/1.1 plus an empty Connection header enables keepalive
            # to the upstream (matches the "keepalive 180" pool above).
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            proxy_set_header Host $host_header;
            proxy_set_header X-Real-IP $remote_addr;
            # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for_cip;
            limit_conn perserver1 180;
        }

        # Nginx health check only, to verify the server is up and running
        location /nginx_ping {
            return 200;
        }
    }
}
Posted at Nginx Forum: http://forum.nginx.org/read.php?2,252002,252006#msg-252006
More information about the nginx mailing list