Optimal nginx settings for websockets sending images

michael.heuberger nginx-forum at nginx.us
Tue Feb 26 03:15:55 UTC 2013


Hello guys

The recent nginx 1.3.13 websocket support is fantastic! Big thanks to the
nginx devs, it works like a charm.

I only have performance issues. Sending images through websockets turns out
to be difficult and slow. I have a website sending 5 images per second to
the server.

Sometimes I have warnings like "an upstream response is buffered to a
temporary file", then sometimes it's lagging and the server isn't that
fast.

I'm not sure if my settings for this scenario are optimal. Below you will
find extracts of my nginx conf files. Maybe you spot some mistakes or have
suggestions?

Thanks,
Michael

nginx.conf:

user www-data;
worker_processes 2;
pid /var/run/nginx.pid;

events {
	worker_connections 1536;
}

http {
        proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=one:8m
max_size=1400m inactive=500m;
        proxy_temp_path /var/tmp;

        proxy_buffers 8 2m;
        proxy_buffer_size 10m;
        proxy_busy_buffers_size 10m;

        proxy_cache one;
        proxy_cache_key "$request_uri|$request_body";

        # Sendfile copies data between one FD and another from within the
kernel.
        # More efficient than read() + write(), since those require
transferring data to and from user space.
        sendfile on; 

        # Tcp_nopush causes nginx to attempt to send its HTTP response
headers in one packet,
        # instead of using partial frames. This is useful for prepending
headers before calling sendfile,
        # or for throughput optimization.
        tcp_nopush on;

        # on = don't buffer data-sends (disable Nagle algorithm). Good for
sending frequent small bursts of data in real time.
        # here set off because of large bursts of data
        tcp_nodelay off; 

        # Timeout for keep-alive connections. Server will close connections
after this time.
        keepalive_timeout 30;

	include /etc/nginx/mime.types;
        default_type application/octet-stream;

        access_log /var/log/nginx/access.log;
        error_log /var/log/nginx/error.log notice;

        gzip on;
        gzip_min_length 10240;
        gzip_disable "MSIE [1-6]\.";

        gzip_vary on;
        gzip_proxied any;
        gzip_comp_level 6;
        gzip_buffers 16 8k;
        gzip_http_version 1.1;
        gzip_types text/plain text/css application/json
application/x-javascript text/xml application/xml application/xml+rss
text/javascript;

	include /etc/nginx/conf.d/*.conf;
	include /etc/nginx/sites-enabled/*;
}

/sites-available/other_site.conf:

upstream other_site_upstream {
  server 127.0.0.1:4443;
}

server {

  ...

  location / {
    proxy_next_upstream error timeout http_502;
    proxy_pass         https://other_site_upstream;
    proxy_set_header   X-Real-IP        $remote_addr;
    proxy_set_header   X-Forwarded-For  $proxy_add_x_forwarded_for;
    proxy_set_header   Host             $http_host;
    proxy_set_header   X-NginX-Proxy    true;

    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";

    proxy_http_version 1.1;
    proxy_redirect off;
  }
}

Posted at Nginx Forum: http://forum.nginx.org/read.php?2,236601,236601#msg-236601



More information about the nginx mailing list