epoll_wait() reported that client prematurely closed connection
Vadim Lazovskiy
vadim.lazovskiy at gmail.com
Thu Sep 18 06:01:55 UTC 2014
Hello.
By default, when the client closes the connection, the corresponding
connection to the upstream is closed as well.
If you really need to avoid this, you can try proxy_ignore_client_abort on;
http://nginx.org/ru/docs/http/ngx_http_proxy_module.html#proxy_ignore_client_abort
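For example, a minimal sketch of where the directive could go (the location
and backend address below are illustrative placeholders, not taken from your
config):

    location /some-proxied-path/ {
        proxy_pass http://backend.example.local:8080;

        # Do not close the upstream connection when the client aborts;
        # nginx keeps processing the request until the proxy timeouts expire.
        proxy_ignore_client_abort on;
    }

Note that with this setting nginx will still read and then discard a response
it can no longer deliver to the client, so it is only worth enabling if the
backend errors are the real concern.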
2014-09-18 9:49 GMT+04:00 ole-lukoje <nginx-forum at nginx.us>:
> The problem is that the proxied server logs errors like "An
> exception occured writing the response entity. Broken pipe", and we
> need to eliminate them.
> They apparently arise because nginx drops the connection to the
> proxied server, and we are not sure that terminating connections this
> way will not lead to memory leaks.
>
> CentOS release 6.5
> Linux version 2.6.32-431.el6.x86_64
> nginx/1.5.8
>
> Configuration file contents:
>
> user nginx;
>
> worker_processes 8;
> timer_resolution 100ms;
> worker_rlimit_nofile 50000;
> worker_priority -5;
>
> error_log /var/log/nginx/error.log info;
> pid /var/run/nginx.pid;
>
> events {
> worker_connections 25000;
> use epoll;
> }
>
> http {
>
> include mime.types;
> default_type application/x-javascript;
>
> log_format main '$remote_addr - $remote_user [$time_local] $request '
> '"$status" $body_bytes_sent "$http_referer" '
> '"$http_user_agent" "$http_x_forwarded_for"';
>
> chunked_transfer_encoding off;
>
> gzip on;
> gzip_static on;
> gzip_min_length 640;
> gzip_buffers 64 8k;
> gzip_comp_level 4;
> gzip_http_version 1.1;
> gzip_proxied any;
> gzip_types text/plain application/xml application/x-javascript text/css;
> gzip_disable "MSIE [1-6]\.(?!.*SV1)";
> gzip_vary on;
>
> output_buffers 32 512k;
> sendfile_max_chunk 128k;
> postpone_output 1460;
> server_names_hash_bucket_size 64;
>
> tcp_nopush on;
> tcp_nodelay on;
>
> client_max_body_size 1m;
> client_body_buffer_size 128k;
> client_header_buffer_size 1k;
> large_client_header_buffers 4 4k;
>
> keepalive_timeout 45 45;
> client_header_timeout 45;
> client_body_timeout 45;
> send_timeout 45;
> reset_timedout_connection on;
>
> memcached_connect_timeout 60s;
> memcached_read_timeout 60s;
> memcached_send_timeout 60s;
>
> charset utf-8;
> source_charset utf-8;
> ignore_invalid_headers on;
> keepalive_requests 100;
> recursive_error_pages off;
> server_tokens off;
> server_name_in_redirect off;
> sendfile on;
>
> open_file_cache max=1000 inactive=20s;
> open_file_cache_valid 30s;
> open_file_cache_min_uses 2;
> open_file_cache_errors on;
>
> #######################################################################
> # PUSH_STREAM_MODULE GLOBAL SETTINGS (COMET)
> #######################################################################
>
> # The size of the memory chunk this module will use to store published
> # messages, channels and other shared structures. When this memory is
> # full, any new request to publish a message or subscribe to a channel
> # will receive a 500 Internal Server Error response.
> push_stream_shared_memory_size 100M;
>
> # Maximum permissible channel id length (number of characters).
> # Longer ids will receive a 400 Bad Request response.
> push_stream_max_channel_id_length 50;
>
> # The length of time a subscriber will stay connected before it is
> # considered expired and disconnected.
> # If you do not want subscribers to be automatically disconnected,
> # simply do not set this directive.
> # However, this cleanup is very important to help nginx recycle the
> # memory consumed to send messages to subscribers, allocated in the
> # request pool.
> push_stream_subscriber_connection_ttl 5m;
> push_stream_longpolling_connection_ttl 5m;
>
> push_stream_wildcard_channel_prefix "broadcast_";
> proxy_cache_path /var/cache/nginx/ftl levels=1:2 keys_zone=ftl-cache:20m max_size=100m inactive=120m;
>
> upstream memcached_cluster {
> server 127.0.0.1:11211;
> hash $uri/3.8;
> hash_again 1000;
> keepalive 512;
> }
>
> server {
> listen *:80;
> server_name_in_redirect off;
> server_name test.example.com;
>
> proxy_connect_timeout 60s;
> proxy_send_timeout 60s;
> proxy_read_timeout 60s;
>
> proxy_buffering on;
> proxy_buffer_size 64k;
> proxy_buffers 4 64k;
> proxy_busy_buffers_size 128k;
> proxy_temp_file_write_size 10m;
> proxy_headers_hash_bucket_size 256;
>
> proxy_set_header Host $host:$server_port;
> proxy_set_header X-Real-IP $remote_addr;
> proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
> access_log /var/log/nginx/nginx.log main;
> log_not_found off;
> root /var/spool/nginx/;
>
> location /portal-facade-ng/v1/btv/imageMap/ {
> if ($request_method != GET) {
> proxy_pass http://127.0.0.1:8080;
> break;
> }
>
> add_header Cache-Control no-cache;
> add_header Content-Type image/png;
> default_type image/png;
> set $memcached_key "$uri/3.8";
> memcached_pass memcached_cluster;
> }
>
>
> location /portal-facade-ng/v1/btv/epg/current/ {
> if ($request_method != GET) {
> proxy_pass http://127.0.0.1:8080;
> break;
> }
> set $memcached_key "$uri/3.8";
> memcached_pass memcached_cluster;
> }
>
> location /portal-facade-ng/v1/btv/epgGrid/bar/image/ {
> if ($request_method != GET) {
> proxy_pass http://127.0.0.1:8080;
> break;
> }
>
> add_header Cache-Control no-cache;
> add_header Content-Type image/png;
> default_type image/png;
> set $memcached_key "$uri/3.8";
> memcached_pass memcached_cluster;
> }
> }
> }
>
> Posted at Nginx Forum: http://forum.nginx.org/read.php?21,253351,253363#msg-253363
>
> _______________________________________________
> nginx-ru mailing list
> nginx-ru at nginx.org
> http://mailman.nginx.org/mailman/listinfo/nginx-ru
--
WBR,
Vadim Lazovskiy