upstream sent no valid HTTP/1.0 header while reading response header from upstream,

sanflores nginx-forum at
Tue Jan 26 20:01:33 UTC 2021

I'm using Tomcat as a backend (using websockets), and it is hosted on AWS
behind a load balancer. After some time (according to the timeouts) I start
getting the error that the upstream sent no valid HTTP/1.0 header, but what
is strange is that I'm only using HTTP/1.1. This is my configuration:

# This file was overwritten during deployment by
user                    nginx;
error_log               /var/log/nginx/error.log warn;
pid                     /var/run/nginx.pid;
worker_processes        auto;
worker_rlimit_nofile    133650;

events {
    worker_connections  10240;
}

http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;
    client_max_body_size 0; # disable any limits to avoid HTTP 413

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    log_format  main_ext  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for" '
                      '"$host" sn="$server_name" '
                      'rt=$request_time '
                      'ua="$upstream_addr" us="$upstream_status" '
                      'ut="$upstream_response_time" ul="$upstream_response_length" '
                      'cs=$upstream_cache_status';

    include       conf.d/*.conf;

    map $http_upgrade $connection_upgrade {
        default     "upgrade";
    }

    server {
        listen        80 default_server;
        access_log    /var/log/nginx/access.log main_ext;

        client_header_timeout 60;
        client_body_timeout   60;
        keepalive_timeout     60;
        gzip                  on;
        gzip_comp_level       4;
        gzip_types text/plain text/css application/json
application/javascript application/x-javascript text/xml application/xml
application/xml+rss text/javascript;

        # Include the Elastic Beanstalk generated locations (We are
        # commenting this in order to add some CORS configuration)
        # include conf.d/elasticbeanstalk/*.conf;

        location / {
            proxy_http_version  1.1;

            proxy_buffers 16 16k;
            proxy_buffer_size 16k;

            proxy_set_header    Connection          $connection_upgrade;
            proxy_set_header    Upgrade             $http_upgrade;
            proxy_set_header    Host                $host;
            proxy_set_header    X-Real-IP           $remote_addr;
            proxy_set_header    X-Forwarded-For     $proxy_add_x_forwarded_for;
        }

        location ~* \.(jpe?g|png|gif|ico|css|woff)$ {
            proxy_http_version  1.1;
            add_header 'Access-Control-Allow-Origin' '*';
            add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
        }

        location /nginx_status {
            stub_status on;
            deny all;
        }
    }
}

There is nothing in conf.d/*.conf

I do have a lot of connections in the Writing state, but I'm guessing that's
because of the long-lived websocket connections.

# curl localhost/nginx_status;lsof -n -u nginx | awk '{print $5}' | sort |
uniq -c | sort -nr
Active connections: 1029
server accepts handled requests
 62963 62963 342949
Reading: 0 Writing: 1016 Waiting: 13
   2053 IPv4
    338 REG
     81 unix
     28 0000
     24 CHR
     20 DIR
      2 FIFO
      1 TYPE

If I set the timeout on the balancer and proxy_read_timeout to 4,000 seconds
(the max available), I'm able to live without errors for an hour, until I
need to restart the Tomcat server. We haven't made any configuration changes
for the last year, but I'm guessing I'm hitting some limit, and I can't find
what it is.

Posted at Nginx Forum:,290573,290573#msg-290573

More information about the nginx mailing list