From daniel.black at openquery.com  Mon Oct  1 05:18:04 2012
From: daniel.black at openquery.com (Daniel Black)
Date: Mon, 1 Oct 2012 15:18:04 +1000 (EST)
Subject: [PATCH] allow printing of string buffers in hex format
In-Reply-To: <1483791051.332.1349068428241.JavaMail.root@zimbra.lentz.com.au>
Message-ID: <1727882291.340.1349068683851.JavaMail.root@zimbra.lentz.com.au>

While doing some other development I found it useful to print log messages
in hex format. The attached patch allows output of a string buffer in hex
format using the %X*s format. An example is:

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, log, 0,
                   "Buffer in hex is %X*s", sizeof(buffer), buffer);

The existing %*s format still works.

--
Daniel Black

-------------- next part --------------
A non-text attachment was scrubbed...
Name: nginx-print_buffer_in_hex.patch
Type: text/x-patch
Size: 4186 bytes
Desc: not available
URL:

From mdounin at mdounin.ru  Mon Oct  1 12:39:37 2012
From: mdounin at mdounin.ru (mdounin at mdounin.ru)
Date: Mon, 1 Oct 2012 12:39:37 +0000
Subject: [nginx] svn commit: r4873 - in trunk/src: event http/modules
Message-ID: <20121001123938.754A23F9C11@mail.nginx.com>

Author: mdounin
Date: 2012-10-01 12:39:36 +0000 (Mon, 01 Oct 2012)
New Revision: 4873
URL: http://trac.nginx.org/nginx/changeset/4873/nginx

Log:
OCSP stapling: ssl_trusted_certificate directive.

The directive allows specifying additional trusted Certificate Authority
certificates to be used during certificate verification. In contrast to
ssl_client_certificate, DNs of these certificates aren't sent to a client
during the handshake.

Trusted certificates are loaded regardless of whether client certificate
verification is enabled, as the same certificates will be used for OCSP
stapling, both during construction of an OCSP request and for verification
of an OCSP response. The same applies to a CRL (which is now always loaded).
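For context, a minimal server block using the new directive might look
roughly as follows. This is only a sketch: the server name and certificate
paths are made-up placeholders, and ssl_trusted_certificate simply points
at a PEM file with the extra CA certificates that should be trusted for
verification but whose DNs are not advertised to clients.

    server {
        listen                   443 ssl;
        server_name              example.com;

        ssl_certificate          example.com.crt;
        ssl_certificate_key      example.com.key;

        # additional CAs trusted when verifying client certificates and
        # OCSP responses; unlike ssl_client_certificate, their DNs are
        # not sent to clients during the handshake
        ssl_trusted_certificate  trusted-ca.pem;
    }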
Modified: trunk/src/event/ngx_event_openssl.c trunk/src/event/ngx_event_openssl.h trunk/src/http/modules/ngx_http_ssl_module.c trunk/src/http/modules/ngx_http_ssl_module.h Modified: trunk/src/event/ngx_event_openssl.c =================================================================== --- trunk/src/event/ngx_event_openssl.c 2012-09-28 18:28:38 UTC (rev 4872) +++ trunk/src/event/ngx_event_openssl.c 2012-10-01 12:39:36 UTC (rev 4873) @@ -297,6 +297,33 @@ ngx_int_t +ngx_ssl_trusted_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, + ngx_int_t depth) +{ + SSL_CTX_set_verify_depth(ssl->ctx, depth); + + if (cert->len == 0) { + return NGX_OK; + } + + if (ngx_conf_full_name(cf->cycle, cert, 1) != NGX_OK) { + return NGX_ERROR; + } + + if (SSL_CTX_load_verify_locations(ssl->ctx, (char *) cert->data, NULL) + == 0) + { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_load_verify_locations(\"%s\") failed", + cert->data); + return NGX_ERROR; + } + + return NGX_OK; +} + + +ngx_int_t ngx_ssl_crl(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *crl) { X509_STORE *store; Modified: trunk/src/event/ngx_event_openssl.h =================================================================== --- trunk/src/event/ngx_event_openssl.h 2012-09-28 18:28:38 UTC (rev 4872) +++ trunk/src/event/ngx_event_openssl.h 2012-10-01 12:39:36 UTC (rev 4873) @@ -101,6 +101,8 @@ ngx_str_t *cert, ngx_str_t *key); ngx_int_t ngx_ssl_client_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, ngx_int_t depth); +ngx_int_t ngx_ssl_trusted_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, + ngx_str_t *cert, ngx_int_t depth); ngx_int_t ngx_ssl_crl(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *crl); RSA *ngx_ssl_rsa512_key_callback(SSL *ssl, int is_export, int key_length); ngx_int_t ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file); Modified: trunk/src/http/modules/ngx_http_ssl_module.c =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.c 2012-09-28 18:28:38 UTC (rev 4872) +++ trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-01 12:39:36 UTC (rev 4873) @@ -124,6 +124,13 @@ offsetof(ngx_http_ssl_srv_conf_t, client_certificate), NULL }, + { ngx_string("ssl_trusted_certificate"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_str_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_ssl_srv_conf_t, trusted_certificate), + NULL }, + { ngx_string("ssl_prefer_server_ciphers"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG, ngx_conf_set_flag_slot, @@ -325,6 +332,7 @@ * sscf->dhparam = { 0, NULL }; * sscf->ecdh_curve = { 0, NULL }; * sscf->client_certificate = { 0, NULL }; + * sscf->trusted_certificate = { 0, NULL }; * sscf->crl = { 0, NULL }; * sscf->ciphers = { 0, NULL }; * sscf->shm_zone = NULL; @@ -380,6 +388,8 @@ ngx_conf_merge_str_value(conf->client_certificate, prev->client_certificate, ""); + ngx_conf_merge_str_value(conf->trusted_certificate, + prev->trusted_certificate, ""); ngx_conf_merge_str_value(conf->crl, prev->crl, ""); ngx_conf_merge_str_value(conf->ecdh_curve, prev->ecdh_curve, @@ -479,12 +489,20 @@ { return NGX_CONF_ERROR; } + } - if (ngx_ssl_crl(cf, &conf->ssl, &conf->crl) != NGX_OK) { - return NGX_CONF_ERROR; - } + if (ngx_ssl_trusted_certificate(cf, &conf->ssl, + &conf->trusted_certificate, + conf->verify_depth) + != NGX_OK) + { + return NGX_CONF_ERROR; } + if (ngx_ssl_crl(cf, &conf->ssl, &conf->crl) != NGX_OK) { + return NGX_CONF_ERROR; + } + if (conf->prefer_server_ciphers) { SSL_CTX_set_options(conf->ssl.ctx, 
SSL_OP_CIPHER_SERVER_PREFERENCE); } Modified: trunk/src/http/modules/ngx_http_ssl_module.h =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.h 2012-09-28 18:28:38 UTC (rev 4872) +++ trunk/src/http/modules/ngx_http_ssl_module.h 2012-10-01 12:39:36 UTC (rev 4873) @@ -35,6 +35,7 @@ ngx_str_t dhparam; ngx_str_t ecdh_curve; ngx_str_t client_certificate; + ngx_str_t trusted_certificate; ngx_str_t crl; ngx_str_t ciphers; From mdounin at mdounin.ru Mon Oct 1 12:41:09 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 1 Oct 2012 12:41:09 +0000 Subject: [nginx] svn commit: r4874 - in trunk: auto src/event src/http/modules Message-ID: <20121001124110.13CFE3F9C0F@mail.nginx.com> Author: mdounin Date: 2012-10-01 12:41:08 +0000 (Mon, 01 Oct 2012) New Revision: 4874 URL: http://trac.nginx.org/nginx/changeset/4874/nginx Log: OCSP stapling: ssl_stapling_file support. Very basic version without any OCSP responder query code, assuming valid DER-encoded OCSP response is present in a ssl_stapling_file configured. Such file might be produced with openssl like this: openssl ocsp -issuer root.crt -cert domain.crt -respout domain.staple \ -url http://ocsp.example.com Modified: trunk/auto/sources trunk/src/event/ngx_event_openssl.h trunk/src/http/modules/ngx_http_ssl_module.c trunk/src/http/modules/ngx_http_ssl_module.h Modified: trunk/auto/sources =================================================================== --- trunk/auto/sources 2012-10-01 12:39:36 UTC (rev 4873) +++ trunk/auto/sources 2012-10-01 12:41:08 UTC (rev 4874) @@ -77,7 +77,8 @@ OPENSSL_MODULE=ngx_openssl_module OPENSSL_DEPS=src/event/ngx_event_openssl.h -OPENSSL_SRCS=src/event/ngx_event_openssl.c +OPENSSL_SRCS="src/event/ngx_event_openssl.c \ + src/event/ngx_event_openssl_stapling.c" EVENT_MODULES="ngx_events_module ngx_event_core_module" Modified: trunk/src/event/ngx_event_openssl.h =================================================================== --- trunk/src/event/ngx_event_openssl.h 2012-10-01 12:39:36 UTC (rev 4873) +++ trunk/src/event/ngx_event_openssl.h 2012-10-01 12:41:08 UTC (rev 4874) @@ -17,6 +17,7 @@ #include #include #include +#include #define NGX_SSL_NAME "OpenSSL" @@ -104,6 +105,7 @@ ngx_int_t ngx_ssl_trusted_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, ngx_int_t depth); ngx_int_t ngx_ssl_crl(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *crl); +ngx_int_t ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file); RSA *ngx_ssl_rsa512_key_callback(SSL *ssl, int is_export, int key_length); ngx_int_t ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file); ngx_int_t ngx_ssl_ecdh_curve(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *name); Modified: trunk/src/http/modules/ngx_http_ssl_module.c =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-01 12:39:36 UTC (rev 4873) +++ trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-01 12:41:08 UTC (rev 4874) @@ -159,6 +159,20 @@ offsetof(ngx_http_ssl_srv_conf_t, crl), NULL }, + { ngx_string("ssl_stapling"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_ssl_srv_conf_t, stapling), + NULL }, + + { ngx_string("ssl_stapling_file"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_str_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_ssl_srv_conf_t, stapling_file), + NULL }, + ngx_null_command }; @@ -336,6 +350,7 
@@ * sscf->crl = { 0, NULL }; * sscf->ciphers = { 0, NULL }; * sscf->shm_zone = NULL; + * sscf->stapling_file = { 0, NULL }; */ sscf->enable = NGX_CONF_UNSET; @@ -344,6 +359,7 @@ sscf->verify_depth = NGX_CONF_UNSET_UINT; sscf->builtin_session_cache = NGX_CONF_UNSET; sscf->session_timeout = NGX_CONF_UNSET; + sscf->stapling = NGX_CONF_UNSET; return sscf; } @@ -397,6 +413,8 @@ ngx_conf_merge_str_value(conf->ciphers, prev->ciphers, NGX_DEFAULT_CIPHERS); + ngx_conf_merge_value(conf->stapling, prev->stapling, 0); + ngx_conf_merge_str_value(conf->stapling_file, prev->stapling_file, ""); conf->ssl.log = cf->log; @@ -533,6 +551,12 @@ return NGX_CONF_ERROR; } + if (conf->stapling + && ngx_ssl_stapling(cf, &conf->ssl, &conf->stapling_file) != NGX_OK) + { + return NGX_CONF_ERROR; + } + return NGX_CONF_OK; } Modified: trunk/src/http/modules/ngx_http_ssl_module.h =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.h 2012-10-01 12:39:36 UTC (rev 4873) +++ trunk/src/http/modules/ngx_http_ssl_module.h 2012-10-01 12:41:08 UTC (rev 4874) @@ -42,6 +42,9 @@ ngx_shm_zone_t *shm_zone; + ngx_flag_t stapling; + ngx_str_t stapling_file; + u_char *file; ngx_uint_t line; } ngx_http_ssl_srv_conf_t; From mdounin at mdounin.ru Mon Oct 1 12:42:44 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 1 Oct 2012 12:42:44 +0000 Subject: [nginx] svn commit: r4875 - trunk/src/event Message-ID: <20121001124244.397793F9C0F@mail.nginx.com> Author: mdounin Date: 2012-10-01 12:42:43 +0000 (Mon, 01 Oct 2012) New Revision: 4875 URL: http://trac.nginx.org/nginx/changeset/4875/nginx Log: OCSP stapling: the ngx_event_openssl_stapling.c file. Missed in previous commit. Added: trunk/src/event/ngx_event_openssl_stapling.c Added: trunk/src/event/ngx_event_openssl_stapling.c =================================================================== --- trunk/src/event/ngx_event_openssl_stapling.c (rev 0) +++ trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:42:43 UTC (rev 4875) @@ -0,0 +1,140 @@ + +/* + * Copyright (C) Maxim Dounin + * Copyright (C) Nginx, Inc. 
+ */ + + +#include +#include +#include + + +#ifdef SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB + + +static int ngx_ssl_certificate_status_callback(ngx_ssl_conn_t *ssl_conn, + void *data); + + +ngx_int_t +ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file) +{ + BIO *bio; + int len; + u_char *p, *buf; + ngx_str_t *staple; + OCSP_RESPONSE *response; + + if (file->len == 0) { + return NGX_OK; + } + + if (ngx_conf_full_name(cf->cycle, file, 1) != NGX_OK) { + return NGX_ERROR; + } + + bio = BIO_new_file((char *) file->data, "r"); + if (bio == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "BIO_new_file(\"%s\") failed", file->data); + return NGX_ERROR; + } + + response = d2i_OCSP_RESPONSE_bio(bio, NULL); + if (response == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "d2i_OCSP_RESPONSE_bio(\"%s\") failed", file->data); + BIO_free(bio); + return NGX_ERROR; + } + + len = i2d_OCSP_RESPONSE(response, NULL); + if (len <= 0) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "i2d_OCSP_RESPONSE(\"%s\") failed", file->data); + goto failed; + } + + buf = ngx_pnalloc(cf->pool, len); + if (buf == NULL) { + goto failed; + } + + p = buf; + len = i2d_OCSP_RESPONSE(response, &p); + if (len <= 0) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "i2d_OCSP_RESPONSE(\"%s\") failed", file->data); + goto failed; + } + + OCSP_RESPONSE_free(response); + BIO_free(bio); + + staple = ngx_palloc(cf->pool, sizeof(ngx_str_t)); + if (staple == NULL) { + return NGX_ERROR; + } + + staple->data = buf; + staple->len = len; + + SSL_CTX_set_tlsext_status_cb(ssl->ctx, ngx_ssl_certificate_status_callback); + SSL_CTX_set_tlsext_status_arg(ssl->ctx, staple); + + return NGX_OK; + +failed: + + OCSP_RESPONSE_free(response); + BIO_free(bio); + + return NGX_ERROR; +} + + +static int +ngx_ssl_certificate_status_callback(ngx_ssl_conn_t *ssl_conn, void *data) +{ + u_char *p; + ngx_str_t *staple; + ngx_connection_t *c; + + c = ngx_ssl_get_connection(ssl_conn); + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, + "SSL certificate status callback"); + + staple = data; + + /* we have to copy the staple as OpenSSL will free it by itself */ + + p = OPENSSL_malloc(staple->len); + if (p == NULL) { + ngx_ssl_error(NGX_LOG_ALERT, c->log, 0, "OPENSSL_malloc() failed"); + return SSL_TLSEXT_ERR_ALERT_FATAL; + } + + ngx_memcpy(p, staple->data, staple->len); + + SSL_set_tlsext_status_ocsp_resp(ssl_conn, p, staple->len); + + return SSL_TLSEXT_ERR_OK; +} + + +#else + + +ngx_int_t +ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file) +{ + ngx_log_error(NGX_LOG_WARN, ssl->log, 0, + "\"ssl_stapling\" ignored, not supported"); + + return NGX_OK; +} + + +#endif From mdounin at mdounin.ru Mon Oct 1 12:47:55 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 1 Oct 2012 12:47:55 +0000 Subject: [nginx] svn commit: r4876 - in trunk/src: core event http/modules Message-ID: <20121001124756.285473F9C12@mail.nginx.com> Author: mdounin Date: 2012-10-01 12:47:55 +0000 (Mon, 01 Oct 2012) New Revision: 4876 URL: http://trac.nginx.org/nginx/changeset/4876/nginx Log: OCSP stapling: loading OCSP responses. This includes the ssl_stapling_responder directive (defaults to OCSP responder set in certificate's AIA extension). OCSP response for a given certificate is requested once we get at least one connection with certificate_status extension in ClientHello, and certificate status won't be sent in the connection in question. This due to limitations in the OpenSSL API (certificate status callback is blocking). 
Note: SSL_CTX_use_certificate_chain_file() was reimplemented as it doesn't allow to access the certificate loaded via SSL_CTX. Modified: trunk/src/core/ngx_core.h trunk/src/event/ngx_event_openssl.c trunk/src/event/ngx_event_openssl.h trunk/src/event/ngx_event_openssl_stapling.c trunk/src/http/modules/ngx_http_ssl_module.c trunk/src/http/modules/ngx_http_ssl_module.h Modified: trunk/src/core/ngx_core.h =================================================================== --- trunk/src/core/ngx_core.h 2012-10-01 12:42:43 UTC (rev 4875) +++ trunk/src/core/ngx_core.h 2012-10-01 12:47:55 UTC (rev 4876) @@ -69,12 +69,12 @@ #include #include #include +#include #if (NGX_OPENSSL) #include #endif #include #include -#include #include #include #include Modified: trunk/src/event/ngx_event_openssl.c =================================================================== --- trunk/src/event/ngx_event_openssl.c 2012-10-01 12:42:43 UTC (rev 4875) +++ trunk/src/event/ngx_event_openssl.c 2012-10-01 12:47:55 UTC (rev 4876) @@ -82,6 +82,8 @@ int ngx_ssl_connection_index; int ngx_ssl_server_conf_index; int ngx_ssl_session_cache_index; +int ngx_ssl_certificate_index; +int ngx_ssl_stapling_index; ngx_int_t @@ -137,6 +139,22 @@ return NGX_ERROR; } + ngx_ssl_certificate_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, + NULL); + if (ngx_ssl_certificate_index == -1) { + ngx_ssl_error(NGX_LOG_ALERT, log, 0, + "SSL_CTX_get_ex_new_index() failed"); + return NGX_ERROR; + } + + ngx_ssl_stapling_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, + NULL); + if (ngx_ssl_stapling_index == -1) { + ngx_ssl_error(NGX_LOG_ALERT, log, 0, + "SSL_CTX_get_ex_new_index() failed"); + return NGX_ERROR; + } + return NGX_OK; } @@ -218,19 +236,89 @@ ngx_ssl_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, ngx_str_t *key) { + BIO *bio; + X509 *x509; + u_long n; + if (ngx_conf_full_name(cf->cycle, cert, 1) != NGX_OK) { return NGX_ERROR; } - if (SSL_CTX_use_certificate_chain_file(ssl->ctx, (char *) cert->data) + /* + * we can't use SSL_CTX_use_certificate_chain_file() as it doesn't + * allow to access certificate later from SSL_CTX, so we reimplement + * it here + */ + + bio = BIO_new_file((char *) cert->data, "r"); + if (bio == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "BIO_new_file(\"%s\") failed", cert->data); + return NGX_ERROR; + } + + x509 = PEM_read_bio_X509_AUX(bio, NULL, NULL, NULL); + if (x509 == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "PEM_read_bio_X509_AUX(\"%s\") failed", cert->data); + BIO_free(bio); + return NGX_ERROR; + } + + if (SSL_CTX_use_certificate(ssl->ctx, x509) == 0) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_use_certificate(\"%s\") failed", cert->data); + X509_free(x509); + BIO_free(bio); + return NGX_ERROR; + } + + if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_certificate_index, x509) == 0) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, - "SSL_CTX_use_certificate_chain_file(\"%s\") failed", - cert->data); + "SSL_CTX_set_ex_data() failed"); return NGX_ERROR; } + X509_free(x509); + + /* read rest of the chain */ + + for ( ;; ) { + + x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL); + if (x509 == NULL) { + n = ERR_peek_last_error(); + + if (ERR_GET_LIB(n) == ERR_LIB_PEM + && ERR_GET_REASON(n) == PEM_R_NO_START_LINE) + { + /* end of file */ + ERR_clear_error(); + break; + } + + /* some real error */ + + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "PEM_read_bio_X509(\"%s\") failed", cert->data); + BIO_free(bio); + return NGX_ERROR; + } + + if 
(SSL_CTX_add_extra_chain_cert(ssl->ctx, x509) == 0) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_add_extra_chain_cert(\"%s\") failed", + cert->data); + X509_free(x509); + BIO_free(bio); + return NGX_ERROR; + } + } + + BIO_free(bio); + if (ngx_conf_full_name(cf->cycle, key, 1) != NGX_OK) { return NGX_ERROR; } Modified: trunk/src/event/ngx_event_openssl.h =================================================================== --- trunk/src/event/ngx_event_openssl.h 2012-10-01 12:42:43 UTC (rev 4875) +++ trunk/src/event/ngx_event_openssl.h 2012-10-01 12:47:55 UTC (rev 4876) @@ -105,7 +105,10 @@ ngx_int_t ngx_ssl_trusted_certificate(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *cert, ngx_int_t depth); ngx_int_t ngx_ssl_crl(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *crl); -ngx_int_t ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file); +ngx_int_t ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, + ngx_str_t *responder, ngx_str_t *file); +ngx_int_t ngx_ssl_stapling_resolver(ngx_conf_t *cf, ngx_ssl_t *ssl, + ngx_resolver_t *resolver, ngx_msec_t resolver_timeout); RSA *ngx_ssl_rsa512_key_callback(SSL *ssl, int is_export, int key_length); ngx_int_t ngx_ssl_dhparam(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file); ngx_int_t ngx_ssl_ecdh_curve(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *name); @@ -161,6 +164,8 @@ extern int ngx_ssl_connection_index; extern int ngx_ssl_server_conf_index; extern int ngx_ssl_session_cache_index; +extern int ngx_ssl_certificate_index; +extern int ngx_ssl_stapling_index; #endif /* _NGX_EVENT_OPENSSL_H_INCLUDED_ */ Modified: trunk/src/event/ngx_event_openssl_stapling.c =================================================================== --- trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:42:43 UTC (rev 4875) +++ trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:47:55 UTC (rev 4876) @@ -8,28 +8,193 @@ #include #include #include +#include #ifdef SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB +typedef struct { + ngx_str_t staple; + ngx_msec_t timeout; + + ngx_resolver_t *resolver; + ngx_msec_t resolver_timeout; + + ngx_addr_t *addrs; + ngx_str_t host; + ngx_str_t uri; + in_port_t port; + + SSL_CTX *ssl_ctx; + + X509 *cert; + X509 *issuer; + + time_t valid; + + ngx_uint_t loading; /* unsigned:1 */ +} ngx_ssl_stapling_t; + + +typedef struct ngx_ssl_ocsp_ctx_s ngx_ssl_ocsp_ctx_t; + +struct ngx_ssl_ocsp_ctx_s { + X509 *cert; + X509 *issuer; + + ngx_uint_t naddrs; + + ngx_addr_t *addrs; + ngx_str_t host; + ngx_str_t uri; + in_port_t port; + + ngx_resolver_t *resolver; + ngx_msec_t resolver_timeout; + + ngx_msec_t timeout; + + void (*handler)(ngx_ssl_ocsp_ctx_t *r); + void *data; + + ngx_buf_t *request; + ngx_buf_t *response; + ngx_peer_connection_t peer; + + ngx_int_t (*process)(ngx_ssl_ocsp_ctx_t *r); + + ngx_uint_t state; + + ngx_uint_t code; + ngx_uint_t count; + + ngx_uint_t done; + + u_char *header_name_start; + u_char *header_name_end; + u_char *header_start; + u_char *header_end; + + ngx_pool_t *pool; + ngx_log_t *log; +}; + + +static ngx_int_t ngx_ssl_stapling_file(ngx_conf_t *cf, ngx_ssl_t *ssl, + ngx_str_t *file); +static ngx_int_t ngx_ssl_stapling_issuer(ngx_conf_t *cf, ngx_ssl_t *ssl); +static ngx_int_t ngx_ssl_stapling_responder(ngx_conf_t *cf, ngx_ssl_t *ssl, + ngx_str_t *responder); + static int ngx_ssl_certificate_status_callback(ngx_ssl_conn_t *ssl_conn, void *data); +static void ngx_ssl_stapling_update(ngx_ssl_stapling_t *staple); +static void ngx_ssl_stapling_ocsp_handler(ngx_ssl_ocsp_ctx_t *ctx); +static void ngx_ssl_stapling_cleanup(void 
*data); +static ngx_ssl_ocsp_ctx_t *ngx_ssl_ocsp_start(void); +static void ngx_ssl_ocsp_done(ngx_ssl_ocsp_ctx_t *ctx); +static void ngx_ssl_ocsp_request(ngx_ssl_ocsp_ctx_t *ctx); +static void ngx_ssl_ocsp_resolve_handler(ngx_resolver_ctx_t *resolve); +static void ngx_ssl_ocsp_connect(ngx_ssl_ocsp_ctx_t *ctx); +static void ngx_ssl_ocsp_write_handler(ngx_event_t *wev); +static void ngx_ssl_ocsp_read_handler(ngx_event_t *rev); +static void ngx_ssl_ocsp_dummy_handler(ngx_event_t *ev); + +static ngx_int_t ngx_ssl_ocsp_create_request(ngx_ssl_ocsp_ctx_t *ctx); +static ngx_int_t ngx_ssl_ocsp_process_status_line(ngx_ssl_ocsp_ctx_t *ctx); +static ngx_int_t ngx_ssl_ocsp_parse_status_line(ngx_ssl_ocsp_ctx_t *ctx); +static ngx_int_t ngx_ssl_ocsp_process_headers(ngx_ssl_ocsp_ctx_t *ctx); +static ngx_int_t ngx_ssl_ocsp_parse_header_line(ngx_ssl_ocsp_ctx_t *ctx); +static ngx_int_t ngx_ssl_ocsp_process_body(ngx_ssl_ocsp_ctx_t *ctx); + +static u_char *ngx_ssl_ocsp_log_error(ngx_log_t *log, u_char *buf, size_t len); + + ngx_int_t -ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file) +ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *responder, + ngx_str_t *file) { - BIO *bio; - int len; - u_char *p, *buf; - ngx_str_t *staple; - OCSP_RESPONSE *response; + ngx_int_t rc; + ngx_pool_cleanup_t *cln; + ngx_ssl_stapling_t *staple; - if (file->len == 0) { + staple = ngx_pcalloc(cf->pool, sizeof(ngx_ssl_stapling_t)); + if (staple == NULL) { + return NGX_ERROR; + } + + cln = ngx_pool_cleanup_add(cf->pool, 0); + if (cln == NULL) { + return NGX_ERROR; + } + + cln->handler = ngx_ssl_stapling_cleanup; + cln->data = staple; + + if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_stapling_index, staple) + == 0) + { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_set_ex_data() failed"); + return NGX_ERROR; + } + + staple->ssl_ctx = ssl->ctx; + staple->timeout = 60000; + + if (file->len) { + /* use OCSP response from the file */ + + if (ngx_ssl_stapling_file(cf, ssl, file) != NGX_OK) { + return NGX_ERROR; + } + + goto done; + } + + rc = ngx_ssl_stapling_issuer(cf, ssl); + + if (rc == NGX_DECLINED) { return NGX_OK; } + if (rc != NGX_OK) { + return NGX_ERROR; + } + + rc = ngx_ssl_stapling_responder(cf, ssl, responder); + + if (rc == NGX_DECLINED) { + return NGX_OK; + } + + if (rc != NGX_OK) { + return NGX_ERROR; + } + +done: + + SSL_CTX_set_tlsext_status_cb(ssl->ctx, ngx_ssl_certificate_status_callback); + SSL_CTX_set_tlsext_status_arg(ssl->ctx, staple); + + return NGX_OK; +} + + +static ngx_int_t +ngx_ssl_stapling_file(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file) +{ + BIO *bio; + int len; + u_char *p, *buf; + OCSP_RESPONSE *response; + ngx_ssl_stapling_t *staple; + + staple = SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_stapling_index); + if (ngx_conf_full_name(cf->cycle, file, 1) != NGX_OK) { return NGX_ERROR; } @@ -56,7 +221,7 @@ goto failed; } - buf = ngx_pnalloc(cf->pool, len); + buf = ngx_alloc(len, ssl->log); if (buf == NULL) { goto failed; } @@ -66,23 +231,16 @@ if (len <= 0) { ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, "i2d_OCSP_RESPONSE(\"%s\") failed", file->data); + ngx_free(buf); goto failed; } OCSP_RESPONSE_free(response); BIO_free(bio); - staple = ngx_palloc(cf->pool, sizeof(ngx_str_t)); - if (staple == NULL) { - return NGX_ERROR; - } + staple->staple.data = buf; + staple->staple.len = len; - staple->data = buf; - staple->len = len; - - SSL_CTX_set_tlsext_status_cb(ssl->ctx, ngx_ssl_certificate_status_callback); - SSL_CTX_set_tlsext_status_arg(ssl->ctx, staple); - return NGX_OK; failed: @@ -94,12 
+252,204 @@ } +static ngx_int_t +ngx_ssl_stapling_issuer(ngx_conf_t *cf, ngx_ssl_t *ssl) +{ + int i, n, rc; + X509 *cert, *issuer; + X509_STORE *store; + X509_STORE_CTX *store_ctx; + STACK_OF(X509) *chain; + ngx_ssl_stapling_t *staple; + + staple = SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_stapling_index); + cert = SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_certificate_index); + +#if OPENSSL_VERSION_NUMBER >= 0x10001000L + SSL_CTX_get_extra_chain_certs(ssl->ctx, &chain); +#else + chain = ssl->ctx->extra_certs; +#endif + + n = sk_X509_num(chain); + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, ssl->log, 0, + "SSL get issuer: %d extra certs", n); + + for (i = 0; i < n; i++) { + issuer = sk_X509_value(chain, i); + if (X509_check_issued(issuer, cert) == X509_V_OK) { + CRYPTO_add(&issuer->references, 1, CRYPTO_LOCK_X509); + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, ssl->log, 0, + "SSL get issuer: found %p in extra certs", issuer); + + staple->cert = cert; + staple->issuer = issuer; + + return NGX_OK; + } + } + + store = SSL_CTX_get_cert_store(ssl->ctx); + if (store == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "SSL_CTX_get_cert_store() failed"); + return NGX_ERROR; + } + + store_ctx = X509_STORE_CTX_new(); + if (store_ctx == NULL) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "X509_STORE_CTX_new() failed"); + return NGX_ERROR; + } + + if (X509_STORE_CTX_init(store_ctx, store, NULL, NULL) == 0) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "X509_STORE_CTX_init() failed"); + return NGX_ERROR; + } + + rc = X509_STORE_CTX_get1_issuer(&issuer, store_ctx, cert); + + if (rc == -1) { + ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, + "X509_STORE_CTX_get1_issuer() failed"); + X509_STORE_CTX_free(store_ctx); + return NGX_ERROR; + } + + if (rc == 0) { + ngx_log_error(NGX_LOG_WARN, ssl->log, 0, + "\"ssl_stapling\" ignored, issuer certificate not found"); + X509_STORE_CTX_free(store_ctx); + return NGX_DECLINED; + } + + X509_STORE_CTX_free(store_ctx); + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, ssl->log, 0, + "SSL get issuer: found %p in cert store", issuer); + + staple->cert = cert; + staple->issuer = issuer; + + return NGX_OK; +} + + +static ngx_int_t +ngx_ssl_stapling_responder(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *responder) +{ + ngx_url_t u; + char *s; + ngx_ssl_stapling_t *staple; + STACK_OF(OPENSSL_STRING) *aia; + + staple = SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_stapling_index); + + if (responder->len == 0) { + + /* extract OCSP responder URL from certificate */ + + aia = X509_get1_ocsp(staple->cert); + if (aia == NULL) { + ngx_log_error(NGX_LOG_WARN, ssl->log, 0, + "\"ssl_stapling\" ignored, " + "no OCSP responder URL in the certificate"); + return NGX_DECLINED; + } + +#if OPENSSL_VERSION_NUMBER >= 0x10000000L + s = sk_OPENSSL_STRING_value(aia, 0); +#else + s = sk_value(aia, 0); +#endif + if (s == NULL) { + ngx_log_error(NGX_LOG_WARN, ssl->log, 0, + "\"ssl_stapling\" ignored, " + "no OCSP responder URL in the certificate"); + X509_email_free(aia); + return NGX_DECLINED; + } + + responder->len = ngx_strlen(s); + responder->data = ngx_palloc(cf->pool, responder->len); + if (responder->data == NULL) { + X509_email_free(aia); + return NGX_ERROR; + } + + ngx_memcpy(responder->data, s, responder->len); + X509_email_free(aia); + } + + ngx_memzero(&u, sizeof(ngx_url_t)); + + u.url = *responder; + u.default_port = 80; + u.uri_part = 1; + + if (u.url.len > 7 + && ngx_strncasecmp(u.url.data, (u_char *) "http://", 7) == 0) + { + u.url.len -= 7; + u.url.data += 7; + + } else { + ngx_log_error(NGX_LOG_WARN, ssl->log, 0, + 
"\"ssl_stapling\" ignored, " + "invalid URL prefix in OCSP responder \"%V\"", &u.url); + return NGX_DECLINED; + } + + if (ngx_parse_url(cf->pool, &u) != NGX_OK) { + if (u.err) { + ngx_log_error(NGX_LOG_WARN, ssl->log, 0, + "\"ssl_stapling\" ignored, " + "%s in OCSP responder \"%V\"", u.err, &u.url); + return NGX_DECLINED; + } + + return NGX_ERROR; + } + + staple->addrs = u.addrs; + staple->host = u.host; + staple->uri = u.uri; + staple->port = u.port; + + if (staple->uri.len == 0) { + ngx_str_set(&staple->uri, "/"); + } + + return NGX_OK; +} + + +ngx_int_t +ngx_ssl_stapling_resolver(ngx_conf_t *cf, ngx_ssl_t *ssl, + ngx_resolver_t *resolver, ngx_msec_t resolver_timeout) +{ + ngx_ssl_stapling_t *staple; + + staple = SSL_CTX_get_ex_data(ssl->ctx, ngx_ssl_stapling_index); + + staple->resolver = resolver; + staple->resolver_timeout = resolver_timeout; + + return NGX_OK; +} + + static int ngx_ssl_certificate_status_callback(ngx_ssl_conn_t *ssl_conn, void *data) { - u_char *p; - ngx_str_t *staple; - ngx_connection_t *c; + int rc; + u_char *p; + ngx_connection_t *c; + ngx_ssl_stapling_t *staple; c = ngx_ssl_get_connection(ssl_conn); @@ -107,28 +457,1247 @@ "SSL certificate status callback"); staple = data; + rc = SSL_TLSEXT_ERR_NOACK; - /* we have to copy the staple as OpenSSL will free it by itself */ + if (staple->staple.len) { + /* we have to copy ocsp response as OpenSSL will free it by itself */ - p = OPENSSL_malloc(staple->len); - if (p == NULL) { - ngx_ssl_error(NGX_LOG_ALERT, c->log, 0, "OPENSSL_malloc() failed"); - return SSL_TLSEXT_ERR_ALERT_FATAL; + p = OPENSSL_malloc(staple->staple.len); + if (p == NULL) { + ngx_ssl_error(NGX_LOG_ALERT, c->log, 0, "OPENSSL_malloc() failed"); + return SSL_TLSEXT_ERR_NOACK; + } + + ngx_memcpy(p, staple->staple.data, staple->staple.len); + + SSL_set_tlsext_status_ocsp_resp(ssl_conn, p, staple->staple.len); + + rc = SSL_TLSEXT_ERR_OK; } - ngx_memcpy(p, staple->data, staple->len); + ngx_ssl_stapling_update(staple); - SSL_set_tlsext_status_ocsp_resp(ssl_conn, p, staple->len); + return rc; +} - return SSL_TLSEXT_ERR_OK; + +static void +ngx_ssl_stapling_update(ngx_ssl_stapling_t *staple) +{ + ngx_ssl_ocsp_ctx_t *ctx; + + if (staple->host.len == 0 + || staple->loading || staple->valid >= ngx_time()) + { + return; + } + + staple->loading = 1; + + ctx = ngx_ssl_ocsp_start(); + if (ctx == NULL) { + return; + } + + ctx->cert = staple->cert; + ctx->issuer = staple->issuer; + + ctx->addrs = staple->addrs; + ctx->host = staple->host; + ctx->uri = staple->uri; + ctx->port = staple->port; + ctx->timeout = staple->timeout; + + ctx->resolver = staple->resolver; + ctx->resolver_timeout = staple->resolver_timeout; + + ctx->handler = ngx_ssl_stapling_ocsp_handler; + ctx->data = staple; + + ngx_ssl_ocsp_request(ctx); + + return; } +static void +ngx_ssl_stapling_ocsp_handler(ngx_ssl_ocsp_ctx_t *ctx) +{ +#if OPENSSL_VERSION_NUMBER >= 0x0090707fL + const +#endif + u_char *p; + int n; + size_t len; + ngx_str_t response; + X509_STORE *store; + STACK_OF(X509) *chain; + OCSP_CERTID *id; + OCSP_RESPONSE *ocsp; + OCSP_BASICRESP *basic; + ngx_ssl_stapling_t *staple; + ASN1_GENERALIZEDTIME *thisupdate, *nextupdate; + + staple = ctx->data; + ocsp = NULL; + basic = NULL; + id = NULL; + + if (ctx->code != 200) { + goto error; + } + + /* check the response */ + + len = ctx->response->last - ctx->response->pos; + p = ctx->response->pos; + + ocsp = d2i_OCSP_RESPONSE(NULL, &p, len); + if (ocsp == NULL) { + ngx_ssl_error(NGX_LOG_ERR, ctx->log, 0, + "d2i_OCSP_RESPONSE() failed"); + goto error; 
+ } + + n = OCSP_response_status(ocsp); + + if (n != OCSP_RESPONSE_STATUS_SUCCESSFUL) { + ngx_log_error(NGX_LOG_ERR, ctx->log, 0, + "OCSP response not successful (%d: %s)", + n, OCSP_response_status_str(n)); + goto error; + } + + basic = OCSP_response_get1_basic(ocsp); + if (basic == NULL) { + ngx_ssl_error(NGX_LOG_ERR, ctx->log, 0, + "OCSP_response_get1_basic() failed"); + goto error; + } + + store = SSL_CTX_get_cert_store(staple->ssl_ctx); + if (store == NULL) { + ngx_ssl_error(NGX_LOG_CRIT, ctx->log, 0, + "SSL_CTX_get_cert_store() failed"); + goto error; + } + +#if OPENSSL_VERSION_NUMBER >= 0x10001000L + SSL_CTX_get_extra_chain_certs(staple->ssl_ctx, &chain); #else + chain = staple->ssl_ctx->extra_certs; +#endif + if (OCSP_basic_verify(basic, chain, store, 0) != 1) { + ngx_ssl_error(NGX_LOG_ERR, ctx->log, 0, + "OCSP_basic_verify() failed"); + goto error; + } + id = OCSP_cert_to_id(NULL, ctx->cert, ctx->issuer); + if (id == NULL) { + ngx_ssl_error(NGX_LOG_CRIT, ctx->log, 0, + "OCSP_cert_to_id() failed"); + goto error; + } + + if (OCSP_resp_find_status(basic, id, &n, NULL, NULL, + &thisupdate, &nextupdate) + != 1) + { + ngx_log_error(NGX_LOG_ERR, ctx->log, 0, + "certificate status not found in the OCSP response", + n, OCSP_response_status_str(n)); + goto error; + } + + if (n != V_OCSP_CERTSTATUS_GOOD) { + ngx_log_error(NGX_LOG_ERR, ctx->log, 0, + "certificate status \"%s\" in the OCSP response", + n, OCSP_cert_status_str(n)); + goto error; + } + + if (OCSP_check_validity(thisupdate, nextupdate, 300, -1) != 1) { + ngx_ssl_error(NGX_LOG_ERR, ctx->log, 0, + "OCSP_check_validity() failed"); + goto error; + } + + OCSP_CERTID_free(id); + OCSP_BASICRESP_free(basic); + OCSP_RESPONSE_free(ocsp); + + /* copy the response to memory not in ctx->pool */ + + response.len = len; + response.data = ngx_alloc(response.len, ctx->log); + + if (response.data == NULL) { + goto done; + } + + ngx_memcpy(response.data, ctx->response->pos, response.len); + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp response, %s, %uz", + OCSP_cert_status_str(n), response.len); + + if (staple->staple.data) { + ngx_free(staple->staple.data); + } + + staple->staple = response; + +done: + + staple->loading = 0; + staple->valid = ngx_time() + 3600; /* ssl_stapling_valid */ + + ngx_ssl_ocsp_done(ctx); + return; + +error: + + staple->loading = 0; + staple->valid = ngx_time() + 300; /* ssl_stapling_err_valid */ + + if (id) { + OCSP_CERTID_free(id); + } + + if (basic) { + OCSP_BASICRESP_free(basic); + } + + if (ocsp) { + OCSP_RESPONSE_free(ocsp); + } + + ngx_ssl_ocsp_done(ctx); +} + + +static void +ngx_ssl_stapling_cleanup(void *data) +{ + ngx_ssl_stapling_t *staple = data; + + if (staple->issuer) { + X509_free(staple->issuer); + } + + if (staple->staple.data) { + ngx_free(staple->staple.data); + } +} + + +static ngx_ssl_ocsp_ctx_t * +ngx_ssl_ocsp_start(void) +{ + ngx_log_t *log; + ngx_pool_t *pool; + ngx_ssl_ocsp_ctx_t *ctx; + + pool = ngx_create_pool(2048, ngx_cycle->log); + if (pool == NULL) { + return NULL; + } + + ctx = ngx_pcalloc(pool, sizeof(ngx_ssl_ocsp_ctx_t)); + if (ctx == NULL) { + ngx_destroy_pool(pool); + return NULL; + } + + log = ngx_palloc(pool, sizeof(ngx_log_t)); + if (log == NULL) { + ngx_destroy_pool(pool); + return NULL; + } + + ctx->pool = pool; + + *log = *ctx->pool->log; + + ctx->pool->log = log; + ctx->log = log; + + log->handler = ngx_ssl_ocsp_log_error; + log->data = ctx; + log->action = "requesting certificate status"; + + return ctx; +} + + +static void +ngx_ssl_ocsp_done(ngx_ssl_ocsp_ctx_t 
*ctx) +{ + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp done"); + + if (ctx->peer.connection) { + ngx_close_connection(ctx->peer.connection); + } + + ngx_destroy_pool(ctx->pool); +} + + +static void +ngx_ssl_ocsp_error(ngx_ssl_ocsp_ctx_t *ctx) +{ + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp error"); + + ctx->code = 0; + ctx->handler(ctx); +} + + +static void +ngx_ssl_ocsp_request(ngx_ssl_ocsp_ctx_t *ctx) +{ + ngx_resolver_ctx_t *resolve, temp; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp request"); + + if (ngx_ssl_ocsp_create_request(ctx) != NGX_OK) { + ngx_ssl_ocsp_error(ctx); + return; + } + + if (ctx->resolver) { + /* resolve OCSP responder hostname */ + + temp.name = ctx->host; + + resolve = ngx_resolve_start(ctx->resolver, &temp); + if (resolve == NULL) { + ngx_ssl_ocsp_error(ctx); + return; + } + + if (resolve == NGX_NO_RESOLVER) { + ngx_log_error(NGX_LOG_WARN, ctx->log, 0, + "no resolver defined to resolve %V", &ctx->host); + goto connect; + } + + resolve->name = ctx->host; + resolve->type = NGX_RESOLVE_A; + resolve->handler = ngx_ssl_ocsp_resolve_handler; + resolve->data = ctx; + resolve->timeout = ctx->resolver_timeout; + + if (ngx_resolve_name(resolve) != NGX_OK) { + ngx_ssl_ocsp_error(ctx); + return; + } + + return; + } + +connect: + + ngx_ssl_ocsp_connect(ctx); +} + + +static void +ngx_ssl_ocsp_resolve_handler(ngx_resolver_ctx_t *resolve) +{ + ngx_ssl_ocsp_ctx_t *ctx = resolve->data; + + u_char *p; + size_t len; + in_port_t port; + ngx_uint_t i; + struct sockaddr_in *sin; + + ngx_log_debug0(NGX_LOG_ALERT, ctx->log, 0, + "ssl ocsp resolve handler"); + + if (resolve->state) { + ngx_log_error(NGX_LOG_ERR, ctx->log, 0, + "%V could not be resolved (%i: %s)", + &resolve->name, resolve->state, + ngx_resolver_strerror(resolve->state)); + goto failed; + } + +#if (NGX_DEBUG) + { + in_addr_t addr; + + for (i = 0; i < resolve->naddrs; i++) { + addr = ntohl(resolve->addrs[i]); + + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "name was resolved to %ud.%ud.%ud.%ud", + (addr >> 24) & 0xff, (addr >> 16) & 0xff, + (addr >> 8) & 0xff, addr & 0xff); + } + } +#endif + + ctx->naddrs = resolve->naddrs; + ctx->addrs = ngx_pcalloc(ctx->pool, ctx->naddrs * sizeof(ngx_addr_t)); + + if (ctx->addrs == NULL) { + goto failed; + } + + port = htons(ctx->port); + + for (i = 0; i < resolve->naddrs; i++) { + + sin = ngx_pcalloc(ctx->pool, sizeof(struct sockaddr_in)); + if (sin == NULL) { + goto failed; + } + + sin->sin_family = AF_INET; + sin->sin_port = port; + sin->sin_addr.s_addr = resolve->addrs[i]; + + ctx->addrs[i].sockaddr = (struct sockaddr *) sin; + ctx->addrs[i].socklen = sizeof(struct sockaddr_in); + + len = NGX_INET_ADDRSTRLEN + sizeof(":65535") - 1; + + p = ngx_pnalloc(ctx->pool, len); + if (p == NULL) { + goto failed; + } + + len = ngx_sock_ntop((struct sockaddr *) sin, p, len, 1); + + ctx->addrs[i].name.len = len; + ctx->addrs[i].name.data = p; + } + + ngx_resolve_name_done(resolve); + + ngx_ssl_ocsp_connect(ctx); + return; + +failed: + + ngx_resolve_name_done(resolve); + ngx_ssl_ocsp_error(ctx); +} + + +static void +ngx_ssl_ocsp_connect(ngx_ssl_ocsp_ctx_t *ctx) +{ + ngx_int_t rc; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp connect"); + + /* TODO: use all ip addresses */ + + ctx->peer.sockaddr = ctx->addrs[0].sockaddr; + ctx->peer.socklen = ctx->addrs[0].socklen; + ctx->peer.name = &ctx->addrs[0].name; + ctx->peer.get = ngx_event_get_peer; + ctx->peer.log = ctx->log; + ctx->peer.log_error = NGX_ERROR_ERR; + + rc = 
ngx_event_connect_peer(&ctx->peer); + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp connect peer done"); + + if (rc == NGX_ERROR || rc == NGX_BUSY || rc == NGX_DECLINED) { + ngx_ssl_ocsp_error(ctx); + return; + } + + ctx->peer.connection->data = ctx; + ctx->peer.connection->pool = ctx->pool; + + ctx->peer.connection->read->handler = ngx_ssl_ocsp_read_handler; + ctx->peer.connection->write->handler = ngx_ssl_ocsp_write_handler; + + ctx->process = ngx_ssl_ocsp_process_status_line; + + ngx_add_timer(ctx->peer.connection->read, ctx->timeout); + ngx_add_timer(ctx->peer.connection->write, ctx->timeout); + + if (rc == NGX_OK) { + ngx_ssl_ocsp_write_handler(ctx->peer.connection->write); + return; + } +} + + +static void +ngx_ssl_ocsp_write_handler(ngx_event_t *wev) +{ + ssize_t n, size; + ngx_connection_t *c; + ngx_ssl_ocsp_ctx_t *ctx; + + c = wev->data; + ctx = c->data; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, wev->log, 0, + "ssl ocsp write handler"); + + if (wev->timedout) { + ngx_log_error(NGX_LOG_ERR, wev->log, NGX_ETIMEDOUT, + "OCSP responder timed out"); + ngx_ssl_ocsp_error(ctx); + return; + } + + size = ctx->request->last - ctx->request->pos; + + n = ngx_send(c, ctx->request->pos, size); + + if (n == NGX_ERROR) { + ngx_ssl_ocsp_error(ctx); + return; + } + + if (n > 0) { + ctx->request->pos += n; + + if (n == size) { + wev->handler = ngx_ssl_ocsp_dummy_handler; + + if (wev->timer_set) { + ngx_del_timer(wev); + } + + if (ngx_handle_write_event(wev, 0) != NGX_OK) { + ngx_ssl_ocsp_error(ctx); + } + + return; + } + } + + if (!wev->timer_set) { + ngx_add_timer(wev, ctx->timeout); + } +} + + +static void +ngx_ssl_ocsp_read_handler(ngx_event_t *rev) +{ + ssize_t n, size; + ngx_int_t rc; + ngx_ssl_ocsp_ctx_t *ctx; + ngx_connection_t *c; + + c = rev->data; + ctx = c->data; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, rev->log, 0, + "ssl ocsp read handler"); + + if (rev->timedout) { + ngx_log_error(NGX_LOG_ERR, rev->log, NGX_ETIMEDOUT, + "OCSP responder timed out"); + ngx_ssl_ocsp_error(ctx); + return; + } + + if (ctx->response == NULL) { + ctx->response = ngx_create_temp_buf(ctx->pool, 16384); + if (ctx->response == NULL) { + ngx_ssl_ocsp_error(ctx); + return; + } + } + + for ( ;; ) { + + size = ctx->response->end - ctx->response->last; + + n = ngx_recv(c, ctx->response->last, size); + + if (n > 0) { + ctx->response->last += n; + + rc = ctx->process(ctx); + + if (rc == NGX_ERROR) { + ngx_ssl_ocsp_error(ctx); + return; + } + + continue; + } + + if (n == NGX_AGAIN) { + + if (ngx_handle_read_event(rev, 0) != NGX_OK) { + ngx_ssl_ocsp_error(ctx); + } + + return; + } + + break; + } + + ctx->done = 1; + + rc = ctx->process(ctx); + + if (rc == NGX_DONE) { + /* ctx->handler() was called */ + return; + } + + ngx_log_error(NGX_LOG_ERR, ctx->log, 0, + "OCSP responder prematurely closed connection"); + + ngx_ssl_ocsp_error(ctx); +} + + +static void +ngx_ssl_ocsp_dummy_handler(ngx_event_t *ev) +{ + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, 0, + "ssl ocsp dummy handler"); +} + + +static ngx_int_t +ngx_ssl_ocsp_create_request(ngx_ssl_ocsp_ctx_t *ctx) +{ + int len; + u_char *p; + uintptr_t escape; + ngx_str_t binary, base64; + ngx_buf_t *b; + OCSP_CERTID *id; + OCSP_REQUEST *ocsp; + + ocsp = OCSP_REQUEST_new(); + if (ocsp == NULL) { + ngx_ssl_error(NGX_LOG_CRIT, ctx->log, 0, + "OCSP_REQUEST_new() failed"); + return NGX_ERROR; + } + + id = OCSP_cert_to_id(NULL, ctx->cert, ctx->issuer); + if (id == NULL) { + ngx_ssl_error(NGX_LOG_CRIT, ctx->log, 0, + "OCSP_cert_to_id() failed"); + goto failed; + } + + if 
(OCSP_request_add0_id(ocsp, id) == NULL) { + ngx_ssl_error(NGX_LOG_CRIT, ctx->log, 0, + "OCSP_request_add0_id() failed"); + goto failed; + } + + len = i2d_OCSP_REQUEST(ocsp, NULL); + if (len <= 0) { + ngx_ssl_error(NGX_LOG_CRIT, ctx->log, 0, + "i2d_OCSP_REQUEST() failed"); + goto failed; + } + + binary.len = len; + binary.data = ngx_palloc(ctx->pool, len); + if (binary.data == NULL) { + goto failed; + } + + p = binary.data; + len = i2d_OCSP_REQUEST(ocsp, &p); + if (len <= 0) { + ngx_ssl_error(NGX_LOG_EMERG, ctx->log, 0, + "i2d_OCSP_REQUEST() failed"); + goto failed; + } + + base64.len = ngx_base64_encoded_length(binary.len); + base64.data = ngx_palloc(ctx->pool, base64.len); + if (base64.data == NULL) { + goto failed; + } + + ngx_encode_base64(&base64, &binary); + + escape = ngx_escape_uri(NULL, base64.data, base64.len, + NGX_ESCAPE_URI_COMPONENT); + + ngx_log_debug(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp request length %z, escape %d", + base64.len, escape); + + len = sizeof("GET ") - 1 + ctx->uri.len + sizeof("/") - 1 + + base64.len + 2 * escape + sizeof(" HTTP/1.0" CRLF) - 1 + + sizeof("Host: ") - 1 + ctx->host.len + sizeof(CRLF) - 1 + + sizeof(CRLF) - 1; + + b = ngx_create_temp_buf(ctx->pool, len); + if (b == NULL) { + goto failed; + } + + p = b->last; + + p = ngx_cpymem(p, "GET ", sizeof("GET ") - 1); + p = ngx_cpymem(p, ctx->uri.data, ctx->uri.len); + + if (ctx->uri.data[ctx->uri.len - 1] != '/') { + *p++ = '/'; + } + + if (escape == 0) { + p = ngx_cpymem(p, base64.data, base64.len); + + } else { + p = (u_char *) ngx_escape_uri(p, base64.data, base64.len, + NGX_ESCAPE_URI_COMPONENT); + } + + p = ngx_cpymem(p, " HTTP/1.0" CRLF, sizeof(" HTTP/1.0" CRLF) - 1); + p = ngx_cpymem(p, "Host: ", sizeof("Host: ") - 1); + p = ngx_cpymem(p, ctx->host.data, ctx->host.len); + *p++ = CR; *p++ = LF; + + /* add "\r\n" at the header end */ + *p++ = CR; *p++ = LF; + + b->last = p; + ctx->request = b; + + return NGX_OK; + +failed: + + OCSP_REQUEST_free(ocsp); + + return NGX_ERROR; +} + + +static ngx_int_t +ngx_ssl_ocsp_process_status_line(ngx_ssl_ocsp_ctx_t *ctx) +{ + ngx_int_t rc; + + rc = ngx_ssl_ocsp_parse_status_line(ctx); + + if (rc == NGX_OK) { +#if 0 + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp status line \"%*s\"", + ctx->response->pos - ctx->response->start, + ctx->response->start); +#endif + + ctx->process = ngx_ssl_ocsp_process_headers; + return ctx->process(ctx); + } + + if (rc == NGX_AGAIN) { + return NGX_AGAIN; + } + + /* rc == NGX_ERROR */ + + ngx_log_error(NGX_LOG_ERR, ctx->log, 0, + "OCSP responder sent invalid response"); + + return NGX_ERROR; +} + + +static ngx_int_t +ngx_ssl_ocsp_parse_status_line(ngx_ssl_ocsp_ctx_t *ctx) +{ + u_char ch; + u_char *p; + ngx_buf_t *b; + enum { + sw_start = 0, + sw_H, + sw_HT, + sw_HTT, + sw_HTTP, + sw_first_major_digit, + sw_major_digit, + sw_first_minor_digit, + sw_minor_digit, + sw_status, + sw_space_after_status, + sw_status_text, + sw_almost_done + } state; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp process status line"); + + state = ctx->state; + b = ctx->response; + + for (p = b->pos; p < b->last; p++) { + ch = *p; + + switch (state) { + + /* "HTTP/" */ + case sw_start: + switch (ch) { + case 'H': + state = sw_H; + break; + default: + return NGX_ERROR; + } + break; + + case sw_H: + switch (ch) { + case 'T': + state = sw_HT; + break; + default: + return NGX_ERROR; + } + break; + + case sw_HT: + switch (ch) { + case 'T': + state = sw_HTT; + break; + default: + return NGX_ERROR; + } + break; + + case 
sw_HTT: + switch (ch) { + case 'P': + state = sw_HTTP; + break; + default: + return NGX_ERROR; + } + break; + + case sw_HTTP: + switch (ch) { + case '/': + state = sw_first_major_digit; + break; + default: + return NGX_ERROR; + } + break; + + /* the first digit of major HTTP version */ + case sw_first_major_digit: + if (ch < '1' || ch > '9') { + return NGX_ERROR; + } + + state = sw_major_digit; + break; + + /* the major HTTP version or dot */ + case sw_major_digit: + if (ch == '.') { + state = sw_first_minor_digit; + break; + } + + if (ch < '0' || ch > '9') { + return NGX_ERROR; + } + + break; + + /* the first digit of minor HTTP version */ + case sw_first_minor_digit: + if (ch < '0' || ch > '9') { + return NGX_ERROR; + } + + state = sw_minor_digit; + break; + + /* the minor HTTP version or the end of the request line */ + case sw_minor_digit: + if (ch == ' ') { + state = sw_status; + break; + } + + if (ch < '0' || ch > '9') { + return NGX_ERROR; + } + + break; + + /* HTTP status code */ + case sw_status: + if (ch == ' ') { + break; + } + + if (ch < '0' || ch > '9') { + return NGX_ERROR; + } + + ctx->code = ctx->code * 10 + ch - '0'; + + if (++ctx->count == 3) { + state = sw_space_after_status; + } + + break; + + /* space or end of line */ + case sw_space_after_status: + switch (ch) { + case ' ': + state = sw_status_text; + break; + case '.': /* IIS may send 403.1, 403.2, etc */ + state = sw_status_text; + break; + case CR: + state = sw_almost_done; + break; + case LF: + goto done; + default: + return NGX_ERROR; + } + break; + + /* any text until end of line */ + case sw_status_text: + switch (ch) { + case CR: + state = sw_almost_done; + break; + case LF: + goto done; + } + break; + + /* end of status line */ + case sw_almost_done: + switch (ch) { + case LF: + goto done; + default: + return NGX_ERROR; + } + } + } + + b->pos = p; + ctx->state = state; + + return NGX_AGAIN; + +done: + + b->pos = p + 1; + ctx->state = sw_start; + + return NGX_OK; +} + + +static ngx_int_t +ngx_ssl_ocsp_process_headers(ngx_ssl_ocsp_ctx_t *ctx) +{ + ngx_int_t rc; + + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp process headers"); + + for ( ;; ) { + rc = ngx_ssl_ocsp_parse_header_line(ctx); + + if (rc == NGX_OK) { + + ngx_log_debug4(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp header \"%*s: %*s\"", + ctx->header_name_end - ctx->header_name_start, + ctx->header_name_start, + ctx->header_end - ctx->header_start, + ctx->header_start); + + /* TODO: honor Content-Length */ + + continue; + } + + if (rc == NGX_DONE) { + break; + } + + if (rc == NGX_AGAIN) { + return NGX_AGAIN; + } + + /* rc == NGX_ERROR */ + + ngx_log_error(NGX_LOG_ERR, ctx->log, 0, + "OCSP responder sent invalid response"); + + return NGX_ERROR; + } + + ctx->process = ngx_ssl_ocsp_process_body; + return ctx->process(ctx); +} + +static ngx_int_t +ngx_ssl_ocsp_parse_header_line(ngx_ssl_ocsp_ctx_t *ctx) +{ + u_char c, ch, *p; + enum { + sw_start = 0, + sw_name, + sw_space_before_value, + sw_value, + sw_space_after_value, + sw_almost_done, + sw_header_almost_done + } state; + + state = ctx->state; + + for (p = ctx->response->pos; p < ctx->response->last; p++) { + ch = *p; + +#if 0 + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "s:%d in:'%02Xd:%c'", state, ch, ch); +#endif + + switch (state) { + + /* first char */ + case sw_start: + + switch (ch) { + case CR: + ctx->header_end = p; + state = sw_header_almost_done; + break; + case LF: + ctx->header_end = p; + goto header_done; + default: + state = sw_name; + ctx->header_name_start = p; 
+ + c = (u_char) (ch | 0x20); + if (c >= 'a' && c <= 'z') { + break; + } + + if (ch >= '0' && ch <= '9') { + break; + } + + return NGX_ERROR; + } + break; + + /* header name */ + case sw_name: + c = (u_char) (ch | 0x20); + if (c >= 'a' && c <= 'z') { + break; + } + + if (ch == ':') { + ctx->header_name_end = p; + state = sw_space_before_value; + break; + } + + if (ch == '-') { + break; + } + + if (ch >= '0' && ch <= '9') { + break; + } + + if (ch == CR) { + ctx->header_name_end = p; + ctx->header_start = p; + ctx->header_end = p; + state = sw_almost_done; + break; + } + + if (ch == LF) { + ctx->header_name_end = p; + ctx->header_start = p; + ctx->header_end = p; + goto done; + } + + return NGX_ERROR; + + /* space* before header value */ + case sw_space_before_value: + switch (ch) { + case ' ': + break; + case CR: + ctx->header_start = p; + ctx->header_end = p; + state = sw_almost_done; + break; + case LF: + ctx->header_start = p; + ctx->header_end = p; + goto done; + default: + ctx->header_start = p; + state = sw_value; + break; + } + break; + + /* header value */ + case sw_value: + switch (ch) { + case ' ': + ctx->header_end = p; + state = sw_space_after_value; + break; + case CR: + ctx->header_end = p; + state = sw_almost_done; + break; + case LF: + ctx->header_end = p; + goto done; + } + break; + + /* space* before end of header line */ + case sw_space_after_value: + switch (ch) { + case ' ': + break; + case CR: + state = sw_almost_done; + break; + case LF: + goto done; + default: + state = sw_value; + break; + } + break; + + /* end of header line */ + case sw_almost_done: + switch (ch) { + case LF: + goto done; + default: + return NGX_ERROR; + } + + /* end of header */ + case sw_header_almost_done: + switch (ch) { + case LF: + goto header_done; + default: + return NGX_ERROR; + } + } + } + + ctx->response->pos = p; + ctx->state = state; + + return NGX_AGAIN; + +done: + + ctx->response->pos = p + 1; + ctx->state = sw_start; + + return NGX_OK; + +header_done: + + ctx->response->pos = p + 1; + ctx->state = sw_start; + + return NGX_DONE; +} + + +static ngx_int_t +ngx_ssl_ocsp_process_body(ngx_ssl_ocsp_ctx_t *ctx) +{ + ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp process body"); + + if (ctx->done) { + ctx->handler(ctx); + return NGX_DONE; + } + + return NGX_AGAIN; +} + + +static u_char * +ngx_ssl_ocsp_log_error(ngx_log_t *log, u_char *buf, size_t len) +{ + u_char *p; + ngx_ssl_ocsp_ctx_t *ctx; + + p = buf; + + if (log->action) { + p = ngx_snprintf(buf, len, " while %s", log->action); + len -= p - buf; + } + + ctx = log->data; + + if (ctx) { + p = ngx_snprintf(p, len, ", responder: %V", &ctx->host); + } + + return p; +} + + +#else + + ngx_int_t -ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file) +ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *responder, + ngx_str_t *file) { ngx_log_error(NGX_LOG_WARN, ssl->log, 0, "\"ssl_stapling\" ignored, not supported"); @@ -136,5 +1705,12 @@ return NGX_OK; } +ngx_int_t +ngx_ssl_stapling_resolver(ngx_conf_t *cf, ngx_ssl_t *ssl, + ngx_resolver_t *resolver, ngx_msec_t resolver_timeout) +{ + return NGX_OK; +} + #endif Modified: trunk/src/http/modules/ngx_http_ssl_module.c =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-01 12:42:43 UTC (rev 4875) +++ trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-01 12:47:55 UTC (rev 4876) @@ -33,7 +33,9 @@ static char *ngx_http_ssl_session_cache(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +static 
ngx_int_t ngx_http_ssl_init(ngx_conf_t *cf); + static ngx_conf_bitmask_t ngx_http_ssl_protocols[] = { { ngx_string("SSLv2"), NGX_SSL_SSLv2 }, { ngx_string("SSLv3"), NGX_SSL_SSLv3 }, @@ -173,13 +175,20 @@ offsetof(ngx_http_ssl_srv_conf_t, stapling_file), NULL }, + { ngx_string("ssl_stapling_responder"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_TAKE1, + ngx_conf_set_str_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_ssl_srv_conf_t, stapling_responder), + NULL }, + ngx_null_command }; static ngx_http_module_t ngx_http_ssl_module_ctx = { ngx_http_ssl_add_variables, /* preconfiguration */ - NULL, /* postconfiguration */ + ngx_http_ssl_init, /* postconfiguration */ NULL, /* create main configuration */ NULL, /* init main configuration */ @@ -351,6 +360,7 @@ * sscf->ciphers = { 0, NULL }; * sscf->shm_zone = NULL; * sscf->stapling_file = { 0, NULL }; + * sscf->stapling_responder = { 0, NULL }; */ sscf->enable = NGX_CONF_UNSET; @@ -415,6 +425,8 @@ ngx_conf_merge_value(conf->stapling, prev->stapling, 0); ngx_conf_merge_str_value(conf->stapling_file, prev->stapling_file, ""); + ngx_conf_merge_str_value(conf->stapling_responder, + prev->stapling_responder, ""); conf->ssl.log = cf->log; @@ -551,10 +563,15 @@ return NGX_CONF_ERROR; } - if (conf->stapling - && ngx_ssl_stapling(cf, &conf->ssl, &conf->stapling_file) != NGX_OK) - { - return NGX_CONF_ERROR; + if (conf->stapling) { + + if (ngx_ssl_stapling(cf, &conf->ssl, &conf->stapling_responder, + &conf->stapling_file) + != NGX_OK) + { + return NGX_CONF_ERROR; + } + } return NGX_CONF_OK; @@ -692,3 +709,37 @@ return NGX_CONF_ERROR; } + + +static ngx_int_t +ngx_http_ssl_init(ngx_conf_t *cf) +{ + ngx_uint_t s; + ngx_http_ssl_srv_conf_t *sscf; + ngx_http_core_loc_conf_t *clcf; + ngx_http_core_srv_conf_t **cscfp; + ngx_http_core_main_conf_t *cmcf; + + cmcf = ngx_http_conf_get_module_main_conf(cf, ngx_http_core_module); + cscfp = cmcf->servers.elts; + + for (s = 0; s < cmcf->servers.nelts; s++) { + + sscf = cscfp[s]->ctx->srv_conf[ngx_http_ssl_module.ctx_index]; + + if (!sscf->stapling) { + continue; + } + + clcf = cscfp[s]->ctx->loc_conf[ngx_http_core_module.ctx_index]; + + if (ngx_ssl_stapling_resolver(cf, &sscf->ssl, clcf->resolver, + clcf->resolver_timeout) + != NGX_OK) + { + return NGX_ERROR; + } + } + + return NGX_OK; +} Modified: trunk/src/http/modules/ngx_http_ssl_module.h =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.h 2012-10-01 12:42:43 UTC (rev 4875) +++ trunk/src/http/modules/ngx_http_ssl_module.h 2012-10-01 12:47:55 UTC (rev 4876) @@ -44,6 +44,7 @@ ngx_flag_t stapling; ngx_str_t stapling_file; + ngx_str_t stapling_responder; u_char *file; ngx_uint_t line; From mdounin at mdounin.ru Mon Oct 1 12:48:55 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 1 Oct 2012 12:48:55 +0000 Subject: [nginx] svn commit: r4877 - trunk/src/event Message-ID: <20121001124855.1595C3F9C12@mail.nginx.com> Author: mdounin Date: 2012-10-01 12:48:54 +0000 (Mon, 01 Oct 2012) New Revision: 4877 URL: http://trac.nginx.org/nginx/changeset/4877/nginx Log: OCSP stapling: check Content-Type. This will result in better error message in case of incorrect response from OCSP responder: ... OCSP responder sent invalid "Content-Type" header: "text/plain" while requesting certificate status, responder: ... vs. ... 
d2i_OCSP_RESPONSE() failed (SSL: error:0D07209B:asn1 encoding routines:ASN1_get_object:too long error:0D068066:asn1 encoding routines:ASN1_CHECK_TLEN:bad object header error:0D07803A:asn1 encoding routines:ASN1_ITEM_EX_D2I:nested asn1 error) while requesting certificate status, responder: ... Modified: trunk/src/event/ngx_event_openssl_stapling.c Modified: trunk/src/event/ngx_event_openssl_stapling.c =================================================================== --- trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:47:55 UTC (rev 4876) +++ trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:48:54 UTC (rev 4877) @@ -1425,6 +1425,7 @@ static ngx_int_t ngx_ssl_ocsp_process_headers(ngx_ssl_ocsp_ctx_t *ctx) { + size_t len; ngx_int_t rc; ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ctx->log, 0, @@ -1442,6 +1443,33 @@ ctx->header_end - ctx->header_start, ctx->header_start); + len = ctx->header_name_end - ctx->header_name_start; + + if (len == sizeof("Content-Type") - 1 + && ngx_strncasecmp(ctx->header_name_start, + (u_char *) "Content-Type", + sizeof("Content-Type") - 1) + == 0) + { + len = ctx->header_end - ctx->header_start; + + if (len != sizeof("application/ocsp-response") - 1 + || ngx_strncasecmp(ctx->header_start, + (u_char *) "application/ocsp-response", + sizeof("application/ocsp-response") - 1) + != 0) + { + ngx_log_error(NGX_LOG_ERR, ctx->log, 0, + "OCSP responder sent invalid " + "\"Content-Type\" header: \"%*s\"", + ctx->header_end - ctx->header_start, + ctx->header_start); + return NGX_ERROR; + } + + continue; + } + /* TODO: honor Content-Length */ continue; From mdounin at mdounin.ru Mon Oct 1 12:50:36 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 1 Oct 2012 12:50:36 +0000 Subject: [nginx] svn commit: r4878 - trunk/src/event Message-ID: <20121001125036.B34F13F9C12@mail.nginx.com> Author: mdounin Date: 2012-10-01 12:50:36 +0000 (Mon, 01 Oct 2012) New Revision: 4878 URL: http://trac.nginx.org/nginx/changeset/4878/nginx Log: OCSP stapling: log error data in ngx_ssl_error(). It's hard to debug OCSP_basic_verify() failures without the actual error string it records in the error data field. Modified: trunk/src/event/ngx_event_openssl.c Modified: trunk/src/event/ngx_event_openssl.c =================================================================== --- trunk/src/event/ngx_event_openssl.c 2012-10-01 12:48:54 UTC (rev 4877) +++ trunk/src/event/ngx_event_openssl.c 2012-10-01 12:50:36 UTC (rev 4878) @@ -1590,10 +1590,12 @@ void ngx_cdecl ngx_ssl_error(ngx_uint_t level, ngx_log_t *log, ngx_err_t err, char *fmt, ...) 
{ - u_long n; - va_list args; - u_char *p, *last; - u_char errstr[NGX_MAX_CONF_ERRSTR]; + int flags; + u_long n; + va_list args; + u_char *p, *last; + u_char errstr[NGX_MAX_CONF_ERRSTR]; + const char *data; last = errstr + NGX_MAX_CONF_ERRSTR; @@ -1605,14 +1607,14 @@ for ( ;; ) { - n = ERR_get_error(); + n = ERR_peek_error_line_data(NULL, NULL, &data, &flags); if (n == 0) { break; } if (p >= last) { - continue; + goto next; } *p++ = ' '; @@ -1622,6 +1624,15 @@ while (p < last && *p) { p++; } + + if (p < last && *data && (flags & ERR_TXT_STRING)) { + *p++ = ':'; + p = ngx_cpystrn(p, (u_char *) data, last - p); + } + + next: + + (void) ERR_get_error(); } ngx_log_error(level, log, err, "%s)", errstr); From mdounin at mdounin.ru Mon Oct 1 12:51:29 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 1 Oct 2012 12:51:29 +0000 Subject: [nginx] svn commit: r4879 - trunk/src/event Message-ID: <20121001125129.8F6503F9C18@mail.nginx.com> Author: mdounin Date: 2012-10-01 12:51:27 +0000 (Mon, 01 Oct 2012) New Revision: 4879 URL: http://trac.nginx.org/nginx/changeset/4879/nginx Log: OCSP stapling: OCSP_basic_verify() OCSP_TRUSTOTHER flag now used. This is expected to simplify configuration in a common case when OCSP response is signed by a certificate already present in ssl_certificate chain. This case won't need any extra trusted certificates. Modified: trunk/src/event/ngx_event_openssl_stapling.c Modified: trunk/src/event/ngx_event_openssl_stapling.c =================================================================== --- trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:50:36 UTC (rev 4878) +++ trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:51:27 UTC (rev 4879) @@ -588,7 +588,7 @@ chain = staple->ssl_ctx->extra_certs; #endif - if (OCSP_basic_verify(basic, chain, store, 0) != 1) { + if (OCSP_basic_verify(basic, chain, store, OCSP_TRUSTOTHER) != 1) { ngx_ssl_error(NGX_LOG_ERR, ctx->log, 0, "OCSP_basic_verify() failed"); goto error; From mdounin at mdounin.ru Mon Oct 1 12:53:12 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 1 Oct 2012 12:53:12 +0000 Subject: [nginx] svn commit: r4880 - in trunk/src: event http/modules Message-ID: <20121001125312.557CE3F9C18@mail.nginx.com> Author: mdounin Date: 2012-10-01 12:53:11 +0000 (Mon, 01 Oct 2012) New Revision: 4880 URL: http://trac.nginx.org/nginx/changeset/4880/nginx Log: OCSP stapling: ssl_stapling_verify directive. OCSP response verification is now switched off by default to simplify configuration, and the ssl_stapling_verify allows to switch it on. Note that for stapling OCSP response verification isn't something required as it will be done by a client anyway. But doing verification on a server allows to mitigate some attack vectors, most notably stop an attacker from presenting some specially crafted data to all site clients. 
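For readers who have not used OpenSSL's OCSP API, the check that ssl_stapling_verify toggles boils down to something like the sketch below. It is not nginx's actual code path (that lives in ngx_event_openssl_stapling.c); the helper name check_ocsp_response() and its arguments are made up for illustration, only the OpenSSL calls and the OCSP_TRUSTOTHER/OCSP_NOVERIFY flags are real.

    #include <openssl/ocsp.h>
    #include <openssl/x509.h>

    /* simplified sketch of verifying an OCSP response; "verify" plays
     * the role of the ssl_stapling_verify switch */
    static int
    check_ocsp_response(OCSP_RESPONSE *rsp, STACK_OF(X509) *chain,
        X509_STORE *store, int verify)
    {
        int              rc;
        OCSP_BASICRESP  *basic;

        if (OCSP_response_status(rsp) != OCSP_RESPONSE_STATUS_SUCCESSFUL) {
            return -1;
        }

        basic = OCSP_response_get1_basic(rsp);
        if (basic == NULL) {
            return -1;
        }

        /* OCSP_TRUSTOTHER: a signer found in "chain" (e.g. certificates
         * already configured with ssl_certificate) is trusted as is;
         * OCSP_NOVERIFY: the signer certificate is not verified at all */
        rc = OCSP_basic_verify(basic, chain, store,
                               verify ? OCSP_TRUSTOTHER : OCSP_NOVERIFY);

        OCSP_BASICRESP_free(basic);

        return (rc == 1) ? 0 : -1;
    }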
Modified: trunk/src/event/ngx_event_openssl.h trunk/src/event/ngx_event_openssl_stapling.c trunk/src/http/modules/ngx_http_ssl_module.c trunk/src/http/modules/ngx_http_ssl_module.h Modified: trunk/src/event/ngx_event_openssl.h =================================================================== --- trunk/src/event/ngx_event_openssl.h 2012-10-01 12:51:27 UTC (rev 4879) +++ trunk/src/event/ngx_event_openssl.h 2012-10-01 12:53:11 UTC (rev 4880) @@ -106,7 +106,7 @@ ngx_str_t *cert, ngx_int_t depth); ngx_int_t ngx_ssl_crl(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *crl); ngx_int_t ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, - ngx_str_t *responder, ngx_str_t *file); + ngx_str_t *file, ngx_str_t *responder, ngx_uint_t verify); ngx_int_t ngx_ssl_stapling_resolver(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_resolver_t *resolver, ngx_msec_t resolver_timeout); RSA *ngx_ssl_rsa512_key_callback(SSL *ssl, int is_export, int key_length); Modified: trunk/src/event/ngx_event_openssl_stapling.c =================================================================== --- trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:51:27 UTC (rev 4879) +++ trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:53:11 UTC (rev 4880) @@ -33,7 +33,8 @@ time_t valid; - ngx_uint_t loading; /* unsigned:1 */ + unsigned verify:1; + unsigned loading:1; } ngx_ssl_stapling_t; @@ -114,8 +115,8 @@ ngx_int_t -ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *responder, - ngx_str_t *file) +ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file, + ngx_str_t *responder, ngx_uint_t verify) { ngx_int_t rc; ngx_pool_cleanup_t *cln; @@ -144,6 +145,7 @@ staple->ssl_ctx = ssl->ctx; staple->timeout = 60000; + staple->verify = verify; if (file->len) { /* use OCSP response from the file */ @@ -588,7 +590,10 @@ chain = staple->ssl_ctx->extra_certs; #endif - if (OCSP_basic_verify(basic, chain, store, OCSP_TRUSTOTHER) != 1) { + if (OCSP_basic_verify(basic, chain, store, + staple->verify ? 
OCSP_TRUSTOTHER : OCSP_NOVERIFY) + != 1) + { ngx_ssl_error(NGX_LOG_ERR, ctx->log, 0, "OCSP_basic_verify() failed"); goto error; Modified: trunk/src/http/modules/ngx_http_ssl_module.c =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-01 12:51:27 UTC (rev 4879) +++ trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-01 12:53:11 UTC (rev 4880) @@ -182,6 +182,13 @@ offsetof(ngx_http_ssl_srv_conf_t, stapling_responder), NULL }, + { ngx_string("ssl_stapling_verify"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_SRV_CONF_OFFSET, + offsetof(ngx_http_ssl_srv_conf_t, stapling_verify), + NULL }, + ngx_null_command }; @@ -370,6 +377,7 @@ sscf->builtin_session_cache = NGX_CONF_UNSET; sscf->session_timeout = NGX_CONF_UNSET; sscf->stapling = NGX_CONF_UNSET; + sscf->stapling_verify = NGX_CONF_UNSET; return sscf; } @@ -424,6 +432,7 @@ ngx_conf_merge_str_value(conf->ciphers, prev->ciphers, NGX_DEFAULT_CIPHERS); ngx_conf_merge_value(conf->stapling, prev->stapling, 0); + ngx_conf_merge_value(conf->stapling_verify, prev->stapling_verify, 0); ngx_conf_merge_str_value(conf->stapling_file, prev->stapling_file, ""); ngx_conf_merge_str_value(conf->stapling_responder, prev->stapling_responder, ""); @@ -565,8 +574,8 @@ if (conf->stapling) { - if (ngx_ssl_stapling(cf, &conf->ssl, &conf->stapling_responder, - &conf->stapling_file) + if (ngx_ssl_stapling(cf, &conf->ssl, &conf->stapling_file, + &conf->stapling_responder, conf->stapling_verify) != NGX_OK) { return NGX_CONF_ERROR; Modified: trunk/src/http/modules/ngx_http_ssl_module.h =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.h 2012-10-01 12:51:27 UTC (rev 4879) +++ trunk/src/http/modules/ngx_http_ssl_module.h 2012-10-01 12:53:11 UTC (rev 4880) @@ -43,6 +43,7 @@ ngx_shm_zone_t *shm_zone; ngx_flag_t stapling; + ngx_flag_t stapling_verify; ngx_str_t stapling_file; ngx_str_t stapling_responder; From mdounin at mdounin.ru Mon Oct 1 13:54:13 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 1 Oct 2012 13:54:13 +0000 Subject: [nginx] svn commit: r4881 - trunk/src/event Message-ID: <20121001135413.ABC613F9C11@mail.nginx.com> Author: mdounin Date: 2012-10-01 13:54:13 +0000 (Mon, 01 Oct 2012) New Revision: 4881 URL: http://trac.nginx.org/nginx/changeset/4881/nginx Log: OCSP stapling: build fixes. With the "ssl_stapling_verify" commit build with old OpenSSL libraries was broken due to incorrect prototype of the ngx_ssl_stapling() function. One incorrect use of ngx_log_debug() instead of ngx_log_debug2() slipped in and broke win32 build. 
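Some background on the second fix: nginx defines fixed-arity debug macros ngx_log_debug0() through ngx_log_debug8(), and on compilers without variadic macro support (such as the MSVC used for win32 builds) only these numbered forms exist, so the suffix has to match the number of format arguments. A minimal sketch, assuming a caller that has an ngx_log_t at hand; the function and variable names are invented, the macros are the real nginx ones.

    #include <ngx_config.h>
    #include <ngx_core.h>

    static void
    debug_logging_example(ngx_log_t *log, size_t len, int escape)
    {
        /* no format arguments: the 0-suffixed macro */
        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, log, 0, "starting example");

        /* two format arguments: must be ngx_log_debug2(); plain
         * ngx_log_debug() only exists where variadic macros do, which
         * is what broke the win32 build */
        ngx_log_debug2(NGX_LOG_DEBUG_EVENT, log, 0,
                       "example length %uz, escape %d", len, escape);
    }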
Modified: trunk/src/event/ngx_event_openssl_stapling.c Modified: trunk/src/event/ngx_event_openssl_stapling.c =================================================================== --- trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 12:53:11 UTC (rev 4880) +++ trunk/src/event/ngx_event_openssl_stapling.c 2012-10-01 13:54:13 UTC (rev 4881) @@ -1142,9 +1142,9 @@ escape = ngx_escape_uri(NULL, base64.data, base64.len, NGX_ESCAPE_URI_COMPONENT); - ngx_log_debug(NGX_LOG_DEBUG_EVENT, ctx->log, 0, - "ssl ocsp request length %z, escape %d", - base64.len, escape); + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ctx->log, 0, + "ssl ocsp request length %z, escape %d", + base64.len, escape); len = sizeof("GET ") - 1 + ctx->uri.len + sizeof("/") - 1 + base64.len + 2 * escape + sizeof(" HTTP/1.0" CRLF) - 1 @@ -1729,8 +1729,8 @@ ngx_int_t -ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *responder, - ngx_str_t *file) +ngx_ssl_stapling(ngx_conf_t *cf, ngx_ssl_t *ssl, ngx_str_t *file, + ngx_str_t *responder, ngx_uint_t verify) { ngx_log_error(NGX_LOG_WARN, ssl->log, 0, "\"ssl_stapling\" ignored, not supported"); From daniel.black at openquery.com Tue Oct 2 02:09:00 2012 From: daniel.black at openquery.com (Daniel Black) Date: Tue, 2 Oct 2012 12:09:00 +1000 (EST) Subject: [PATCH] remove ngx_ssl_server_conf_index In-Reply-To: <1914606367.342.1349142248170.JavaMail.root@zimbra.lentz.com.au> Message-ID: <34955423.375.1349143740573.JavaMail.root@zimbra.lentz.com.au> Just a minor cleanup as I was doing some development around the openssl event code. As we're adding indexes for ngx_ssl_certificate_index and ngx_ssl_stapling_index it makes sense to remove the ones we're not using. The patch removes the configuration pointer ngx_ssl_server_conf_index out of the SSL context. As the pointer could be a ngx_http_ssl_srv_conf_t or a ngx_mail_ssl_conf_t it isn't particularly useful as you can't reliable cast it in any event_openssl callback. Perhaps for this reason it hasn't been used. -- Daniel Black -------------- next part -------------- A non-text attachment was scrubbed... Name: remove_ngx_ssl_server_conf_index.patch Type: text/x-patch Size: 2534 bytes Desc: not available URL: From daniel.black at openquery.com Tue Oct 2 02:09:32 2012 From: daniel.black at openquery.com (Daniel Black) Date: Tue, 2 Oct 2012 12:09:32 +1000 (EST) Subject: [PATCH] cast results of ngx_ssl_get_connection to right type In-Reply-To: <1099915785.351.1349142539426.JavaMail.root@zimbra.lentz.com.au> Message-ID: <542292977.378.1349143772483.JavaMail.root@zimbra.lentz.com.au> cast the output of the ngx_ssl_get_connection to (ngx_connection_t *) since by default the macro returns a void *. This is so we can ngx_ssl_get_connection(ssl_ctx)->log directly that assigning it to an intermediate. It will also generate a warning if we use the wrong variable type. -- Daniel Black -------------- next part -------------- A non-text attachment was scrubbed... 
Name: ngx_ssl_get_connection_macro_cast.patch Type: text/x-patch Size: 945 bytes Desc: not available URL: From daniel.black at openquery.com Tue Oct 2 02:09:48 2012 From: daniel.black at openquery.com (Daniel Black) Date: Tue, 2 Oct 2012 12:09:48 +1000 (EST) Subject: [PATCH] rfc5077 session tickets In-Reply-To: <1383033407.357.1349142802790.JavaMail.root@zimbra.lentz.com.au> Message-ID: <576328333.381.1349143788781.JavaMail.root@zimbra.lentz.com.au> For a quick summary of session tickets look at http://vincent.bernat.im/en/blog/2011-ssl-session-reuse-rfc5077.html and for a longer version read the rfc. Session tickets are supported in chrome and firefox browsers. Both session tickets and session id (the current session implementation) allow the server to resume SSL/TLS session with a quicker round trip and less cryptographic material generation. The advantage of session tickets over session ids is that the server can now handle an unlimited number of session resumption clients using a fixed amount of shared memory (52 bytes). It also handles resumption for client side certificates. Because there's no memory cost for these they have a larger timeout value allowing those clients to have a quicker resumption (one round trip quicker) when they connect within the ssl_ticket_timeout. This also disables session tickets when a shared memory isn't setup. This is a current problem when there is more than one worker. By default openssl handles session tickets using an internal buffer that isn't shared between processes. As such clients are likely to go through a renegotiation. This requires previous sent patches: [PATCH] allow printing of string buffers in hex format [PATCH] cast results of ngx_ssl_get_connection to right type Originally submitted as part of http://trac.nginx.org/nginx/ticket/120 The test plan nginx-rfc5077-testplan.txt? attached to trac ticket enables verification of the functionality. -- Daniel Black -------------- next part -------------- A non-text attachment was scrubbed... Name: rfc5077.patch Type: text/x-patch Size: 18286 bytes Desc: not available URL: From mdounin at mdounin.ru Tue Oct 2 10:03:59 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 2 Oct 2012 14:03:59 +0400 Subject: [PATCH] remove ngx_ssl_server_conf_index In-Reply-To: <34955423.375.1349143740573.JavaMail.root@zimbra.lentz.com.au> References: <1914606367.342.1349142248170.JavaMail.root@zimbra.lentz.com.au> <34955423.375.1349143740573.JavaMail.root@zimbra.lentz.com.au> Message-ID: <20121002100359.GY40452@mdounin.ru> Hello! On Tue, Oct 02, 2012 at 12:09:00PM +1000, Daniel Black wrote: > > Just a minor cleanup as I was doing some development around the > openssl event code. As we're adding indexes for > ngx_ssl_certificate_index and ngx_ssl_stapling_index it makes > sense to remove the ones we're not using. > > The patch removes the configuration pointer > ngx_ssl_server_conf_index out of the SSL context. > > As the pointer could be a ngx_http_ssl_srv_conf_t or a > ngx_mail_ssl_conf_t > it isn't particularly useful as you can't reliable cast it in > any event_openssl callback. This isn't something expected to be used in callbacks set by generic code in ngx_event_openssl.c, but rather something intended to be used by a party which calls ngx_ssl_create(). It was previously used by a session cache code when it was in ngx_http_ssl_module.c, but not used now as session cache code was made generic and moved into ngx_event_openssl.c. 
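To make the intended use concrete: the party that calls ngx_ssl_create() passes its own configuration pointer as the data argument and can later recover it from the SSL_CTX, for example in an OpenSSL callback it installs itself, since it alone knows the concrete type. A sketch under that assumption follows; the callback is hypothetical, while ngx_ssl_conn_t, ngx_ssl_get_server_conf() and the ngx_ssl_create() contract are real nginx names.

    #include <ngx_config.h>
    #include <ngx_core.h>
    #include <ngx_event.h>
    #include <ngx_http.h>

    /* hypothetical callback installed by ngx_http_ssl_module itself;
     * because that module made the ngx_ssl_create(&sscf->ssl, ..., sscf)
     * call, it knows the stored pointer is an ngx_http_ssl_srv_conf_t */
    static int
    example_servername_callback(ngx_ssl_conn_t *ssl_conn, int *ad, void *arg)
    {
        SSL_CTX                  *ssl_ctx;
        ngx_http_ssl_srv_conf_t  *sscf;

        ssl_ctx = SSL_get_SSL_CTX(ssl_conn);

        /* ngx_ssl_get_server_conf() is SSL_CTX_get_ex_data() with
         * ngx_ssl_server_conf_index */
        sscf = ngx_ssl_get_server_conf(ssl_ctx);

        if (sscf == NULL) {
            return SSL_TLSEXT_ERR_NOACK;
        }

        /* ... use sscf->verify, sscf->client_certificate, etc. ... */

        return SSL_TLSEXT_ERR_OK;
    }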
I don't think it should be removed though, as it might still be usable in some cases. [...] > @@ -169,12 +160,6 @@ ngx_ssl_create(ngx_ssl_t *ssl, ngx_uint_t protocols, void *data) > return NGX_ERROR; > } > > - if (SSL_CTX_set_ex_data(ssl->ctx, ngx_ssl_server_conf_index, data) == 0) { > - ngx_ssl_error(NGX_LOG_EMERG, ssl->log, 0, > - "SSL_CTX_set_ex_data() failed"); > - return NGX_ERROR; > - } > - Just a side note: it doesn't make sense to remove SSL_CTX_set_ex_data(ngx_ssl_server_conf_index) but preserve "data" argument of ngx_ssl_create(). [...] -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Tue Oct 2 10:09:52 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 2 Oct 2012 14:09:52 +0400 Subject: [PATCH] cast results of ngx_ssl_get_connection to right type In-Reply-To: <542292977.378.1349143772483.JavaMail.root@zimbra.lentz.com.au> References: <1099915785.351.1349142539426.JavaMail.root@zimbra.lentz.com.au> <542292977.378.1349143772483.JavaMail.root@zimbra.lentz.com.au> Message-ID: <20121002100952.GZ40452@mdounin.ru> Hello! On Tue, Oct 02, 2012 at 12:09:32PM +1000, Daniel Black wrote: > > cast the output of the ngx_ssl_get_connection to (ngx_connection_t *) since by default the macro returns a void *. > > This is so we can ngx_ssl_get_connection(ssl_ctx)->log directly that assigning it to an intermediate. It will also generate a warning if we use the wrong variable type. > > -- > Daniel Black > Author: Daniel Black > Date: Mon Oct 1 17:46:49 2012 +1000 > > cast the output of the ngx_ssl_get_connection to (ngx_connection_t *) since by default it is a void *. Any user of the macro will need to cast it anyway > > diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h > index 3315ffb..50cc7f5 100644 > --- a/src/event/ngx_event_openssl.h > +++ b/src/event/ngx_event_openssl.h > @@ -123,7 +123,7 @@ ngx_int_t ngx_ssl_set_session(ngx_connection_t *c, ngx_ssl_session_t *session); > #define ngx_ssl_get_session(c) SSL_get1_session(c->ssl->connection) > #define ngx_ssl_free_session SSL_SESSION_free > #define ngx_ssl_get_connection(ssl_conn) \ > - SSL_get_ex_data(ssl_conn, ngx_ssl_connection_index) > + ((ngx_connection_t *) SSL_get_ex_data(ssl_conn, ngx_ssl_connection_index)) > > > ngx_int_t ngx_ssl_get_protocol(ngx_connection_t *c, ngx_pool_t *pool, I would rather not, thanks. Assigning to an intermediate variable is something required anyway in most cases, and even if it's not - it makes code much more readable. -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Tue Oct 2 10:25:58 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 2 Oct 2012 14:25:58 +0400 Subject: [PATCH] rfc5077 session tickets In-Reply-To: <576328333.381.1349143788781.JavaMail.root@zimbra.lentz.com.au> References: <1383033407.357.1349142802790.JavaMail.root@zimbra.lentz.com.au> <576328333.381.1349143788781.JavaMail.root@zimbra.lentz.com.au> Message-ID: <20121002102558.GA40452@mdounin.ru> Hello! On Tue, Oct 02, 2012 at 12:09:48PM +1000, Daniel Black wrote: > > For a quick summary of session tickets look at http://vincent.bernat.im/en/blog/2011-ssl-session-reuse-rfc5077.html and for a longer version read the rfc. > > Session tickets are supported in chrome and firefox browsers. > > Both session tickets and session id (the current session implementation) allow the server to resume SSL/TLS session with a quicker round trip and less cryptographic material generation. 
> > The advantage of session tickets over session ids is that the server can now handle an unlimited number of session resumption clients using a fixed amount of shared memory (52 bytes). > > It also handles resumption for client side certificates. > > Because there's no memory cost for these they have a larger timeout value allowing those clients to have a quicker resumption (one round trip quicker) when they connect within the ssl_ticket_timeout. > > This also disables session tickets when a shared memory isn't setup. This is a current problem when there is more than one worker. By default openssl handles session tickets using an internal buffer that isn't shared between processes. As such clients are likely to go through a renegotiation. > > This requires previous sent patches: > [PATCH] allow printing of string buffers in hex format > [PATCH] cast results of ngx_ssl_get_connection to right type > > Originally submitted as part of http://trac.nginx.org/nginx/ticket/120 As I already replied in the ticket, the only real thing that the patch adds compared to what we have now with OpenSSL's default session ticket code is session ticket timeout. While it's something valuable per se, it's unlikely deserve the amount of code introduced. On the other hand, the patch breaks existing session tickets support if there are multiple worker processes configure unless shared ssl_session_cache is configure as well. [...] -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Tue Oct 2 13:00:09 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 2 Oct 2012 17:00:09 +0400 Subject: A possible bug in ngx_rbtree In-Reply-To: <000001cd9d8a$19dacdb0$4d906910$@com> References: <000001cd9d8a$19dacdb0$4d906910$@com> Message-ID: <20121002130009.GC40452@mdounin.ru> Hello! On Fri, Sep 28, 2012 at 11:01:12AM -0400, YongFeng Wu wrote: > Hi, > > > > We just found a worker process was stuck in an infinite loop, in function > ngx_open_file_lookup(). Checking the open file cache RB tree with GDB shows > the following: > > > > (gdb) p cache->rbtree.root->right > > $3 = (ngx_rbtree_node_t *) 0x80122f900 > > (gdb) p cache->rbtree.root->right->right > > $4 = (ngx_rbtree_node_t *) 0x8040ea400 > > (gdb) p cache->rbtree.root->right->right->left > > $5 = (ngx_rbtree_node_t *) 0x801236980 > > > > (gdb) p cache->rbtree.root->right->right->left->right > > $6 = (ngx_rbtree_node_t *) 0x8090ee080 > > (gdb) p cache->rbtree.root->right->right->left->right->right > > $7 = (ngx_rbtree_node_t *) 0x804aab280 > > (gdb) p cache->rbtree.root->right->right->left->right->right->left > > $8 = (ngx_rbtree_node_t *) 0x804aabf00 [...] > That means the $9 == $9->parent->parent->parent, so the infinite > loop. > > I think there might be a bug in ngx_rbtree.c. I'll really appreciate it if > somebody can look into it. I've looked though code again and don't see any obvious problems. Could you please provide more details? It would be helpful to see "nginx -V" output, and to make sure there are no 3rd party modules/patches. It might be also helpful to look at "cache", "*cache" and "cache->rbtree", in particular at root and sentinel addresses, as the best guess for now is some memory corruption. Stack trace might be also helpful. 
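For context, the lookup that was spinning follows the usual nginx rbtree pattern, roughly as sketched below (example_rbtree_lookup() is not a real nginx function, just the shape shared by ngx_open_file_lookup() and similar lookups). The loop terminates only by reaching the sentinel or finding the key, so child links corrupted into a cycle, which is what the gdb output above suggests, make it spin forever.

    #include <ngx_config.h>
    #include <ngx_core.h>

    static ngx_rbtree_node_t *
    example_rbtree_lookup(ngx_rbtree_t *rbtree, ngx_rbtree_key_t key)
    {
        ngx_rbtree_node_t  *node, *sentinel;

        node = rbtree->root;
        sentinel = rbtree->sentinel;

        while (node != sentinel) {

            if (key < node->key) {
                node = node->left;
                continue;
            }

            if (key > node->key) {
                node = node->right;
                continue;
            }

            /* real lookups disambiguate equal keys further,
             * e.g. by comparing file names */
            return node;
        }

        return NULL;
    }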
-- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Tue Oct 2 13:33:37 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 2 Oct 2012 13:33:37 +0000 Subject: [nginx] svn commit: r4882 - trunk/docs/xml/nginx Message-ID: <20121002133338.8AD683F9C11@mail.nginx.com> Author: mdounin Date: 2012-10-02 13:33:37 +0000 (Tue, 02 Oct 2012) New Revision: 4882 URL: http://trac.nginx.org/nginx/changeset/4882/nginx Log: nginx-1.3.7-RELEASE Modified: trunk/docs/xml/nginx/changes.xml Modified: trunk/docs/xml/nginx/changes.xml =================================================================== --- trunk/docs/xml/nginx/changes.xml 2012-10-01 13:54:13 UTC (rev 4881) +++ trunk/docs/xml/nginx/changes.xml 2012-10-02 13:33:37 UTC (rev 4882) @@ -5,6 +5,53 @@ + + + + +????????? OCSP stapling.
+Спасибо Comodo, DigiCert и GlobalSign за спонсирование разработки. +
+ +OCSP stapling support.
+Thanks to Comodo, DigiCert and GlobalSign for sponsoring this work. +
+
+ + + +директива ssl_trusted_certificate. + + +the "ssl_trusted_certificate" directive. + + + + + +теперь resolver случайным образом меняет порядок +возвращаемых закэшированных адресов.
+Спасибо Антону Жулину. +
+ +resolver now randomly rotates addresses +returned from cache.
+Thanks to Anton Jouline. +
+
+ + + +совместимость с OpenSSL 0.9.7. + + +OpenSSL 0.9.7 compatibility. + + + +
+ + From mdounin at mdounin.ru Tue Oct 2 13:34:00 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 2 Oct 2012 13:34:00 +0000 Subject: [nginx] svn commit: r4883 - tags Message-ID: <20121002133400.439733F9C11@mail.nginx.com> Author: mdounin Date: 2012-10-02 13:33:58 +0000 (Tue, 02 Oct 2012) New Revision: 4883 URL: http://trac.nginx.org/nginx/changeset/4883/nginx Log: release-1.3.7 tag Added: tags/release-1.3.7/ From ywu at about.com Tue Oct 2 15:08:27 2012 From: ywu at about.com (YongFeng Wu) Date: Tue, 2 Oct 2012 11:08:27 -0400 Subject: A possible bug in ngx_rbtree In-Reply-To: <20121002130009.GC40452@mdounin.ru> References: <000001cd9d8a$19dacdb0$4d906910$@com> <20121002130009.GC40452@mdounin.ru> Message-ID: <001501cda0af$cd58eb10$680ac130$@com> Hi Maxim, Thank you so much for looking into it: Following is the nginx -V output: nginx version: nginx/1.2.3 built by gcc 4.2.1 20070719 [FreeBSD] configure arguments: --with-pcre --with-debug --with-http_geoip_module --with-http_stub_status_module Following is the content for cache, cache->rbtree, root and sentinel: (gdb) p cache $5 = (ngx_open_file_cache_t *) 0x8012495f8 (gdb) p *cache $6 = {rbtree = {root = 0x803377580, sentinel = 0x801249610, insert = 0x423ba0 }, sentinel = { key = 0, left = 0x0, right = 0x0, parent = 0x8044d4100, color = 0 '\0', data = 0 '\0'}, expire_queue = {prev = 0x8044d4ba8, next = 0x803376228}, current = 999, max = 1000, inactive = 60} (gdb) p cache->rbtree $7 = {root = 0x803377580, sentinel = 0x801249610, insert = 0x423ba0 } (gdb) p *cache->rbtree->root $8 = {key = 2661524630, left = 0x801230100, right = 0x8032b4100, parent = 0x0, color = 0 '\0', data = 46 '.'} (gdb) p cache->sentinel $9 = {key = 0, left = 0x0, right = 0x0, parent = 0x8044d4100, color = 0 '\0', data = 0 '\0'} (gdb) p *cache->sentinel->parent->parent->parent->parent->parent->parent->parent->pa rent $10 = {key = 3464485871, left = 0x803b35680, right = 0x8032b4d00, parent = 0x803377580, color = 0 '\0', data = 0 '\0'} (gdb) p *cache->sentinel->parent->parent->parent->parent->parent->parent->parent->pa rent->parent $11 = {key = 2661524630, left = 0x801230100, right = 0x8032b4100, parent = 0x0, color = 0 '\0', data = 46 '.'} One thing weird is that the sentinel->parent is not null but set to a node. Could this cause some problems? sentinel->parent could be set in code like (function ngx_rbtree_delete, line 209 in ngx_rbtree.c, version 1.2.3): if (subst == node) { temp->parent = subst->parent; } else { Again, thank you for your help. Yongfeng Wu -----Original Message----- From: Maxim Dounin [mailto:mdounin at mdounin.ru] Sent: Tuesday, October 02, 2012 9:00 AM To: nginx-devel at nginx.org; ywu at about.com Subject: Re: A possible bug in ngx_rbtree Hello! On Fri, Sep 28, 2012 at 11:01:12AM -0400, YongFeng Wu wrote: > Hi, > > > > We just found a worker process was stuck in an infinite loop, in > function ngx_open_file_lookup(). 
Checking the open file cache RB tree > with GDB shows the following: > > > > (gdb) p cache->rbtree.root->right > > $3 = (ngx_rbtree_node_t *) 0x80122f900 > > (gdb) p cache->rbtree.root->right->right > > $4 = (ngx_rbtree_node_t *) 0x8040ea400 > > (gdb) p cache->rbtree.root->right->right->left > > $5 = (ngx_rbtree_node_t *) 0x801236980 > > > > (gdb) p cache->rbtree.root->right->right->left->right > > $6 = (ngx_rbtree_node_t *) 0x8090ee080 > > (gdb) p cache->rbtree.root->right->right->left->right->right > > $7 = (ngx_rbtree_node_t *) 0x804aab280 > > (gdb) p cache->rbtree.root->right->right->left->right->right->left > > $8 = (ngx_rbtree_node_t *) 0x804aabf00 [...] > That means the $9 == $9->parent->parent->parent, so the infinite loop. > > I think there might be a bug in ngx_rbtree.c. I'll really appreciate > it if somebody can look into it. I've looked though code again and don't see any obvious problems. Could you please provide more details? It would be helpful to see "nginx -V" output, and to make sure there are no 3rd party modules/patches. It might be also helpful to look at "cache", "*cache" and "cache->rbtree", in particular at root and sentinel addresses, as the best guess for now is some memory corruption. Stack trace might be also helpful. -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Tue Oct 2 16:28:16 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 2 Oct 2012 20:28:16 +0400 Subject: A possible bug in ngx_rbtree In-Reply-To: <001501cda0af$cd58eb10$680ac130$@com> References: <000001cd9d8a$19dacdb0$4d906910$@com> <20121002130009.GC40452@mdounin.ru> <001501cda0af$cd58eb10$680ac130$@com> Message-ID: <20121002162816.GM40452@mdounin.ru> Hello! On Tue, Oct 02, 2012 at 11:08:27AM -0400, YongFeng Wu wrote: > > Hi Maxim, > > Thank you so much for looking into it: > > Following is the nginx -V output: > > nginx version: nginx/1.2.3 > built by gcc 4.2.1 20070719 [FreeBSD] > configure arguments: --with-pcre --with-debug --with-http_geoip_module > --with-http_stub_status_module Is geoip module actually used? Unfortunately MaxMind's GeoIP library it uses is known to do bad things if used with corrupted database file, this may be a culprit. It might be also good idea to make sure hardware problems are ruled out. From your initial message I conclude you've only seen this only once on a single server, is it correct? > Following is the content for cache, cache->rbtree, root and sentinel: > > (gdb) p cache > $5 = (ngx_open_file_cache_t *) 0x8012495f8 > (gdb) p *cache > $6 = {rbtree = {root = 0x803377580, sentinel = 0x801249610, insert = > 0x423ba0 }, sentinel = { > key = 0, left = 0x0, right = 0x0, parent = 0x8044d4100, color = 0 '\0', > data = 0 '\0'}, expire_queue = {prev = 0x8044d4ba8, > next = 0x803376228}, current = 999, max = 1000, inactive = 60} > (gdb) p cache->rbtree > $7 = {root = 0x803377580, sentinel = 0x801249610, insert = 0x423ba0 > } > (gdb) p *cache->rbtree->root > $8 = {key = 2661524630, left = 0x801230100, right = 0x8032b4100, parent = > 0x0, color = 0 '\0', data = 46 '.'} [...] Looks fine, i.e. nothing suspicious here. > One thing weird is that the sentinel->parent is not null but set to a node. > Could this cause some problems? 
sentinel->parent could be set in code like > (function ngx_rbtree_delete, line 209 in ngx_rbtree.c, version 1.2.3): > > if (subst == node) { > > temp->parent = subst->parent; > > } else { This code is not exactly right (and catched my eye during re-looking though the code, too) and we might want to change it to do nothing if temp == sentinel, but it should be completely harmless. -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Wed Oct 3 15:22:19 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Wed, 3 Oct 2012 15:22:19 +0000 Subject: [nginx] svn commit: r4884 - in trunk/src: core http/modules/perl Message-ID: <20121003152219.F27243F9C0D@mail.nginx.com> Author: mdounin Date: 2012-10-03 15:22:18 +0000 (Wed, 03 Oct 2012) New Revision: 4884 URL: http://trac.nginx.org/nginx/changeset/4884/nginx Log: Version bump. Modified: trunk/src/core/nginx.h trunk/src/http/modules/perl/nginx.pm Modified: trunk/src/core/nginx.h =================================================================== --- trunk/src/core/nginx.h 2012-10-02 13:33:58 UTC (rev 4883) +++ trunk/src/core/nginx.h 2012-10-03 15:22:18 UTC (rev 4884) @@ -9,8 +9,8 @@ #define _NGINX_H_INCLUDED_ -#define nginx_version 1003007 -#define NGINX_VERSION "1.3.7" +#define nginx_version 1003008 +#define NGINX_VERSION "1.3.8" #define NGINX_VER "nginx/" NGINX_VERSION #define NGINX_VAR "NGINX" Modified: trunk/src/http/modules/perl/nginx.pm =================================================================== --- trunk/src/http/modules/perl/nginx.pm 2012-10-02 13:33:58 UTC (rev 4883) +++ trunk/src/http/modules/perl/nginx.pm 2012-10-03 15:22:18 UTC (rev 4884) @@ -50,7 +50,7 @@ HTTP_INSUFFICIENT_STORAGE ); -our $VERSION = '1.3.7'; +our $VERSION = '1.3.8'; require XSLoader; XSLoader::load('nginx', $VERSION); From mdounin at mdounin.ru Wed Oct 3 15:24:08 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Wed, 3 Oct 2012 15:24:08 +0000 Subject: [nginx] svn commit: r4885 - in trunk/src: event http http/modules Message-ID: <20121003152408.AFEDF3F9C0D@mail.nginx.com> Author: mdounin Date: 2012-10-03 15:24:08 +0000 (Wed, 03 Oct 2012) New Revision: 4885 URL: http://trac.nginx.org/nginx/changeset/4885/nginx Log: SSL: the "ssl_verify_client" directive parameter "optional_no_ca". This parameter allows to don't require certificate to be signed by a trusted CA, e.g. if CA certificate isn't known in advance, like in WebID protocol. Note that it doesn't add any security unless the certificate is actually checked to be trusted by some external means (e.g. by a backend). Patch by Mike Kazantsev, Eric O'Connor. 
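A sketch of the kind of external check the log entry refers to: a backend that receives the client certificate from nginx (for example via a header populated from $ssl_client_cert) can verify it against its own CA store with plain OpenSSL. Everything below (file paths, function name, error handling) is illustrative and not part of nginx.

    #include <stdio.h>
    #include <openssl/pem.h>
    #include <openssl/x509_vfy.h>

    /* illustrative only: verify a PEM client certificate, as forwarded
     * by nginx, against a CA bundle on the backend side */
    static int
    verify_forwarded_cert(const char *cert_pem_path, const char *ca_bundle)
    {
        int              rc = -1;
        FILE            *fp;
        X509            *cert = NULL;
        X509_STORE      *store = NULL;
        X509_STORE_CTX  *ctx = NULL;

        fp = fopen(cert_pem_path, "r");
        if (fp == NULL) {
            return -1;
        }

        cert = PEM_read_X509(fp, NULL, NULL, NULL);
        fclose(fp);

        store = X509_STORE_new();
        ctx = X509_STORE_CTX_new();

        if (cert != NULL && store != NULL && ctx != NULL
            && X509_STORE_load_locations(store, ca_bundle, NULL) == 1
            && X509_STORE_CTX_init(ctx, store, cert, NULL) == 1)
        {
            rc = (X509_verify_cert(ctx) == 1) ? 0 : -1;
        }

        if (ctx != NULL) {
            X509_STORE_CTX_free(ctx);
        }

        if (store != NULL) {
            X509_STORE_free(store);
        }

        if (cert != NULL) {
            X509_free(cert);
        }

        return rc;
    }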
Modified: trunk/src/event/ngx_event_openssl.h trunk/src/http/modules/ngx_http_ssl_module.c trunk/src/http/ngx_http_request.c Modified: trunk/src/event/ngx_event_openssl.h =================================================================== --- trunk/src/event/ngx_event_openssl.h 2012-10-03 15:22:18 UTC (rev 4884) +++ trunk/src/event/ngx_event_openssl.h 2012-10-03 15:24:08 UTC (rev 4885) @@ -127,7 +127,14 @@ #define ngx_ssl_get_server_conf(ssl_ctx) \ SSL_CTX_get_ex_data(ssl_ctx, ngx_ssl_server_conf_index) +#define ngx_ssl_verify_error_optional(n) \ + (n == X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT \ + || n == X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN \ + || n == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY \ + || n == X509_V_ERR_CERT_UNTRUSTED \ + || n == X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE) + ngx_int_t ngx_ssl_get_protocol(ngx_connection_t *c, ngx_pool_t *pool, ngx_str_t *s); ngx_int_t ngx_ssl_get_cipher_name(ngx_connection_t *c, ngx_pool_t *pool, Modified: trunk/src/http/modules/ngx_http_ssl_module.c =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-03 15:22:18 UTC (rev 4884) +++ trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-03 15:24:08 UTC (rev 4885) @@ -50,6 +50,7 @@ { ngx_string("off"), 0 }, { ngx_string("on"), 1 }, { ngx_string("optional"), 2 }, + { ngx_string("optional_no_ca"), 3 }, { ngx_null_string, 0 } }; @@ -515,7 +516,7 @@ if (conf->verify) { - if (conf->client_certificate.len == 0) { + if (conf->client_certificate.len == 0 && conf->verify != 3) { ngx_log_error(NGX_LOG_EMERG, cf->log, 0, "no ssl_client_certificate for ssl_client_verify"); return NGX_CONF_ERROR; Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2012-10-03 15:22:18 UTC (rev 4884) +++ trunk/src/http/ngx_http_request.c 2012-10-03 15:24:08 UTC (rev 4885) @@ -1642,7 +1642,9 @@ if (sscf->verify) { rc = SSL_get_verify_result(c->ssl->connection); - if (rc != X509_V_OK) { + if (rc != X509_V_OK + && (sscf->verify != 3 || !ngx_ssl_verify_error_optional(rc))) + { ngx_log_error(NGX_LOG_INFO, c->log, 0, "client SSL certificate verify error: (%l:%s)", rc, X509_verify_cert_error_string(rc)); From mdounin at mdounin.ru Wed Oct 3 15:25:06 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Wed, 3 Oct 2012 15:25:06 +0000 Subject: [nginx] svn commit: r4886 - trunk/src/http/modules Message-ID: <20121003152506.EA6243F9C0D@mail.nginx.com> Author: mdounin Date: 2012-10-03 15:25:06 +0000 (Wed, 03 Oct 2012) New Revision: 4886 URL: http://trac.nginx.org/nginx/changeset/4886/nginx Log: Log: $apache_bytes_sent removed. It was renamed to $body_bytes_sent in nginx 0.3.10 and the old name is deprecated since then. 
Modified: trunk/src/http/modules/ngx_http_log_module.c Modified: trunk/src/http/modules/ngx_http_log_module.c =================================================================== --- trunk/src/http/modules/ngx_http_log_module.c 2012-10-03 15:24:08 UTC (rev 4885) +++ trunk/src/http/modules/ngx_http_log_module.c 2012-10-03 15:25:06 UTC (rev 4886) @@ -209,8 +209,6 @@ { ngx_string("bytes_sent"), NGX_OFF_T_LEN, ngx_http_log_bytes_sent }, { ngx_string("body_bytes_sent"), NGX_OFF_T_LEN, ngx_http_log_body_bytes_sent }, - { ngx_string("apache_bytes_sent"), NGX_OFF_T_LEN, - ngx_http_log_body_bytes_sent }, { ngx_string("request_length"), NGX_SIZE_T_LEN, ngx_http_log_request_length }, @@ -1143,12 +1141,6 @@ goto invalid; } - if (ngx_strncmp(var.data, "apache_bytes_sent", 17) == 0) { - ngx_conf_log_error(NGX_LOG_WARN, cf, 0, - "use \"$body_bytes_sent\" instead of " - "\"$apache_bytes_sent\""); - } - for (v = ngx_http_log_vars; v->name.len; v++) { if (v->name.len == var.len From mdounin at mdounin.ru Wed Oct 3 15:25:37 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Wed, 3 Oct 2012 15:25:37 +0000 Subject: [nginx] svn commit: r4887 - trunk/src/http Message-ID: <20121003152537.5728C3F9F0F@mail.nginx.com> Author: mdounin Date: 2012-10-03 15:25:36 +0000 (Wed, 03 Oct 2012) New Revision: 4887 URL: http://trac.nginx.org/nginx/changeset/4887/nginx Log: Variable $bytes_sent. It replicates variable $bytes_sent as previously available in log module only. Patch by Benjamin Gr?\195?\182ssing (with minor changes). Modified: trunk/src/http/ngx_http_variables.c Modified: trunk/src/http/ngx_http_variables.c =================================================================== --- trunk/src/http/ngx_http_variables.c 2012-10-03 15:25:06 UTC (rev 4886) +++ trunk/src/http/ngx_http_variables.c 2012-10-03 15:25:36 UTC (rev 4887) @@ -69,6 +69,8 @@ ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_remote_user(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_http_variable_bytes_sent(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_body_bytes_sent(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_request_completion(ngx_http_request_t *r, @@ -212,6 +214,9 @@ { ngx_string("remote_user"), NULL, ngx_http_variable_remote_user, 0, 0, 0 }, + { ngx_string("bytes_sent"), NULL, ngx_http_variable_bytes_sent, + 0, 0, 0 }, + { ngx_string("body_bytes_sent"), NULL, ngx_http_variable_body_bytes_sent, 0, 0, 0 }, @@ -1434,6 +1439,27 @@ static ngx_int_t +ngx_http_variable_bytes_sent(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data) +{ + u_char *p; + + p = ngx_pnalloc(r->pool, NGX_OFF_T_LEN); + if (p == NULL) { + return NGX_ERROR; + } + + v->len = ngx_sprintf(p, "%O", r->connection->sent) - p; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = p; + + return NGX_OK; +} + + +static ngx_int_t ngx_http_variable_body_bytes_sent(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data) { From mdounin at mdounin.ru Wed Oct 3 15:27:58 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 3 Oct 2012 19:27:58 +0400 Subject: [PATCH] (re-post) Add "optional_no_ca" option to ssl_verify_client to enable app-only CA chain validation In-Reply-To: References: <20120918074355.GQ40452@mdounin.ru> <20120922170953.3f8f50b1@sacrilege> <20120925043928.GE40452@mdounin.ru> <20120927200525.64de9ef3@sacrilege> Message-ID: 
<20121003152758.GX40452@mdounin.ru> Hello! On Thu, Sep 27, 2012 at 12:30:29PM -0400, Eric O'Connor wrote: > Here is a modified patch addressing issues that Maxim brought up earlier: > > diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h > index cd6d885..97da051 100644 > --- a/src/event/ngx_event_openssl.h > +++ b/src/event/ngx_event_openssl.h > @@ -141,6 +141,14 @@ ngx_int_t > ngx_ssl_get_client_verify(ngx_connection_t *c, ngx_pool_t *pool, > ngx_str_t *s); [...] Patch committed, thanks. -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Wed Oct 3 15:29:21 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 3 Oct 2012 19:29:21 +0400 Subject: Can not use $bytes_sent in module (complex values) In-Reply-To: References: <20120919000656.GA40452@mdounin.ru> <1348215438.687512098@f258.mail.ru> Message-ID: <20121003152921.GY40452@mdounin.ru> Hello! On Thu, Sep 27, 2012 at 05:30:10PM +0200, Benjamin Gr?ssing wrote: [...] > I have also had a look at the changeset 4686, adding the $status > variable. Thats exactly what I had in mind! Since $body_bytes_sent > already exists (it actually just subtracts the header length from the > total length!), adding $bytes_sent is actually quite easy (or did I > miss something?). > > I have attached a patch (based on nginx 1.3.6) that adds $bytes_sent > to ngx_http_variables. It works great for me - I'd be happy to see it > in a future nginx release. Patch committed, thanks. [...] -- Maxim Dounin http://nginx.com/support.html From eoconnor at coincident.com Wed Oct 3 16:55:15 2012 From: eoconnor at coincident.com (Eric O'Connor) Date: Wed, 3 Oct 2012 12:55:15 -0400 Subject: [PATCH] (re-post) Add "optional_no_ca" option to ssl_verify_client to enable app-only CA chain validation In-Reply-To: <20121003152758.GX40452@mdounin.ru> References: <20120918074355.GQ40452@mdounin.ru> <20120922170953.3f8f50b1@sacrilege> <20120925043928.GE40452@mdounin.ru> <20120927200525.64de9ef3@sacrilege> <20121003152758.GX40452@mdounin.ru> Message-ID: Great! Here is a short [English] documentation patch to match. Unfortunately, I do not speak Russian. ????????. Index: xml/en/docs/http/ngx_http_ssl_module.xml =================================================================== --- xml/en/docs/http/ngx_http_ssl_module.xml (revision 701) +++ xml/en/docs/http/ngx_http_ssl_module.xml (working copy) @@ -481,7 +481,7 @@ on | off | - optional + optional | optional_no_ca off http server @@ -490,6 +490,10 @@ Enables the client certificate verification. The optional parameter (0.8.7+) requests the client certificate and verifies it if it was present. +The optional_no_ca parameter (1.3.7) requests the client +certificate but performs no certificate chain verification. This is intended +to be used with a directive to +pass the $ssl_client_cert variable to a server that performs verification. The result of verification is stored in the $ssl_client_verify variable. On Wed, Oct 3, 2012 at 11:27 AM, Maxim Dounin wrote: > Hello! > > On Thu, Sep 27, 2012 at 12:30:29PM -0400, Eric O'Connor wrote: > >> Here is a modified patch addressing issues that Maxim brought up earlier: >> >> diff --git a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h >> index cd6d885..97da051 100644 >> --- a/src/event/ngx_event_openssl.h >> +++ b/src/event/ngx_event_openssl.h >> @@ -141,6 +141,14 @@ ngx_int_t >> ngx_ssl_get_client_verify(ngx_connection_t *c, ngx_pool_t *pool, >> ngx_str_t *s); > > [...] > > Patch committed, thanks. 
> > -- > Maxim Dounin > http://nginx.com/support.html From yjkim2040 at gmail.com Thu Oct 4 05:13:17 2012 From: yjkim2040 at gmail.com (=?EUC-KR?B?sei/tcH4?=) Date: Thu, 4 Oct 2012 14:13:17 +0900 Subject: nginx module memory problem Message-ID: hi, i am web server(nginx) module developer. my nginx modules has a major problem. maybe... this problem is seem to nginx core bug. i hope this is my module problem. Because, i wish the problem to be settled soon. *my module function:* large post data logging from specified file(nginx.conf setting : /home1/test_access.log) *problem:* constant use of memory(nginx-1.2.2 , same situation) [image: ?? ??? 1] *nginx environment:* ./nginx -V nginx version: nginx/1.0.14 built by gcc 4.1.2 20080704 (Red Hat 4.1.2-44) configure arguments: --prefix=/home1/apps/nginx --with-http_gzip_static_module --with-http_stub_status_module --add-module=/home1/apps/nginx-1.0.14/modules*/testmodule* ** *server environment:* uname -a Linux test.server 2.6.18-164.el5 #1 SMP Thu Sep 3 03:28:30 EDT 2009 x86_64 x86_64 x86_64 GNU/Linux */etc/redhat-release:* CentOS release 5.3 (Final) *nginx.conf:* worker_processes 2; worker_cpu_affinity 01 10; worker_rlimit_core 500M; working_directory /home1/cores/; worker_rlimit_nofile 15000; ... events { worker_connections 15000; accept_mutex on; } ... http { client_body_in_single_buffer on; client_body_in_file_only off; client_body_buffer_size 10m; client_max_body_size 10m; client_header_buffer_size 10m; server { listen 80; server_name test.test.com; location = /test { testaccess_log /home1/test_access.log; ... } ... } } ... * my module source code :* static int ngx_testmodule_log_write(ngx_http_request_t *r, char * content, unsigned int content_size) { this function is file writting... } void ngx_testmodule_post_read_request_body(ngx_http_request_t *r) { ngx_int_t totalreadsize; ngx_int_t movepos; ngx_chain_t * cl=NULL; ngx_chain_t * cl2=NULL; ngx_buf_t * buf=NULL; char * container=NULL; ngx_http_testmodule_conf_t * pConfig = NULL; int split_returnvalue; pConfig = ngx_http_get_module_loc_conf(r, ngx_http_testmodule_module); if(r->request_body == NULL || r->request_body->bufs == NULL || r->request_body->bufs->buf == NULL) { ngx_http_finalize_request(r, NGX_DONE); return; } cl = r->request_body->bufs; if(cl->next == NULL) { buf = cl->buf; totalreadsize = buf->last-buf->pos; if(totalreadsize > 0) { container = (char *)ngx_pcalloc(r->pool, totalreadsize+1); if(container == NULL) { ngx_http_finalize_request(r, NGX_DONE); return; } memcpy(container, (char *)buf->pos, buf->last-buf->pos); *(container+(buf->last-buf->pos)) = '\0'; } else { ngx_http_finalize_request(r, NGX_DONE); return; } } else { fprintf(stderr, "=====MULTI BUFFER=====\n"); totalreadsize = 0; cl2 = cl; while(cl2) { totalreadsize += cl2->buf->last - cl2->buf->pos; cl2 = cl2->next; } if(totalreadsize > 0) { container = (char *)ngx_pcalloc(r->pool, totalreadsize+1); if(container == NULL) { ngx_http_finalize_request(r, NGX_DONE); return; } movepos = 0; while(cl) { memcpy(container + movepos, (char *)cl->buf->pos, cl->buf->last - cl->buf->pos); movepos += (cl->buf->last - cl->buf->pos); cl = cl->next; } *(container + movepos) = '\0'; } else { ngx_http_finalize_request(r, NGX_DONE); return; } if(totalreadsize != movepos) { ngx_http_finalize_request(r, NGX_DONE); return; } } split_returnvalue = ngx_testmodule_log_write(r, container, totalreadsize); ngx_http_finalize_request(r, NGX_DONE); return; } ngx_http_testmodule_handler(ngx_http_request_t *r) { if (!(r->method & NGX_HTTP_POST)) { 
return NGX_HTTP_NOT_ALLOWED; } else if(!pConfig->enable) { return NGX_DECLINED; } rc_post = ngx_http_read_client_request_body(r, ngx_testmodule_post_read_request_body); ngx_testmodule_resheader(r); ... return ngx_http_send_response(r, NGX_HTTP_OK, &ngx_http_text_type, &cv); } i have tested my nginx module. but... i don't know.. difficult problem.. i am turning to you for help. -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: memory.jpg Type: image/jpeg Size: 21406 bytes Desc: not available URL: From wangtf418 at gmail.com Thu Oct 4 10:42:42 2012 From: wangtf418 at gmail.com (Wang Tiefeng) Date: Thu, 4 Oct 2012 18:42:42 +0800 Subject: bug report for nginx version: nginx/1.3.6 Message-ID: Recently?I start to read nginx source code. I chose nginx/1.3.6 a relatively new version? When I read file ngx_log.c, the function ngx_log_errno() confused me . There may be some bugs in the following codes : 238 if (buf > last - 50) { 239 240 /* leave a space for an error code */ 241 242 buf = last - 50; 243 *buf++ = '.'; 244 *buf++ = '.'; 245 *buf++ = '.'; 246 } Althoug?I am not sure about my judgment?valgrind reports invalid write on line 243. Expect reply E-mail. -------------- next part -------------- An HTML attachment was scrubbed... URL: From pass86 at gmail.com Thu Oct 4 14:30:28 2012 From: pass86 at gmail.com (Pass86) Date: Thu, 4 Oct 2012 22:30:28 +0800 Subject: nginx module memory problem In-Reply-To: References: Message-ID: <91F61CE3-7DAF-413F-91CB-2DA110F65649@gmail.com> maybe pool ? ???? iPhone ? 2012-10-4?13:13???? ??? > hi, i am web server(nginx) module developer. > my nginx modules has a major problem. > maybe... this problem is seem to nginx core bug. > i hope this is my module problem. Because, i wish the problem to be settled soon. > > my module function: > large post data logging from specified file(nginx.conf setting : /home1/test_access.log) > > problem: > constant use of memory(nginx-1.2.2 , same situation) > > > > nginx environment: > ./nginx -V > nginx version: nginx/1.0.14 > built by gcc 4.1.2 20080704 (Red Hat 4.1.2-44) > configure arguments: --prefix=/home1/apps/nginx --with-http_gzip_static_module --with-http_stub_status_module --add-module=/home1/apps/nginx-1.0.14/modules/testmodule > > server environment: > uname -a > Linux test.server 2.6.18-164.el5 #1 SMP Thu Sep 3 03:28:30 EDT 2009 x86_64 x86_64 x86_64 GNU/Linux > /etc/redhat-release: > CentOS release 5.3 (Final) > nginx.conf: > worker_processes 2; > worker_cpu_affinity 01 10; > worker_rlimit_core 500M; > working_directory /home1/cores/; > worker_rlimit_nofile 15000; > ... > events { > worker_connections 15000; > accept_mutex on; > } > ... > http { > client_body_in_single_buffer on; > client_body_in_file_only off; > client_body_buffer_size 10m; > client_max_body_size 10m; > client_header_buffer_size 10m; > server { > listen 80; > server_name test.test.com; > location = /test { > testaccess_log /home1/test_access.log; > ... > } > ... > } > } > ... > > my module source code : > static int ngx_testmodule_log_write(ngx_http_request_t *r, char * content, unsigned int content_size) > { > this function is file writting... 
> } > > void > ngx_testmodule_post_read_request_body(ngx_http_request_t *r) > { > ngx_int_t totalreadsize; > ngx_int_t movepos; > > ngx_chain_t * cl=NULL; > ngx_chain_t * cl2=NULL; > ngx_buf_t * buf=NULL; > char * container=NULL; > ngx_http_testmodule_conf_t * pConfig = NULL; > int split_returnvalue; > pConfig = ngx_http_get_module_loc_conf(r, ngx_http_testmodule_module); > > if(r->request_body == NULL || r->request_body->bufs == NULL || r->request_body->bufs->buf == NULL) > { > ngx_http_finalize_request(r, NGX_DONE); > return; > } > > cl = r->request_body->bufs; > if(cl->next == NULL) > { > buf = cl->buf; > totalreadsize = buf->last-buf->pos; > if(totalreadsize > 0) > { > container = (char *)ngx_pcalloc(r->pool, totalreadsize+1); > if(container == NULL) > { > ngx_http_finalize_request(r, NGX_DONE); > return; > } > memcpy(container, (char *)buf->pos, buf->last-buf->pos); > *(container+(buf->last-buf->pos)) = '\0'; > } > else > { > ngx_http_finalize_request(r, NGX_DONE); > return; > } > } > else > { > fprintf(stderr, "=====MULTI BUFFER=====\n"); > totalreadsize = 0; > cl2 = cl; > while(cl2) > { > totalreadsize += cl2->buf->last - cl2->buf->pos; > cl2 = cl2->next; > } > if(totalreadsize > 0) > { > container = (char *)ngx_pcalloc(r->pool, totalreadsize+1); > if(container == NULL) > { > ngx_http_finalize_request(r, NGX_DONE); > return; > } > > movepos = 0; > while(cl) > { > memcpy(container + movepos, (char *)cl->buf->pos, cl->buf->last - cl->buf->pos); > movepos += (cl->buf->last - cl->buf->pos); > cl = cl->next; > } > *(container + movepos) = '\0'; > } > else > { > ngx_http_finalize_request(r, NGX_DONE); > return; > } > > if(totalreadsize != movepos) > { > ngx_http_finalize_request(r, NGX_DONE); > return; > } > } > > split_returnvalue = ngx_testmodule_log_write(r, container, totalreadsize); > ngx_http_finalize_request(r, NGX_DONE); > return; > > } > > ngx_http_testmodule_handler(ngx_http_request_t *r) > { > if (!(r->method & NGX_HTTP_POST)) { > return NGX_HTTP_NOT_ALLOWED; > } > else if(!pConfig->enable) { > return NGX_DECLINED; > } > > rc_post = ngx_http_read_client_request_body(r, ngx_testmodule_post_read_request_body); > > ngx_testmodule_resheader(r); > ... > return ngx_http_send_response(r, NGX_HTTP_OK, &ngx_http_text_type, &cv); > } > > > i have tested my nginx module. but... i don't know.. difficult problem.. > i am turning to you for help. > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Oct 4 15:08:10 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 4 Oct 2012 19:08:10 +0400 Subject: bug report for nginx version: nginx/1.3.6 In-Reply-To: References: Message-ID: <20121004150810.GC40452@mdounin.ru> Hello! On Thu, Oct 04, 2012 at 06:42:42PM +0800, Wang Tiefeng wrote: > Recently?I start to read nginx source code. > I chose nginx/1.3.6 a relatively new version? > > When I read file ngx_log.c, the function ngx_log_errno() confused me . > > There may be some bugs in the following codes : > 238 if (buf > last - 50) { > 239 > 240 /* leave a space for an error code */ > 241 > 242 buf = last - 50; > 243 *buf++ = '.'; > 244 *buf++ = '.'; > 245 *buf++ = '.'; > 246 } > > Althoug?I am not sure about my judgment?valgrind reports invalid write on > line 243. See no problem here. 
The code depends on the fact that the buffer used for printing errors is at least 50 bytes long, and the "last" pointer marks it's end, but it looks perfectly safe as long as ngx_log_errno() is used correctly. -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Thu Oct 4 16:20:33 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 4 Oct 2012 20:20:33 +0400 Subject: nginx module memory problem In-Reply-To: References: Message-ID: <20121004162032.GG40452@mdounin.ru> Hello! On Thu, Oct 04, 2012 at 02:13:17PM +0900, ??? wrote: > hi, i am web server(nginx) module developer. > my nginx modules has a major problem. > maybe... this problem is seem to nginx core bug. > i hope this is my module problem. Because, i wish the problem to be settled > soon. > > *my module function:* > large post data logging from specified file(nginx.conf setting : > /home1/test_access.log) > *problem:* > constant use of memory(nginx-1.2.2 , same situation) > [image: ?? ??? 1] [...] > rc_post = ngx_http_read_client_request_body(r, > ngx_testmodule_post_read_request_body); > > ngx_testmodule_resheader(r); > ... > return ngx_http_send_response(r, NGX_HTTP_OK, &ngx_http_text_type, &cv); > } > > > i have tested my nginx module. but... i don't know.. difficult problem.. > i am turning to you for help. One obvious problem in your code is that you use ngx_http_read_client_request_body() incorrectly. After a call to ngx_http_read_client_request_body() you _must_ follow the pattern rc = ngx_http_read_client_request_body(r, ...); if (rc >= NGX_HTTP_SPECIAL_RESPONSE) { return rc; } return NGX_DONE; See e.g. ngx_http_proxy_module.c for an example. Failing to do so will likely result in request hangs/socket leaks. -- Maxim Dounin http://nginx.com/support.html From wangtf418 at gmail.com Fri Oct 5 08:05:42 2012 From: wangtf418 at gmail.com (Wang Tiefeng) Date: Fri, 5 Oct 2012 16:05:42 +0800 Subject: bug report for nginx version: nginx/1.3.6 In-Reply-To: <20121004150810.GC40452@mdounin.ru> References: <20121004150810.GC40452@mdounin.ru> Message-ID: Hi! When buf > last - 50? buf (= last - 50) is an invalid memory address. And the follow lines write on this invalid memoy. AIthough, bufs for log in nginx are all bigger than 50, the function does not depend on this. At least , I think this funciton is not robust? 2012/10/4 Maxim Dounin > Hello! > > On Thu, Oct 04, 2012 at 06:42:42PM +0800, Wang Tiefeng wrote: > > > Recently?I start to read nginx source code. > > I chose nginx/1.3.6 a relatively new version? > > > > When I read file ngx_log.c, the function ngx_log_errno() confused me . > > > > There may be some bugs in the following codes : > > 238 if (buf > last - 50) { > > 239 > > 240 /* leave a space for an error code */ > > 241 > > 242 buf = last - 50; > > 243 *buf++ = '.'; > > 244 *buf++ = '.'; > > 245 *buf++ = '.'; > > 246 } > > > > Althoug?I am not sure about my judgment?valgrind reports invalid write on > > line 243. > > See no problem here. The code depends on the fact that the buffer > used for printing errors is at least 50 bytes long, and the "last" > pointer marks it's end, but it looks perfectly safe as long as > ngx_log_errno() is used correctly. > > -- > Maxim Dounin > http://nginx.com/support.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From mdounin at mdounin.ru Fri Oct 5 10:09:45 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 5 Oct 2012 14:09:45 +0400 Subject: bug report for nginx version: nginx/1.3.6 In-Reply-To: References: <20121004150810.GC40452@mdounin.ru> Message-ID: <20121005100945.GI40452@mdounin.ru> Hello! On Fri, Oct 05, 2012 at 04:05:42PM +0800, Wang Tiefeng wrote: > Hi! > When buf > last - 50? buf (= last - 50) is an invalid memory address. > And the follow lines write on this invalid memoy. AIthough, bufs for log > in nginx are all bigger than 50, the function does not depend on this. At > least , I think this funciton is not robust? As I already said, the code depends on the buffer being at least 50 bytes long. Much like the other fact that the "last" pointer should mark buffer end. As long as the function is used correctly - there is no problem. If it's used incorrectly (i.e. with buffer less than 50 bytes long) - there will be a problem, but not in a function itself, but in the code which uses it incorrectly. Maxim Dounin > > > 2012/10/4 Maxim Dounin > > > Hello! > > > > On Thu, Oct 04, 2012 at 06:42:42PM +0800, Wang Tiefeng wrote: > > > > > Recently?I start to read nginx source code. > > > I chose nginx/1.3.6 a relatively new version? > > > > > > When I read file ngx_log.c, the function ngx_log_errno() confused me . > > > > > > There may be some bugs in the following codes : > > > 238 if (buf > last - 50) { > > > 239 > > > 240 /* leave a space for an error code */ > > > 241 > > > 242 buf = last - 50; > > > 243 *buf++ = '.'; > > > 244 *buf++ = '.'; > > > 245 *buf++ = '.'; > > > 246 } > > > > > > Althoug?I am not sure about my judgment?valgrind reports invalid write on > > > line 243. > > > > See no problem here. The code depends on the fact that the buffer > > used for printing errors is at least 50 bytes long, and the "last" > > pointer marks it's end, but it looks perfectly safe as long as > > ngx_log_errno() is used correctly. > > > > -- > > Maxim Dounin > > http://nginx.com/support.html > > > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Fri Oct 5 11:09:15 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Fri, 5 Oct 2012 11:09:15 +0000 Subject: [nginx] svn commit: r4888 - trunk/src/http/modules Message-ID: <20121005110915.245EE3F9C4B@mail.nginx.com> Author: mdounin Date: 2012-10-05 11:09:14 +0000 (Fri, 05 Oct 2012) New Revision: 4888 URL: http://trac.nginx.org/nginx/changeset/4888/nginx Log: OCSP stapling: properly check if there is ssl.ctx. This fixes segfault if stapling was enabled in a server without a certificate configured (and hence no ssl.ctx). 
Modified: trunk/src/http/modules/ngx_http_ssl_module.c Modified: trunk/src/http/modules/ngx_http_ssl_module.c =================================================================== --- trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-03 15:25:36 UTC (rev 4887) +++ trunk/src/http/modules/ngx_http_ssl_module.c 2012-10-05 11:09:14 UTC (rev 4888) @@ -737,7 +737,7 @@ sscf = cscfp[s]->ctx->srv_conf[ngx_http_ssl_module.ctx_index]; - if (!sscf->stapling) { + if (sscf->ssl.ctx == NULL || !sscf->stapling) { continue; } From vshebordaev at mail.ru Fri Oct 5 11:30:02 2012 From: vshebordaev at mail.ru (Vladimir Shebordaev) Date: Fri, 05 Oct 2012 15:30:02 +0400 Subject: bug report for nginx version: nginx/1.3.6 In-Reply-To: References: <20121004150810.GC40452@mdounin.ru> Message-ID: <506EC4BA.4020709@mail.ru> On 05.10.2012 12:05, Wang Tiefeng wrote: > Hi! > When buf > last - 50? buf (= last - 50) is an invalid > memory address. And the follow lines write on this invalid memoy. > AIthough, bufs for log in nginx are all bigger than 50, the > function does not depend on this. At least , I think this > funciton is not robust? > Usually, nginx'es "last" pointers indicate the memory location right after the end of a buffer, so you'd better look at the entire context to take the code semantics into account instead of being that overcautious about every code snippet like valgrind do. Hope it helps. Regards, Vladimir > 2012/10/4 Maxim Dounin > > > Hello! > > On Thu, Oct 04, 2012 at 06:42:42PM +0800, Wang Tiefeng wrote: > > > Recently?I start to read nginx source code. > > I chose nginx/1.3.6 a relatively new version? > > > > When I read file ngx_log.c, the function ngx_log_errno() > confused me . > > > > There may be some bugs in the following codes : > > 238 if (buf > last - 50) { > > 239 > > 240 /* leave a space for an error code */ > > 241 > > 242 buf = last - 50; > > 243 *buf++ = '.'; > > 244 *buf++ = '.'; > > 245 *buf++ = '.'; > > 246 } > > > > Althoug?I am not sure about my judgment?valgrind reports > invalid write on > > line 243. > > See no problem here. The code depends on the fact that the > buffer > used for printing errors is at least 50 bytes long, and the > "last" > pointer marks it's end, but it looks perfectly safe as long as > ngx_log_errno() is used correctly. > > -- > Maxim Dounin > http://nginx.com/support.html > From yaoweibin at gmail.com Mon Oct 8 03:44:42 2012 From: yaoweibin at gmail.com (=?GB2312?B?0qbOsLHz?=) Date: Mon, 8 Oct 2012 11:44:42 +0800 Subject: What's the module status of rewrite module? Message-ID: Hi, gurus, We are translating the docs of this page: http://nginx.org/en/docs/install.html. With the option of '--without-http_rewrite_module ', it said "The module is experimental ? its directives may change in the future." It means the module will be changed in the future? I need some comments. Thanks. -- Weibin Yao Developer @ Server Platform Team of Taobao From ru at nginx.com Mon Oct 8 14:49:31 2012 From: ru at nginx.com (Ruslan Ermilov) Date: Mon, 8 Oct 2012 18:49:31 +0400 Subject: [PATCH] (re-post) Add "optional_no_ca" option to ssl_verify_client to enable app-only CA chain validation In-Reply-To: References: <20120918074355.GQ40452@mdounin.ru> <20120922170953.3f8f50b1@sacrilege> <20120925043928.GE40452@mdounin.ru> <20120927200525.64de9ef3@sacrilege> <20121003152758.GX40452@mdounin.ru> Message-ID: <20121008144931.GI9992@lo0.su> On Wed, Oct 03, 2012 at 12:55:15PM -0400, Eric O'Connor wrote: > Great! > > Here is a short [English] documentation patch to match. 
Unfortunately, > I do not speak Russian. ????????. Here's the cleaned up version: %%% Index: ngx_http_ssl_module.xml =================================================================== --- ngx_http_ssl_module.xml (revision 712) +++ ngx_http_ssl_module.xml (working copy) @@ -10,7 +10,7 @@ + rev="3">
@@ -481,7 +481,7 @@ on | off | - optional + optional | optional_no_ca off http server @@ -490,6 +490,12 @@ Enables the client certificate verification. The optional parameter (0.8.7+) requests the client certificate and verifies it if it was present. +The optional_no_ca parameter (1.3.7) requests the client +certificate but performs no certificate chain verification. +This is intended to be used with a + directive +to pass the $ssl_client_cert variable to a server that performs +verification. The result of verification is stored in the $ssl_client_verify variable. %%% From crk_world at yahoo.com.cn Tue Oct 9 03:00:49 2012 From: crk_world at yahoo.com.cn (chen cw) Date: Tue, 9 Oct 2012 11:00:49 +0800 Subject: nginx module memory problem In-Reply-To: <20121004162032.GG40452@mdounin.ru> References: <20121004162032.GG40452@mdounin.ru> Message-ID: It is suspicious that your problem is caused by ngx_pcalloc(). When you allocated a large size of memory, nginx does not allocate it in pool, instead, it uses standard malloc() in C, because the size is larger than the pool size. When a block is allocated by malloc(), it may not be returned to OS even if it has been freed by free(). Why? GCC memory management layer can cache the blocks in process for future allocation. I don't understand why you must copy the input chain of client. In fact, if you can reap the buffers-chain together, you can directly write each part into log file separately. Moreover, first create a hole in file, then fill the log into a hole will get rid of interlace among different log records. On Fri, Oct 5, 2012 at 12:20 AM, Maxim Dounin wrote: > Hello! > > On Thu, Oct 04, 2012 at 02:13:17PM +0900, ??? wrote: > > > hi, i am web server(nginx) module developer. > > my nginx modules has a major problem. > > maybe... this problem is seem to nginx core bug. > > i hope this is my module problem. Because, i wish the problem to be > settled > > soon. > > > > *my module function:* > > large post data logging from specified file(nginx.conf setting : > > /home1/test_access.log) > > *problem:* > > constant use of memory(nginx-1.2.2 , same situation) > > [image: ?? ??? 1] > > [...] > > > rc_post = ngx_http_read_client_request_body(r, > > ngx_testmodule_post_read_request_body); > > > > ngx_testmodule_resheader(r); > > ... > > return ngx_http_send_response(r, NGX_HTTP_OK, &ngx_http_text_type, &cv); > > } > > > > > > i have tested my nginx module. but... i don't know.. difficult problem.. > > i am turning to you for help. > > One obvious problem in your code is that you use > ngx_http_read_client_request_body() incorrectly. > > After a call to ngx_http_read_client_request_body() you _must_ > follow the pattern > > rc = ngx_http_read_client_request_body(r, ...); > > if (rc >= NGX_HTTP_SPECIAL_RESPONSE) { > return rc; > } > > return NGX_DONE; > > See e.g. ngx_http_proxy_module.c for an example. Failing to do so > will likely result in request hangs/socket leaks. > > -- > Maxim Dounin > http://nginx.com/support.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- -- Charles Chen Software Engineer Server Platforms Team at Taobao.com -------------- next part -------------- An HTML attachment was scrubbed... 
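The suggestion above -- write each part of the body chain to the log file directly instead of first copying everything into one large ngx_pcalloc()'d block -- could look roughly like the sketch below. The function name is made up, error handling and synchronisation between workers are omitted, and it simply skips any part of the body that was spilled to a temporary file:

    /* Hypothetical replacement for copying the body into "container":
     * walk r->request_body->bufs and write each in-memory part as-is. */
    static ngx_int_t
    ngx_testmodule_log_chain(ngx_http_request_t *r, ngx_fd_t fd)
    {
        ssize_t       n;
        ngx_chain_t  *cl;

        for (cl = r->request_body->bufs; cl; cl = cl->next) {

            if (!ngx_buf_in_memory(cl->buf)) {
                /* this part was written to a temporary file; a real module
                 * would read it back (or refuse the request) here */
                continue;
            }

            n = ngx_write_fd(fd, cl->buf->pos, cl->buf->last - cl->buf->pos);

            if (n == -1) {
                ngx_log_error(NGX_LOG_ERR, r->connection->log, ngx_errno,
                              ngx_write_fd_n " failed");
                return NGX_ERROR;
            }
        }

        return NGX_OK;
    }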
URL: From dirk.feytons at gmail.com Tue Oct 9 07:54:09 2012 From: dirk.feytons at gmail.com (Dirk Feytons) Date: Tue, 9 Oct 2012 09:54:09 +0200 Subject: [PATCH] optional setproctitle functionality Message-ID: Hi, I bumped into an interesting issue. On a Linux-based embedded platform (using a rather old uClibc toolchain) the setproctitle code interferes with the dynamic linker when LD_LIBRARY_PATH is used and the code tries to dlopen() a library. Apparently the linker doesn't refetch the LD_LIBRARY_PATH variable but uses a cached pointer. However, the setproctitle code has messed with the string data leading to a LD_LIBRARY_PATH that has become invalid or empty. Exact scenario: - nginx with ngx_lua module - My Lua code loads a Lua C module. - The Lua VM uses dlopen() to load that module. - The dynamic linker tries to load the dependencies. - For those dependencies to be found I've set LD_LIBRARY_PATH but it gets lost as described above. - The dynamic linker fails to find a dependency and the Lua VM reports that the module could not be loaded with the error "File not found". So I've created a patch (against 1.3.7) to add a configure option --without-setproctitle that allows you to disable the setproctitle functionality. It might be considered overkill to add a configure option for this but I'll leave that up to you to decide. I've also only tested on Linux; I'm not sure everything still works for BSD. Regards, Dirk F. -------------- next part -------------- A non-text attachment was scrubbed... Name: setproctitle.patch Type: application/octet-stream Size: 4036 bytes Desc: not available URL: From wandenberg at gmail.com Tue Oct 9 20:46:04 2012 From: wandenberg at gmail.com (Wandenberg Peixoto) Date: Tue, 9 Oct 2012 17:46:04 -0300 Subject: Help with cache manager Message-ID: Hi, I'm having trouble with the cache manager, it is not controlling the max_size of the cache. I have 22 workers on a 24 core server, 3 cache paths, one with 600g and other two with 200g of max_size. The cache path with 600g usually pass this value and reach 640g, as example. The files used on this server has no more than 300mb each. The other 200g cache paths don't have this problem. Do you know any problem about a limit on max_size? Any suggestion where I can investigate the problem? The configuration is like this proxy_cache_path /path1 levels=1:2 keys_zone=c1:1024m inactive=10d max_size=600g; proxy_cache_path /path2 levels=1:2 keys_zone=c2:1024m inactive=10d max_size=200g; proxy_cache_path /path3 levels=1:2 keys_zone=c3:1024m inactive=10d max_size=200g; When I restart the server the cache goes to the configured size. Nginx: 1.2.0 with cache_purge module Regards, Wandenberg -------------- next part -------------- An HTML attachment was scrubbed... URL: From ru at nginx.com Thu Oct 11 10:28:59 2012 From: ru at nginx.com (Ruslan Ermilov) Date: Thu, 11 Oct 2012 14:28:59 +0400 Subject: Update the Chinese documentation with the nginx.org website. In-Reply-To: <50630E3B.1080503@nginx.com> References: <50603FA6.6090500@nginx.com> <50607332.4020106@nginx.com> <50630E3B.1080503@nginx.com> Message-ID: <20121011102859.GA92325@lo0.su> On Wed, Sep 26, 2012 at 06:16:27PM +0400, Maxim Konovalov wrote: > Hi Weibin, > > On 9/26/12 5:48 PM, ??? wrote: > > Hi Maxim, > > > > The attachment is the updated patch as your suggestion. > > > > After discussion with the translation, all the translators agreed to > > remove their copyright with the translated documentation, just keep > > the translator attribute. 
There is no need for the confirmation with > > the license. It's also easy for your team to publish these translated > > document. > > > > If there is any problem, let us know. > > > The patch looks good for me. I'll ask Ruslan to review and commit it. I've applied some minor whitespace corrections, moved "translator" between "lang" and "rev" so diffs between "en" and "cn" are easier to read, added missing copyright blocks, and finally committed it. Enjoy! From sse.auburn.study at gmail.com Sun Oct 14 06:47:33 2012 From: sse.auburn.study at gmail.com (Auburn Study) Date: Sun, 14 Oct 2012 01:47:33 -0500 Subject: Buffer Overflow Study at Auburn University - nginx developers I would really appreciate your help! Message-ID: Hi All, I am a graduate student at Auburn University, working with Dr. Munawar Hafiz on an empirical study project to understand the software engineering practices used in companies that produce secure software. In particular, we are concentrating on how developers write code to prevent buffer overflow and integer overflow vulnerabilities. We are interested in the software development process: how you develop software, how you test and analyze programs to detect vulnerabilities, and what processes you follow to remove bugs. We are looking into automated tools that software developers use, and are expecting that there is a common insight in the security engineering process that can be reusable. We request your assistance by participating in this research study. We would greatly appreciate it if you would share your experience with us by answering the questions at the end of this email. We may send some follow up questions based on your response in future. Your response(s) will be kept confidential, and will only be aggregated with those of other reporters. Please let us know if you have any questions or concerns regarding the study. Thanks in advance for your support. Yasmeen Rawajfih Software Analysis, Transformations and Security Group Auburn University Working under the supervision of: Dr. Munawar Hafiz Assistant Professor Dept. of Computer Science and Software Engineering Auburn University Auburn, AL http://munawarhafiz.com/ Questions: (There are ten questions.) 1. How long have you been a software developer? 2. How long have you been affiliated with nginx? Were you part of the original development team for this software? 3. What is the size of the current code base? 4. Did you follow a coding standard when developing this software? Is it a standard determined by your group? 5. What did you use to manage bug reports in your software? Does it satisfy your requirements? Are there other software options that you would consider switching to? 6. Did you use any compiler options to detect integer overflow vulnerabilities? Do you think that they are useful? 7. Did you use any automated (static or dynamic analysis) tools to detect buffer overflows, integer overflows, or any other bugs? Which tools did you use? Why these tools? 8. Did you use fuzzing? Which tools did you use and why? If you wrote your own fuzzer, why did you write it yourself? Was it written from scratch or by extending some other fuzzing tools? 9. Did you have specific phases during development where you concentrated on fixing security issues? Did you have a test suite, unit tests, or regression tests? 10. Buffer overflows often result from the use of unsafe functions, such as strcpy. Does your software use those? If you use a different string library, why is it used? 
Is it an in-house library or an off-the-shelf library? Did you migrate your code to use the string library? -------------- next part -------------- An HTML attachment was scrubbed... URL: From david at gwynne.id.au Mon Oct 15 01:22:00 2012 From: david at gwynne.id.au (David Gwynne) Date: Mon, 15 Oct 2012 11:22:00 +1000 Subject: [PATCH] implement a $location variable Message-ID: <1B6DEF34-186D-4D03-B4BA-36B245EAA211@gwynne.id.au> this patch is actually from piotr sikora, and is part of coolkit already. this is just a port of it to stock nginx. this makes the part of the request uri that matches the locations name available as a variable within a block. eg if you have location /foo { } and request /foo/bar/foo, $location will have the value /foo. if you have location ^~ /(foo|bar) and request /foo/bar/foo, $location will still have /foo in it. think of it as $0 for regex locations, but it works for non-regex location names too. im running this in production now and its working great. it allows me to treat the locations name as a parameter to an applications front end controller without having to repeat the value within a location block. cheers, dlg --- src/http/ngx_http_variables.c.orig Tue Jul 3 03:41:52 2012 +++ src/http/ngx_http_variables.c Thu Aug 23 10:32:22 2012 @@ -65,6 +65,8 @@ static ngx_int_t ngx_http_variable_request_filename(ng ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_server_name(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_http_variable_location(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_request_method(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_remote_user(ngx_http_request_t *r, @@ -206,6 +208,10 @@ static ngx_http_variable_t ngx_http_core_variables[] { ngx_string("server_name"), NULL, ngx_http_variable_server_name, 0, 0, 0 }, + { ngx_string("location"), NULL, + ngx_http_variable_location, 0, + NGX_HTTP_VAR_NOCACHEABLE, 0 }, + { ngx_string("request_method"), NULL, ngx_http_variable_request_method, 0, NGX_HTTP_VAR_NOCACHEABLE, 0 }, @@ -1382,6 +1388,39 @@ ngx_http_variable_server_name(ngx_http_request_t *r, v->no_cacheable = 0; v->not_found = 0; v->data = cscf->server_name.data; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_variable_location(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data) +{ + ngx_http_core_loc_conf_t *clcf; + ngx_int_t rc; + int captures[3]; + + clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module); + + if (clcf->regex) { + rc = ngx_regex_exec(clcf->regex->regex, &r->uri, captures, 3); + + if (rc == NGX_REGEX_NO_MATCHED) { + return NGX_ERROR; + } + + v->data = r->uri.data + captures[0]; + v->len = captures[1] - captures[0]; + + } else { + v->data = clcf->name.data; + v->len = clcf->name.len; + } + + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; return NGX_OK; } From mdounin at mdounin.ru Mon Oct 15 14:53:49 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 15 Oct 2012 18:53:49 +0400 Subject: [PATCH] implement a $location variable In-Reply-To: <1B6DEF34-186D-4D03-B4BA-36B245EAA211@gwynne.id.au> References: <1B6DEF34-186D-4D03-B4BA-36B245EAA211@gwynne.id.au> Message-ID: <20121015145349.GG40452@mdounin.ru> Hello! On Mon, Oct 15, 2012 at 11:22:00AM +1000, David Gwynne wrote: > this patch is actually from piotr sikora, and is part of coolkit > already. this is just a port of it to stock nginx. 
> > this makes the part of the request uri that matches the > locations name available as a variable within a block. eg if you > have location /foo { } and request /foo/bar/foo, $location will > have the value /foo. if you have location ^~ /(foo|bar) and > request /foo/bar/foo, $location will still have /foo in it. > think of it as $0 for regex locations, but it works for > non-regex location names too. > > im running this in production now and its working great. it > allows me to treat the locations name as a parameter to an > applications front end controller without having to repeat the > value within a location block. Behaviour within regexp locations is counterintuitive, and contradicts to what the variable does in a normal location. > --- src/http/ngx_http_variables.c.orig Tue Jul 3 03:41:52 2012 > +++ src/http/ngx_http_variables.c Thu Aug 23 10:32:22 2012 > @@ -65,6 +65,8 @@ static ngx_int_t ngx_http_variable_request_filename(ng > ngx_http_variable_value_t *v, uintptr_t data); > static ngx_int_t ngx_http_variable_server_name(ngx_http_request_t *r, > ngx_http_variable_value_t *v, uintptr_t data); > +static ngx_int_t ngx_http_variable_location(ngx_http_request_t *r, > + ngx_http_variable_value_t *v, uintptr_t data); Just a side note: your mail client corrupts patches. [...] > + if (clcf->regex) { > + rc = ngx_regex_exec(clcf->regex->regex, &r->uri, captures, 3); > + > + if (rc == NGX_REGEX_NO_MATCHED) { > + return NGX_ERROR; > + } > + > + v->data = r->uri.data + captures[0]; > + v->len = captures[1] - captures[0]; Note that result is actually unstable in the regexp location case, as the code re-executes regular expression against a possibly changed URI. [...] -- Maxim Dounin http://nginx.com/support.html From flygoast at 126.com Mon Oct 15 15:30:11 2012 From: flygoast at 126.com (=?GBK?B?t+u5yw==?=) Date: Mon, 15 Oct 2012 23:30:11 +0800 (CST) Subject: init_master called somewhere? Message-ID: <48419c6b.19be1c.13a650bef19.Coremail.flygoast@126.com> Hi, guys. I'm a new learner of nginx. I havn't found "init_master" pointer in ngx_module_t structure called in anywhere. Is it useless or of some other reasons? Thank you. -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Mon Oct 15 15:54:55 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Mon, 15 Oct 2012 19:54:55 +0400 Subject: init_master called somewhere? In-Reply-To: <48419c6b.19be1c.13a650bef19.Coremail.flygoast@126.com> References: <48419c6b.19be1c.13a650bef19.Coremail.flygoast@126.com> Message-ID: <20121015155455.GI40452@mdounin.ru> Hello! On Mon, Oct 15, 2012 at 11:30:11PM +0800, ?? wrote: > I'm a new learner of nginx. > I havn't found "init_master" pointer in ngx_module_t structure > called in anywhere. Is it useless or of some other reasons? It's a stub which isn't currently used. -- Maxim Dounin http://nginx.com/support.html From maxim at nginx.com Tue Oct 16 12:50:17 2012 From: maxim at nginx.com (Maxim Konovalov) Date: Tue, 16 Oct 2012 16:50:17 +0400 Subject: What's the module status of rewrite module? In-Reply-To: References: Message-ID: <507D5809.7010002@nginx.com> Hi, On 10/8/12 7:44 AM, ??? wrote: > Hi, gurus, > > We are translating the docs of this page: > http://nginx.org/en/docs/install.html. With the option of > '--without-http_rewrite_module ', it said "The module is experimental > ? its directives may change in the future." It means the module will > be changed in the future? I need some comments. > > Thanks. 
> I've just removed this comment from the document: http://trac.nginx.org/nginx/changeset?old_path=%2Fnginx_org&old=733&new_path=%2Fnginx_org&new=733 -- Maxim Konovalov +7 (910) 4293178 http://nginx.com/support.html From david at gwynne.id.au Tue Oct 16 13:16:00 2012 From: david at gwynne.id.au (David Gwynne) Date: Tue, 16 Oct 2012 23:16:00 +1000 Subject: [PATCH] implement a $location variable In-Reply-To: <20121015145349.GG40452@mdounin.ru> References: <1B6DEF34-186D-4D03-B4BA-36B245EAA211@gwynne.id.au> <20121015145349.GG40452@mdounin.ru> Message-ID: On 16/10/2012, at 12:53 AM, Maxim Dounin wrote: > Hello! ola, > > On Mon, Oct 15, 2012 at 11:22:00AM +1000, David Gwynne wrote: > >> this patch is actually from piotr sikora, and is part of coolkit >> already. this is just a port of it to stock nginx. >> >> this makes the part of the request uri that matches the >> locations name available as a variable within a block. eg if you >> have location /foo { } and request /foo/bar/foo, $location will >> have the value /foo. if you have location ^~ /(foo|bar) and >> request /foo/bar/foo, $location will still have /foo in it. >> think of it as $0 for regex locations, but it works for >> non-regex location names too. >> >> im running this in production now and its working great. it >> allows me to treat the locations name as a parameter to an >> applications front end controller without having to repeat the >> value within a location block. > > Behaviour within regexp locations is counterintuitive, and > contradicts to what the variable does in a normal location. > >> --- src/http/ngx_http_variables.c.orig Tue Jul 3 03:41:52 2012 >> +++ src/http/ngx_http_variables.c Thu Aug 23 10:32:22 2012 >> @@ -65,6 +65,8 @@ static ngx_int_t ngx_http_variable_request_filename(ng >> ngx_http_variable_value_t *v, uintptr_t data); >> static ngx_int_t ngx_http_variable_server_name(ngx_http_request_t *r, >> ngx_http_variable_value_t *v, uintptr_t data); >> +static ngx_int_t ngx_http_variable_location(ngx_http_request_t *r, >> + ngx_http_variable_value_t *v, uintptr_t data); > > Just a side note: your mail client corrupts patches. i usually use mutt, sorry on this one :/ > [...] > >> + if (clcf->regex) { >> + rc = ngx_regex_exec(clcf->regex->regex, &r->uri, captures, 3); >> + >> + if (rc == NGX_REGEX_NO_MATCHED) { >> + return NGX_ERROR; >> + } >> + >> + v->data = r->uri.data + captures[0]; >> + v->len = captures[1] - captures[0]; > > Note that result is actually unstable in the regexp location case, > as the code re-executes regular expression against a possibly > changed URI. is there a way to keep the equivalent of $0 around after the original regex is run? dlg > [...] > > -- > Maxim Dounin > http://nginx.com/support.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Tue Oct 16 14:46:21 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 16 Oct 2012 18:46:21 +0400 Subject: [PATCH] implement a $location variable In-Reply-To: References: <1B6DEF34-186D-4D03-B4BA-36B245EAA211@gwynne.id.au> <20121015145349.GG40452@mdounin.ru> Message-ID: <20121016144621.GT40452@mdounin.ru> Hello! On Tue, Oct 16, 2012 at 11:16:00PM +1000, David Gwynne wrote: [...] 
> >> + if (clcf->regex) { > >> + rc = ngx_regex_exec(clcf->regex->regex, &r->uri, captures, 3); > >> + > >> + if (rc == NGX_REGEX_NO_MATCHED) { > >> + return NGX_ERROR; > >> + } > >> + > >> + v->data = r->uri.data + captures[0]; > >> + v->len = captures[1] - captures[0]; > > > > Note that result is actually unstable in the regexp location case, > > as the code re-executes regular expression against a possibly > > changed URI. > > is there a way to keep the equivalent of $0 around after the original regex is run? It can be easily done without any additional code, using named captures: location ~ ^(?/foo|/bar) { # here one can use $matched } -- Maxim Dounin http://nginx.com/support.html From jefftk at google.com Tue Oct 16 19:02:11 2012 From: jefftk at google.com (Jeff Kaufman) Date: Tue, 16 Oct 2012 15:02:11 -0400 Subject: determining the url of a request In-Reply-To: References: Message-ID: If I have a ngx_http_request_t, is there a built-in way to get the full url for the request? r->uri has "/path/to/page.html" but is there a way to get a full "scheme://host[:port]/path/to/page.html"? Some background: I'm working on ngx_pagespeed [1], a port of mod_pagespeed to nginx. The idea is to automatically rewrite webpages and their resources to make pages load faster. The pagespeed code needs to know what url has been requested so it can do things like turn absolute urls into relative urls. Jeff [1] https://github.com/pagespeed/ngx_pagespeed From david at gwynne.id.au Tue Oct 16 23:43:21 2012 From: david at gwynne.id.au (David Gwynne) Date: Wed, 17 Oct 2012 09:43:21 +1000 Subject: [PATCH] implement a $location variable In-Reply-To: <20121016144621.GT40452@mdounin.ru> References: <1B6DEF34-186D-4D03-B4BA-36B245EAA211@gwynne.id.au> <20121015145349.GG40452@mdounin.ru> <20121016144621.GT40452@mdounin.ru> Message-ID: On 17/10/2012, at 12:46 AM, Maxim Dounin wrote: > Hello! > > On Tue, Oct 16, 2012 at 11:16:00PM +1000, David Gwynne wrote: > > [...] > >>>> + if (clcf->regex) { >>>> + rc = ngx_regex_exec(clcf->regex->regex, &r->uri, captures, 3); >>>> + >>>> + if (rc == NGX_REGEX_NO_MATCHED) { >>>> + return NGX_ERROR; >>>> + } >>>> + >>>> + v->data = r->uri.data + captures[0]; >>>> + v->len = captures[1] - captures[0]; >>> >>> Note that result is actually unstable in the regexp location case, >>> as the code re-executes regular expression against a possibly >>> changed URI. >> >> is there a way to keep the equivalent of $0 around after the original regex is run? > > It can be easily done without any additional code, using named > captures: > > location ~ ^(?/foo|/bar) { > # here one can use $matched > } i can live with that. i would still like a $location variable though. it simplifies the creation of reusable blocks for front end controllers. i can provide an example if you want. should i respin the diff without the regex bits? dlg From mdounin at mdounin.ru Wed Oct 17 09:02:27 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 17 Oct 2012 13:02:27 +0400 Subject: determining the url of a request In-Reply-To: References: Message-ID: <20121017090227.GE40452@mdounin.ru> Hello! On Tue, Oct 16, 2012 at 03:02:11PM -0400, Jeff Kaufman wrote: > If I have a ngx_http_request_t, is there a built-in way to get the > full url for the request? r->uri has "/path/to/page.html" but is there > a way to get a full "scheme://host[:port]/path/to/page.html"? As you probably know, in most cases there is no full URL in HTTP requests. It's broken down into URI in a request line and the Host header (if present). 
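For illustration, a rough sketch of putting together "scheme://host/uri" from the request fields this reply goes on to name (r->connection->ssl, r->headers_in.server, r->unparsed_uri). It is not an existing nginx helper, and it ignores non-default ports, X-Forwarded-* headers and the fallbacks for a missing Host header discussed below:

    /* Simplified sketch, no error handling beyond the allocation check. */
    static ngx_int_t
    ngx_example_full_url(ngx_http_request_t *r, ngx_str_t *url)
    {
        u_char     *p;
        ngx_str_t   scheme, host;

    #if (NGX_HTTP_SSL)
        if (r->connection->ssl) {
            ngx_str_set(&scheme, "https");
        } else
    #endif
        {
            ngx_str_set(&scheme, "http");
        }

        /* normalized host from the Host header or an absolute request line;
         * a real module should fall back to the server name or local address */
        host = r->headers_in.server;

        url->len = scheme.len + sizeof("://") - 1 + host.len + r->unparsed_uri.len;

        url->data = ngx_pnalloc(r->pool, url->len);
        if (url->data == NULL) {
            return NGX_ERROR;
        }

        p = ngx_cpymem(url->data, scheme.data, scheme.len);
        p = ngx_cpymem(p, "://", sizeof("://") - 1);
        p = ngx_cpymem(p, host.data, host.len);
        ngx_memcpy(p, r->unparsed_uri.data, r->unparsed_uri.len);

        return NGX_OK;
    }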
The "scheme" part usually isn't sent at all, and should be determined from other factors. So the reconstruction of a full URL isn't something trivial (and not always possible). In nginx original URI as sent by a client is available as r->unparsed_uri (that is, "/path?arguments" part). Parsed URI (and probably changed due to internal redirects) is available as r->uri. Note it's unescaped and doesn't include query string, which is available separately as r->args. The Host header, if present, is available in r->headers_in->host. You may want to use r->headers_in.server though (it's normalized and includes hostname if it's sent in a request line). You probably want to fallback to local ip address or a server name if it's not present. (You may want to look at ngx_http_variable_host() in ngx_http_variables.c, and at the Location header construction in ngx_http_header_filter() in ngx_http_header_filter_module.c) The scheme may be determined from r->connection->ssl presense, see e.g. ngx_http_variable_scheme() in ngx_http_variables.c. -- Maxim Dounin http://nginx.com/support.html From witekfl at gazeta.pl Wed Oct 17 10:24:31 2012 From: witekfl at gazeta.pl (witekfl) Date: Wed, 17 Oct 2012 12:24:31 +0200 Subject: How to insert something in the middle of the buf? Message-ID: Hi, there is the code: http://rkd.republika.pl/ngx_http_layout_filter_module.c I want to insert the header before and the footer before . If there is no add the header as the first chunk. If there is no add the footer as the last chunk. The problem is that, the number of waiting growing rapidly. What is wrong with this code? Could you provide a snippet how to insert something in the middle of existing chunk or how to do that right in this situation (reading from file)? And why the number of waiting connections is growing so fast? From ahh at one.com Wed Oct 17 11:02:59 2012 From: ahh at one.com (Adam Hasselbalch Hansen) Date: Wed, 17 Oct 2012 13:02:59 +0200 Subject: Manipulating the body of a PUT/POST In-Reply-To: <20120309131839.GH67687@mdounin.ru> References: <4F2B9FEE.3060601@one.com> <20120203100450.GH67687@mdounin.ru> <4F58BBBC.9000903@one.com> <20120309131839.GH67687@mdounin.ru> Message-ID: <507E9063.8010001@one.com> On 2012-03-09 14:18, Maxim Dounin wrote: > Hello! > > On Thu, Mar 08, 2012 at 03:01:32PM +0100, Adam Hasselbalch Hansen wrote: > >> On 2012-02-03 11:04, Maxim Dounin wrote: >> >>> I'm working on prototyping input body filtering, it's expected to >>> appear somewhere in 1.2.x. It will allow manipulation of request >>> body, as well as other tasks like content inspection and so on. >> >> Any ETA on that, btw? > > No ETA yet. Any update on this? Adam From kyprizel at gmail.com Wed Oct 17 11:09:26 2012 From: kyprizel at gmail.com (kyprizel) Date: Wed, 17 Oct 2012 15:09:26 +0400 Subject: Manipulating the body of a PUT/POST In-Reply-To: <507E9063.8010001@one.com> References: <4F2B9FEE.3060601@one.com> <20120203100450.GH67687@mdounin.ru> <4F58BBBC.9000903@one.com> <20120309131839.GH67687@mdounin.ru> <507E9063.8010001@one.com> Message-ID: Maxim, we use body handling code from Valery Kholodkov's upload module(and nginx core) in Nginx ModSecurity module, can you please look at the code and check if we do it correctly? http://mod-security.svn.sourceforge.net/viewvc/mod-security/m2/trunk/nginx/modsecurity/ Adam, if you want ETA - why don't you buy Nginx premium support? On Wed, Oct 17, 2012 at 3:02 PM, Adam Hasselbalch Hansen wrote: > On 2012-03-09 14:18, Maxim Dounin wrote: >> >> Hello! 
>> >> On Thu, Mar 08, 2012 at 03:01:32PM +0100, Adam Hasselbalch Hansen wrote: >> >>> On 2012-02-03 11:04, Maxim Dounin wrote: >>> >>>> I'm working on prototyping input body filtering, it's expected to >>>> appear somewhere in 1.2.x. It will allow manipulation of request >>>> body, as well as other tasks like content inspection and so on. >>> >>> >>> Any ETA on that, btw? >> >> >> No ETA yet. > > > > Any update on this? > > Adam > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From ahh at one.com Wed Oct 17 11:12:35 2012 From: ahh at one.com (Adam Hasselbalch Hansen) Date: Wed, 17 Oct 2012 13:12:35 +0200 Subject: Manipulating the body of a PUT/POST In-Reply-To: References: <4F2B9FEE.3060601@one.com> <20120203100450.GH67687@mdounin.ru> <4F58BBBC.9000903@one.com> <20120309131839.GH67687@mdounin.ru> <507E9063.8010001@one.com> Message-ID: <507E92A3.2070006@one.com> On 2012-10-17 13:09, kyprizel wrote: > Adam, if you want ETA - why don't you buy Nginx premium support? Relax! It wasn't a demand. Just a polite question. "No ETA yet" is also a valid answer. Jeez. From ne at vbart.ru Wed Oct 17 11:34:05 2012 From: ne at vbart.ru (Valentin V. Bartenev) Date: Wed, 17 Oct 2012 15:34:05 +0400 Subject: Manipulating the body of a PUT/POST In-Reply-To: <507E92A3.2070006@one.com> References: <4F2B9FEE.3060601@one.com> <507E92A3.2070006@one.com> Message-ID: <201210171534.05763.ne@vbart.ru> On Wednesday 17 October 2012 15:12:35 Adam Hasselbalch Hansen wrote: > On 2012-10-17 13:09, kyprizel wrote: > > Adam, if you want ETA - why don't you buy Nginx premium support? > > Relax! It wasn't a demand. Just a polite question. "No ETA yet" is also > a valid answer. Jeez. > Planned features for the near future and some optimistic ETA for them can always be found here: https://trac.nginx.org/nginx/roadmap wbr, Valentin V. Bartenev -- http://nginx.com/support.html http://nginx.org/en/donation.html From mdounin at mdounin.ru Wed Oct 17 17:20:53 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 17 Oct 2012 21:20:53 +0400 Subject: Manipulating the body of a PUT/POST In-Reply-To: <507E9063.8010001@one.com> References: <4F2B9FEE.3060601@one.com> <20120203100450.GH67687@mdounin.ru> <4F58BBBC.9000903@one.com> <20120309131839.GH67687@mdounin.ru> <507E9063.8010001@one.com> Message-ID: <20121017172053.GO40452@mdounin.ru> Hello! On Wed, Oct 17, 2012 at 01:02:59PM +0200, Adam Hasselbalch Hansen wrote: > On 2012-03-09 14:18, Maxim Dounin wrote: > >Hello! > > > >On Thu, Mar 08, 2012 at 03:01:32PM +0100, Adam Hasselbalch Hansen wrote: > > > >>On 2012-02-03 11:04, Maxim Dounin wrote: > >> > >>>I'm working on prototyping input body filtering, it's expected to > >>>appear somewhere in 1.2.x. It will allow manipulation of request > >>>body, as well as other tasks like content inspection and so on. > >> > >>Any ETA on that, btw? > > > >No ETA yet. > > > Any update on this? It's expected to happen soon unless I'll give up again, I'm working on it. Feel free to sponsor the work. 
:) -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Wed Oct 17 17:42:35 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 17 Oct 2012 21:42:35 +0400 Subject: Manipulating the body of a PUT/POST In-Reply-To: References: <4F2B9FEE.3060601@one.com> <20120203100450.GH67687@mdounin.ru> <4F58BBBC.9000903@one.com> <20120309131839.GH67687@mdounin.ru> <507E9063.8010001@one.com> Message-ID: <20121017174235.GP40452@mdounin.ru> Hello! On Wed, Oct 17, 2012 at 03:09:26PM +0400, kyprizel wrote: > Maxim, we use body handling code from Valery Kholodkov's upload > module(and nginx core) in Nginx ModSecurity module, can you please > look at the code and check if we do it correctly? > http://mod-security.svn.sourceforge.net/viewvc/mod-security/m2/trunk/nginx/modsecurity/ Are you kidding? ;) It can't be correct without input body filtering implemented. It's a hack at best, expect it to break on major changes in request body reading code. Additionally, it looks like you've failed to provide any logic to actually save request body for use by other modules if it's large enough to don't fit into memory buffer configured (that is, write request body to disk), nor any logic to honor r->request_body_in_file_only. The only _correct_ aproach available as of now is to call ngx_http_read_client_request_body(), and work with the result once post_handler is called. -- Maxim Dounin http://nginx.com/support.html From jefftk at google.com Wed Oct 17 19:13:53 2012 From: jefftk at google.com (Jeff Kaufman) Date: Wed, 17 Oct 2012 15:13:53 -0400 Subject: determining the url of a request In-Reply-To: <20121017090227.GE40452@mdounin.ru> References: <20121017090227.GE40452@mdounin.ru> Message-ID: On Wed, Oct 17, 2012 at 5:02 AM, Maxim Dounin wrote: > You may want to use r->headers_in.server though ... > You may want to look at ngx_http_variable_host() ... I have this working now [1], thanks! The pointer to ngx_http_variables.c was helpful. Jeff [1] https://github.com/pagespeed/ngx_pagespeed/compare/3548b9ffde088bf7545a066d4c576d9255b11857...3d787d353fda538aee1b97a27a836424e9b625dd From kyprizel at gmail.com Wed Oct 17 20:02:47 2012 From: kyprizel at gmail.com (kyprizel) Date: Thu, 18 Oct 2012 00:02:47 +0400 Subject: Manipulating the body of a PUT/POST In-Reply-To: <20121017174235.GP40452@mdounin.ru> References: <4F2B9FEE.3060601@one.com> <20120203100450.GH67687@mdounin.ru> <4F58BBBC.9000903@one.com> <20120309131839.GH67687@mdounin.ru> <507E9063.8010001@one.com> <20121017174235.GP40452@mdounin.ru> Message-ID: ModSecurity can't handle big bodies anyway, so if the body is too big to fit in memory - it'll be discarded by modsecurity, so there is no reason to handle bodies written to the temp files. On Wed, Oct 17, 2012 at 9:42 PM, Maxim Dounin wrote: > Hello! > > On Wed, Oct 17, 2012 at 03:09:26PM +0400, kyprizel wrote: > >> Maxim, we use body handling code from Valery Kholodkov's upload >> module(and nginx core) in Nginx ModSecurity module, can you please >> look at the code and check if we do it correctly? >> http://mod-security.svn.sourceforge.net/viewvc/mod-security/m2/trunk/nginx/modsecurity/ > > Are you kidding? ;) > > It can't be correct without input body filtering implemented. > It's a hack at best, expect it to break on major changes in > request body reading code. 
Additionally, it looks like you've > failed to provide any logic to actually save request body for use > by other modules if it's large enough to don't fit into memory > buffer configured (that is, write request body to disk), nor any > logic to honor r->request_body_in_file_only. > > The only _correct_ aproach available as of now is to call > ngx_http_read_client_request_body(), and work with the result once > post_handler is called. > > -- > Maxim Dounin > http://nginx.com/support.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From pass86 at gmail.com Thu Oct 18 04:57:50 2012 From: pass86 at gmail.com (l.jay Yuan) Date: Thu, 18 Oct 2012 12:57:50 +0800 Subject: unsubscribe nginx pass86@gmail.com Message-ID: unsubscribe nginx pass86 at gmail.com From honwel at 163.com Thu Oct 18 05:08:26 2012 From: honwel at 163.com (honwel) Date: Thu, 18 Oct 2012 13:08:26 +0800 (CST) Subject: help with gunzip filter Message-ID: hi,all i recently download the nginx-1.3.7 source from www.nginx.org, and i debug the gunzip module because i want unzip content from server and filter the content(use subs filter module-weibin yao), but gunzip nerver work , so i tracking the code, and see this line from function ngx_http_gunzip_header_filter(ngx_http_request_t *r) static ngx_int_t 1 ngx_http_gunzip_header_filter(ngx_http_request_t *r) 2 { ...... ...... 3 if (!r->gzip_tested) { 4 if (ngx_http_gzip_ok(r) == NGX_OK) { 5 return ngx_http_next_header_filter(r); 6 } 7 } else if (!r->gzip_ok) { 8 return ngx_http_next_header_filter(r); } ...... ...... } l think line 4 should be "if (ngx_http_gzip_ok(r) != NGX_OK) { ... }" and change it , then i recomplie source code, make and debug use gdb tool, so function is run ok, is there a bug? thanks a lot. -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Thu Oct 18 09:10:02 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 18 Oct 2012 13:10:02 +0400 Subject: help with gunzip filter In-Reply-To: References: Message-ID: <20121018091002.GS40452@mdounin.ru> Hello! On Thu, Oct 18, 2012 at 01:08:26PM +0800, honwel wrote: > hi,all > i recently download the nginx-1.3.7 source from www.nginx.org, and i debug the gunzip module because i want unzip content from server and filter the content(use subs filter module-weibin yao), but gunzip nerver work , so i tracking the code, and see this line from function ngx_http_gunzip_header_filter(ngx_http_request_t *r) > static ngx_int_t > 1 ngx_http_gunzip_header_filter(ngx_http_request_t *r) > 2 { > ...... > ...... > 3 if (!r->gzip_tested) { > 4 if (ngx_http_gzip_ok(r) == NGX_OK) { > 5 return ngx_http_next_header_filter(r); > 6 } > 7 } else if (!r->gzip_ok) { > 8 return ngx_http_next_header_filter(r); > } > ...... > ...... > } > > l think line 4 should be "if (ngx_http_gzip_ok(r) != NGX_OK) > { ... }" and change it , then i recomplie source code, make and > debug use gdb tool, so function is run ok, is there a bug? If you want to gunzip content always you have to omit this block entirely. As of now gunzip only gunzips responses if client does not support gzipped responses, hence the check. 
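"Omit this block entirely" would amount to a local change along the lines of the following untested sketch against the 1.3.7 source quoted above: disable the client-capability test in ngx_http_gunzip_header_filter(), so the response is always gunzipped and later body filters (such as a substitution filter) see plain text. This is a hack for experimentation, not a supported configuration:

    #if 0
        if (!r->gzip_tested) {
            if (ngx_http_gzip_ok(r) == NGX_OK) {
                return ngx_http_next_header_filter(r);
            }

        } else if (!r->gzip_ok) {
            return ngx_http_next_header_filter(r);
        }
    #endif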
The bug seems to be present in line 7 though, it should be --- a/src/http/modules/ngx_http_gunzip_filter_module.c +++ b/src/http/modules/ngx_http_gunzip_filter_module.c @@ -145,7 +145,7 @@ ngx_http_gunzip_header_filter(ngx_http_r return ngx_http_next_header_filter(r); } - } else if (!r->gzip_ok) { + } else if (r->gzip_ok) { return ngx_http_next_header_filter(r); } I don't think this code path can be triggered by using stock nginx modules, but fixing this won't hurt anyway. Thanks for pointing this. -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Thu Oct 18 09:32:36 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 18 Oct 2012 13:32:36 +0400 Subject: Manipulating the body of a PUT/POST In-Reply-To: References: <4F2B9FEE.3060601@one.com> <20120203100450.GH67687@mdounin.ru> <4F58BBBC.9000903@one.com> <20120309131839.GH67687@mdounin.ru> <507E9063.8010001@one.com> <20121017174235.GP40452@mdounin.ru> Message-ID: <20121018093236.GV40452@mdounin.ru> Hello! On Thu, Oct 18, 2012 at 12:02:47AM +0400, kyprizel wrote: > ModSecurity can't handle big bodies anyway, so if the body is too big > to fit in memory - it'll be discarded by modsecurity, so there is no > reason to handle bodies written to the temp files. What your code do is silent data corruption. I wouldn't try to advocate such a behaviour with the "security" word in project's name... :) But if don't want to handle big bodies - why you need custom reading code at all? It would be enough to call ngx_http_read_client_request_body() and then in post_handler walk though r->request_body->bufs, returning an error if you'll see a buffer which isn't in memory. > On Wed, Oct 17, 2012 at 9:42 PM, Maxim Dounin wrote: > > Hello! > > > > On Wed, Oct 17, 2012 at 03:09:26PM +0400, kyprizel wrote: > > > >> Maxim, we use body handling code from Valery Kholodkov's upload > >> module(and nginx core) in Nginx ModSecurity module, can you please > >> look at the code and check if we do it correctly? > >> http://mod-security.svn.sourceforge.net/viewvc/mod-security/m2/trunk/nginx/modsecurity/ > > > > Are you kidding? ;) > > > > It can't be correct without input body filtering implemented. > > It's a hack at best, expect it to break on major changes in > > request body reading code. Additionally, it looks like you've > > failed to provide any logic to actually save request body for use > > by other modules if it's large enough to don't fit into memory > > buffer configured (that is, write request body to disk), nor any > > logic to honor r->request_body_in_file_only. > > > > The only _correct_ aproach available as of now is to call > > ngx_http_read_client_request_body(), and work with the result once > > post_handler is called. 
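For reference, the pattern referred to in the quoted advice, as a minimal sketch; the handler names are made up, and the response generation at the end is reduced to a bare header-only reply:

    static void ngx_example_body_handler(ngx_http_request_t *r);

    static ngx_int_t
    ngx_example_content_handler(ngx_http_request_t *r)
    {
        ngx_int_t  rc;

        rc = ngx_http_read_client_request_body(r, ngx_example_body_handler);

        if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
            return rc;
        }

        return NGX_DONE;
    }

    static void
    ngx_example_body_handler(ngx_http_request_t *r)
    {
        ngx_chain_t  *cl;

        for (cl = r->request_body->bufs; cl; cl = cl->next) {

            if (!ngx_buf_in_memory(cl->buf)) {
                /* body was (partially) written to a temporary file:
                 * too large for in-memory inspection, refuse it */
                ngx_http_finalize_request(r, NGX_HTTP_REQUEST_ENTITY_TOO_LARGE);
                return;
            }

            /* ... inspect cl->buf->pos .. cl->buf->last here ... */
        }

        /* minimal "it worked" response */
        r->headers_out.status = NGX_HTTP_OK;
        r->headers_out.content_length_n = 0;
        r->header_only = 1;

        ngx_http_finalize_request(r, ngx_http_send_header(r));
    }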
> > > > -- > > Maxim Dounin > > http://nginx.com/support.html > > > > _______________________________________________ > > nginx-devel mailing list > > nginx-devel at nginx.org > > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -- Maxim Dounin http://nginx.com/support.html From kyprizel at gmail.com Thu Oct 18 12:36:05 2012 From: kyprizel at gmail.com (kyprizel) Date: Thu, 18 Oct 2012 16:36:05 +0400 Subject: Manipulating the body of a PUT/POST In-Reply-To: <20121018093236.GV40452@mdounin.ru> References: <4F2B9FEE.3060601@one.com> <20120203100450.GH67687@mdounin.ru> <4F58BBBC.9000903@one.com> <20120309131839.GH67687@mdounin.ru> <507E9063.8010001@one.com> <20121017174235.GP40452@mdounin.ru> <20121018093236.GV40452@mdounin.ru> Message-ID: 1. we can limit request body size in config - 8Mb is enought in most cases. 2. modsecurity shouldn't inspect all requests. 3. ngx_http_read_client_request_body returns 64/128Kb only, reading to memory allows us to handle much more, of course it should be changed for use of body inspection interface, but there is no ETA and parse temporary files isn't a good solution at the moment. On Thu, Oct 18, 2012 at 1:32 PM, Maxim Dounin wrote: > Hello! > > On Thu, Oct 18, 2012 at 12:02:47AM +0400, kyprizel wrote: > >> ModSecurity can't handle big bodies anyway, so if the body is too big >> to fit in memory - it'll be discarded by modsecurity, so there is no >> reason to handle bodies written to the temp files. > > What your code do is silent data corruption. I wouldn't try to > advocate such a behaviour with the "security" word in project's > name... :) > > But if don't want to handle big bodies - why you need custom > reading code at all? It would be enough to call > ngx_http_read_client_request_body() and then in post_handler walk > though r->request_body->bufs, returning an error if you'll see a > buffer which isn't in memory. > >> On Wed, Oct 17, 2012 at 9:42 PM, Maxim Dounin wrote: >> > Hello! >> > >> > On Wed, Oct 17, 2012 at 03:09:26PM +0400, kyprizel wrote: >> > >> >> Maxim, we use body handling code from Valery Kholodkov's upload >> >> module(and nginx core) in Nginx ModSecurity module, can you please >> >> look at the code and check if we do it correctly? >> >> http://mod-security.svn.sourceforge.net/viewvc/mod-security/m2/trunk/nginx/modsecurity/ >> > >> > Are you kidding? ;) >> > >> > It can't be correct without input body filtering implemented. >> > It's a hack at best, expect it to break on major changes in >> > request body reading code. Additionally, it looks like you've >> > failed to provide any logic to actually save request body for use >> > by other modules if it's large enough to don't fit into memory >> > buffer configured (that is, write request body to disk), nor any >> > logic to honor r->request_body_in_file_only. >> > >> > The only _correct_ aproach available as of now is to call >> > ngx_http_read_client_request_body(), and work with the result once >> > post_handler is called. 
>> > >> > -- >> > Maxim Dounin >> > http://nginx.com/support.html >> > >> > _______________________________________________ >> > nginx-devel mailing list >> > nginx-devel at nginx.org >> > http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > -- > Maxim Dounin > http://nginx.com/support.html > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Thu Oct 18 14:27:41 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Thu, 18 Oct 2012 14:27:41 +0000 Subject: [nginx] svn commit: r4889 - trunk/src/http/modules Message-ID: <20121018142741.1EF963F9C3E@mail.nginx.com> Author: mdounin Date: 2012-10-18 14:27:40 +0000 (Thu, 18 Oct 2012) New Revision: 4889 URL: http://trac.nginx.org/nginx/changeset/4889/nginx Log: Gunzip: fixed r->gzip_ok check. Modified: trunk/src/http/modules/ngx_http_gunzip_filter_module.c Modified: trunk/src/http/modules/ngx_http_gunzip_filter_module.c =================================================================== --- trunk/src/http/modules/ngx_http_gunzip_filter_module.c 2012-10-05 11:09:14 UTC (rev 4888) +++ trunk/src/http/modules/ngx_http_gunzip_filter_module.c 2012-10-18 14:27:40 UTC (rev 4889) @@ -145,7 +145,7 @@ return ngx_http_next_header_filter(r); } - } else if (!r->gzip_ok) { + } else if (r->gzip_ok) { return ngx_http_next_header_filter(r); } From mdounin at mdounin.ru Thu Oct 18 14:48:34 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Thu, 18 Oct 2012 14:48:34 +0000 Subject: [nginx] svn commit: r4890 - trunk/src/os/unix Message-ID: <20121018144834.2DA263F9EC2@mail.nginx.com> Author: mdounin Date: 2012-10-18 14:48:33 +0000 (Thu, 18 Oct 2012) New Revision: 4890 URL: http://trac.nginx.org/nginx/changeset/4890/nginx Log: Removed conditional compilation from waitpid() error test. There are reports that call to a signal handler for an exited process despite waitpid() already called for the process may happen on Linux as well. Modified: trunk/src/os/unix/ngx_process.c Modified: trunk/src/os/unix/ngx_process.c =================================================================== --- trunk/src/os/unix/ngx_process.c 2012-10-18 14:27:40 UTC (rev 4889) +++ trunk/src/os/unix/ngx_process.c 2012-10-18 14:48:33 UTC (rev 4890) @@ -474,8 +474,6 @@ return; } -#if (NGX_SOLARIS || NGX_FREEBSD) - /* * Solaris always calls the signal handler for each exited process * despite waitpid() may be already called for this process. @@ -491,8 +489,6 @@ return; } -#endif - ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, err, "waitpid() failed"); return; From toli at webforge.bg Fri Oct 19 08:10:20 2012 From: toli at webforge.bg (Anatoli Marinov) Date: Fri, 19 Oct 2012 11:10:20 +0300 Subject: cached files with wrong size Message-ID: <50810AEC.3090609@webforge.bg> *Hello colleagues, I have some troubles when caching (with proxy cache) some objects which don't have content-length header. It happens with gzipped .js or .css files. Usually these files don't have content-length because its size is not known when headers are sent. If the connection between proxy server and origin behind it disconnects during file transfer the file will be checked if it has correct size only if content-length exists. Otherwise the file will be cached normally even it does not have correct size. 
In this case the corrupted file will be sent to the browser and will probably break the normal behaviour of the served website. Do you know a solution for this kind of issue? -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Oct 19 10:32:10 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 19 Oct 2012 14:32:10 +0400 Subject: cached files with wrong size In-Reply-To: <50810AEC.3090609@webforge.bg> References: <50810AEC.3090609@webforge.bg> Message-ID: <20121019103210.GG40452@mdounin.ru> Hello! On Fri, Oct 19, 2012 at 11:10:20AM +0300, Anatoli Marinov wrote: > *Hello colleagues, > I have some troubles when caching (with proxy cache) some objects > which don't > have content-length header. It happens with gzipped .js or .css > files. Usually > these files don't have content-length because its size is not known > when headers > are sent. > > If the connection between proxy server and origin behind it > disconnects during > file transfer the file will be checked if it has correct size only if > content-length exists. Otherwise the file will be cached normally even it > does not have correct size. > > In this case corrupted file will be sent to the browser and probably > it will break normal behaviour of a served website. > Do you know a solution for such kind of issues?* As soon as the connection is closed uncleanly (i.e. was reset or timed out), there should be no problem with proxy_cache: nginx will detect there is an error and won't cache the response. If the connection is closed cleanly (e.g. your OS does a clean connection close on behalf of an unexpectedly terminated app, or an unexpected connection close is hidden behind another proxy layer), there is a problem though: one can't detect if a response is complete with HTTP/1.0 without Content-Length present. With HTTP/1.1 the situation is a bit improved, as it has chunked transfer-coding. Though as of now nginx doesn't try to take it into account when caching files even if HTTP/1.1 to backends is activated using the proxy_http_version directive. I've already suggested a way to improve things here: http://mailman.nginx.org/pipermail/nginx-devel/2012-September/002699.html It should improve things with HTTP/1.1 used, though no one has tried to actually do it yet, and I don't have time to dig into this right now. -- Maxim Dounin http://nginx.com/support.html p.s. Please don't use html to post here. Thanks. From simohayha.bobo at gmail.com Fri Oct 19 12:06:15 2012 From: simohayha.bobo at gmail.com (Simon Liu) Date: Fri, 19 Oct 2012 20:06:15 +0800 Subject: change growth factor of array Message-ID: In this document ( https://github.com/facebook/folly/blob/master/folly/docs/FBVector.md ), it is suggested to use a growth factor of 1.5 (when you push into an array that has no room left) for dynamically-allocated arrays. The factor is 2 in nginx's array, so I think changing the factor to 1.5 may be better for nginx's arrays. Thanks. -- do not fear to be eccentric in opinion, for every opinion now accepted was once eccentric. -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Fri Oct 19 13:25:06 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 19 Oct 2012 17:25:06 +0400 Subject: change growth factor of array In-Reply-To: References: Message-ID: <20121019132506.GJ40452@mdounin.ru> Hello! 
On Fri, Oct 19, 2012 at 08:06:15PM +0800, Simon Liu wrote: > In this document ( > https://github.com/facebook/folly/blob/master/folly/docs/FBVector.md ) , > It is to suggest use factor 1.5 (when you'd push into a array without > there being room) in dynamically-allocated arrays. the factor is 2 in > array of Nginx, and so I think may be change factor to 1.5 is be better in > Nginx's array. While the reasoning about memory reuse provided in the document in question looks valid from memory point of view, it doesn't really apply to nginx arrays due to the following reasons: - Previously used memory isn't freed anyway (instead nginx rely on pool allocator to free it on pool destruction, plus in some cases it's on-stack memory). - Arrays are used for usually small data sets which are [almost] stable. If use case suggests dynamic growth - lists are used instead (see src/core/ngx_list.c). On the other hand, 2 is better than 1.5 as it results in less reallocation operations on average. -- Maxim Dounin http://nginx.com/support.html From nginx at lukaperkov.net Fri Oct 19 14:07:04 2012 From: nginx at lukaperkov.net (Luka Perkov) Date: Fri, 19 Oct 2012 16:07:04 +0200 Subject: plans for chunked encoding Message-ID: <20121019140704.GA7781@w500> Hi, I'm working on a module that needs to handle POST requests by looking at the body. Thing is that some clients are sending chunked-encoded messages. I can not use HttpChunkinModule to get the body data because this module implements its own request body reading mechanism: http://wiki.nginx.org/HttpChunkinModule#Known_Issues In that case my module does not see request body :/ I have seen that reading chunked-encoded POSTs is on the roadmap: http://trac.nginx.org/nginx/roadmap I'm wondering if there is any work done on this already ? If so I could test some patches... By the way module will be released as GPLv2 once it's finished. Regards, Luka From mdounin at mdounin.ru Fri Oct 19 14:13:10 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Fri, 19 Oct 2012 18:13:10 +0400 Subject: plans for chunked encoding In-Reply-To: <20121019140704.GA7781@w500> References: <20121019140704.GA7781@w500> Message-ID: <20121019141310.GM40452@mdounin.ru> Hello! On Fri, Oct 19, 2012 at 04:07:04PM +0200, Luka Perkov wrote: > Hi, > > I'm working on a module that needs to handle POST requests by looking at > the body. Thing is that some clients are sending chunked-encoded > messages. I can not use HttpChunkinModule to get the body data because > this module implements its own request body reading mechanism: > > http://wiki.nginx.org/HttpChunkinModule#Known_Issues > > In that case my module does not see request body :/ > > I have seen that reading chunked-encoded POSTs is on the roadmap: > > http://trac.nginx.org/nginx/roadmap > > I'm wondering if there is any work done on this already ? If so I could > test some patches... I'll post it here once there is something to test. > By the way module will be released as GPLv2 once it's finished. JFYI, this is much more restrictive than nginx's BSD license. 
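For background on the thread subject: a chunked-encoded request body (RFC 2616, section 3.6.1) carries a hexadecimal size line before each chunk and ends with a zero-size chunk, instead of announcing the total length up front in a Content-Length header, which is why the body-reading code needs explicit support for it. An illustrative raw request; the URI, host and payload are invented for the example and not taken from any module mentioned above.

POST /upload HTTP/1.1
Host: backend.example.com
Transfer-Encoding: chunked

b
hello world
11
, more payload...
0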
-- Maxim Dounin http://nginx.com/support.html From nginx at lukaperkov.net Fri Oct 19 14:28:25 2012 From: nginx at lukaperkov.net (Luka Perkov) Date: Fri, 19 Oct 2012 16:28:25 +0200 Subject: plans for chunked encoding In-Reply-To: <20121019141310.GM40452@mdounin.ru> References: <20121019140704.GA7781@w500> <20121019141310.GM40452@mdounin.ru> Message-ID: <20121019142825.GB7781@w500> Hi Maxim, On Fri, Oct 19, 2012 at 06:13:10PM +0400, Maxim Dounin wrote: > > I'm wondering if there is any work done on this already ? If so I could > > test some patches... > > I'll post it here once there is something to test. Thank you. > > By the way module will be released as GPLv2 once it's finished. > > JFYI, this is much more restrictive than nginx's BSD license. I know... But GPLv2 is still better then closed. Regards, Luka From vshebordaev at mail.ru Fri Oct 19 15:23:31 2012 From: vshebordaev at mail.ru (=?UTF-8?B?VmxhZGltaXIgU2hlYm9yZGFldg==?=) Date: Fri, 19 Oct 2012 19:23:31 +0400 Subject: change growth factor of array In-Reply-To: References: Message-ID: <1350660211.895034190@f260.mail.ru> Hi! You might wish to try this patch if you do need really large arrays in nginx that would be fast and memory efficient on insertion at cost of rather slow read access. --- /dev/null 2012-10-18 19:22:00.714822120 +0400 +++ src/core/ngx_vector.h 2012-07-13 05:57:58.000000000 +0400 @@ -0,0 +1,140 @@ +#ifndef _NGX_VECTOR_H_INCLUDED_ +#define _NGX_VECTOR_H_INCLUDED_ + +#include +#include + +typedef struct ngx_vector_s ngx_vector_t; + +struct ngx_vector_s { + size_t size; + ngx_int_t last; + /* data blocks */ + size_t d; + size_t db_size; + size_t d_free; + /* superblocks */ + size_t s; + size_t sb_size; + size_t s_free; + /* index block */ + void **index; + size_t ib_size; + + ngx_pool_t *pool; +}; + +#ifdef __GNUC__ +#define ngx_clz(n) __builtin_clz(n) +#else +#error "You have to define ngx_clz() for your compiler" +#endif + +static ngx_inline +ngx_int_t ngx_vector_bound_check(const ngx_vector_t *vec, ngx_int_t idx) +{ + return idx > vec->last; +} + +static ngx_inline +void *__ngx_vector_locate(const ngx_vector_t *vec, size_t idx) +{ + ngx_uint_t k, msb, half, mask, b, e, p; + + ++idx; + k = (NGX_PTR_SIZE << 3) - 1 - ngx_clz(idx); + + msb = 1 << k; + half = (k + 1) >> 1; + mask = (1 << half) - 1; + + p = mask + (1 << (k - half)) - 1; + b = (idx ^ msb) >> half; + e = idx & mask; + + return (char *)vec->index[p + b] + e * vec->size; +} + +static ngx_inline +void *ngx_vector_read(const ngx_vector_t *vec, size_t idx) +{ + return ngx_vector_bound_check(vec, idx) ? NULL : __ngx_vector_locate(vec, idx); +} + +static ngx_inline +void *ngx_vector_write(const ngx_vector_t *vec, size_t idx, void *value) +{ + void *ret; + + if (ngx_vector_bound_check(vec, idx)) + return NULL; + + ret = __ngx_vector_locate(vec, idx); + if (ret && value) + ngx_memcpy(ret, value, vec->size); + + return ret; +} + +extern ngx_int_t __ngx_vector_grow_by(ngx_vector_t *vec, size_t nelts); +extern ngx_int_t __ngx_vector_alloc_block(ngx_vector_t *vec); + +static ngx_inline ngx_int_t +ngx_vector_grow_by(ngx_vector_t *vec, size_t nelts) +{ + if (nelts <= vec->d_free) { + vec->d_free -= nelts; + vec->last += nelts; + + return NGX_OK; + } + + vec->last += vec->d_free; + nelts -= vec->d_free; + + return __ngx_vector_grow_by(vec, nelts); +} + +static ngx_inline ngx_int_t +ngx_vector_grow(ngx_vector_t *vec) +{ + ngx_int_t ret; + + ret = vec->d_free ? 
NGX_OK : __ngx_vector_alloc_block(vec); + if (ret != NGX_OK) + goto out; + + vec->d_free--; + vec->last++; +out: + return ret; +} + +static ngx_inline size_t +ngx_vector_size(ngx_vector_t *vec) +{ + return vec->last + 1; +} + +static ngx_inline void * +ngx_vector_push_back(ngx_vector_t *vec, void *value) +{ + void *ret; + + if (ngx_vector_grow(vec) != NGX_OK) { + ret = NULL; + goto out; + } + + ret = __ngx_vector_locate(vec, ngx_vector_size(vec) - 1); + if (ret && value) + ngx_memcpy(ret, value, vec->size); +out: + return ret; +} + +extern ngx_vector_t *ngx_vector_init(size_t size, ngx_pool_t *pool); +extern void ngx_vector_destroy(ngx_vector_t *vec); + +#endif /* _NGX_VECTOR_H_INCLUDED_ */ + --- /dev/null 2012-10-18 19:22:00.714822120 +0400 +++ src/core/ngx_vector.c 2012-07-13 05:36:28.000000000 +0400 @@ -0,0 +1,134 @@ +#include + +ngx_vector_t * +ngx_vector_init(size_t size, ngx_pool_t *pool) +{ + ngx_vector_t *ret; + + ret = NULL; + + if (!pool) + goto out; + + ret = ngx_palloc(pool, sizeof(ngx_vector_t)); + if (!ret) + goto out; + + ret->pool = pool; + + ret->size = size; + ret->last = -1; + + ret->index = ngx_palloc(pool, ngx_pagesize); + if (!ret->index) + goto out_free; + + ngx_memzero(ret->index, ngx_pagesize); + ret->ib_size = ngx_pagesize / sizeof(ret->index[0]); + + ret->d = 0; + ret->db_size = 1; + ret->d_free = 1; + + ret->s = 0; + ret->sb_size = 1; + ret->s_free = 0; + + ret->index[0] = ngx_palloc(pool, size); + if (!ret->index[0]) + goto out_free_index; +out: + return ret; + +out_free_index: + ngx_pfree(pool, ret->index); +out_free: + ngx_pfree(pool, ret); + return NULL; +} + +void +ngx_vector_destroy(ngx_vector_t *vec) +{ + size_t i; + + for (i = 0; i <= vec->d; i++) + ngx_pfree(vec->pool, (void *)vec->index[i]); + + ngx_pfree(vec->pool, vec->index); + ngx_pfree(vec->pool, vec); +} + +ngx_int_t +__ngx_vector_grow_by(ngx_vector_t *vec, size_t nelts) +{ + ngx_int_t ret; + + for(;;) { + ret = __ngx_vector_alloc_block(vec); + if (ret != NGX_OK) + goto out; + + if (nelts < vec->d_free) + break; + + nelts -= vec->db_size; + vec->last += vec->db_size; + } + vec->d_free -= nelts; + vec->last += nelts; + + ret = NGX_OK; +out: + return ret; +} + +ngx_int_t +__ngx_vector_alloc_block(ngx_vector_t *vec) +{ + ngx_int_t ret; + + ret = NGX_ERROR; + + if (!vec->s_free) { + vec->s++; + + if (vec->s & 1) + vec->db_size *= 2; + else + vec->sb_size *= 2; + + vec->s_free = vec->sb_size; + } + + vec->d++; + vec->s_free--; + + if (vec->d >= vec->ib_size) { + void **index; + size_t size; + + size = 2 * vec->ib_size * sizeof(void *); + index = ngx_palloc(vec->pool, size); + if (!index) + goto out; + + ngx_memzero(index, size); + ngx_memcpy(index, vec->index, vec->ib_size * sizeof(void *)); + ngx_pfree(vec->pool, vec->index); + + vec->index = index; + vec->ib_size *= 2; + } + + vec->index[vec->d] = ngx_palloc(vec->pool, vec->db_size * vec->size); + if (!vec->index[vec->d]) + goto out; + + vec->d_free = vec->db_size; + + ret = NGX_OK; +out: + return ret; +} + This code implements the algorithms described here? They say about square root of array size . It used to work for me a few months ago. Sure, it is possible to optimize read access code ?but the element size is known just at run-time, so it seems hard to be done without register parameters and inline assembler. Hope it helps. Regards, Vladimir Fri, 19 Oct 2012 20:06:15 +0800 ?? Simon Liu : > >In this document (?https://github.com/facebook/folly/blob/master/folly/docs/FBVector.md?) 
, ?It is to suggest use factor 1.5 (when?you'd?push?into >a array without there being room)??in?dynamically-allocated arrays. the factor is 2 in array of Nginx, and so I think may be change factor to 1.5 is be better in Nginx's array. > >Thanks. > > > From ywu at about.com Fri Oct 19 20:03:13 2012 From: ywu at about.com (YongFeng Wu) Date: Fri, 19 Oct 2012 16:03:13 -0400 Subject: quit a worker process Message-ID: <000c01cdae34$c54861c0$4fd92540$@com> Hi, What is an easy way to have a worker process exit and the master process to start a new worker process? For example, in the case of the memory allocation fails. Thanks a lot, Larry -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Sat Oct 20 11:49:51 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Sat, 20 Oct 2012 15:49:51 +0400 Subject: quit a worker process In-Reply-To: <000c01cdae34$c54861c0$4fd92540$@com> References: <000c01cdae34$c54861c0$4fd92540$@com> Message-ID: <20121020114951.GQ40452@mdounin.ru> Hello! On Fri, Oct 19, 2012 at 04:03:13PM -0400, YongFeng Wu wrote: > What is an easy way to have a worker process exit and the master process to > start a new worker process? For example, in the case of the memory > allocation fails. You don't want to exit worker process unless the problem encountered is really fatal. Obviously memory allocation failure is only fatal if it affects operation of the whole worker process and can't be handled in another way (e.g. retried later). In most cases it's just enough to terminate a single affected request if memory allocation fails. To do this it's usually enough to just return an error from a handler function. If for some reason you are really need to terminate a worker process yourself, you may use ngx_abort() (which basically maps to abort()). Note though, that it's something you should only use if there is no way to properly return error instead and you are sure the problem is fatal and the whole worker process cannot continue, as it will kill all requests handled by the process and will likely result in the process core dump written. You should also be careful to don't call it while leaving shared memory in an inconsistent state, as this will result in an inconsistent behaviour of other processes. Just a side note: as of now nginx itself never calls ngx_abort() unless explicitly configured to do so for debugging with the "debug_points" directive. -- Maxim Dounin http://nginx.com/support.html From defan at nginx.com Tue Oct 23 09:08:42 2012 From: defan at nginx.com (defan at nginx.com) Date: Tue, 23 Oct 2012 09:08:42 +0000 Subject: [nginx] svn commit: r4891 - trunk/src/core Message-ID: <20121023090842.66CE63F9C11@mail.nginx.com> Author: defan Date: 2012-10-23 09:08:41 +0000 (Tue, 23 Oct 2012) New Revision: 4891 URL: http://trac.nginx.org/nginx/changeset/4891/nginx Log: Core: the "auto" parameter of the "worker_processes" directive. The parameter will set the number of worker processes to the autodetected number of available CPU cores. 
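With this change the main context of nginx.conf can simply say the following (a minimal illustrative snippet; an explicit number keeps working as before):

worker_processes auto;

Per the patch below, "auto" sets the worker count to ngx_ncpu, i.e. the number of CPU cores detected at startup.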
Modified: trunk/src/core/nginx.c Modified: trunk/src/core/nginx.c =================================================================== --- trunk/src/core/nginx.c 2012-10-18 14:48:33 UTC (rev 4890) +++ trunk/src/core/nginx.c 2012-10-23 09:08:41 UTC (rev 4891) @@ -21,6 +21,8 @@ static char *ngx_set_priority(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static char *ngx_set_cpu_affinity(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); +static char *ngx_set_worker_processes(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); static ngx_conf_enum_t ngx_debug_points[] = { @@ -69,9 +71,9 @@ { ngx_string("worker_processes"), NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1, - ngx_conf_set_num_slot, + ngx_set_worker_processes, 0, - offsetof(ngx_core_conf_t, worker_processes), + 0, NULL }, { ngx_string("debug_points"), @@ -1329,3 +1331,32 @@ return ccf->cpu_affinity[ccf->cpu_affinity_n - 1]; } + + +static char * +ngx_set_worker_processes(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) +{ + ngx_str_t *value; + ngx_core_conf_t *ccf; + + ccf = (ngx_core_conf_t *) conf; + + if (ccf->worker_processes != NGX_CONF_UNSET) { + return "is duplicate"; + } + + value = (ngx_str_t *) cf->args->elts; + + if (ngx_strcmp(value[1].data, "auto") == 0) { + ccf->worker_processes = ngx_ncpu; + return NGX_CONF_OK; + } + + ccf->worker_processes = ngx_atoi(value[1].data, value[1].len); + + if (ccf->worker_processes == NGX_ERROR) { + return "invalid value"; + } + + return NGX_CONF_OK; +} From postmaster at softsearch.ru Tue Oct 23 13:08:07 2012 From: postmaster at softsearch.ru (=?Windows-1251?B?zOj14OjrIMzu7eD4uOI=?=) Date: Tue, 23 Oct 2012 17:08:07 +0400 Subject: [nginx] svn commit: r4891 - trunk/src/core In-Reply-To: <20121023090842.66CE63F9C11@mail.nginx.com> References: <20121023090842.66CE63F9C11@mail.nginx.com> Message-ID: <245137787.20121023170807@softsearch.ru> Hi, defan. > Log: > Core: the "auto" parameter of the "worker_processes" directive. Thank you! -- Michael From vbart at nginx.com Tue Oct 23 14:36:18 2012 From: vbart at nginx.com (vbart at nginx.com) Date: Tue, 23 Oct 2012 14:36:18 +0000 Subject: [nginx] svn commit: r4892 - trunk/src/http Message-ID: <20121023143618.E6E543F9C12@mail.nginx.com> Author: vbart Date: 2012-10-23 14:36:18 +0000 (Tue, 23 Oct 2012) New Revision: 4892 URL: http://trac.nginx.org/nginx/changeset/4892/nginx Log: ngx_http_keepalive_handler() is now trying to not keep c->buffer's memory for idle connections. This behaviour is consistent with the ngx_http_set_keepalive() function and it should decrease memory usage in some cases (especially if epoll/rtsig is used). Modified: trunk/src/http/ngx_http_request.c Modified: trunk/src/http/ngx_http_request.c =================================================================== --- trunk/src/http/ngx_http_request.c 2012-10-23 09:08:41 UTC (rev 4891) +++ trunk/src/http/ngx_http_request.c 2012-10-23 14:36:18 UTC (rev 4892) @@ -2753,6 +2753,20 @@ ngx_http_close_connection(c); } + /* + * Like ngx_http_set_keepalive() we are trying to not hold + * c->buffer's memory for a keepalive connection. 
+ */ + + if (ngx_pfree(c->pool, b->start) == NGX_OK) { + + /* + * the special note that c->buffer's memory was freed + */ + + b->pos = NULL; + } + return; } From jefftk at google.com Tue Oct 23 15:14:52 2012 From: jefftk at google.com (Jeff Kaufman) Date: Tue, 23 Oct 2012 11:14:52 -0400 Subject: proxying from pipes Message-ID: My module wants to sit in the filter chain passing buffers to an asynchronous optimization thread and then send them out to the user when they finish. When a request comes in I have my module roughly doing: body filter: - if first set of buffers - create pipe, pass pipe_write_fd to optimization thread - pass all input data to optimization thread - don't call ngx_http_next_body_filter Is there a way I can ask nginx to watch this pipe and treat any data appearing on the pipe as if it is output from my body filter? Passing it through ngx_http_next_body_filter, etc? And finalizing the request if there's a problem with the pipe? What I'm doing now is: - when creating the pipe c = ngx_get_connection(pipe_read_fd, r->connection->log); c.read->handler = my_read_handler; ngx_add_event(c->read, NGX_READ_EVENT, 0); - when my_read_handler is called read() from pipe_read_fd create buffer, chain link call ngx_http_next_body_filter Is this the right way to go about this? It seems very low level and I would expect proxying from a pipe would be something nginx already supported. Another option would be to simply use the pipe for notification and load the data from the optimization thread through shared memory. Jeff From info at tvdw.eu Tue Oct 23 23:50:18 2012 From: info at tvdw.eu (Tom van der Woerdt) Date: Wed, 24 Oct 2012 01:50:18 +0200 Subject: Invalid content types served when using alias Message-ID: <50872D3A.4060306@tvdw.eu> Hi all, I'm using nginx' locations to serve a javascript file on '/client' : location = /client { expires epoch; alias /path/to/a/file.js; } Works fine, with one major exception: it gets an octet-stream Content-Type header. I tried to solve this with : add_header Content-Type text/javascript; Now I get two Content-Type headers. 
The raw response: < HTTP/1.1 200 OK < Server: nginx/1.3.6 < Date: Tue, 23 Oct 2012 23:42:27 GMT < Content-Type: application/octet-stream < Content-Length: 23245 < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT < Connection: keep-alive < ETag: "5081eba8-5acd" < Expires: Thu, 01 Jan 1970 00:00:01 GMT < Cache-Control: no-cache < Content-Type: text/javascript < Accept-Ranges: bytes nginx information : nginx version: nginx/1.3.6 built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) TLS SNI support enabled configure arguments: --user=nginx --group=nginx --with-http_ssl_module --with-http_gzip_static_module --with-http_secure_link_module --with-http_realip_module --with-http_stub_status_module --with-ipv6 --with-openssl=/root/libraries/openssl-1.0.1c/ --prefix=/etc/nginx/ --sbin-path=/usr/sbin/nginx --conf-path=/etc/nginx/nginx.conf --error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log --pid-path=/var/run/nginx.pid --lock-path=/var/run/nginx.lock --http-client-body-temp-path=/var/cache/nginx/client_temp --http-proxy-temp-path=/var/cache/nginx/proxy_temp --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp --http-scgi-temp-path=/var/cache/nginx/scgi_temp Server information : Linux hostname.goes.here 2.6.32-279.5.2.el6.centos.plus.i686 #1 SMP Thu Aug 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux It's not a major issue for me (browsers will accept octet-stream just fine) but might be annoying for other people, should they ever run into this. Tom From wandenberg at gmail.com Wed Oct 24 03:04:50 2012 From: wandenberg at gmail.com (Wandenberg Peixoto) Date: Wed, 24 Oct 2012 01:04:50 -0200 Subject: Invalid content types served when using alias In-Reply-To: <50872D3A.4060306@tvdw.eu> References: <50872D3A.4060306@tvdw.eu> Message-ID: Try to set default_type text/javascript; instead of add_header. Regards, Wandenberg On Tue, Oct 23, 2012 at 9:50 PM, Tom van der Woerdt wrote: > Hi all, > > I'm using nginx' locations to serve a javascript file on '/client' : > > location = /client { > expires epoch; > alias /path/to/a/file.js; > } > > Works fine, with one major exception: it gets an octet-stream Content-Type > header. I tried to solve this with : > > add_header Content-Type text/javascript; > > Now I get two Content-Type headers. 
> > The raw response: > > < HTTP/1.1 200 OK > < Server: nginx/1.3.6 > < Date: Tue, 23 Oct 2012 23:42:27 GMT > < Content-Type: application/octet-stream > < Content-Length: 23245 > < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT > < Connection: keep-alive > < ETag: "5081eba8-5acd" > < Expires: Thu, 01 Jan 1970 00:00:01 GMT > < Cache-Control: no-cache > < Content-Type: text/javascript > < Accept-Ranges: bytes > > nginx information : > > nginx version: nginx/1.3.6 > built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) > TLS SNI support enabled > configure arguments: --user=nginx --group=nginx --with-http_ssl_module > --with-http_gzip_static_module --with-http_secure_link_module > --with-http_realip_module --with-http_stub_status_module --with-ipv6 > --with-openssl=/root/**libraries/openssl-1.0.1c/ --prefix=/etc/nginx/ > --sbin-path=/usr/sbin/nginx --conf-path=/etc/nginx/nginx.**conf > --error-log-path=/var/log/**nginx/error.log --http-log-path=/var/log/**nginx/access.log > --pid-path=/var/run/nginx.pid --lock-path=/var/run/nginx.**lock > --http-client-body-temp-path=/**var/cache/nginx/client_temp > --http-proxy-temp-path=/var/**cache/nginx/proxy_temp > --http-fastcgi-temp-path=/var/**cache/nginx/fastcgi_temp > --http-uwsgi-temp-path=/var/**cache/nginx/uwsgi_temp > --http-scgi-temp-path=/var/**cache/nginx/scgi_temp > > Server information : > > Linux hostname.goes.here 2.6.32-279.5.2.el6.centos.**plus.i686 #1 SMP Thu > Aug 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux > > It's not a major issue for me (browsers will accept octet-stream just > fine) but might be annoying for other people, should they ever run into > this. > > Tom > > ______________________________**_________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/**mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From info at tvdw.eu Wed Oct 24 08:59:01 2012 From: info at tvdw.eu (Tom van der Woerdt) Date: Wed, 24 Oct 2012 10:59:01 +0200 Subject: Invalid content types served when using alias In-Reply-To: References: <50872D3A.4060306@tvdw.eu> Message-ID: <5087ADD5.4050709@tvdw.eu> Thanks, that works, but it doesn't really sound like a solution, more like a workaround. Tom Op 10/24/12 5:04 AM, Wandenberg Peixoto schreef: > Try to set > > default_type text/javascript; > > instead of add_header. > > Regards, > Wandenberg > > On Tue, Oct 23, 2012 at 9:50 PM, Tom van der Woerdt > wrote: > > Hi all, > > I'm using nginx' locations to serve a javascript file on '/client' : > > location = /client { > expires epoch; > alias /path/to/a/file.js; > } > > Works fine, with one major exception: it gets an octet-stream > Content-Type header. I tried to solve this with : > > add_header Content-Type text/javascript; > > Now I get two Content-Type headers. 
> > The raw response: > > < HTTP/1.1 200 OK > < Server: nginx/1.3.6 > < Date: Tue, 23 Oct 2012 23:42:27 GMT > < Content-Type: application/octet-stream > < Content-Length: 23245 > < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT > < Connection: keep-alive > < ETag: "5081eba8-5acd" > < Expires: Thu, 01 Jan 1970 00:00:01 GMT > < Cache-Control: no-cache > < Content-Type: text/javascript > < Accept-Ranges: bytes > > nginx information : > > nginx version: nginx/1.3.6 > built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) > TLS SNI support enabled > configure arguments: --user=nginx --group=nginx > --with-http_ssl_module --with-http_gzip_static_module > --with-http_secure_link_module --with-http_realip_module > --with-http_stub_status_module --with-ipv6 > --with-openssl=/root/libraries/openssl-1.0.1c/ > --prefix=/etc/nginx/ --sbin-path=/usr/sbin/nginx > --conf-path=/etc/nginx/nginx.conf > --error-log-path=/var/log/nginx/error.log > --http-log-path=/var/log/nginx/access.log > --pid-path=/var/run/nginx.pid --lock-path=/var/run/nginx.lock > --http-client-body-temp-path=/var/cache/nginx/client_temp > --http-proxy-temp-path=/var/cache/nginx/proxy_temp > --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp > --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp > --http-scgi-temp-path=/var/cache/nginx/scgi_temp > > Server information : > > Linux hostname.goes.here 2.6.32-279.5.2.el6.centos.plus.i686 #1 > SMP Thu Aug 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux > > It's not a major issue for me (browsers will accept octet-stream > just fine) but might be annoying for other people, should they > ever run into this. > > Tom > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 3729 bytes Desc: S/MIME-cryptografische ondertekening URL: From wandenberg at gmail.com Wed Oct 24 10:00:06 2012 From: wandenberg at gmail.com (Wandenberg Peixoto) Date: Wed, 24 Oct 2012 08:00:06 -0200 Subject: Invalid content types served when using alias In-Reply-To: <5087ADD5.4050709@tvdw.eu> References: <50872D3A.4060306@tvdw.eu> <5087ADD5.4050709@tvdw.eu> Message-ID: Are you using a mime.types file? Or setting types block? This file has a mapping from file extension to the content type, may be what is missing on your configuration. On Wed, Oct 24, 2012 at 6:59 AM, Tom van der Woerdt wrote: > Thanks, that works, but it doesn't really sound like a solution, more > like a workaround. > > Tom > > > Op 10/24/12 5:04 AM, Wandenberg Peixoto schreef: > > Try to set > > default_type text/javascript; > > instead of add_header. > > Regards, > Wandenberg > > On Tue, Oct 23, 2012 at 9:50 PM, Tom van der Woerdt wrote: > >> Hi all, >> >> I'm using nginx' locations to serve a javascript file on '/client' : >> >> location = /client { >> expires epoch; >> alias /path/to/a/file.js; >> } >> >> Works fine, with one major exception: it gets an octet-stream >> Content-Type header. I tried to solve this with : >> >> add_header Content-Type text/javascript; >> >> Now I get two Content-Type headers. 
>> >> The raw response: >> >> < HTTP/1.1 200 OK >> < Server: nginx/1.3.6 >> < Date: Tue, 23 Oct 2012 23:42:27 GMT >> < Content-Type: application/octet-stream >> < Content-Length: 23245 >> < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT >> < Connection: keep-alive >> < ETag: "5081eba8-5acd" >> < Expires: Thu, 01 Jan 1970 00:00:01 GMT >> < Cache-Control: no-cache >> < Content-Type: text/javascript >> < Accept-Ranges: bytes >> >> nginx information : >> >> nginx version: nginx/1.3.6 >> built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) >> TLS SNI support enabled >> configure arguments: --user=nginx --group=nginx --with-http_ssl_module >> --with-http_gzip_static_module --with-http_secure_link_module >> --with-http_realip_module --with-http_stub_status_module --with-ipv6 >> --with-openssl=/root/libraries/openssl-1.0.1c/ --prefix=/etc/nginx/ >> --sbin-path=/usr/sbin/nginx --conf-path=/etc/nginx/nginx.conf >> --error-log-path=/var/log/nginx/error.log >> --http-log-path=/var/log/nginx/access.log --pid-path=/var/run/nginx.pid >> --lock-path=/var/run/nginx.lock >> --http-client-body-temp-path=/var/cache/nginx/client_temp >> --http-proxy-temp-path=/var/cache/nginx/proxy_temp >> --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp >> --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp >> --http-scgi-temp-path=/var/cache/nginx/scgi_temp >> >> Server information : >> >> Linux hostname.goes.here 2.6.32-279.5.2.el6.centos.plus.i686 #1 SMP Thu >> Aug 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux >> >> It's not a major issue for me (browsers will accept octet-stream just >> fine) but might be annoying for other people, should they ever run into >> this. >> >> Tom >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > > > _______________________________________________ > nginx-devel mailing listnginx-devel at nginx.orghttp://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From info at tvdw.eu Wed Oct 24 10:05:10 2012 From: info at tvdw.eu (Tom van der Woerdt) Date: Wed, 24 Oct 2012 12:05:10 +0200 Subject: Invalid content types served when using alias In-Reply-To: References: <50872D3A.4060306@tvdw.eu> <5087ADD5.4050709@tvdw.eu> Message-ID: <5087BD56.8060707@tvdw.eu> Yes, this is in my nginx.conf file : http { include mime.types; default_type application/octet-stream; {more} include /etc/nginx/conf.d/*.conf; } Tom Op 10/24/12 12:00 PM, Wandenberg Peixoto schreef: > Are you using a mime.types file? Or setting types block? > This file has a mapping from file extension to the content type, may > be what is missing on your configuration. > > On Wed, Oct 24, 2012 at 6:59 AM, Tom van der Woerdt > wrote: > > Thanks, that works, but it doesn't really sound like a solution, > more like a workaround. > > Tom > > > Op 10/24/12 5:04 AM, Wandenberg Peixoto schreef: >> Try to set >> >> default_type text/javascript; >> >> instead of add_header. 
>> >> Regards, >> Wandenberg >> >> On Tue, Oct 23, 2012 at 9:50 PM, Tom van der Woerdt > > wrote: >> >> Hi all, >> >> I'm using nginx' locations to serve a javascript file on >> '/client' : >> >> location = /client { >> expires epoch; >> alias /path/to/a/file.js; >> } >> >> Works fine, with one major exception: it gets an octet-stream >> Content-Type header. I tried to solve this with : >> >> add_header Content-Type text/javascript; >> >> Now I get two Content-Type headers. >> >> The raw response: >> >> < HTTP/1.1 200 OK >> < Server: nginx/1.3.6 >> < Date: Tue, 23 Oct 2012 23:42:27 GMT >> < Content-Type: application/octet-stream >> < Content-Length: 23245 >> < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT >> < Connection: keep-alive >> < ETag: "5081eba8-5acd" >> < Expires: Thu, 01 Jan 1970 00:00:01 GMT >> < Cache-Control: no-cache >> < Content-Type: text/javascript >> < Accept-Ranges: bytes >> >> nginx information : >> >> nginx version: nginx/1.3.6 >> built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) >> TLS SNI support enabled >> configure arguments: --user=nginx --group=nginx >> --with-http_ssl_module --with-http_gzip_static_module >> --with-http_secure_link_module --with-http_realip_module >> --with-http_stub_status_module --with-ipv6 >> --with-openssl=/root/libraries/openssl-1.0.1c/ >> --prefix=/etc/nginx/ --sbin-path=/usr/sbin/nginx >> --conf-path=/etc/nginx/nginx.conf >> --error-log-path=/var/log/nginx/error.log >> --http-log-path=/var/log/nginx/access.log >> --pid-path=/var/run/nginx.pid --lock-path=/var/run/nginx.lock >> --http-client-body-temp-path=/var/cache/nginx/client_temp >> --http-proxy-temp-path=/var/cache/nginx/proxy_temp >> --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp >> --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp >> --http-scgi-temp-path=/var/cache/nginx/scgi_temp >> >> Server information : >> >> Linux hostname.goes.here 2.6.32-279.5.2.el6.centos.plus.i686 >> #1 SMP Thu Aug 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux >> >> It's not a major issue for me (browsers will accept >> octet-stream just fine) but might be annoying for other >> people, should they ever run into this. >> >> Tom >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 3729 bytes Desc: S/MIME-cryptografische ondertekening URL: From wandenberg at gmail.com Wed Oct 24 11:44:19 2012 From: wandenberg at gmail.com (Wandenberg Peixoto) Date: Wed, 24 Oct 2012 09:44:19 -0200 Subject: Invalid content types served when using alias In-Reply-To: <5087BD56.8060707@tvdw.eu> References: <50872D3A.4060306@tvdw.eu> <5087ADD5.4050709@tvdw.eu> <5087BD56.8060707@tvdw.eu> Message-ID: Just to be clear, are you calling "example.com/client" and serving a js file? 
If yes, this is the problem, the mime types works based on requested filename extension, if I am not wrong. As you called /client the nginx don't know what mime type to deliver. So, or you have too use the default_type on that location, or change the request to something like /client.js, or do a rewrite from /client to the js insted of use a location to that. On Wed, Oct 24, 2012 at 8:05 AM, Tom van der Woerdt wrote: > Yes, this is in my nginx.conf file : > > http { > include mime.types; > default_type application/octet-stream; > > {more} > > include /etc/nginx/conf.d/*.conf; > } > > Tom > > > Op 10/24/12 12:00 PM, Wandenberg Peixoto schreef: > > Are you using a mime.types file? Or setting types block? > This file has a mapping from file extension to the content type, may be > what is missing on your configuration. > > On Wed, Oct 24, 2012 at 6:59 AM, Tom van der Woerdt wrote: > >> Thanks, that works, but it doesn't really sound like a solution, more >> like a workaround. >> >> Tom >> >> >> Op 10/24/12 5:04 AM, Wandenberg Peixoto schreef: >> >> Try to set >> >> default_type text/javascript; >> >> instead of add_header. >> >> Regards, >> Wandenberg >> >> On Tue, Oct 23, 2012 at 9:50 PM, Tom van der Woerdt wrote: >> >>> Hi all, >>> >>> I'm using nginx' locations to serve a javascript file on '/client' : >>> >>> location = /client { >>> expires epoch; >>> alias /path/to/a/file.js; >>> } >>> >>> Works fine, with one major exception: it gets an octet-stream >>> Content-Type header. I tried to solve this with : >>> >>> add_header Content-Type text/javascript; >>> >>> Now I get two Content-Type headers. >>> >>> The raw response: >>> >>> < HTTP/1.1 200 OK >>> < Server: nginx/1.3.6 >>> < Date: Tue, 23 Oct 2012 23:42:27 GMT >>> < Content-Type: application/octet-stream >>> < Content-Length: 23245 >>> < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT >>> < Connection: keep-alive >>> < ETag: "5081eba8-5acd" >>> < Expires: Thu, 01 Jan 1970 00:00:01 GMT >>> < Cache-Control: no-cache >>> < Content-Type: text/javascript >>> < Accept-Ranges: bytes >>> >>> nginx information : >>> >>> nginx version: nginx/1.3.6 >>> built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) >>> TLS SNI support enabled >>> configure arguments: --user=nginx --group=nginx --with-http_ssl_module >>> --with-http_gzip_static_module --with-http_secure_link_module >>> --with-http_realip_module --with-http_stub_status_module --with-ipv6 >>> --with-openssl=/root/libraries/openssl-1.0.1c/ --prefix=/etc/nginx/ >>> --sbin-path=/usr/sbin/nginx --conf-path=/etc/nginx/nginx.conf >>> --error-log-path=/var/log/nginx/error.log >>> --http-log-path=/var/log/nginx/access.log --pid-path=/var/run/nginx.pid >>> --lock-path=/var/run/nginx.lock >>> --http-client-body-temp-path=/var/cache/nginx/client_temp >>> --http-proxy-temp-path=/var/cache/nginx/proxy_temp >>> --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp >>> --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp >>> --http-scgi-temp-path=/var/cache/nginx/scgi_temp >>> >>> Server information : >>> >>> Linux hostname.goes.here 2.6.32-279.5.2.el6.centos.plus.i686 #1 SMP Thu >>> Aug 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux >>> >>> It's not a major issue for me (browsers will accept octet-stream just >>> fine) but might be annoying for other people, should they ever run into >>> this. 
>>> >>> Tom >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >> >> >> >> _______________________________________________ >> nginx-devel mailing listnginx-devel at nginx.orghttp://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > > > _______________________________________________ > nginx-devel mailing listnginx-devel at nginx.orghttp://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From info at tvdw.eu Wed Oct 24 11:52:13 2012 From: info at tvdw.eu (Tom van der Woerdt) Date: Wed, 24 Oct 2012 13:52:13 +0200 Subject: Invalid content types served when using alias In-Reply-To: References: <50872D3A.4060306@tvdw.eu> <5087ADD5.4050709@tvdw.eu> <5087BD56.8060707@tvdw.eu> Message-ID: <5087D66D.1090504@tvdw.eu> Yes, that's the URL I am using. Using default_type is possible but it still feels like a bug, especially since the actual file it's serving does end with .js. I know it's possible with a rewrite but that also feels like a hack instead of a real solution, especially since the file I'm serving is outside the website's root folder. Tom On 10/24/12 1:44 PM, Wandenberg Peixoto wrote: > Just to be clear, are you calling "example.com/client > " and serving a js file? > If yes, this is the problem, the mime types works based on requested > filename extension, if I am not wrong. > As you called /client the nginx don't know what mime type to deliver. > So, or you have too use the default_type on that location, or change > the request to something like /client.js, > or do a rewrite from /client to the js insted of use a location to that. > > On Wed, Oct 24, 2012 at 8:05 AM, Tom van der Woerdt > wrote: > > Yes, this is in my nginx.conf file : > > http { > include mime.types; > default_type application/octet-stream; > > {more} > > include /etc/nginx/conf.d/*.conf; > } > > Tom > > > Op 10/24/12 12:00 PM, Wandenberg Peixoto schreef: >> Are you using a mime.types file? Or setting types block? >> This file has a mapping from file extension to the content type, >> may be what is missing on your configuration. >> >> On Wed, Oct 24, 2012 at 6:59 AM, Tom van der Woerdt > > wrote: >> >> Thanks, that works, but it doesn't really sound like a >> solution, more like a workaround. >> >> Tom >> >> >> Op 10/24/12 5:04 AM, Wandenberg Peixoto schreef: >>> Try to set >>> >>> default_type text/javascript; >>> >>> instead of add_header. >>> >>> Regards, >>> Wandenberg >>> >>> On Tue, Oct 23, 2012 at 9:50 PM, Tom van der Woerdt >>> > wrote: >>> >>> Hi all, >>> >>> I'm using nginx' locations to serve a javascript file on >>> '/client' : >>> >>> location = /client { >>> expires epoch; >>> alias /path/to/a/file.js; >>> } >>> >>> Works fine, with one major exception: it gets an >>> octet-stream Content-Type header. I tried to solve this >>> with : >>> >>> add_header Content-Type text/javascript; >>> >>> Now I get two Content-Type headers. 
>>> >>> The raw response: >>> >>> < HTTP/1.1 200 OK >>> < Server: nginx/1.3.6 >>> < Date: Tue, 23 Oct 2012 23:42:27 GMT >>> < Content-Type: application/octet-stream >>> < Content-Length: 23245 >>> < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT >>> < Connection: keep-alive >>> < ETag: "5081eba8-5acd" >>> < Expires: Thu, 01 Jan 1970 00:00:01 GMT >>> < Cache-Control: no-cache >>> < Content-Type: text/javascript >>> < Accept-Ranges: bytes >>> >>> nginx information : >>> >>> nginx version: nginx/1.3.6 >>> built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) >>> TLS SNI support enabled >>> configure arguments: --user=nginx --group=nginx >>> --with-http_ssl_module --with-http_gzip_static_module >>> --with-http_secure_link_module --with-http_realip_module >>> --with-http_stub_status_module --with-ipv6 >>> --with-openssl=/root/libraries/openssl-1.0.1c/ >>> --prefix=/etc/nginx/ --sbin-path=/usr/sbin/nginx >>> --conf-path=/etc/nginx/nginx.conf >>> --error-log-path=/var/log/nginx/error.log >>> --http-log-path=/var/log/nginx/access.log >>> --pid-path=/var/run/nginx.pid >>> --lock-path=/var/run/nginx.lock >>> --http-client-body-temp-path=/var/cache/nginx/client_temp --http-proxy-temp-path=/var/cache/nginx/proxy_temp >>> --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp >>> --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp >>> --http-scgi-temp-path=/var/cache/nginx/scgi_temp >>> >>> Server information : >>> >>> Linux hostname.goes.here >>> 2.6.32-279.5.2.el6.centos.plus.i686 #1 SMP Thu Aug 23 >>> 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux >>> >>> It's not a major issue for me (browsers will accept >>> octet-stream just fine) but might be annoying for other >>> people, should they ever run into this. >>> >>> Tom >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> >>> >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... Name: smime.p7s Type: application/pkcs7-signature Size: 3729 bytes Desc: S/MIME Cryptographic Signature URL: From wandenberg at gmail.com Wed Oct 24 11:58:48 2012 From: wandenberg at gmail.com (Wandenberg Peixoto) Date: Wed, 24 Oct 2012 09:58:48 -0200 Subject: Invalid content types served when using alias In-Reply-To: <5087D66D.1090504@tvdw.eu> References: <50872D3A.4060306@tvdw.eu> <5087ADD5.4050709@tvdw.eu> <5087BD56.8060707@tvdw.eu> <5087D66D.1090504@tvdw.eu> Message-ID: But nginx set content type based on requested url, not on delivered file, isn't a bug. Could you change the url to /client.js ? 
Tthis will solve everything, you will not have to set the default_type. On Wed, Oct 24, 2012 at 9:52 AM, Tom van der Woerdt wrote: > Yes, that's the URL I am using. Using default_type is possible but it > still feels like a bug, especially since the actual file it's serving does > end with .js. I know it's possible with a rewrite but that also feels like > a hack instead of a real solution, especially since the file I'm serving is > outside the website's root folder. > > Tom > > > > On 10/24/12 1:44 PM, Wandenberg Peixoto wrote: > > Just to be clear, are you calling "example.com/client" and serving a js > file? > If yes, this is the problem, the mime types works based on requested > filename extension, if I am not wrong. > As you called /client the nginx don't know what mime type to deliver. > So, or you have too use the default_type on that location, or change the > request to something like /client.js, > or do a rewrite from /client to the js insted of use a location to that. > > On Wed, Oct 24, 2012 at 8:05 AM, Tom van der Woerdt wrote: > >> Yes, this is in my nginx.conf file : >> >> http { >> include mime.types; >> default_type application/octet-stream; >> >> {more} >> >> include /etc/nginx/conf.d/*.conf; >> } >> >> Tom >> >> >> Op 10/24/12 12:00 PM, Wandenberg Peixoto schreef: >> >> Are you using a mime.types file? Or setting types block? >> This file has a mapping from file extension to the content type, may be >> what is missing on your configuration. >> >> On Wed, Oct 24, 2012 at 6:59 AM, Tom van der Woerdt wrote: >> >>> Thanks, that works, but it doesn't really sound like a solution, more >>> like a workaround. >>> >>> Tom >>> >>> >>> Op 10/24/12 5:04 AM, Wandenberg Peixoto schreef: >>> >>> Try to set >>> >>> default_type text/javascript; >>> >>> instead of add_header. >>> >>> Regards, >>> Wandenberg >>> >>> On Tue, Oct 23, 2012 at 9:50 PM, Tom van der Woerdt wrote: >>> >>>> Hi all, >>>> >>>> I'm using nginx' locations to serve a javascript file on '/client' : >>>> >>>> location = /client { >>>> expires epoch; >>>> alias /path/to/a/file.js; >>>> } >>>> >>>> Works fine, with one major exception: it gets an octet-stream >>>> Content-Type header. I tried to solve this with : >>>> >>>> add_header Content-Type text/javascript; >>>> >>>> Now I get two Content-Type headers. 
>>>> >>>> The raw response: >>>> >>>> < HTTP/1.1 200 OK >>>> < Server: nginx/1.3.6 >>>> < Date: Tue, 23 Oct 2012 23:42:27 GMT >>>> < Content-Type: application/octet-stream >>>> < Content-Length: 23245 >>>> < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT >>>> < Connection: keep-alive >>>> < ETag: "5081eba8-5acd" >>>> < Expires: Thu, 01 Jan 1970 00:00:01 GMT >>>> < Cache-Control: no-cache >>>> < Content-Type: text/javascript >>>> < Accept-Ranges: bytes >>>> >>>> nginx information : >>>> >>>> nginx version: nginx/1.3.6 >>>> built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) >>>> TLS SNI support enabled >>>> configure arguments: --user=nginx --group=nginx --with-http_ssl_module >>>> --with-http_gzip_static_module --with-http_secure_link_module >>>> --with-http_realip_module --with-http_stub_status_module --with-ipv6 >>>> --with-openssl=/root/libraries/openssl-1.0.1c/ --prefix=/etc/nginx/ >>>> --sbin-path=/usr/sbin/nginx --conf-path=/etc/nginx/nginx.conf >>>> --error-log-path=/var/log/nginx/error.log >>>> --http-log-path=/var/log/nginx/access.log --pid-path=/var/run/nginx.pid >>>> --lock-path=/var/run/nginx.lock >>>> --http-client-body-temp-path=/var/cache/nginx/client_temp >>>> --http-proxy-temp-path=/var/cache/nginx/proxy_temp >>>> --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp >>>> --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp >>>> --http-scgi-temp-path=/var/cache/nginx/scgi_temp >>>> >>>> Server information : >>>> >>>> Linux hostname.goes.here 2.6.32-279.5.2.el6.centos.plus.i686 #1 SMP Thu >>>> Aug 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux >>>> >>>> It's not a major issue for me (browsers will accept octet-stream just >>>> fine) but might be annoying for other people, should they ever run into >>>> this. >>>> >>>> Tom >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>> >>> >>> >>> _______________________________________________ >>> nginx-devel mailing listnginx-devel at nginx.orghttp://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >> >> >> >> _______________________________________________ >> nginx-devel mailing listnginx-devel at nginx.orghttp://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > > > _______________________________________________ > nginx-devel mailing listnginx-devel at nginx.orghttp://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From info at tvdw.eu Wed Oct 24 12:04:05 2012 From: info at tvdw.eu (Tom van der Woerdt) Date: Wed, 24 Oct 2012 14:04:05 +0200 Subject: Invalid content types served when using alias In-Reply-To: References: <50872D3A.4060306@tvdw.eu> <5087ADD5.4050709@tvdw.eu> <5087BD56.8060707@tvdw.eu> <5087D66D.1090504@tvdw.eu> Message-ID: <5087D935.6050408@tvdw.eu> No, I cannot change the URLs that are used by my application, although I can confirm that adding .js does fix it. 
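For completeness, the rewrite variant suggested earlier in the thread keeps the public /client URL while giving nginx a .js URI to derive the type from; a rough, untested sketch reusing the paths from the original report:

location = /client {
    rewrite ^ /client.js last;
}

location = /client.js {
    internal;
    expires epoch;
    alias /path/to/a/file.js;
}

The internal directive is optional here and only keeps /client.js from being requested directly.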
Tom Op 10/24/12 1:58 PM, Wandenberg Peixoto schreef: > But nginx set content type based on requested url, not on delivered > file, isn't a bug. > Could you change the url to /client.js ? > Tthis will solve everything, you will not have to set the default_type. > > On Wed, Oct 24, 2012 at 9:52 AM, Tom van der Woerdt > wrote: > > Yes, that's the URL I am using. Using default_type is possible but > it still feels like a bug, especially since the actual file it's > serving does end with .js. I know it's possible with a rewrite but > that also feels like a hack instead of a real solution, especially > since the file I'm serving is outside the website's root folder. > > Tom > > > > On 10/24/12 1:44 PM, Wandenberg Peixoto wrote: >> Just to be clear, are you calling "example.com/client >> " and serving a js file? >> If yes, this is the problem, the mime types works based on >> requested filename extension, if I am not wrong. >> As you called /client the nginx don't know what mime type to deliver. >> So, or you have too use the default_type on that location, or >> change the request to something like /client.js, >> or do a rewrite from /client to the js insted of use a location >> to that. >> >> On Wed, Oct 24, 2012 at 8:05 AM, Tom van der Woerdt > > wrote: >> >> Yes, this is in my nginx.conf file : >> >> http { >> include mime.types; >> default_type application/octet-stream; >> >> {more} >> >> include /etc/nginx/conf.d/*.conf; >> } >> >> Tom >> >> >> Op 10/24/12 12:00 PM, Wandenberg Peixoto schreef: >>> Are you using a mime.types file? Or setting types block? >>> This file has a mapping from file extension to the content >>> type, may be what is missing on your configuration. >>> >>> On Wed, Oct 24, 2012 at 6:59 AM, Tom van der Woerdt >>> > wrote: >>> >>> Thanks, that works, but it doesn't really sound like a >>> solution, more like a workaround. >>> >>> Tom >>> >>> >>> Op 10/24/12 5:04 AM, Wandenberg Peixoto schreef: >>>> Try to set >>>> >>>> default_type text/javascript; >>>> >>>> instead of add_header. >>>> >>>> Regards, >>>> Wandenberg >>>> >>>> On Tue, Oct 23, 2012 at 9:50 PM, Tom van der Woerdt >>>> > wrote: >>>> >>>> Hi all, >>>> >>>> I'm using nginx' locations to serve a javascript >>>> file on '/client' : >>>> >>>> location = /client { >>>> expires epoch; >>>> alias /path/to/a/file.js; >>>> } >>>> >>>> Works fine, with one major exception: it gets an >>>> octet-stream Content-Type header. I tried to solve >>>> this with : >>>> >>>> add_header Content-Type text/javascript; >>>> >>>> Now I get two Content-Type headers. 
>>>> >>>> The raw response: >>>> >>>> < HTTP/1.1 200 OK >>>> < Server: nginx/1.3.6 >>>> < Date: Tue, 23 Oct 2012 23:42:27 GMT >>>> < Content-Type: application/octet-stream >>>> < Content-Length: 23245 >>>> < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT >>>> < Connection: keep-alive >>>> < ETag: "5081eba8-5acd" >>>> < Expires: Thu, 01 Jan 1970 00:00:01 GMT >>>> < Cache-Control: no-cache >>>> < Content-Type: text/javascript >>>> < Accept-Ranges: bytes >>>> >>>> nginx information : >>>> >>>> nginx version: nginx/1.3.6 >>>> built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) >>>> TLS SNI support enabled >>>> configure arguments: --user=nginx --group=nginx >>>> --with-http_ssl_module >>>> --with-http_gzip_static_module >>>> --with-http_secure_link_module >>>> --with-http_realip_module >>>> --with-http_stub_status_module --with-ipv6 >>>> --with-openssl=/root/libraries/openssl-1.0.1c/ >>>> --prefix=/etc/nginx/ --sbin-path=/usr/sbin/nginx >>>> --conf-path=/etc/nginx/nginx.conf >>>> --error-log-path=/var/log/nginx/error.log >>>> --http-log-path=/var/log/nginx/access.log >>>> --pid-path=/var/run/nginx.pid >>>> --lock-path=/var/run/nginx.lock >>>> --http-client-body-temp-path=/var/cache/nginx/client_temp >>>> --http-proxy-temp-path=/var/cache/nginx/proxy_temp >>>> --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp >>>> --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp >>>> --http-scgi-temp-path=/var/cache/nginx/scgi_temp >>>> >>>> Server information : >>>> >>>> Linux hostname.goes.here >>>> 2.6.32-279.5.2.el6.centos.plus.i686 #1 SMP Thu Aug >>>> 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux >>>> >>>> It's not a major issue for me (browsers will accept >>>> octet-stream just fine) but might be annoying for >>>> other people, should they ever run into this. >>>> >>>> Tom >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>>> >>>> >>>> >>>> >>>> _______________________________________________ >>>> nginx-devel mailing list >>>> nginx-devel at nginx.org >>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >>> >>> >>> >>> >>> _______________________________________________ >>> nginx-devel mailing list >>> nginx-devel at nginx.org >>> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> >> >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > > > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel -------------- next part -------------- An HTML attachment was scrubbed... URL: -------------- next part -------------- A non-text attachment was scrubbed... 
Name: smime.p7s Type: application/pkcs7-signature Size: 3729 bytes Desc: S/MIME-cryptografische ondertekening URL: From vshebordaev at mail.ru Wed Oct 24 13:59:33 2012 From: vshebordaev at mail.ru (Vladimir Shebordaev) Date: Wed, 24 Oct 2012 17:59:33 +0400 Subject: Invalid content types served when using alias In-Reply-To: <50872D3A.4060306@tvdw.eu> References: <50872D3A.4060306@tvdw.eu> Message-ID: <5087F445.6020102@mail.ru> Hi! On 24.10.2012 03:50, Tom van der Woerdt wrote: > Hi all, > > I'm using nginx' locations to serve a javascript file on '/client' : > > location = /client { > expires epoch; > alias /path/to/a/file.js; > } > > Works fine, with one major exception: it gets an octet-stream > Content-Type header. This is due to a bug in static module, it doesn't properly set r->exten in this case, so the redirect gets default mime-type. Please try this patch Index: src/http/modules/ngx_http_static_module.c =================================================================== --- src/http/modules/ngx_http_static_module.c (revision 4892) +++ src/http/modules/ngx_http_static_module.c (working copy) @@ -224,6 +224,9 @@ return NGX_HTTP_INTERNAL_SERVER_ERROR; } + r->uri = path; + ngx_http_set_exten(r); + if (ngx_http_set_content_type(r) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } It does resolve the issue for me. > I tried to solve this with : > add_header Content-Type text/javascript; > > Now I get two Content-Type headers. > Sure, the ability to add a duplicate header with add_header directive is to be subjected to separate design considerations. Hope it helps. Regards, Vladimir > The raw response: > > < HTTP/1.1 200 OK > < Server: nginx/1.3.6 > < Date: Tue, 23 Oct 2012 23:42:27 GMT > < Content-Type: application/octet-stream > < Content-Length: 23245 > < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT > < Connection: keep-alive > < ETag: "5081eba8-5acd" > < Expires: Thu, 01 Jan 1970 00:00:01 GMT > < Cache-Control: no-cache > < Content-Type: text/javascript > < Accept-Ranges: bytes > > nginx information : > > nginx version: nginx/1.3.6 > built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) > TLS SNI support enabled > configure arguments: --user=nginx --group=nginx > --with-http_ssl_module --with-http_gzip_static_module > --with-http_secure_link_module --with-http_realip_module > --with-http_stub_status_module --with-ipv6 > --with-openssl=/root/libraries/openssl-1.0.1c/ > --prefix=/etc/nginx/ --sbin-path=/usr/sbin/nginx > --conf-path=/etc/nginx/nginx.conf > --error-log-path=/var/log/nginx/error.log > --http-log-path=/var/log/nginx/access.log > --pid-path=/var/run/nginx.pid --lock-path=/var/run/nginx.lock > --http-client-body-temp-path=/var/cache/nginx/client_temp > --http-proxy-temp-path=/var/cache/nginx/proxy_temp > --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp > --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp > --http-scgi-temp-path=/var/cache/nginx/scgi_temp > > Server information : > > Linux hostname.goes.here 2.6.32-279.5.2.el6.centos.plus.i686 #1 > SMP Thu Aug 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux > > It's not a major issue for me (browsers will accept octet-stream > just fine) but might be annoying for other people, should they > ever run into this. 
> > Tom > From igor at sysoev.ru Wed Oct 24 14:06:29 2012 From: igor at sysoev.ru (Igor Sysoev) Date: Wed, 24 Oct 2012 18:06:29 +0400 Subject: Invalid content types served when using alias In-Reply-To: <5087D66D.1090504@tvdw.eu> References: <50872D3A.4060306@tvdw.eu> <5087ADD5.4050709@tvdw.eu> <5087BD56.8060707@tvdw.eu> <5087D66D.1090504@tvdw.eu> Message-ID: <31F87381-4D38-412F-94C0-EF462326D4B1@sysoev.ru> On Oct 24, 2012, at 15:52 , Tom van der Woerdt wrote: > Yes, that's the URL I am using. Using default_type is possible but it still feels like a bug, especially since the actual file it's serving does end with .js. I know it's possible with a rewrite but that also feels like a hack instead of a real solution, especially since the file I'm serving is outside the website's root folder. This is not a bug but intentional behavior. default_type should be used here. -- Igor Sysoev http://nginx.com/support.html -------------- next part -------------- An HTML attachment was scrubbed... URL: From mdounin at mdounin.ru Wed Oct 24 14:07:09 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Wed, 24 Oct 2012 14:07:09 +0000 Subject: [nginx] svn commit: r4893 - trunk/src/core Message-ID: <20121024140709.3355D3F9C0F@mail.nginx.com> Author: mdounin Date: 2012-10-24 14:07:08 +0000 (Wed, 24 Oct 2012) New Revision: 4893 URL: http://trac.nginx.org/nginx/changeset/4893/nginx Log: Resolver: added missing memory allocation error handling. Modified: trunk/src/core/ngx_resolver.c Modified: trunk/src/core/ngx_resolver.c =================================================================== --- trunk/src/core/ngx_resolver.c 2012-10-23 14:36:18 UTC (rev 4892) +++ trunk/src/core/ngx_resolver.c 2012-10-24 14:07:08 UTC (rev 4893) @@ -2144,6 +2144,10 @@ dst = ngx_resolver_alloc(r, n * sizeof(in_addr_t)); + if (dst == NULL) { + return dst; + } + j = ngx_random() % n; if (j == 0) { From info at tvdw.eu Wed Oct 24 14:22:16 2012 From: info at tvdw.eu (Tom van der Woerdt) Date: Wed, 24 Oct 2012 16:22:16 +0200 Subject: Invalid content types served when using alias In-Reply-To: <5087F445.6020102@mail.ru> References: <50872D3A.4060306@tvdw.eu> <5087F445.6020102@mail.ru> Message-ID: <5087F998.7060304@tvdw.eu> Yes, this patch seems to work fine for me as well. Thanks! Tom On 10/24/12 3:59 PM, Vladimir Shebordaev wrote: > Hi! > > On 24.10.2012 03:50, Tom van der Woerdt wrote: >> Hi all, >> >> I'm using nginx' locations to serve a javascript file on '/client' : >> >> location = /client { >> expires epoch; >> alias /path/to/a/file.js; >> } >> >> Works fine, with one major exception: it gets an octet-stream >> Content-Type header. > > This is due to a bug in static module, it doesn't properly set > r->exten in this case, so the redirect gets default mime-type. > > Please try this patch > > Index: src/http/modules/ngx_http_static_module.c > =================================================================== > --- src/http/modules/ngx_http_static_module.c (revision 4892) > +++ src/http/modules/ngx_http_static_module.c (working copy) > @@ -224,6 +224,9 @@ > return NGX_HTTP_INTERNAL_SERVER_ERROR; > } > > + r->uri = path; > + ngx_http_set_exten(r); > + > if (ngx_http_set_content_type(r) != NGX_OK) { > return NGX_HTTP_INTERNAL_SERVER_ERROR; > } > > > It does resolve the issue for me. > > >> I tried to solve this with : >> add_header Content-Type text/javascript; >> >> Now I get two Content-Type headers. 
>> > > Sure, the ability to add a duplicate header with add_header directive > is to be subjected to separate design considerations. > > Hope it helps. > > Regards, > Vladimir > > >> The raw response: >> >> < HTTP/1.1 200 OK >> < Server: nginx/1.3.6 >> < Date: Tue, 23 Oct 2012 23:42:27 GMT >> < Content-Type: application/octet-stream >> < Content-Length: 23245 >> < Last-Modified: Sat, 20 Oct 2012 00:09:12 GMT >> < Connection: keep-alive >> < ETag: "5081eba8-5acd" >> < Expires: Thu, 01 Jan 1970 00:00:01 GMT >> < Cache-Control: no-cache >> < Content-Type: text/javascript >> < Accept-Ranges: bytes >> >> nginx information : >> >> nginx version: nginx/1.3.6 >> built by gcc 4.4.6 20120305 (Red Hat 4.4.6-4) (GCC) >> TLS SNI support enabled >> configure arguments: --user=nginx --group=nginx >> --with-http_ssl_module --with-http_gzip_static_module >> --with-http_secure_link_module --with-http_realip_module >> --with-http_stub_status_module --with-ipv6 >> --with-openssl=/root/libraries/openssl-1.0.1c/ >> --prefix=/etc/nginx/ --sbin-path=/usr/sbin/nginx >> --conf-path=/etc/nginx/nginx.conf >> --error-log-path=/var/log/nginx/error.log >> --http-log-path=/var/log/nginx/access.log >> --pid-path=/var/run/nginx.pid --lock-path=/var/run/nginx.lock >> --http-client-body-temp-path=/var/cache/nginx/client_temp >> --http-proxy-temp-path=/var/cache/nginx/proxy_temp >> --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp >> --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp >> --http-scgi-temp-path=/var/cache/nginx/scgi_temp >> >> Server information : >> >> Linux hostname.goes.here 2.6.32-279.5.2.el6.centos.plus.i686 #1 >> SMP Thu Aug 23 22:13:33 UTC 2012 i686 i686 i386 GNU/Linux >> >> It's not a major issue for me (browsers will accept octet-stream >> just fine) but might be annoying for other people, should they >> ever run into this. >> >> Tom >> > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From mdounin at mdounin.ru Wed Oct 24 14:32:47 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 24 Oct 2012 18:32:47 +0400 Subject: Invalid content types served when using alias In-Reply-To: <5087F445.6020102@mail.ru> References: <50872D3A.4060306@tvdw.eu> <5087F445.6020102@mail.ru> Message-ID: <20121024143247.GC40452@mdounin.ru> Hello! On Wed, Oct 24, 2012 at 05:59:33PM +0400, Vladimir Shebordaev wrote: > Hi! > > On 24.10.2012 03:50, Tom van der Woerdt wrote: > >Hi all, > > > >I'm using nginx' locations to serve a javascript file on '/client' : > > > > location = /client { > > expires epoch; > > alias /path/to/a/file.js; > > } > > > >Works fine, with one major exception: it gets an octet-stream > >Content-Type header. > > This is due to a bug in static module, it doesn't properly set > r->exten in this case, so the redirect gets default mime-type. As already explained by Igor, this is not a bug, but intentional behaviour. Extension as seen in the URI is used to determine mime type, not one on the file system (which might not be present at all). 
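For the configuration in question the intended fix is the default_type Igor pointed to, not a patch; a minimal sketch, reusing the paths from the original report:

    location = /client {
        expires epoch;
        default_type text/javascript;
        alias /path/to/a/file.js;
    }

default_type only applies when no MIME type can be mapped from the extension in the URI, so an extensionless location such as /client gets text/javascript while everything else keeps using the types map.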
> > Please try this patch > > Index: src/http/modules/ngx_http_static_module.c > =================================================================== > --- src/http/modules/ngx_http_static_module.c (revision 4892) > +++ src/http/modules/ngx_http_static_module.c (working copy) > @@ -224,6 +224,9 @@ > return NGX_HTTP_INTERNAL_SERVER_ERROR; > } > > + r->uri = path; > + ngx_http_set_exten(r); > + > if (ngx_http_set_content_type(r) != NGX_OK) { > return NGX_HTTP_INTERNAL_SERVER_ERROR; > } The patch is completely incorrect. While it might work for you in this particular case, it a) Breaks the behaviour which is expected to work (in contrast to the above), e.g. like this: location = /foo.js { alias /path/to/a/file; } b) Breaks unrelated things like the $uri variable. -- Maxim Dounin http://nginx.com/support.html From mdounin at mdounin.ru Wed Oct 24 17:17:50 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Wed, 24 Oct 2012 21:17:50 +0400 Subject: proxying from pipes In-Reply-To: References: Message-ID: <20121024171750.GJ40452@mdounin.ru> Hello! On Tue, Oct 23, 2012 at 11:14:52AM -0400, Jeff Kaufman wrote: > My module wants to sit in the filter chain passing buffers to an > asynchronous optimization thread and then send them out to the user > when they finish. When a request comes in I have my module roughly > doing: > > body filter: > - if first set of buffers > - create pipe, pass pipe_write_fd to optimization thread > - pass all input data to optimization thread > - don't call ngx_http_next_body_filter > > Is there a way I can ask nginx to watch this pipe and treat any data > appearing on the pipe as if it is output from my body filter? Passing > it through ngx_http_next_body_filter, etc? And finalizing the request > if there's a problem with the pipe? You may try to reuse ngx_event_pipe() code for this, much like it's used in the upstream module. It's not designed to be used in a filter chain though, and anyway I suspect it would be an overkill unless you need some request buffering as well. In any case you'll need to handle events and filter chain invocations yourself anyway. > What I'm doing now is: > - when creating the pipe > c = ngx_get_connection(pipe_read_fd, r->connection->log); > c.read->handler = my_read_handler; > ngx_add_event(c->read, NGX_READ_EVENT, 0); > - when my_read_handler is called > read() from pipe_read_fd > create buffer, chain link > call ngx_http_next_body_filter > > Is this the right way to go about this? It seems very low level and I > would expect proxying from a pipe would be something nginx already > supported. This looks reasonable, though may need more details. In particular you may need to look into ngx_event_connect() to see how to properly set up event notification for a connection, as e.g. ngx_add_conn() is needed for rtsig. > Another option would be to simply use the pipe for notification and > load the data from the optimization thread through shared memory. Depending on use case this aproach may be more efficient. -- Maxim Dounin http://nginx.com/support.html From vshebordaev at mail.ru Wed Oct 24 19:38:20 2012 From: vshebordaev at mail.ru (Vladimir Shebordaev) Date: Wed, 24 Oct 2012 23:38:20 +0400 Subject: Invalid content types served when using alias In-Reply-To: <20121024143247.GC40452@mdounin.ru> References: <50872D3A.4060306@tvdw.eu> <5087F445.6020102@mail.ru> <20121024143247.GC40452@mdounin.ru> Message-ID: <508843AC.3080004@mail.ru> Hi! On 24.10.2012 18:32, Maxim Dounin wrote: > Hello! 
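Coming back to the pipe-proxying question above: the ngx_get_connection()/read-handler arrangement described there could be sketched roughly as follows. The function names, the fixed read buffer and the error handling are illustrative assumptions, not working module code.

    #include <ngx_config.h>
    #include <ngx_core.h>
    #include <ngx_http.h>

    /* saved in the usual way when the body filter is installed */
    static ngx_http_output_body_filter_pt  ngx_http_next_body_filter;

    static void my_pipe_read_handler(ngx_event_t *rev);

    /* wrap the pipe's read end into an nginx connection and watch it */
    static ngx_int_t
    my_pipe_watch(ngx_http_request_t *r, ngx_socket_t pipe_read_fd)
    {
        ngx_connection_t  *c;

        c = ngx_get_connection(pipe_read_fd, r->connection->log);
        if (c == NULL) {
            return NGX_ERROR;
        }

        c->data = r;
        c->read->handler = my_pipe_read_handler;
        c->read->log = r->connection->log;

        if (ngx_add_event(c->read, NGX_READ_EVENT, 0) != NGX_OK) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }

        return NGX_OK;
    }

    static void
    my_pipe_read_handler(ngx_event_t *rev)
    {
        u_char               buf[4096];
        ssize_t              n;
        ngx_buf_t           *b;
        ngx_chain_t          out;
        ngx_connection_t    *c;
        ngx_http_request_t  *r;

        c = rev->data;
        r = c->data;

        n = read(c->fd, buf, sizeof(buf));

        if (n <= 0) {
            /* EOF or error on the pipe: finish the request */
            ngx_http_finalize_request(r, (n == 0) ? NGX_OK : NGX_ERROR);
            return;
        }

        /* copy the data into a request-pool buffer and pass it downstream */
        b = ngx_create_temp_buf(r->pool, n);
        if (b == NULL) {
            ngx_http_finalize_request(r, NGX_ERROR);
            return;
        }

        b->last = ngx_cpymem(b->pos, buf, n);

        out.buf = b;
        out.next = NULL;

        ngx_http_next_body_filter(r, &out);
    }

Whether the payload travels through the pipe as here, or the pipe only carries a notification and the data is fetched from shared memory, the event-handling skeleton stays the same.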
> > On Wed, Oct 24, 2012 at 05:59:33PM +0400, Vladimir Shebordaev wrote: > >> Hi! >> >> On 24.10.2012 03:50, Tom van der Woerdt wrote: >>> Hi all, >>> >>> I'm using nginx' locations to serve a javascript file on '/client' : >>> >>> location = /client { >>> expires epoch; >>> alias /path/to/a/file.js; >>> } >>> >>> Works fine, with one major exception: it gets an octet-stream >>> Content-Type header. >> >> This is due to a bug in static module, it doesn't properly set >> r->exten in this case, so the redirect gets default mime-type. > > As already explained by Igor, this is not a bug, but intentional > behaviour. I daresay that is also why your message does not make much sense :) > > Extension as seen in the URI is used to determine mime type, not > one on the file system (which might not be present at all). > It is the case when the MIME type can not be guessed from URI but can be inferred from the file redirect extension. >> >> Please try this patch >> >> Index: src/http/modules/ngx_http_static_module.c >> =================================================================== >> --- src/http/modules/ngx_http_static_module.c (revision 4892) >> +++ src/http/modules/ngx_http_static_module.c (working copy) >> @@ -224,6 +224,9 @@ >> return NGX_HTTP_INTERNAL_SERVER_ERROR; >> } >> >> + r->uri = path; >> + ngx_http_set_exten(r); >> + >> if (ngx_http_set_content_type(r) != NGX_OK) { >> return NGX_HTTP_INTERNAL_SERVER_ERROR; >> } > > The patch is completely incorrect. While it might work for you in > this particular case, it > > a) Breaks the behaviour which is expected to work (in contrast to > the above), e.g. like this: > > location = /foo.js { > alias /path/to/a/file; > } Sure, it should be done in more elaborate way, like by, e.g try_files handler. At least, it should be only done if r->exten has not been guessed from the original URI. I would also do that if I expect the patch to be applied as-is. > > b) Breaks unrelated things like the $uri variable. > > Well, as long as the Content-Length is already set to the length of the file I didn't expect variables to be evaluated, but, sure, there is log module and probably others. Thank you, but I guess no fixes are to be applied anyways. Regards, Vladimir From toli at webforge.bg Thu Oct 25 08:47:31 2012 From: toli at webforge.bg (Anatoli Marinov) Date: Thu, 25 Oct 2012 11:47:31 +0300 Subject: proxy_cache and chunked stream Message-ID: <5088FCA3.4070405@webforge.bg> Hello, Last week I had some issues with wrongly cached files without Content-Length. Now I'm playing with Transfer-Encoding: chunked. Nginx configured with proxy_cache don't cache objects that have Transfer-Encoding: chunked. Is it normal behaviour or there is a misconfiguration in my test bed? Thanks From toli at webforge.bg Thu Oct 25 09:31:45 2012 From: toli at webforge.bg (Anatoli Marinov) Date: Thu, 25 Oct 2012 12:31:45 +0300 Subject: proxy_cache and chunked stream In-Reply-To: <5088FCA3.4070405@webforge.bg> References: <5088FCA3.4070405@webforge.bg> Message-ID: <50890701.8050909@webforge.bg> Probably the issue is in my test bed. Sorry. On 10/25/2012 11:47 AM, Anatoli Marinov wrote: > Hello, > Last week I had some issues with wrongly cached files without > Content-Length. Now I'm playing with Transfer-Encoding: chunked. > > Nginx configured with proxy_cache don't cache objects that have > Transfer-Encoding: chunked. > Is it normal behaviour or there is a misconfiguration in my test bed? 
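For completeness, the kind of test bed in question is just a plain proxy_cache setup in front of a backend that answers with Transfer-Encoding: chunked; a minimal sketch (cache zone name, paths and backend address are made up):

    proxy_cache_path /var/cache/nginx/test levels=1:2 keys_zone=testcache:10m;

    server {
        listen 8080;

        location / {
            proxy_pass http://127.0.0.1:8081;
            proxy_http_version 1.1;
            proxy_cache testcache;
            proxy_cache_valid 200 10m;
        }
    }

proxy_http_version 1.1 matters here: without it nginx talks HTTP/1.0 to the upstream, and a well-behaved backend will not use chunked transfer encoding in its response.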
> > > Thanks > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel From info at bnoordhuis.nl Thu Oct 25 13:46:47 2012 From: info at bnoordhuis.nl (Ben Noordhuis) Date: Thu, 25 Oct 2012 15:46:47 +0200 Subject: [PATCH] Core: prefer ioctl(FIOCLEX) over fcntl(FD_CLOEXEC) Message-ID: <1351172807-6621-1-git-send-email-info@bnoordhuis.nl> Use ioctl(FIOCLEX) to set the close-on-exec flag on platforms where it makes sense: Linux, Darwin, the BSDs, *not* Solaris. ioctl(FIOCLEX) is generally slightly faster than fcntl(FD_CLOEXEC), on the order of 3-4%. Ad-hoc benchmark: #ifndef __linux__ #include #endif #include #include #include #include #include int main(int argc, char **argv) { int fd = open("/dev/null", O_RDWR); int n = 1e8; if (argc < 2 || atoi(argv[1]) == 0) while (n--) fcntl(fd, F_SETFD, FD_CLOEXEC); else while (n--) ioctl(fd, FIOCLEX); return 0; } Results, median-of-10 style: $ time tmp/cloexec 0 # FD_CLOEXEC real 0m5.558s user 0m2.388s sys 0m3.152s $ time tmp/cloexec 1 # FIOCLEX real 0m5.373s user 0m1.548s sys 0m3.808s The difference is small but consistent. Caveat emptor: This change slows down the close-on-exec operation by about 4-5% when a 32 bits nginx talks to a 64 bits kernel. The 32/64 bits conversion layer has to translate the 32 bits ioctl() system call to its 64 bits counterpart, which unfortunately is a slow operation. --- auto/unix | 21 +++++++++++++++++++++ src/core/ngx_cycle.c | 10 ++++------ src/os/unix/ngx_process.c | 10 ++++------ src/os/unix/ngx_socket.h | 19 +++++++++++++++++++ 4 files changed, 48 insertions(+), 12 deletions(-) diff --git a/auto/unix b/auto/unix index b0a0e4c..9784a1f 100755 --- a/auto/unix +++ b/auto/unix @@ -720,6 +720,27 @@ ngx_feature_test="int i = FIONBIO; printf(\"%d\", i)" . auto/feature +ngx_feature="ioctl(FIOCLEX)" +ngx_feature_name="NGX_HAVE_FIOCLEX" +ngx_feature_run=no +ngx_feature_incs="#include + #include + $NGX_INCLUDE_SYS_FILIO_H" +ngx_feature_path= +ngx_feature_libs= +ngx_feature_test="#ifdef __sun + /* With some version of SunOS, FIOCLEX simply doesn't work. + * With other versions it's roughly 3x times slower than + * fcntl(FD_CLOEXEC). + */ + fail + #else + int i = FIOCLEX, k = FIONCLEX; printf(\"%d %d\", i, k); + #endif + " +. 
auto/feature + + ngx_feature="struct tm.tm_gmtoff" ngx_feature_name="NGX_HAVE_GMTOFF" ngx_feature_run=no diff --git a/src/core/ngx_cycle.c b/src/core/ngx_cycle.c index f153729..ba6b3c0 100644 --- a/src/core/ngx_cycle.c +++ b/src/core/ngx_cycle.c @@ -383,10 +383,9 @@ ngx_init_cycle(ngx_cycle_t *old_cycle) } #if !(NGX_WIN32) - if (fcntl(file[i].fd, F_SETFD, FD_CLOEXEC) == -1) { + if (ngx_cloexec(file[i].fd) == -1) { ngx_log_error(NGX_LOG_EMERG, log, ngx_errno, - "fcntl(FD_CLOEXEC) \"%s\" failed", - file[i].name.data); + ngx_cloexec_n " \"%s\" failed", file[i].name.data); goto failed; } #endif @@ -1219,10 +1218,9 @@ ngx_reopen_files(ngx_cycle_t *cycle, ngx_uid_t user) } } - if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) { + if (ngx_cloexec(fd) == -1) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno, - "fcntl(FD_CLOEXEC) \"%s\" failed", - file[i].name.data); + ngx_cloexec_n " \"%s\" failed", file[i].name.data); if (ngx_close_file(fd) == NGX_FILE_ERROR) { ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno, diff --git a/src/os/unix/ngx_process.c b/src/os/unix/ngx_process.c index 4ef3582..4969f06 100644 --- a/src/os/unix/ngx_process.c +++ b/src/os/unix/ngx_process.c @@ -157,18 +157,16 @@ ngx_spawn_process(ngx_cycle_t *cycle, ngx_spawn_proc_pt proc, void *data, return NGX_INVALID_PID; } - if (fcntl(ngx_processes[s].channel[0], F_SETFD, FD_CLOEXEC) == -1) { + if (ngx_cloexec(ngx_processes[s].channel[0]) == -1) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", - name); + ngx_cloexec_n " failed while spawning \"%s\"", name); ngx_close_channel(ngx_processes[s].channel, cycle->log); return NGX_INVALID_PID; } - if (fcntl(ngx_processes[s].channel[1], F_SETFD, FD_CLOEXEC) == -1) { + if (ngx_cloexec(ngx_processes[s].channel[1]) == -1) { ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno, - "fcntl(FD_CLOEXEC) failed while spawning \"%s\"", - name); + ngx_cloexec_n " failed while spawning \"%s\"", name); ngx_close_channel(ngx_processes[s].channel, cycle->log); return NGX_INVALID_PID; } diff --git a/src/os/unix/ngx_socket.h b/src/os/unix/ngx_socket.h index fcc5153..4263114 100644 --- a/src/os/unix/ngx_socket.h +++ b/src/os/unix/ngx_socket.h @@ -20,6 +20,25 @@ typedef int ngx_socket_t; #define ngx_socket_n "socket()" +#if (NGX_HAVE_FIOCLEX) + +#define ngx_cloexec(fd) ioctl(fd, FIOCLEX) +#define ngx_nocloexec(fd) ioctl(fd, FIONCLEX) + +#define ngx_cloexec_n "ioctl(FIOCLEX)" +#define ngx_nocloexec_n "ioctl(FIONCLEX)" + +#else + +#define ngx_cloexec(fd) fcntl(fd, F_SETFD, FD_CLOEXEC) +#define ngx_nocloexec(fd) fcntl(fd, F_SETFD, 0) + +#define ngx_cloexec_n "fcntl(FD_CLOEXEC)" +#define ngx_nocloexec_n "fcntl(!FD_CLOEXEC)" + +#endif + + #if (NGX_HAVE_FIONBIO) int ngx_nonblocking(ngx_socket_t s); -- 1.7.9.5 From mdounin at mdounin.ru Thu Oct 25 15:34:46 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Thu, 25 Oct 2012 19:34:46 +0400 Subject: [PATCH] Core: prefer ioctl(FIOCLEX) over fcntl(FD_CLOEXEC) In-Reply-To: <1351172807-6621-1-git-send-email-info@bnoordhuis.nl> References: <1351172807-6621-1-git-send-email-info@bnoordhuis.nl> Message-ID: <20121025153445.GU40452@mdounin.ru> Hello! On Thu, Oct 25, 2012 at 03:46:47PM +0200, Ben Noordhuis wrote: > Use ioctl(FIOCLEX) to set the close-on-exec flag on platforms where it makes > sense: Linux, Darwin, the BSDs, *not* Solaris. > > ioctl(FIOCLEX) is generally slightly faster than fcntl(FD_CLOEXEC), on the > order of 3-4%. 
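The #include lines in the benchmark quoted above were eaten by the list archive; a self-contained reconstruction, where the exact header choice is an assumption rather than the original text, looks like this:

    /* FIOCLEX vs. FD_CLOEXEC micro-benchmark, reconstructed from the patch mail */
    #ifndef __linux__
    #include <sys/filio.h>          /* FIOCLEX on BSD/Darwin */
    #endif
    #include <sys/ioctl.h>
    #include <fcntl.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        int fd = open("/dev/null", O_RDWR);
        int n = 1e8;

        if (argc < 2 || atoi(argv[1]) == 0)
            while (n--) fcntl(fd, F_SETFD, FD_CLOEXEC);   /* baseline */
        else
            while (n--) ioctl(fd, FIOCLEX);               /* candidate */

        return 0;
    }

Run as "tmp/cloexec 0" for the fcntl() variant and "tmp/cloexec 1" for the ioctl() variant, matching the timings quoted above.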
While ioctl(FIOCLEX) may be a bit faster, fcntl(FD_CLOEXEC) is only used on rarely executed code paths. Given the fact that fcntl() aproach is much more portable it doesn't make sense to change anything here. [...] -- Maxim Dounin http://nginx.com/support.html From jossandavis at gmail.com Thu Oct 25 18:38:41 2012 From: jossandavis at gmail.com (Jossan Davis) Date: Fri, 26 Oct 2012 02:38:41 +0800 Subject: is that possible to queue request one by one Message-ID: Hi nginx module limit_req only can limit request frequency by url param key. it delay a request by "ngx_add_timer(r->connection->write, delay)". my requirement is a little bit different. i need the next request be process immediately after the prev one have done. i have tried to use "ngx_http_cleanup_add". but it seem that the ngx_http_request_t is not in shared mem, i can't use "ngx_add_timer(r->connection->write, 0)" to notify the next request. thx -------------- next part -------------- An HTML attachment was scrubbed... URL: From vshebordaev at mail.ru Fri Oct 26 02:19:29 2012 From: vshebordaev at mail.ru (Vladimir Shebordaev) Date: Fri, 26 Oct 2012 06:19:29 +0400 Subject: Invalid content types served when using alias In-Reply-To: <20121024143247.GC40452@mdounin.ru> References: <50872D3A.4060306@tvdw.eu> <5087F445.6020102@mail.ru> <20121024143247.GC40452@mdounin.ru> Message-ID: <5089F331.1080802@mail.ru> Hi! Well, just in case, this inlined version of ngx_http_set_exten() function seems to set r->exten in non-intrusive way when it is not known from URI but can be obtained from the file redirect path Index: src/http/modules/ngx_http_static_module.c =================================================================== --- src/http/modules/ngx_http_static_module.c (revision 4892) +++ src/http/modules/ngx_http_static_module.c (working copy) @@ -224,6 +224,24 @@ return NGX_HTTP_INTERNAL_SERVER_ERROR; } + if (!r->exten.len) { + last = path.data + path.len - 1; + while (last > path.data + 1) { + if (last[-1] == '.' && last[-2] != '/') { + + r->exten.data = last; + r->exten.len = path.len - (last - path.data); + + break; + + } else if (last[-1] == '/') { + break; + } + + last--; + } + } + if (ngx_http_set_content_type(r) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } This way, the MIME type guessed from the file redirect extension is sent to client instead of the default one, when it is not known from the original URI. Otherwise, I'd suggest to update an 'alias' directive documentation to explicitly state that the default MIME type is used when it can not be guessed from the original location URI even if it is a file alias. Hope it helps. Regards, Vladimir On 24.10.2012 18:32, Maxim Dounin wrote: > Hello! > > On Wed, Oct 24, 2012 at 05:59:33PM +0400, Vladimir Shebordaev wrote: > >> Hi! >> >> On 24.10.2012 03:50, Tom van der Woerdt wrote: >>> Hi all, >>> >>> I'm using nginx' locations to serve a javascript file on '/client' : >>> >>> location = /client { >>> expires epoch; >>> alias /path/to/a/file.js; >>> } >>> >>> Works fine, with one major exception: it gets an octet-stream >>> Content-Type header. >> >> This is due to a bug in static module, it doesn't properly set >> r->exten in this case, so the redirect gets default mime-type. > > As already explained by Igor, this is not a bug, but intentional > behaviour. > > Extension as seen in the URI is used to determine mime type, not > one on the file system (which might not be present at all). 
> >> >> Please try this patch >> >> Index: src/http/modules/ngx_http_static_module.c >> =================================================================== >> --- src/http/modules/ngx_http_static_module.c (revision 4892) >> +++ src/http/modules/ngx_http_static_module.c (working copy) >> @@ -224,6 +224,9 @@ >> return NGX_HTTP_INTERNAL_SERVER_ERROR; >> } >> >> + r->uri = path; >> + ngx_http_set_exten(r); >> + >> if (ngx_http_set_content_type(r) != NGX_OK) { >> return NGX_HTTP_INTERNAL_SERVER_ERROR; >> } > > The patch is completely incorrect. While it might work for you in > this particular case, it > > a) Breaks the behaviour which is expected to work (in contrast to > the above), e.g. like this: > > location = /foo.js { > alias /path/to/a/file; > } > > b) Breaks unrelated things like the $uri variable. > > From ranier at cultura.com.br Fri Oct 26 13:03:50 2012 From: ranier at cultura.com.br (Ranier Vilela) Date: Fri, 26 Oct 2012 10:03:50 -0300 Subject: Intel's cripple non-intel competitors Message-ID: <508A8A36.4020002@cultura.com.br> Hi, This is out topic nginx development, but may be interesting to know. Intel software (tools, libraries, compilers, etc), only perform optimal with Intel chips! Thay can detect non-intel chips and disable intentional any faster or better feature. See all in http://www.agner.org/optimize/blog/read.php?i=49 and http://software.intel.com/en-us/articles/optimization-notice/#opt-en Best regards, Ranier Vilela From tyler at monkeypox.org Sun Oct 28 04:59:27 2012 From: tyler at monkeypox.org (R. Tyler Croy) Date: Sat, 27 Oct 2012 21:59:27 -0700 Subject: Extract headers/body from subrequests Message-ID: <20121028045927.GD29692@kiwi.flexilis.local> I'm using the auth_request module from Maxim Dounin as a basis for some of my experimenting. What I'm trying to do at the end of the day is bring a header from the response of a subrequest and feed it into the headers_in for the "main" request, but I can't figure how this works. The code I'm focused most on is in the ngx_http_auth_request_handler function: Any pointers on how to fish data out of the subreqeusts would be most appreciated :) - R. Tyler Croy -------------------------------------- Code: https://github.com/rtyler Chatter: https://twitter.com/agentdero -------------- next part -------------- A non-text attachment was scrubbed... Name: signature.asc Type: application/pgp-signature Size: 198 bytes Desc: Digital signature URL: From agentzh at gmail.com Sun Oct 28 18:44:45 2012 From: agentzh at gmail.com (agentzh) Date: Sun, 28 Oct 2012 11:44:45 -0700 Subject: Extract headers/body from subrequests In-Reply-To: <20121028045927.GD29692@kiwi.flexilis.local> References: <20121028045927.GD29692@kiwi.flexilis.local> Message-ID: Hello! On Sat, Oct 27, 2012 at 9:59 PM, R. Tyler Croy wrote: > > I'm using the auth_request module from Maxim Dounin as a basis for some of my > experimenting. > > What I'm trying to do at the end of the day is bring a header from the response > of a subrequest and feed it into the headers_in for the "main" request, but I > can't figure how this works. 
> > The code I'm focused most on is in the ngx_http_auth_request_handler function: > You can take a look at how our ngx_lua and ngx_srcache modules do this: http://wiki.nginx.org/HttpLuaModule#ngx.location.capture http://wiki.nginx.org/HttpSRCacheModule#srcache_fetch I think maybe you can just use ngx_lua's access_by_lua directly without writing your own C module for your task ;) Best regards, -agentzh From crk_world at yahoo.com.cn Mon Oct 29 03:08:23 2012 From: crk_world at yahoo.com.cn (chen cw) Date: Mon, 29 Oct 2012 11:08:23 +0800 Subject: is that possible to queue request one by one In-Reply-To: References: Message-ID: On Fri, Oct 26, 2012 at 2:38 AM, Jossan Davis wrote: > Hi > > nginx module limit_req only can limit request frequency by url param key. > it delay a request by "ngx_add_timer(r->connection->write, delay)". > > my requirement is a little bit different. i need the next request be > process immediately after the prev one have done. > i have tried to use "ngx_http_cleanup_add". but it seem that the > ngx_http_request_t is not in shared mem, i can't use > "ngx_add_timer(r->connection->write, 0)" to notify the next request. > > you can store the counter in share mem as module limit_req, and also store blocked requests list in global queue for each worker, such as module upstream_keeplive. so when you use "ngx_http_cleanup_add", you can get the ngx_http_request_t in such list. I think this method meets your requirement. -- -- Charles Chen Software Engineer Server Platforms Team at Taobao.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From crk_world at yahoo.com.cn Mon Oct 29 03:47:02 2012 From: crk_world at yahoo.com.cn (chen cw) Date: Mon, 29 Oct 2012 11:47:02 +0800 Subject: description for "crop" directive in ngx_http_image_filter_module's document is not correct Message-ID: Hi, we think the description for "crop" directive in ngx_http_image_filter_module's document is not correct, or at least not clear. It says "proportionally reduces an image to the size of the largest side". we have tested and now believes it should be not "the largest side", but "the smallest side". -- -- Charles Chen Software Engineer Server Platforms Team at Taobao.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From jossandavis at gmail.com Mon Oct 29 05:22:58 2012 From: jossandavis at gmail.com (Jossan Davis) Date: Mon, 29 Oct 2012 13:22:58 +0800 Subject: is that possible to queue request one by one In-Reply-To: References: Message-ID: Hi, thanks for your reply! my current solution ismodified limit_con module.i add an counter in share mem. when a request have a prev request it will blocked and recheck the counter in 500ms by using nginx timer. your solution is better than mine, but i still have a question .*a finished request is how to notify a blocked request which is in the same worker?* ? 2012/10/29 11:08 AM?"chen cw" ??? > On Fri, Oct 26, 2012 at 2:38 AM, Jossan Davis wrote: > >> Hi >> >> nginx module limit_req only can limit request frequency by url param key. >> it delay a request by "ngx_add_timer(r->connection->write, delay)". >> >> my requirement is a little bit different. i need the next request be >> process immediately after the prev one have done. >> i have tried to use "ngx_http_cleanup_add". but it seem that the >> ngx_http_request_t is not in shared mem, i can't use >> "ngx_add_timer(r->connection->write, 0)" to notify the next request. 
>> >> > you can store the counter in share mem as module limit_req, and also store > blocked requests list in global queue for each worker, such as module > upstream_keeplive. so when you use "ngx_http_cleanup_add", you can get the > ngx_http_request_t in such list. I think this method meets your requirement. > > -- > > -- > > Charles Chen > > Software Engineer > > Server Platforms Team at Taobao.com > > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: From vl at nginx.com Mon Oct 29 08:28:47 2012 From: vl at nginx.com (Homutov Vladimir) Date: Mon, 29 Oct 2012 12:28:47 +0400 Subject: description for "crop" directive in ngx_http_image_filter_module's document is not correct In-Reply-To: References: Message-ID: <20121029082846.GA27250@vlpc> On Mon, Oct 29, 2012 at 11:47:02AM +0800, chen cw wrote: > Hi, > we think the description for "crop" directive in > ngx_http_image_filter_module's document is not correct, or at least not > clear. It says "proportionally reduces an image to the size of the largest > side". we have tested and now believes it should be not "the largest > side", but "the smallest side". > Hi! The 'largest side' is related to the arguments of the 'crop' directive, not the source image size. With this in mind, the description is correct. For example, command 'crop 600x50' for source image 640x480 will: 1) proportionally reduce largest (600 - horizontal) side: 640->600 2) crops extraneous edges by another side - vertical: 480->50 the result is that image got reduced horizontally a bit (proportionally) and cropped by vertical side (50 center pixels left). command 'crop 600x50' for 480x640 image will give 480x50: 1) proportionally reduce horizontal side (no actions, 480 < 600 requested) 2) leave 50 center pixels on another side command 'crop 50x600' for 640x480 image will give 50x480: 1) largest side is 600 (vertical), no actions (requested 600 < 480 actual) 2) another side is cropped to 50 pixels. From crk_world at yahoo.com.cn Mon Oct 29 10:03:29 2012 From: crk_world at yahoo.com.cn (chen cw) Date: Mon, 29 Oct 2012 18:03:29 +0800 Subject: is that possible to queue request one by one In-Reply-To: References: Message-ID: use ngx_http_cleanup_add to add a handler for each request. when a request is done, this handler will run. then, in the handler, get ngx_request_t of a blocked request, and run ngx_http_core_run_phases(r). the method is not fit for one situation, that is requests are processed all by nginx itself, e.g, static files. the problem for this situation is stack overflow or OFM. so i first use this method in limit_upstream https://github.com/cfsego/nginx-limit-upstream On Mon, Oct 29, 2012 at 1:22 PM, Jossan Davis wrote: > Hi, > > thanks for your reply! > > > my current solution ismodified limit_con module.i add an counter in share > mem. when a request have a prev request it will blocked and recheck the > counter in 500ms by using nginx timer. > > > your solution is better than mine, but i still have a question .*a > finished request is how to notify a blocked request which is in the same > worker?* > ? 2012/10/29 11:08 AM?"chen cw" ??? > >> On Fri, Oct 26, 2012 at 2:38 AM, Jossan Davis wrote: >> >>> Hi >>> >>> nginx module limit_req only can limit request frequency by url param >>> key. it delay a request by "ngx_add_timer(r->connection->write, delay)". 
>>> >>> my requirement is a little bit different. i need the next request be >>> process immediately after the prev one have done. >>> i have tried to use "ngx_http_cleanup_add". but it seem that the >>> ngx_http_request_t is not in shared mem, i can't use >>> "ngx_add_timer(r->connection->write, 0)" to notify the next request. >>> >>> >> you can store the counter in share mem as module limit_req, and also >> store blocked requests list in global queue for each worker, such as module >> upstream_keeplive. so when you use "ngx_http_cleanup_add", you can get the >> ngx_http_request_t in such list. I think this method meets your requirement. >> >> -- >> >> -- >> >> Charles Chen >> >> Software Engineer >> >> Server Platforms Team at Taobao.com >> >> >> _______________________________________________ >> nginx-devel mailing list >> nginx-devel at nginx.org >> http://mailman.nginx.org/mailman/listinfo/nginx-devel >> > > _______________________________________________ > nginx-devel mailing list > nginx-devel at nginx.org > http://mailman.nginx.org/mailman/listinfo/nginx-devel > -- -- Charles Chen Software Engineer Server Platforms Team at Taobao.com -------------- next part -------------- An HTML attachment was scrubbed... URL: From vl at nginx.com Mon Oct 29 15:22:55 2012 From: vl at nginx.com (Homutov Vladimir) Date: Mon, 29 Oct 2012 19:22:55 +0400 Subject: description for "crop" directive in ngx_http_image_filter_module's document is not correct In-Reply-To: <20121029082846.GA27250@vlpc> References: <20121029082846.GA27250@vlpc> Message-ID: <20121029152254.GA23202@vlpc> On Mon, Oct 29, 2012 at 12:28:47PM +0400, Homutov Vladimir wrote: > On Mon, Oct 29, 2012 at 11:47:02AM +0800, chen cw wrote: > > Hi, > > we think the description for "crop" directive in > > ngx_http_image_filter_module's document is not correct, or at least not > > clear. It says "proportionally reduces an image to the size of the largest > > side". we have tested and now believes it should be not "the largest > > side", but "the smallest side". > > > > Hi! > > The 'largest side' is related to the arguments of the 'crop' > directive, not the source image size. > > With this in mind, the description is correct. > > For example, > > command 'crop 600x50' for source image 640x480 will: > > 1) proportionally reduce largest (600 - horizontal) side: 640->600 > 2) crops extraneous edges by another side - vertical: 480->50 > > the result is that image got reduced horizontally a bit (proportionally) > and cropped by vertical side (50 center pixels left). > > command 'crop 600x50' for 480x640 image will give 480x50: > > 1) proportionally reduce horizontal side (no actions, 480 < 600 > requested) > 2) leave 50 center pixels on another side > > command 'crop 50x600' for 640x480 image will give 50x480: > > 1) largest side is 600 (vertical), no actions (requested 600 < 480 > actual) > 2) another side is cropped to 50 pixels. actually, things are a bit more complicated: what is important, it is the correlation between width/height ratio of the source image and the desired size. The 'largest side' phrase is misleading, as ratios are compared, not sizes; thus largest ratio is important, not side. The effect is that crop chooses dimension (width or height) to be resized to reach the best fit of source image in the destination frame. if the source image has w/h ratio less than requested frame, it will be proportionally resized up to new width and extra height will be cut. 
if the source image has w/h ratio greater than or equal to requested frame, it will be proportionally resized to the new height and extra width will be cut. Example: # source image:640x480 (w/h = 1.333) image_filter crop 320 250; # (w/h = 1.28) since 1.33 > 1.28, source image is resized up to desired height first (333x250) and then extra width cut (320x250). From mdounin at mdounin.ru Mon Oct 29 17:17:59 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Mon, 29 Oct 2012 17:17:59 +0000 Subject: [nginx] svn commit: r4894 - in trunk/src/http: . modules Message-ID: <20121029171759.D46353F9C0C@mail.nginx.com> Author: mdounin Date: 2012-10-29 17:17:59 +0000 (Mon, 29 Oct 2012) New Revision: 4894 URL: http://trac.nginx.org/nginx/changeset/4894/nginx Log: Variables $connection and $connection_requests. Log module counterparts are removed as they aren't used often and there is no need to preserve them for efficiency. Modified: trunk/src/http/modules/ngx_http_log_module.c trunk/src/http/ngx_http_variables.c Modified: trunk/src/http/modules/ngx_http_log_module.c =================================================================== --- trunk/src/http/modules/ngx_http_log_module.c 2012-10-24 14:07:08 UTC (rev 4893) +++ trunk/src/http/modules/ngx_http_log_module.c 2012-10-29 17:17:59 UTC (rev 4894) @@ -78,10 +78,6 @@ static ssize_t ngx_http_log_script_write(ngx_http_request_t *r, ngx_http_log_script_t *script, u_char **name, u_char *buf, size_t len); -static u_char *ngx_http_log_connection(ngx_http_request_t *r, u_char *buf, - ngx_http_log_op_t *op); -static u_char *ngx_http_log_connection_requests(ngx_http_request_t *r, - u_char *buf, ngx_http_log_op_t *op); static u_char *ngx_http_log_pipe(ngx_http_request_t *r, u_char *buf, ngx_http_log_op_t *op); static u_char *ngx_http_log_time(ngx_http_request_t *r, u_char *buf, @@ -194,9 +190,6 @@ static ngx_http_log_var_t ngx_http_log_vars[] = { - { ngx_string("connection"), NGX_ATOMIC_T_LEN, ngx_http_log_connection }, - { ngx_string("connection_requests"), NGX_INT_T_LEN, - ngx_http_log_connection_requests }, { ngx_string("pipe"), 1, ngx_http_log_pipe }, { ngx_string("time_local"), sizeof("28/Sep/1970:12:00:00 +0600") - 1, ngx_http_log_time }, @@ -500,22 +493,6 @@ static u_char * -ngx_http_log_connection(ngx_http_request_t *r, u_char *buf, - ngx_http_log_op_t *op) -{ - return ngx_sprintf(buf, "%uA", r->connection->number); -} - - -static u_char * -ngx_http_log_connection_requests(ngx_http_request_t *r, u_char *buf, - ngx_http_log_op_t *op) -{ - return ngx_sprintf(buf, "%ui", r->connection->requests); -} - - -static u_char * ngx_http_log_pipe(ngx_http_request_t *r, u_char *buf, ngx_http_log_op_t *op) { if (r->pipeline) { Modified: trunk/src/http/ngx_http_variables.c =================================================================== --- trunk/src/http/ngx_http_variables.c 2012-10-24 14:07:08 UTC (rev 4893) +++ trunk/src/http/ngx_http_variables.c 2012-10-29 17:17:59 UTC (rev 4894) @@ -97,6 +97,11 @@ static ngx_int_t ngx_http_variable_sent_transfer_encoding(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_http_variable_connection(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data); +static ngx_int_t ngx_http_variable_connection_requests(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data); + static ngx_int_t ngx_http_variable_nginx_version(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data); static ngx_int_t ngx_http_variable_hostname(ngx_http_request_t *r, @@ 
-265,6 +270,12 @@ offsetof(ngx_http_request_t, limit_rate), NGX_HTTP_VAR_CHANGEABLE|NGX_HTTP_VAR_NOCACHEABLE, 0 }, + { ngx_string("connection"), NULL, + ngx_http_variable_connection, 0, 0, 0 }, + + { ngx_string("connection_requests"), NULL, + ngx_http_variable_connection_requests, 0, 0, 0 }, + { ngx_string("nginx_version"), NULL, ngx_http_variable_nginx_version, 0, 0, 0 }, @@ -1814,6 +1825,48 @@ static ngx_int_t +ngx_http_variable_connection(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data) +{ + u_char *p; + + p = ngx_pnalloc(r->pool, NGX_ATOMIC_T_LEN); + if (p == NULL) { + return NGX_ERROR; + } + + v->len = ngx_sprintf(p, "%uA", r->connection->number) - p; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = p; + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_variable_connection_requests(ngx_http_request_t *r, + ngx_http_variable_value_t *v, uintptr_t data) +{ + u_char *p; + + p = ngx_pnalloc(r->pool, NGX_INT_T_LEN); + if (p == NULL) { + return NGX_ERROR; + } + + v->len = ngx_sprintf(p, "%ui", r->connection->requests) - p; + v->valid = 1; + v->no_cacheable = 0; + v->not_found = 0; + v->data = p; + + return NGX_OK; +} + + +static ngx_int_t ngx_http_variable_nginx_version(ngx_http_request_t *r, ngx_http_variable_value_t *v, uintptr_t data) { From brianmwaters at gmail.com Tue Oct 30 00:03:38 2012 From: brianmwaters at gmail.com (Brian M. Waters) Date: Mon, 29 Oct 2012 20:03:38 -0400 Subject: [PATCH] Don't install config files for unused modules Message-ID: Hello, This patch fixes a slightly annoying behavior whereby "make install" causes configuration files for charset, fastcgi, uwsgi and scgi modules to be installed, even if those modules have been excluded from the build. Thanks! Brian Waters Index: auto/install =================================================================== --- auto/install (revision 4894) +++ auto/install (working copy) @@ -109,15 +109,30 @@ test -d '\$(DESTDIR)$NGX_CONF_PREFIX' \ || mkdir -p '\$(DESTDIR)$NGX_CONF_PREFIX' +END +if [ $HTTP_CHARSET = YES ]; then + + cat << END >> $NGX_MAKEFILE + cp conf/koi-win '\$(DESTDIR)$NGX_CONF_PREFIX' cp conf/koi-utf '\$(DESTDIR)$NGX_CONF_PREFIX' cp conf/win-utf '\$(DESTDIR)$NGX_CONF_PREFIX' +END +fi + +cat << END >> $NGX_MAKEFILE + test -f '\$(DESTDIR)$NGX_CONF_PREFIX/mime.types' \ || cp conf/mime.types '\$(DESTDIR)$NGX_CONF_PREFIX' cp conf/mime.types '\$(DESTDIR)$NGX_CONF_PREFIX/mime.types.default' +END +if [ $HTTP_FASTCGI = YES ]; then + + cat << END >> $NGX_MAKEFILE + test -f '\$(DESTDIR)$NGX_CONF_PREFIX/fastcgi_params' \ || cp conf/fastcgi_params '\$(DESTDIR)$NGX_CONF_PREFIX' cp conf/fastcgi_params \ @@ -126,17 +141,36 @@ test -f '\$(DESTDIR)$NGX_CONF_PREFIX/fastcgi.conf' \ || cp conf/fastcgi.conf '\$(DESTDIR)$NGX_CONF_PREFIX' cp conf/fastcgi.conf '\$(DESTDIR)$NGX_CONF_PREFIX/fastcgi.conf.default' +END +fi + +if [ $HTTP_UWSGI = YES ]; then + + cat << END >> $NGX_MAKEFILE + test -f '\$(DESTDIR)$NGX_CONF_PREFIX/uwsgi_params' \ || cp conf/uwsgi_params '\$(DESTDIR)$NGX_CONF_PREFIX' cp conf/uwsgi_params \ '\$(DESTDIR)$NGX_CONF_PREFIX/uwsgi_params.default' +END +fi + +if [ $HTTP_SCGI = YES ]; then + + cat << END >> $NGX_MAKEFILE + test -f '\$(DESTDIR)$NGX_CONF_PREFIX/scgi_params' \ || cp conf/scgi_params '\$(DESTDIR)$NGX_CONF_PREFIX' cp conf/scgi_params \ '\$(DESTDIR)$NGX_CONF_PREFIX/scgi_params.default' +END +fi + +cat << END >> $NGX_MAKEFILE + test -f '\$(DESTDIR)$NGX_CONF_PATH' \ || cp conf/nginx.conf '\$(DESTDIR)$NGX_CONF_PATH' cp conf/nginx.conf 
'\$(DESTDIR)$NGX_CONF_PREFIX/nginx.conf.default' From mdounin at mdounin.ru Tue Oct 30 11:09:39 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 30 Oct 2012 11:09:39 +0000 Subject: [nginx] svn commit: r4895 - trunk/misc Message-ID: <20121030110939.540143F9C4E@mail.nginx.com> Author: mdounin Date: 2012-10-30 11:09:38 +0000 (Tue, 30 Oct 2012) New Revision: 4895 URL: http://trac.nginx.org/nginx/changeset/4895/nginx Log: Style, parentheses instead of braces in misc/GNUMakefile. Modified: trunk/misc/GNUmakefile Modified: trunk/misc/GNUmakefile =================================================================== --- trunk/misc/GNUmakefile 2012-10-29 17:17:59 UTC (rev 4894) +++ trunk/misc/GNUmakefile 2012-10-30 11:09:38 UTC (rev 4895) @@ -85,7 +85,7 @@ win32: ./auto/configure \ --with-cc=cl \ - --builddir=${OBJS} \ + --builddir=$(OBJS) \ --with-debug \ --prefix= \ --conf-path=conf/nginx.conf \ @@ -99,8 +99,8 @@ --http-scgi-temp-path=temp/scgi_temp \ --http-uwsgi-temp-path=temp/uwsgi_temp \ --with-cc-opt=-DFD_SETSIZE=1024 \ - --with-pcre=${OBJS}/lib/${PCRE} \ - --with-zlib=${OBJS}/lib/${ZLIB} \ + --with-pcre=$(OBJS)/lib/$(PCRE) \ + --with-zlib=$(OBJS)/lib/$(ZLIB) \ --with-select_module \ --with-http_realip_module \ --with-http_addition_module \ @@ -114,7 +114,7 @@ --with-http_random_index_module \ --with-http_secure_link_module \ --with-mail \ - --with-openssl=${OBJS}/lib/${OPENSSL} \ + --with-openssl=$(OBJS)/lib/$(OPENSSL) \ --with-openssl-opt=enable-tlsext \ --with-http_ssl_module \ --with-mail_ssl_module \ From mdounin at mdounin.ru Tue Oct 30 11:14:25 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 30 Oct 2012 11:14:25 +0000 Subject: [nginx] svn commit: r4896 - in trunk/src: event http Message-ID: <20121030111425.2E9E63F9F22@mail.nginx.com> Author: mdounin Date: 2012-10-30 11:14:24 +0000 (Tue, 30 Oct 2012) New Revision: 4896 URL: http://trac.nginx.org/nginx/changeset/4896/nginx Log: Event pipe: fixed handling of buf_to_file data. Input filter might free a buffer if there is no data in it, and in case of first buffer (used for cache header and request header, aka p->buf_to_file) this resulted in cache corruption. Buffer memory was reused to read upstream response before headers were written to disk. Fix is to avoid moving pointers in ngx_event_pipe_add_free_buf() to a buffer start if we were asked to free a buffer used by p->buf_to_file. This fixes occasional cache file corruption, usually resulted in "cache file ... has md5 collision" alerts. Reported by Anatoli Marinov. 
Modified: trunk/src/event/ngx_event_pipe.c trunk/src/http/ngx_http_upstream.c Modified: trunk/src/event/ngx_event_pipe.c =================================================================== --- trunk/src/event/ngx_event_pipe.c 2012-10-30 11:09:38 UTC (rev 4895) +++ trunk/src/event/ngx_event_pipe.c 2012-10-30 11:14:24 UTC (rev 4896) @@ -946,8 +946,15 @@ return NGX_ERROR; } - b->pos = b->start; - b->last = b->start; + if (p->buf_to_file && b->start == p->buf_to_file->start) { + b->pos = p->buf_to_file->last; + b->last = p->buf_to_file->last; + + } else { + b->pos = b->start; + b->last = b->start; + } + b->shadow = NULL; cl->buf = b; Modified: trunk/src/http/ngx_http_upstream.c =================================================================== --- trunk/src/http/ngx_http_upstream.c 2012-10-30 11:09:38 UTC (rev 4895) +++ trunk/src/http/ngx_http_upstream.c 2012-10-30 11:14:24 UTC (rev 4896) @@ -2287,6 +2287,7 @@ return; } + p->buf_to_file->start = u->buffer.start; p->buf_to_file->pos = u->buffer.start; p->buf_to_file->last = u->buffer.pos; p->buf_to_file->temporary = 1; From mdounin at mdounin.ru Tue Oct 30 13:34:23 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 30 Oct 2012 13:34:23 +0000 Subject: [nginx] svn commit: r4897 - trunk/docs/xml/nginx Message-ID: <20121030133424.03E223F9C3E@mail.nginx.com> Author: mdounin Date: 2012-10-30 13:34:23 +0000 (Tue, 30 Oct 2012) New Revision: 4897 URL: http://trac.nginx.org/nginx/changeset/4897/nginx Log: nginx-1.3.8-RELEASE Modified: trunk/docs/xml/nginx/changes.xml Modified: trunk/docs/xml/nginx/changes.xml =================================================================== --- trunk/docs/xml/nginx/changes.xml 2012-10-30 11:14:24 UTC (rev 4896) +++ trunk/docs/xml/nginx/changes.xml 2012-10-30 13:34:23 UTC (rev 4897) @@ -5,6 +5,71 @@ + + + + +???????? optional_no_ca ????????? ssl_verify_client.
+??????? ??????? ????????? ? Eric O'Connor. +
+ +the "optional_no_ca" parameter of the "ssl_verify_client" directive.
+Thanks to Mike Kazantsev and Eric O'Connor. +
+
+ + + +?????????? $bytes_sent, $connection ? $connection_requests +?????? ????? ???????????? ?? ?????? ? ????????? log_format.
+??????? Benjamin Grössing. +
+ +the $bytes_sent, $connection, and $connection_requests variables +can now be used not only in the "log_format" directive.
+Thanks to Benjamin Grössing. +
+
+ + + +???????? auto ????????? worker_processes. + + +the "auto" parameter of the "worker_processes" directive. + + + + + +????????? "cache file ... has md5 collision". + + +"cache file ... has md5 collision" alert. + + + + + +? ?????? ngx_http_gunzip_filter_module. + + +in the ngx_http_gunzip_filter_module. + + + + + +? ????????? ssl_stapling. + + +in the "ssl_stapling" directive. + + + +
+ + From mdounin at mdounin.ru Tue Oct 30 13:35:18 2012 From: mdounin at mdounin.ru (mdounin at mdounin.ru) Date: Tue, 30 Oct 2012 13:35:18 +0000 Subject: [nginx] svn commit: r4898 - tags Message-ID: <20121030133519.1B2C93F9C3E@mail.nginx.com> Author: mdounin Date: 2012-10-30 13:35:18 +0000 (Tue, 30 Oct 2012) New Revision: 4898 URL: http://trac.nginx.org/nginx/changeset/4898/nginx Log: release-1.3.8 tag Added: tags/release-1.3.8/ From toli at webforge.bg Tue Oct 30 13:39:58 2012 From: toli at webforge.bg (Anatoli Marinov) Date: Tue, 30 Oct 2012 15:39:58 +0200 Subject: patch for Transfer-Encoding: chunked and proxy_cache - check if the file is complete Message-ID: <508FD8AE.2030709@webforge.bg> This patch is for 1.2.4. It adds some code that checks if the file is complete when Transfer-Encoding: chunked is used. --- nginx-1.2.4/src/http/modules/ngx_http_proxy_module.c 2012-04-23 13:40:01.000000000 +0300 +++ nginx-1.2.4_patched/src/http/modules/ngx_http_proxy_module.c 2012-10-30 14:55:24.106188123 +0200 @@ -1867,7 +1867,7 @@ return rc; done: + r->upstream->chunked_cacheable = 1; return NGX_DONE; invalid: --- nginx-1.2.4/src/http/ngx_http_upstream.c 2012-08-06 20:34:08.000000000 +0300 +++ nginx-1.2.4_patched/src/http/ngx_http_upstream.c 2012-10-30 15:36:23.126381731 +0200 @@ -2687,7 +2687,8 @@ tf = u->pipe->temp_file; - if (u->headers_in.content_length_n == -1 + if (((u->headers_in.content_length_n == -1 && !u->headers_in.chunked) || + (u->headers_in.chunked && u->chunked_cacheable)) || u->headers_in.content_length_n == tf->offset - (off_t) r->cache->body_start) { @@ -3442,6 +3443,7 @@ != NULL) { r->upstream->headers_in.chunked = 1; + r->upstream->chunked_cacheable = 0; } return NGX_OK; --- nginx-1.2.4/src/http/ngx_http_upstream.h 2012-02-13 13:01:58.000000000 +0200 +++ nginx-1.2.4_patched/src/http/ngx_http_upstream.h 2012-10-30 14:56:36.642547808 +0200 @@ -314,6 +314,8 @@ unsigned store:1; unsigned cacheable:1; + unsigned chunked_cacheable:1; unsigned accel:1; unsigned ssl:1; #if (NGX_HTTP_CACHE) Best regards Anatoli Marinov From mdounin at mdounin.ru Tue Oct 30 16:25:28 2012 From: mdounin at mdounin.ru (Maxim Dounin) Date: Tue, 30 Oct 2012 20:25:28 +0400 Subject: patch for Transfer-Encoding: chunked and proxy_cache - check if the file is complete In-Reply-To: <508FD8AE.2030709@webforge.bg> References: <508FD8AE.2030709@webforge.bg> Message-ID: <20121030162527.GQ40452@mdounin.ru> Hello! On Tue, Oct 30, 2012 at 03:39:58PM +0200, Anatoli Marinov wrote: > This patch is for 1.2.4. It adds some code that checks if the file > is complete when Transfer-Encoding: chunked is used. > > --- nginx-1.2.4/src/http/modules/ngx_http_proxy_module.c 2012-04-23 > 13:40:01.000000000 +0300 > +++ nginx-1.2.4_patched/src/http/modules/ngx_http_proxy_module.c > 2012-10-30 14:55:24.106188123 +0200 > @@ -1867,7 +1867,7 @@ > return rc; > > done: > > + r->upstream->chunked_cacheable = 1; > return NGX_DONE; > > invalid: > --- nginx-1.2.4/src/http/ngx_http_upstream.c 2012-08-06 > 20:34:08.000000000 +0300 > +++ nginx-1.2.4_patched/src/http/ngx_http_upstream.c 2012-10-30 > 15:36:23.126381731 +0200 > @@ -2687,7 +2687,8 @@ > > tf = u->pipe->temp_file; > > - if (u->headers_in.content_length_n == -1 > + if (((u->headers_in.content_length_n == -1 && > !u->headers_in.chunked) || > + (u->headers_in.chunked && u->chunked_cacheable)) > || u->headers_in.content_length_n > == tf->offset - (off_t) r->cache->body_start) > { Have you tried the u->length / p->length aproach I've already suggested at [1]? 
In either case I don't think that introduction of a protocol-specific flag is a way to go. It should be either length check, or a separate protocol independant flag. If you are ok with testing headers_in.chunked, you may just safely omit any additional checks, as p->upstream_eof case can't happen with complete chunked response (p->upstream_done will be set in this case). [1] http://mailman.nginx.org/pipermail/nginx-devel/2012-September/002699.html -- Maxim Dounin http://nginx.com/support.html From toli at webforge.bg Wed Oct 31 08:10:00 2012 From: toli at webforge.bg (Anatoli Marinov) Date: Wed, 31 Oct 2012 10:10:00 +0200 Subject: patch for Transfer-Encoding: chunked and proxy_cache - check if the file is complete In-Reply-To: <20121030162527.GQ40452@mdounin.ru> References: <508FD8AE.2030709@webforge.bg> <20121030162527.GQ40452@mdounin.ru> Message-ID: <5090DCD8.9030304@webforge.bg> Thanks Maxim, You are right. The following patch is enough: --- src/http/ngx_http_upstream.c +++ src/http/ngx_http_upstream.c @@ -2687,7 +2687,8 @@ tf = u->pipe->temp_file; - if (u->headers_in.content_length_n == -1 + if ((u->headers_in.content_length_n == -1 + && !u->headers_in.chunked) || u->headers_in.content_length_n == tf->offset - (off_t) r->cache->body_start) { For chunked responses p->upstream_done should be true even p->upstream_eof is true. On 10/30/2012 06:25 PM, Maxim Dounin wrote: > Hello! > > On Tue, Oct 30, 2012 at 03:39:58PM +0200, Anatoli Marinov wrote: > >> This patch is for 1.2.4. It adds some code that checks if the file >> is complete when Transfer-Encoding: chunked is used. >> >> --- nginx-1.2.4/src/http/modules/ngx_http_proxy_module.c 2012-04-23 >> 13:40:01.000000000 +0300 >> +++ nginx-1.2.4_patched/src/http/modules/ngx_http_proxy_module.c >> 2012-10-30 14:55:24.106188123 +0200 >> @@ -1867,7 +1867,7 @@ >> return rc; >> >> done: >> >> + r->upstream->chunked_cacheable = 1; >> return NGX_DONE; >> >> invalid: >> --- nginx-1.2.4/src/http/ngx_http_upstream.c 2012-08-06 >> 20:34:08.000000000 +0300 >> +++ nginx-1.2.4_patched/src/http/ngx_http_upstream.c 2012-10-30 >> 15:36:23.126381731 +0200 >> @@ -2687,7 +2687,8 @@ >> >> tf = u->pipe->temp_file; >> >> - if (u->headers_in.content_length_n == -1 >> + if (((u->headers_in.content_length_n == -1 && >> !u->headers_in.chunked) || >> + (u->headers_in.chunked && u->chunked_cacheable)) >> || u->headers_in.content_length_n >> == tf->offset - (off_t) r->cache->body_start) >> { > Have you tried the u->length / p->length aproach I've already > suggested at [1]? In either case I don't think that introduction > of a protocol-specific flag is a way to go. It should be either > length check, or a separate protocol independant flag. > > If you are ok with testing headers_in.chunked, you may just safely > omit any additional checks, as p->upstream_eof case can't happen > with complete chunked response (p->upstream_done will be set in > this case). > > [1] http://mailman.nginx.org/pipermail/nginx-devel/2012-September/002699.html >