# HG changeset patch
# User Tracey Jaquith <tracey(a)archive.org>
# Date 1623797180 0
# Tue Jun 15 22:46:20 2021 +0000
# Node ID 1879d49fe0cf739f48287b5a38a83d3a1adab939
# Parent 5f765427c17ac8cf753967387562201cf4f78dc4
Add optional "mp4_exact_start" nginx config off/on to show video between keyframes.
archive.org has been using mod_h264_streaming with a similar "exact start" patch from me since 2013.
We just moved to nginx mp4 module and are using this patch.
The technique is to find the video keyframe just before the desired "start" time, and send
that down the wire so video playback can start immediately.
Next calculate how many video samples are between the keyframe and desired "start" time
and update the STTS atom where those samples move the duration from (typically) 1001 to 1.
This way, initial unwanted video frames play at ~1/30,000s -- so visually the
video & audio start playing immediately.
You can see an example before/after here (nginx binary built with mp4 module + patch):
https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Corre…https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Corre…
Tested on linux and macosx.
(this is me: https://github.com/traceypooh )
diff -r 5f765427c17a -r 1879d49fe0cf src/http/modules/ngx_http_mp4_module.c
--- a/src/http/modules/ngx_http_mp4_module.c Tue Jun 01 17:37:51 2021 +0300
+++ b/src/http/modules/ngx_http_mp4_module.c Tue Jun 15 22:46:20 2021 +0000
@@ -43,6 +43,7 @@
typedef struct {
size_t buffer_size;
size_t max_buffer_size;
+ ngx_flag_t exact_start;
} ngx_http_mp4_conf_t;
@@ -340,6 +341,13 @@
offsetof(ngx_http_mp4_conf_t, max_buffer_size),
NULL },
+ { ngx_string("mp4_exact_start"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
+ ngx_conf_set_flag_slot,
+ NGX_HTTP_LOC_CONF_OFFSET,
+ offsetof(ngx_http_mp4_conf_t, exact_start),
+ NULL },
+
ngx_null_command
};
@@ -2156,6 +2164,83 @@
static ngx_int_t
+ngx_http_mp4_exact_start_video(ngx_http_mp4_file_t *mp4, ngx_http_mp4_trak_t *trak)
+{
+ uint32_t n, speedup_samples, current_count;
+ ngx_uint_t sample_keyframe, start_sample_exact;
+ ngx_mp4_stts_entry_t *entry, *entries_array;
+ ngx_buf_t *data;
+
+ data = trak->out[NGX_HTTP_MP4_STTS_DATA].buf;
+
+ // Find the keyframe just before the desired start time - so that we can emit an mp4
+ // where the first frame is a keyframe. We'll "speed up" the first frames to 1000x
+ // normal speed (typically), so they won't be noticed. But this way, perceptively,
+ // playback of the _video_ track can start immediately
+ // (and not have to wait until the keyframe _after_ the desired starting time frame).
+ start_sample_exact = trak->start_sample;
+ for (n = 0; n < trak->sync_samples_entries; n++) {
+ // each element of array is the sample number of a keyframe
+ // sync samples starts from 1 -- so subtract 1
+ sample_keyframe = ngx_mp4_get_32value(trak->stss_data_buf.pos + (n * 4)) - 1;
+ if (sample_keyframe <= trak->start_sample) {
+ start_sample_exact = sample_keyframe;
+ }
+ if (sample_keyframe >= trak->start_sample) {
+ break;
+ }
+ }
+
+ if (start_sample_exact < trak->start_sample) {
+ // We're going to prepend an entry with duration=1 for the frames we want to "not see".
+ // MOST of the time (eg: constant video framerate),
+ // we're taking a single element entry array and making it two.
+ speedup_samples = trak->start_sample - start_sample_exact;
+
+ ngx_log_debug3(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0,
+ "exact trak start_sample move %l to %l (speed up %d samples)\n",
+ trak->start_sample, start_sample_exact, speedup_samples);
+
+ entries_array = ngx_palloc(mp4->request->pool,
+ (1 + trak->time_to_sample_entries) * sizeof(ngx_mp4_stts_entry_t));
+ if (entries_array == NULL) {
+ return NGX_ERROR;
+ }
+ entry = &(entries_array[1]);
+ ngx_memcpy(entry, (ngx_mp4_stts_entry_t *)data->pos,
+ trak->time_to_sample_entries * sizeof(ngx_mp4_stts_entry_t));
+
+ current_count = ngx_mp4_get_32value(entry->count);
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0,
+ "exact split in 2 video STTS entry from count:%d", current_count);
+
+ if (current_count <= speedup_samples) {
+ return NGX_ERROR;
+ }
+
+ ngx_mp4_set_32value(entry->count, current_count - speedup_samples);
+ ngx_log_debug2(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0,
+ "exact split new[1]: count:%d duration:%d",
+ ngx_mp4_get_32value(entry->count),
+ ngx_mp4_get_32value(entry->duration));
+ entry--;
+ ngx_mp4_set_32value(entry->count, speedup_samples);
+ ngx_mp4_set_32value(entry->duration, 1);
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0,
+ "exact split new[0]: count:%d duration:1",
+ ngx_mp4_get_32value(entry->count));
+
+ data->pos = (u_char *) entry;
+ trak->time_to_sample_entries++;
+ trak->start_sample = start_sample_exact;
+ data->last = (u_char *) (entry + trak->time_to_sample_entries);
+ }
+
+ return NGX_OK;
+}
+
+
+static ngx_int_t
ngx_http_mp4_crop_stts_data(ngx_http_mp4_file_t *mp4,
ngx_http_mp4_trak_t *trak, ngx_uint_t start)
{
@@ -2164,6 +2249,8 @@
ngx_buf_t *data;
ngx_uint_t start_sample, entries, start_sec;
ngx_mp4_stts_entry_t *entry, *end;
+ ngx_http_mp4_conf_t *conf;
+
if (start) {
start_sec = mp4->start;
@@ -2238,6 +2325,10 @@
"start_sample:%ui, new count:%uD",
trak->start_sample, count - rest);
+ conf = ngx_http_get_module_loc_conf(mp4->request, ngx_http_mp4_module);
+ if (conf->exact_start) {
+ ngx_http_mp4_exact_start_video(mp4, trak);
+ }
} else {
ngx_mp4_set_32value(entry->count, rest);
data->last = (u_char *) (entry + 1);
@@ -3590,6 +3681,7 @@
conf->buffer_size = NGX_CONF_UNSET_SIZE;
conf->max_buffer_size = NGX_CONF_UNSET_SIZE;
+ conf->exact_start = NGX_CONF_UNSET;
return conf;
}
Hi,
I am trying to run the test suite, but it seems that, no matter how I build
Nginx, it systematically fails.
It seems that, most (all?) of the time, tests fail because Nginx returns
403 error codes, e.g.:
./ssi_waited.t ............................. 1/3
# Failed test 'waited non-active'
# at ./ssi_waited.t line 60.
# 'HTTP/1.1 403 Forbidden
# Server: nginx/1.21.0
# Date: Sat, 03 Jul 2021 08:06:00 GMT
# Content-Type: text/html
# Connection: close
#
# <html>
# <head><title>403 Forbidden</title></head>
# <body>
# <center><h1>403 Forbidden</h1></center>
# <hr><center>nginx/1.21.0</center>
# </body>
# </html>
# '
# doesn't match '(?^m:^xFIRSTxWAITEDxSECONDx$)'
The runtime configuration is the default one from nginx-1.20.1.tar.gz
(conf/nginx.conf).
I must be doing something wrong with the build or run time configuration,
but I cannot pinpoint what. Any idea?
Best,
Hugo
--
Hugo Lefeuvre (hle) | www.owl.eu.com
RSA4096_ 360B 03B3 BF27 4F4D 7A3F D5E8 14AA 1EB8 A247 3DFD
ed25519_ 37B2 6D38 0B25 B8A2 6B9F 3A65 A36F 5357 5F2D DC4C
The series implements some improvements in HTTP/3. On top of them Stream
Cancellation send is added.
- patch #1 improves throwing stream/connection errors
- patch #2 adds connection reuse and delayed request allocation
- patch #3 adds Stream Cancellation send
Hello,
some pre information:
I started using Nginx as proxy for mail. Currently I have the following setup:
ngin-conf: mail { }
-----------------------------------------------------------------------------
server_name mail.roessner-net.de;
auth_http http://localhost.localdomain:8180/authmail;
proxy_pass_error_message on;
ssl_certificate /etc/ssl/letsencrypt/cert/star.roessner-net.de-fullchain.crt;
ssl_certificate_key /etc/ssl/letsencrypt/private/star.roessner-net.de.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
imap_capabilities "IMAP4rev1" "LITERAL+" "SASL-IR" "LOGIN-REFERRALS" "ID" "ENABLE" "IDLE" "NAMESPACE";
smtp_capabilities "SIZE 104857600" ENHANCEDSTATUSCODES 8BITMIME DSN SMTPUTF8 CHUNKING;
resolver 127.0.0.1;
server {
listen 127.0.0.1:465 ssl;
listen 192.168.0.2:465 ssl;
listen 134.255.226.248:465 ssl;
listen [::1]:465 ssl;
listen [2a05:bec0:28:1:134:255:226:248]:465 ssl;
protocol smtp;
xclient on;
smtp_auth login plain;
error_log /var/log/nginx/smtp.log info;
auth_http_header X-Auth-Port "465";
}
server {
listen 127.0.0.1:587;
listen 192.168.0.2:587;
listen 134.255.226.248:587;
listen [::1]:587;
listen [2a05:bec0:28:1:134:255:226:248]:587;
protocol smtp;
xclient on;
smtp_auth login plain;
starttls on;
error_log /var/log/nginx/smtp.log info;
auth_http_header X-Auth-Port "587";
}
server {
listen 127.0.0.1:143;
listen 192.168.0.2:143;
listen 134.255.226.248:143;
listen [::1]:143;
listen [2a05:bec0:28:1:134:255:226:248]:143;
protocol imap;
#proxy_protocol on;
imap_auth login plain;
starttls on;
error_log /var/log/nginx/imap.log info;
auth_http_header X-Auth-Port "143";
}
server {
listen 127.0.0.1:993 ssl;
listen 192.168.0.2:993 ssl;
listen 134.255.226.248:993 ssl;
listen [::1]:993 ssl;
listen [2a05:bec0:28:1:134:255:226:248]:993 ssl;
protocol imap;
#proxy_protocol on;
imap_auth login plain;
error_log /var/log/nginx/imap.log info;
auth_http_header X-Auth-Port "993";
}
-----------------------------------------------------------------------------
I started an open source proof of concept auth server project here:
https://gitlab.roessner-net.de/croessner/authserv
It uses the auth header to authenticate to an OpenLDAP server and replies the required server and port stuff.
This works very nicely using a stunnel.conf between Nginx and the main mail server backends:
-----------------------------------------------------------------------------
[imaps]
accept = 127.0.0.1:9931
client = yes
connect = mail.roessner-net.de:9932
cert = /etc/ssl/letsencrypt/cert/star.roessner-net.de-fullchain.crt
key = /etc/ssl/letsencrypt/private/star.roessner-net.de.key
CAfile = /etc/pki/tls/certs/ca-bundle.crt
verify = 2
[submission]
accept = 127.0.0.1:5871
client = yes
connect = 127.0.0.1:5872
cert = /etc/ssl/letsencrypt/cert/star.roessner-net.de-fullchain.crt
key = /etc/ssl/letsencrypt/private/star.roessner-net.de.key
CAfile = /etc/pki/tls/certs/ca-bundle.crt
verify = 2
-----------------------------------------------------------------------------
So the mechanism is: clients connect to Nginx, which authenticates in plain text with the help of the authserv process, retrieves the backend information, and then connects — again in plain text — to stunnel, which in turn connects via TLS to the backends. (I know that I currently use all this on a single system, but I am evaluating it for distributed systems in different firewall zones.)
Now come my questions :-)
1. It would really be awesome if someone could implement auth_http in such a way that it also accepts https.
2. It would also be very nice if the server {} blocks could use a client SSL certificate to their backends. Why is this important? If I want to speak haproxy with Dovecot, the connection must be secured; otherwise the Dovecot server does not accept the login process from Nginx. With the current implementation, it is not possible to keep the original source address and source port from the clients outside. For Postfix servers it works in plain text, but it requires setting the sasl security options so that they accept plain-text auth, even when using XCLIENT.
Summary for the request:
1. Having SSL for auth_http, example: auth_http https://some.auth.serv:443/authmail
2. Have client SSL from server{} to backend to have HAproxy protocol working.
I know that there are not many people yet using Nginx as a proxy for mail, but I guess that might change if the missing security features existed. My part is to enhance the authserv project so people can use it if they want. It's really at the beginning.
Thanks a lot for reading and I thank you in advance.
Christian
--
Rößner-Network-Solutions
Zertifizierter ITSiBe / CISO
Karl-Bröger-Str. 10, 36304 Alsfeld
Fax: +49 6631 78823409, Mobil: +49 171 9905345
USt-IdNr.: DE225643613, https://roessner.website
PGP fingerprint: 658D 1342 B762 F484 2DDF 1E88 38A5 4346 D727 94E5
details: https://hg.nginx.org/nginx/rev/c7a8bdf5af55
branches:
changeset: 7951:c7a8bdf5af55
user: Maxim Dounin <mdounin(a)mdounin.ru>
date: Sat Oct 30 02:39:19 2021 +0300
description:
Changed ngx_chain_update_chains() to test tag first (ticket #2248).
Without this change, aio used with HTTP/2 can result in connection hang,
as observed with "aio threads; aio_write on;" and proxying (ticket #2248).
The problem is that HTTP/2 updates buffers outside of the output filters
(notably, marks them as sent), and then posts a write event to call
output filters. If a filter does not call the next one for some reason
(for example, because of an AIO operation in progress), this might
result in a state when the owner of a buffer already called
ngx_chain_update_chains() and can reuse the buffer, while the same buffer
is still sitting in the busy chain of some other filter.
In the particular case a buffer was sitting in output chain's ctx->busy,
and was reused by event pipe. Output chain's ctx->busy was permanently
blocked by it, and this resulted in connection hang.
Fix is to change ngx_chain_update_chains() to skip buffers from other
modules unconditionally, without trying to wait for these buffers to
become empty.
diffstat:
src/core/ngx_buf.c | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)
diffs (24 lines):
diff -r e3dbd9449b14 -r c7a8bdf5af55 src/core/ngx_buf.c
--- a/src/core/ngx_buf.c Fri Oct 29 20:21:57 2021 +0300
+++ b/src/core/ngx_buf.c Sat Oct 30 02:39:19 2021 +0300
@@ -203,16 +203,16 @@ ngx_chain_update_chains(ngx_pool_t *p, n
while (*busy) {
cl = *busy;
- if (ngx_buf_size(cl->buf) != 0) {
- break;
- }
-
if (cl->buf->tag != tag) {
*busy = cl->next;
ngx_free_chain(p, cl);
continue;
}
+ if (ngx_buf_size(cl->buf) != 0) {
+ break;
+ }
+
cl->buf->pos = cl->buf->start;
cl->buf->last = cl->buf->start;
details: https://hg.nginx.org/nginx/rev/e3dbd9449b14
branches:
changeset: 7950:e3dbd9449b14
user: Maxim Dounin <mdounin(a)mdounin.ru>
date: Fri Oct 29 20:21:57 2021 +0300
description:
Changed default value of sendfile_max_chunk to 2m.
The "sendfile_max_chunk" directive is important to prevent worker
monopolization by fast connections. The 2m value implies maximum 200ms
delay with 100 Mbps links, 20ms delay with 1 Gbps links, and 2ms on
10 Gbps links. It also seems to be a good value for disks.
diffstat:
src/http/ngx_http_core_module.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diffs (12 lines):
diff -r 862f6130d357 -r e3dbd9449b14 src/http/ngx_http_core_module.c
--- a/src/http/ngx_http_core_module.c Fri Oct 29 20:21:54 2021 +0300
+++ b/src/http/ngx_http_core_module.c Fri Oct 29 20:21:57 2021 +0300
@@ -3720,7 +3720,7 @@ ngx_http_core_merge_loc_conf(ngx_conf_t
ngx_conf_merge_value(conf->internal, prev->internal, 0);
ngx_conf_merge_value(conf->sendfile, prev->sendfile, 0);
ngx_conf_merge_size_value(conf->sendfile_max_chunk,
- prev->sendfile_max_chunk, 0);
+ prev->sendfile_max_chunk, 2 * 1024 * 1024);
ngx_conf_merge_size_value(conf->subrequest_output_buffer_size,
prev->subrequest_output_buffer_size,
(size_t) ngx_pagesize);
details: https://hg.nginx.org/nginx/rev/a2613fc1bce5
branches:
changeset: 7948:a2613fc1bce5
user: Maxim Dounin <mdounin(a)mdounin.ru>
date: Fri Oct 29 20:21:51 2021 +0300
description:
Fixed sendfile() limit handling on Linux.
On Linux starting with 2.6.16, sendfile() silently limits all operations
to MAX_RW_COUNT, defined as (INT_MAX & PAGE_MASK). This incorrectly
triggered the interrupt check, and resulted in 0-sized writev() on the
next loop iteration.
Fix is to make sure the limit is always checked, so we will return from
the loop if the limit is already reached even if number of bytes sent is
not exactly equal to the number of bytes we've tried to send.
diffstat:
src/os/unix/ngx_linux_sendfile_chain.c | 4 +++-
1 files changed, 3 insertions(+), 1 deletions(-)
diffs (21 lines):
diff -r 51a260276425 -r a2613fc1bce5 src/os/unix/ngx_linux_sendfile_chain.c
--- a/src/os/unix/ngx_linux_sendfile_chain.c Fri Oct 29 20:21:48 2021 +0300
+++ b/src/os/unix/ngx_linux_sendfile_chain.c Fri Oct 29 20:21:51 2021 +0300
@@ -38,6 +38,9 @@ static void ngx_linux_sendfile_thread_ha
* On Linux up to 2.6.16 sendfile() does not allow to pass the count parameter
* more than 2G-1 bytes even on 64-bit platforms: it returns EINVAL,
* so we limit it to 2G-1 bytes.
+ *
+ * On Linux 2.6.16 and later, sendfile() silently limits the count parameter
+ * to 2G minus the page size, even on 64-bit platforms.
*/
#define NGX_SENDFILE_MAXSIZE 2147483647L
@@ -216,7 +219,6 @@ ngx_linux_sendfile_chain(ngx_connection_
*/
send = prev_send + sent;
- continue;
}
if (send >= limit || in == NULL) {
details: https://hg.nginx.org/nginx/rev/51a260276425
branches:
changeset: 7947:51a260276425
user: Maxim Dounin <mdounin(a)mdounin.ru>
date: Fri Oct 29 20:21:48 2021 +0300
description:
Simplified sendfile_max_chunk handling.
Previously, it was checked that sendfile_max_chunk was enabled and
almost whole sendfile_max_chunk was sent (see e67ef50c3176), to avoid
delaying connections where sendfile_max_chunk wasn't reached (for example,
when sending responses smaller than sendfile_max_chunk). Now we instead
check if there are unsent data, and the connection is still ready for writing.
Additionally we also check c->write->delayed to ignore connections already
delayed by limit_rate.
This approach is believed to be more robust, and correctly handles
not only sendfile_max_chunk, but also internal limits of c->send_chain(),
such as sendfile() maximum supported length (ticket #1870).
diffstat:
src/http/ngx_http_write_filter_module.c | 6 +-----
1 files changed, 1 insertions(+), 5 deletions(-)
diffs (21 lines):
diff -r 61e9c078ee3d -r 51a260276425 src/http/ngx_http_write_filter_module.c
--- a/src/http/ngx_http_write_filter_module.c Fri Oct 29 20:21:43 2021 +0300
+++ b/src/http/ngx_http_write_filter_module.c Fri Oct 29 20:21:48 2021 +0300
@@ -321,16 +321,12 @@ ngx_http_write_filter(ngx_http_request_t
delay = (ngx_msec_t) ((nsent - sent) * 1000 / r->limit_rate);
if (delay > 0) {
- limit = 0;
c->write->delayed = 1;
ngx_add_timer(c->write, delay);
}
}
- if (limit
- && c->write->ready
- && c->sent - sent >= limit - (off_t) (2 * ngx_pagesize))
- {
+ if (chain && c->write->ready && !c->write->delayed) {
ngx_post_event(c->write, &ngx_posted_next_events);
}