# HG changeset patch
# User Tracey Jaquith <tracey(a)archive.org>
# Date 1623797180 0
# Tue Jun 15 22:46:20 2021 +0000
# Node ID 1879d49fe0cf739f48287b5a38a83d3a1adab939
# Parent 5f765427c17ac8cf753967387562201cf4f78dc4
Add optional "mp4_exact_start" nginx config off/on to show video between keyframes.
archive.org has been using mod_h264_streaming with a similar "exact start" patch from me since 2013.
We just moved to nginx mp4 module and are using this patch.
The technique is to find the video keyframe just before the desired "start" time, and send
that down the wire so video playback can start immediately.
Next, calculate how many video samples lie between the keyframe and the desired "start" time,
and update the STTS atom so that those samples' duration changes from (typically) 1001 to 1.
This way, initial unwanted video frames play at ~1/30,000s -- so visually the
video & audio start playing immediately.
You can see an example before/after here (nginx binary built with mp4 module + patch):
https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Corre…https://pi.archive.org/0/items/CSPAN_20160425_022500_2011_White_House_Corre…
Tested on linux and macosx.
(this is me: https://github.com/traceypooh )
diff -r 5f765427c17a -r 1879d49fe0cf src/http/modules/ngx_http_mp4_module.c
--- a/src/http/modules/ngx_http_mp4_module.c Tue Jun 01 17:37:51 2021 +0300
+++ b/src/http/modules/ngx_http_mp4_module.c Tue Jun 15 22:46:20 2021 +0000
@@ -43,6 +43,7 @@
typedef struct {
size_t buffer_size;
size_t max_buffer_size;
+ ngx_flag_t exact_start; /* "mp4_exact_start" directive: off/on */
} ngx_http_mp4_conf_t;
@@ -340,6 +341,13 @@
offsetof(ngx_http_mp4_conf_t, max_buffer_size),
NULL },
+ { ngx_string("mp4_exact_start"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
+ ngx_conf_set_flag_slot,
+ NGX_HTTP_LOC_CONF_OFFSET,
+ offsetof(ngx_http_mp4_conf_t, exact_start),
+ NULL },
+
ngx_null_command
};
@@ -2156,6 +2164,83 @@
static ngx_int_t
+ngx_http_mp4_exact_start_video(ngx_http_mp4_file_t *mp4,
+    ngx_http_mp4_trak_t *trak)
+{
+    uint32_t               n, speedup_samples, current_count;
+    ngx_buf_t             *data;
+    ngx_uint_t             sample_keyframe, start_sample_exact;
+    ngx_mp4_stts_entry_t  *entry, *entries_array;
+
+    data = trak->out[NGX_HTTP_MP4_STTS_DATA].buf;
+
+    /*
+     * Find the keyframe just before the desired start time, so the emitted
+     * mp4 begins on a keyframe.  The frames between that keyframe and the
+     * requested start are "sped up" below to duration 1, so they are not
+     * perceptible and video playback can start immediately (instead of
+     * waiting for the first keyframe _after_ the desired start time).
+     */
+    start_sample_exact = trak->start_sample;
+
+    for (n = 0; n < trak->sync_samples_entries; n++) {
+        /* each stss entry is the 1-based sample number of a keyframe */
+        sample_keyframe =
+            ngx_mp4_get_32value(trak->stss_data_buf.pos + (n * 4)) - 1;
+
+        if (sample_keyframe <= trak->start_sample) {
+            start_sample_exact = sample_keyframe;
+        }
+
+        if (sample_keyframe >= trak->start_sample) {
+            break;
+        }
+    }
+
+    if (start_sample_exact < trak->start_sample) {
+        /*
+         * Prepend an STTS entry with duration=1 covering the unwanted
+         * leading frames.  With constant-framerate video this typically
+         * turns a single-entry array into two entries.
+         */
+        speedup_samples = trak->start_sample - start_sample_exact;
+
+        ngx_log_debug3(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0,
+                       "exact trak start_sample move %ui to %ui "
+                       "(speed up %uD samples)",
+                       trak->start_sample, start_sample_exact,
+                       speedup_samples);
+
+        entries_array = ngx_palloc(mp4->request->pool,
+            (1 + trak->time_to_sample_entries)
+            * sizeof(ngx_mp4_stts_entry_t));
+        if (entries_array == NULL) {
+            return NGX_ERROR;
+        }
+
+        /* copy the existing entries after the slot for the new first entry */
+        entry = &entries_array[1];
+        ngx_memcpy(entry, (ngx_mp4_stts_entry_t *) data->pos,
+                   trak->time_to_sample_entries
+                   * sizeof(ngx_mp4_stts_entry_t));
+
+        current_count = ngx_mp4_get_32value(entry->count);
+        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0,
+                       "exact split in 2 video STTS entry from count:%uD",
+                       current_count);
+
+        /* the split entry must retain at least one normal-duration sample */
+        if (current_count <= speedup_samples) {
+            return NGX_ERROR;
+        }
+
+        ngx_mp4_set_32value(entry->count, current_count - speedup_samples);
+        ngx_log_debug2(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0,
+                       "exact split new[1]: count:%uD duration:%uD",
+                       ngx_mp4_get_32value(entry->count),
+                       ngx_mp4_get_32value(entry->duration));
+
+        entry--;
+        ngx_mp4_set_32value(entry->count, speedup_samples);
+        ngx_mp4_set_32value(entry->duration, 1);
+        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, mp4->file.log, 0,
+                       "exact split new[0]: count:%uD duration:1",
+                       ngx_mp4_get_32value(entry->count));
+
+        data->pos = (u_char *) entry;
+        trak->time_to_sample_entries++;
+        trak->start_sample = start_sample_exact;
+        data->last = (u_char *) (entry + trak->time_to_sample_entries);
+    }
+
+    return NGX_OK;
+}
+
+
+static ngx_int_t
ngx_http_mp4_crop_stts_data(ngx_http_mp4_file_t *mp4,
ngx_http_mp4_trak_t *trak, ngx_uint_t start)
{
@@ -2164,6 +2249,8 @@
ngx_buf_t *data;
ngx_uint_t start_sample, entries, start_sec;
ngx_mp4_stts_entry_t *entry, *end;
+ ngx_http_mp4_conf_t *conf;
+
if (start) {
start_sec = mp4->start;
@@ -2238,6 +2325,10 @@
"start_sample:%ui, new count:%uD",
trak->start_sample, count - rest);
+ conf = ngx_http_get_module_loc_conf(mp4->request, ngx_http_mp4_module);
+ if (conf->exact_start) {
+ ngx_http_mp4_exact_start_video(mp4, trak);
+ }
} else {
ngx_mp4_set_32value(entry->count, rest);
data->last = (u_char *) (entry + 1);
@@ -3590,6 +3681,7 @@
conf->buffer_size = NGX_CONF_UNSET_SIZE;
conf->max_buffer_size = NGX_CONF_UNSET_SIZE;
+ conf->exact_start = NGX_CONF_UNSET;
return conf;
}
It is desirable to avoid pool allocations at early stages of QUIC connection
processing. Currently, code in protection.c and tokens.c allocates memory
dynamically, while this is not strictly necessary, as the allocated objects
have a fixed size and sometimes a short lifetime. The patchset revises these
cases and removes pool usage.
This patchset prepares the base for more lightweight early packet processing
(parsing, retry, and rejection with an error without creating a connection
object or allocating memory).
Hi
I am looking to replace some Lua scripting that we use to implement an allowlist.
I found https://medium.com/geekculture/building-a-simple-bot-protection-with-nginx-…
and it seems to give me a good idea of how to port my Lua.
const fs = require('fs');
const badReputationIPs = loadFile('/var/lib/njs/ips.txt');
// Read a newline-delimited list from `file`; returns an empty array when
// the file cannot be read.
function loadFile(file: string): string[] {
    try {
        return fs.readFileSync(file).toString().split('\n');
    } catch (e) {
        // unable to read file
        return [];
    }
}
// Send known-bad client IPs to the block page; everyone else is served
// via the @pages location.
function verifyIP(r: NginxHTTPRequest): void {
    for (const ip of badReputationIPs) {
        if (ip === r.remoteAddress) {
            r.return(302, '/block.html');
            return;
        }
    }
    r.internalRedirect('@pages');
}
export default { verifyIP };
except I have the lua reloading every 60s by calling ngx.timer.at(60,
helpers.load_json) in a init_worker_by_lua
Is there an equivalent in njs? It seems not if you are starting a new
lightweight VM for every request.
Is the answer to do a subrequest and cache it or something?
Thanks,
Tom
Hello,
I recently encountered an issue where Nginx would hang for a very long
time, if not indefinitely, on responses which exceeded the FastCGI
buffer size (> ~4000 bytes) from an upstream source which, in this case,
was PHP-FPM. This issue appeared to only be happening on DigitalOcean's
App Platform service; I couldn't reproduce it locally. I did a lot of
testing and digging around, I eventually tracked it back to
DigitalOcean's system not supporting the `EPOLLRDHUP` event. After much
debugging and reading through Nginx's source code, I believe I found the
source to be two conditions which were missing a check for
`ngx_use_epoll_rdhup`. I made the changes and rebuilt nginx and
everything appears to be working fine now.
If anyone needs to reproduce the issue, I've published a basic example
at https://github.com/marcusball/nginx-epoll-bug. There are also
corresponding Docker Hub images which should be able to demonstrate an
example project with the bug and with the fix if they are deployed to
App Platform: `marcusball/nginx-rdhup-bug:without-fix` and
`marcusball/nginx-rdhup-bug:with-fix` respectively.
This is my first time contributing to Nginx, as well as the first time
trying to contribute via mailing list, so let me know if anything else
is needed. I also can't get the Mercurial Patchbomb extension working,
so I'm sending this manually and my apologies if anything gets formatted
incorrectly.
Marcus Ball
In https://www.nginx.com/blog/our-roadmap-quic-http-3-support-nginx/ (from
July 12, 2021) it is stated:
"Our current target for completing the code merge into the NGINX mainline
branch is the end of 2021 [...]"
It seems that this has not been completed yet. What is the current ETA for QUIC
and HTTP/3 support in the mainline branch?
Hi,
We work on cloud platforms and we have recently come across an nginx
vulnerability described at
https://mailman.nginx.org/pipermail/nginx-announce/2021/000300.html?_ga=2.6…
We are using Ubuntu 20.04, which ships nginx version 1.18. We are trying to
upgrade to nginx 1.20.1, where this vulnerability is remediated, but we also
need nginx-extras, and we cannot find an nginx-extras package for version
1.20 — only 1.18 is available. Can you suggest the best way to install
nginx 1.20.1 with nginx-extras?
Thanks
Kumar
Hi
I've been working on adding support for the OAUTHBEARER (RFC7628) and also the legacy XOAUTH2 (pre-RFC version still usable at google, microsoft and others and still the most commonly supported protocol in client libraries unfortunately) to the nginx mail proxy module.
Mostly this has been fairly straight forward, it's just adding ngx_*_auth_{oauthbearer,xoauth2} states, constants, handlers, etc. I'm passing the bearer token provided by the client to the backend auth process in the `Auth-Pass` header.
One change however, there's an additional optional response header from the backend auth process that's supported, `Auth-Error-Sasl`. It's expected in the failure case that the backend auth server will generate a base64 encoded JSON object that conforms to the error reporting in https://datatracker.ietf.org/doc/html/rfc7628#section-3.2.2 in this header.
If present, the value in this header is prefixed with a `+ ` and returned as the SASL response. We then wait for any line from the client (which we ignore) and then we exit the SASL mode and return back to standard protocol parsing. There's an example of this looks like in https://datatracker.ietf.org/doc/html/rfc7628#section-4.3
Anyway I'd appreciate if someone could look over these changes to see that they all look reasonable and like something that would be accepted back into the nginx upstream. There's a bit of a push within the mail community to try and bring more modern OAUTH2 style authentication to more services (not just the big players), but for that to be possible services need to be able to actually handle OAUTHBEARER/XOAUTH2 authentication, which as a first step means support on the server side in things like nginx, and then obviously adding support in the auth systems behind that.
https://github.com/robmueller/nginx/commits/add-xaouth2-oauthbearer-auth
Thanks in advance
Rob Mueller
robm(a)fastmail.fm