From 2afc050bd0e59d1ae5391c962e4c6c83120e8ebf Mon Sep 17 00:00:00 2001 From: Quantum Date: Mon, 15 Jun 2020 17:35:26 -0400 Subject: Correctly flush request body to uwsgi with SSL. The flush flag was not set when forwarding the request body to the uwsgi server. When using uwsgi_pass suwsgi://..., this causes the uwsgi server to wait indefinitely for the request body and eventually time out due to SSL buffering. This is essentially the same change as 4009:3183165283cc, which was made to ngx_http_proxy_module.c. This will fix the uwsgi bug https://github.com/unbit/uwsgi/issues/1490. --- src/http/modules/ngx_http_uwsgi_module.c | 1 + 1 file changed, 1 insertion(+) (limited to 'src/http/modules') diff --git a/src/http/modules/ngx_http_uwsgi_module.c b/src/http/modules/ngx_http_uwsgi_module.c index 56dc236ef..bfc8b1d78 100644 --- a/src/http/modules/ngx_http_uwsgi_module.c +++ b/src/http/modules/ngx_http_uwsgi_module.c @@ -1141,6 +1141,7 @@ ngx_http_uwsgi_create_request(ngx_http_request_t *r) r->upstream->request_bufs = cl; } + b->flush = 1; cl->next = NULL; return NGX_OK; -- cgit From b835b571846abb13b995498e7ee36fe0f4aaa3bf Mon Sep 17 00:00:00 2001 From: Maxim Dounin Date: Mon, 6 Jul 2020 18:36:17 +0300 Subject: Memcached: protect from too long responses. If a memcached response was followed by a correct trailer, and then the NUL character followed by some extra data - this was accepted by the trailer checking code. This in turn resulted in ctx->rest underflow and caused negative size buffer on the next reading from the upstream, followed by the "negative size buf in writer" alert. Fix is to always check for too long responses, so a correct trailer cannot be followed by extra data. --- src/http/modules/ngx_http_memcached_module.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'src/http/modules') diff --git a/src/http/modules/ngx_http_memcached_module.c b/src/http/modules/ngx_http_memcached_module.c index 775bd7e81..c82df6e33 100644 --- a/src/http/modules/ngx_http_memcached_module.c +++ b/src/http/modules/ngx_http_memcached_module.c @@ -485,10 +485,11 @@ ngx_http_memcached_filter(void *data, ssize_t bytes) if (u->length == (ssize_t) ctx->rest) { - if (ngx_strncmp(b->last, + if (bytes > u->length + || ngx_strncmp(b->last, ngx_http_memcached_end + NGX_HTTP_MEMCACHED_END - ctx->rest, bytes) - != 0) + != 0) { ngx_log_error(NGX_LOG_ERR, ctx->request->connection->log, 0, "memcached sent invalid trailer"); @@ -540,7 +541,9 @@ ngx_http_memcached_filter(void *data, ssize_t bytes) last += (size_t) (u->length - NGX_HTTP_MEMCACHED_END); - if (ngx_strncmp(last, ngx_http_memcached_end, b->last - last) != 0) { + if (bytes > u->length + || ngx_strncmp(last, ngx_http_memcached_end, b->last - last) != 0) + { ngx_log_error(NGX_LOG_ERR, ctx->request->connection->log, 0, "memcached sent invalid trailer"); -- cgit From a2abe31a85c030d14aabcbe1f13ef6cc538e86fa Mon Sep 17 00:00:00 2001 From: Maxim Dounin Date: Mon, 6 Jul 2020 18:36:19 +0300 Subject: Proxy: drop extra data sent by upstream. Previous behaviour was to pass everything to the client, but this seems to be suboptimal and causes issues (ticket #1695). Fix is to drop extra data instead, as it naturally happens in most clients. 
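Before the diff, the dropping policy described above -- forward at most Content-Length bytes, discard the rest, and stop reusing the connection -- can be sketched in isolation. The sketch below uses invented names (demo_upstream_t, demo_filter) and is not nginx code; "length" merely stands in for the number of bytes still expected per the Content-Length header, as in the filters patched below.

#include <stdio.h>

typedef struct {
    long long  length;      /* bytes still expected from the upstream */
    int        keepalive;   /* may the upstream connection be reused? */
    int        done;        /* stop processing further upstream data  */
} demo_upstream_t;

/* Returns how many of the received bytes may be forwarded to the client. */
static size_t
demo_filter(demo_upstream_t *u, size_t bytes)
{
    if (u->done) {
        return 0;                       /* data after close: ignore it */
    }

    if ((long long) bytes > u->length) {
        fprintf(stderr, "upstream sent more data than specified "
                        "in \"Content-Length\" header\n");
        bytes = (size_t) u->length;     /* truncate to the advertised size */
        u->keepalive = 0;               /* connection state is now unknown */
        u->done = 1;
    }

    u->length -= (long long) bytes;

    if (u->length == 0) {
        u->done = 1;                    /* response body is complete */
    }

    return bytes;
}

int
main(void)
{
    demo_upstream_t  u = { 5, 1, 0 };   /* Content-Length: 5 */

    /* Eight bytes arrive for a five-byte response: forward 5, drop 3. */
    printf("forwarded: %zu\n", demo_filter(&u, 8));
    printf("keepalive: %d\n", u.keepalive);

    return 0;
}

The change that follows applies the same bookkeeping in both the buffered (event pipe) and non-buffered proxy code paths.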
--- src/http/modules/ngx_http_proxy_module.c | 52 ++++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 9 deletions(-) (limited to 'src/http/modules') diff --git a/src/http/modules/ngx_http_proxy_module.c b/src/http/modules/ngx_http_proxy_module.c index 3aafb9996..c1c555ee4 100644 --- a/src/http/modules/ngx_http_proxy_module.c +++ b/src/http/modules/ngx_http_proxy_module.c @@ -2015,6 +2015,25 @@ ngx_http_proxy_copy_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) return NGX_OK; } + if (p->upstream_done) { + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0, + "http proxy data after close"); + return NGX_OK; + } + + if (p->length == 0) { + + ngx_log_error(NGX_LOG_WARN, p->log, 0, + "upstream sent more data than specified in " + "\"Content-Length\" header"); + + r = p->input_ctx; + r->upstream->keepalive = 0; + p->upstream_done = 1; + + return NGX_OK; + } + cl = ngx_chain_get_free_buf(p->pool, &p->free); if (cl == NULL) { return NGX_ERROR; @@ -2042,20 +2061,23 @@ ngx_http_proxy_copy_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) return NGX_OK; } + if (b->last - b->pos > p->length) { + + ngx_log_error(NGX_LOG_WARN, p->log, 0, + "upstream sent more data than specified in " + "\"Content-Length\" header"); + + b->last = b->pos + p->length; + p->upstream_done = 1; + + return NGX_OK; + } + p->length -= b->last - b->pos; if (p->length == 0) { r = p->input_ctx; - p->upstream_done = 1; r->upstream->keepalive = !r->upstream->headers_in.connection_close; - - } else if (p->length < 0) { - r = p->input_ctx; - p->upstream_done = 1; - - ngx_log_error(NGX_LOG_WARN, r->connection->log, 0, - "upstream sent more data than specified in " - "\"Content-Length\" header"); } return NGX_OK; @@ -2227,6 +2249,18 @@ ngx_http_proxy_non_buffered_copy_filter(void *data, ssize_t bytes) return NGX_OK; } + if (bytes > u->length) { + + ngx_log_error(NGX_LOG_WARN, r->connection->log, 0, + "upstream sent more data than specified in " + "\"Content-Length\" header"); + + cl->buf->last = cl->buf->pos + u->length; + u->length = 0; + + return NGX_OK; + } + u->length -= bytes; if (u->length == 0) { -- cgit From 156e193408f8c1847f911b8758aa315d71c52211 Mon Sep 17 00:00:00 2001 From: Maxim Dounin Date: Mon, 6 Jul 2020 18:36:20 +0300 Subject: Proxy: detection of data after final chunk. Previously, additional data after final chunk was either ignored (in the same buffer, or during unbuffered proxying) or sent to the client (in the next buffer already if it was already read from the socket). Now additional data are properly detected and ignored in all cases. Additionally, a warning is now logged and keepalive is disabled in the connection. 
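The condition added in the diff below boils down to this: once the chunked parser has consumed the final chunk, any unconsumed bytes left in the buffer are garbage. A minimal standalone illustration, with hypothetical demo_* names rather than nginx code:

#include <stdio.h>

typedef struct {
    const char  *pos;    /* first byte not yet consumed by the parser */
    const char  *last;   /* end of the data read from the upstream    */
} demo_buf_t;

/* Called after the final chunk has been parsed successfully. */
static void
demo_check_after_final_chunk(demo_buf_t *b, int *keepalive)
{
    if (b->pos != b->last) {
        fprintf(stderr, "upstream sent data after final chunk\n");
        *keepalive = 0;   /* do not reuse a connection in an unknown state */
    }
}

int
main(void)
{
    /* Suppose "0\r\n\r\n" was consumed and "junk" remains in the buffer. */
    const char  leftover[] = "junk";
    demo_buf_t  b = { leftover, leftover + sizeof(leftover) - 1 };
    int         keepalive = 1;

    demo_check_after_final_chunk(&b, &keepalive);
    printf("keepalive: %d\n", keepalive);

    return 0;
}

The diff applies this check in both the buffered and unbuffered chunked filters, and also catches data arriving in later reads by leaving p->length (or u->length) at 0 and testing it on entry.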
--- src/http/modules/ngx_http_proxy_module.c | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) (limited to 'src/http/modules') diff --git a/src/http/modules/ngx_http_proxy_module.c b/src/http/modules/ngx_http_proxy_module.c index c1c555ee4..6cf15759c 100644 --- a/src/http/modules/ngx_http_proxy_module.c +++ b/src/http/modules/ngx_http_proxy_module.c @@ -2104,6 +2104,23 @@ ngx_http_proxy_chunked_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) return NGX_ERROR; } + if (p->upstream_done) { + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0, + "http proxy data after close"); + return NGX_OK; + } + + if (p->length == 0) { + + ngx_log_error(NGX_LOG_WARN, p->log, 0, + "upstream sent data after final chunk"); + + r->upstream->keepalive = 0; + p->upstream_done = 1; + + return NGX_OK; + } + b = NULL; prev = &buf->shadow; @@ -2166,9 +2183,15 @@ ngx_http_proxy_chunked_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) /* a whole response has been parsed successfully */ - p->upstream_done = 1; + p->length = 0; r->upstream->keepalive = !r->upstream->headers_in.connection_close; + if (buf->pos != buf->last) { + ngx_log_error(NGX_LOG_WARN, p->log, 0, + "upstream sent data after final chunk"); + r->upstream->keepalive = 0; + } + break; } @@ -2347,6 +2370,12 @@ ngx_http_proxy_non_buffered_chunked_filter(void *data, ssize_t bytes) u->keepalive = !u->headers_in.connection_close; u->length = 0; + if (buf->pos != buf->last) { + ngx_log_error(NGX_LOG_WARN, r->connection->log, 0, + "upstream sent data after final chunk"); + u->keepalive = 0; + } + break; } -- cgit From 7f2490c43cec0367c94e9e0a3881f4e5e32063de Mon Sep 17 00:00:00 2001 From: Maxim Dounin Date: Mon, 6 Jul 2020 18:36:21 +0300 Subject: Proxy: style. --- src/http/modules/ngx_http_proxy_module.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/http/modules') diff --git a/src/http/modules/ngx_http_proxy_module.c b/src/http/modules/ngx_http_proxy_module.c index 6cf15759c..6cf2cbde0 100644 --- a/src/http/modules/ngx_http_proxy_module.c +++ b/src/http/modules/ngx_http_proxy_module.c @@ -2206,13 +2206,13 @@ ngx_http_proxy_chunked_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) /* invalid response */ - ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + ngx_log_error(NGX_LOG_ERR, p->log, 0, "upstream sent invalid chunked response"); return NGX_ERROR; } - ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, p->log, 0, "http proxy chunked state %ui, length %O", ctx->chunked.state, p->length); -- cgit From dfcfcc5a881bf4b349f74c9a0a04da2d861f02bf Mon Sep 17 00:00:00 2001 From: Maxim Dounin Date: Mon, 6 Jul 2020 18:36:22 +0300 Subject: Upstream: drop extra data sent by upstream. Previous behaviour was to pass everything to the client, but this seems to be suboptimal and causes issues (ticket #1695). Fix is to drop extra data instead, as it naturally happens in most clients. This change covers generic buffered and unbuffered filters as used in the scgi and uwsgi modules. Appropriate input filter init handlers are provided by the scgi and uwsgi modules to set corresponding lengths. Note that for responses to HEAD requests there is an exception: we do allow any response length. This is because responses to HEAD requests might be actual full responses, and it is up to nginx to remove the response body. If caching is enabled, only full responses matching the Content-Length header will be cached (see b779728b180c). 
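The per-response length rule described above, implemented twice (once per module) in the diff that follows, is simple enough to sketch on its own. The helper and its constants below are invented for illustration; -1 stands for "no limit", matching the exception for HEAD requests noted above.

#include <stdio.h>

enum { DEMO_HEAD = 1, DEMO_GET = 2 };

static long long
demo_expected_length(int status, int method, long long content_length_n)
{
    if (status == 204 || status == 304) {
        return 0;                  /* these responses never carry a body */
    }

    if (method == DEMO_HEAD) {
        return -1;                 /* allow any length; nginx drops the body */
    }

    return content_length_n;       /* enforce the Content-Length header */
}

int
main(void)
{
    printf("204 GET : %lld\n", demo_expected_length(204, DEMO_GET, 10));
    printf("200 HEAD: %lld\n", demo_expected_length(200, DEMO_HEAD, 10));
    printf("200 GET : %lld\n", demo_expected_length(200, DEMO_GET, 10));

    return 0;
}

In the real handlers the chosen value is stored into both u->pipe->length (buffered) and u->length (non-buffered), so the same limit is enforced on either code path.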
--- src/http/modules/ngx_http_scgi_module.c | 36 ++++++++++++++++++++++++++++++++ src/http/modules/ngx_http_uwsgi_module.c | 36 ++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) (limited to 'src/http/modules') diff --git a/src/http/modules/ngx_http_scgi_module.c b/src/http/modules/ngx_http_scgi_module.c index 7216f781d..600999c88 100644 --- a/src/http/modules/ngx_http_scgi_module.c +++ b/src/http/modules/ngx_http_scgi_module.c @@ -49,6 +49,7 @@ static ngx_int_t ngx_http_scgi_create_request(ngx_http_request_t *r); static ngx_int_t ngx_http_scgi_reinit_request(ngx_http_request_t *r); static ngx_int_t ngx_http_scgi_process_status_line(ngx_http_request_t *r); static ngx_int_t ngx_http_scgi_process_header(ngx_http_request_t *r); +static ngx_int_t ngx_http_scgi_input_filter_init(void *data); static void ngx_http_scgi_abort_request(ngx_http_request_t *r); static void ngx_http_scgi_finalize_request(ngx_http_request_t *r, ngx_int_t rc); @@ -534,6 +535,10 @@ ngx_http_scgi_handler(ngx_http_request_t *r) u->pipe->input_filter = ngx_event_pipe_copy_input_filter; u->pipe->input_ctx = r; + u->input_filter_init = ngx_http_scgi_input_filter_init; + u->input_filter = ngx_http_upstream_non_buffered_filter; + u->input_filter_ctx = r; + if (!scf->upstream.request_buffering && scf->upstream.pass_request_body && !r->headers_in.chunked) @@ -1145,6 +1150,37 @@ ngx_http_scgi_process_header(ngx_http_request_t *r) } +static ngx_int_t +ngx_http_scgi_input_filter_init(void *data) +{ + ngx_http_request_t *r = data; + ngx_http_upstream_t *u; + + u = r->upstream; + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http scgi filter init s:%ui l:%O", + u->headers_in.status_n, u->headers_in.content_length_n); + + if (u->headers_in.status_n == NGX_HTTP_NO_CONTENT + || u->headers_in.status_n == NGX_HTTP_NOT_MODIFIED) + { + u->pipe->length = 0; + u->length = 0; + + } else if (r->method == NGX_HTTP_HEAD) { + u->pipe->length = -1; + u->length = -1; + + } else { + u->pipe->length = u->headers_in.content_length_n; + u->length = u->headers_in.content_length_n; + } + + return NGX_OK; +} + + static void ngx_http_scgi_abort_request(ngx_http_request_t *r) { diff --git a/src/http/modules/ngx_http_uwsgi_module.c b/src/http/modules/ngx_http_uwsgi_module.c index bfc8b1d78..fe15ee80d 100644 --- a/src/http/modules/ngx_http_uwsgi_module.c +++ b/src/http/modules/ngx_http_uwsgi_module.c @@ -67,6 +67,7 @@ static ngx_int_t ngx_http_uwsgi_create_request(ngx_http_request_t *r); static ngx_int_t ngx_http_uwsgi_reinit_request(ngx_http_request_t *r); static ngx_int_t ngx_http_uwsgi_process_status_line(ngx_http_request_t *r); static ngx_int_t ngx_http_uwsgi_process_header(ngx_http_request_t *r); +static ngx_int_t ngx_http_uwsgi_input_filter_init(void *data); static void ngx_http_uwsgi_abort_request(ngx_http_request_t *r); static void ngx_http_uwsgi_finalize_request(ngx_http_request_t *r, ngx_int_t rc); @@ -703,6 +704,10 @@ ngx_http_uwsgi_handler(ngx_http_request_t *r) u->pipe->input_filter = ngx_event_pipe_copy_input_filter; u->pipe->input_ctx = r; + u->input_filter_init = ngx_http_uwsgi_input_filter_init; + u->input_filter = ngx_http_upstream_non_buffered_filter; + u->input_filter_ctx = r; + if (!uwcf->upstream.request_buffering && uwcf->upstream.pass_request_body && !r->headers_in.chunked) @@ -1356,6 +1361,37 @@ ngx_http_uwsgi_process_header(ngx_http_request_t *r) } +static ngx_int_t +ngx_http_uwsgi_input_filter_init(void *data) +{ + ngx_http_request_t *r = data; + ngx_http_upstream_t *u; + + u = r->upstream; + + 
ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http uwsgi filter init s:%ui l:%O", + u->headers_in.status_n, u->headers_in.content_length_n); + + if (u->headers_in.status_n == NGX_HTTP_NO_CONTENT + || u->headers_in.status_n == NGX_HTTP_NOT_MODIFIED) + { + u->pipe->length = 0; + u->length = 0; + + } else if (r->method == NGX_HTTP_HEAD) { + u->pipe->length = -1; + u->length = -1; + + } else { + u->pipe->length = u->headers_in.content_length_n; + u->length = u->headers_in.content_length_n; + } + + return NGX_OK; +} + + static void ngx_http_uwsgi_abort_request(ngx_http_request_t *r) { -- cgit From 1194ba36a0685efb0818d28dad5ef518949c910b Mon Sep 17 00:00:00 2001 From: Maxim Dounin Date: Mon, 6 Jul 2020 18:36:23 +0300 Subject: FastCGI: protection from responses with wrong length. Previous behaviour was to pass everything to the client, but this seems to be suboptimal and causes issues (ticket #1695). Fix is to drop extra data instead, as it naturally happens in most clients. Additionally, we now also issue a warning if the response is too short, and make sure the fact it is truncated is propagated to the client. The u->error flag is introduced to make it possible to propagate the error to the client in case of unbuffered proxying. For responses to HEAD requests there is an exception: we do allow both responses without body and responses with body matching the Content-Length header. --- src/http/modules/ngx_http_fastcgi_module.c | 120 +++++++++++++++++++++++++---- 1 file changed, 106 insertions(+), 14 deletions(-) (limited to 'src/http/modules') diff --git a/src/http/modules/ngx_http_fastcgi_module.c b/src/http/modules/ngx_http_fastcgi_module.c index 2be067214..e50d1a70d 100644 --- a/src/http/modules/ngx_http_fastcgi_module.c +++ b/src/http/modules/ngx_http_fastcgi_module.c @@ -81,12 +81,15 @@ typedef struct { size_t length; size_t padding; + off_t rest; + ngx_chain_t *free; ngx_chain_t *busy; unsigned fastcgi_stdout:1; unsigned large_stderr:1; unsigned header_sent:1; + unsigned closed:1; ngx_array_t *split_parts; @@ -2075,13 +2078,31 @@ ngx_http_fastcgi_process_header(ngx_http_request_t *r) static ngx_int_t ngx_http_fastcgi_input_filter_init(void *data) { - ngx_http_request_t *r = data; + ngx_http_request_t *r = data; + + ngx_http_upstream_t *u; + ngx_http_fastcgi_ctx_t *f; ngx_http_fastcgi_loc_conf_t *flcf; + u = r->upstream; + + f = ngx_http_get_module_ctx(r, ngx_http_fastcgi_module); flcf = ngx_http_get_module_loc_conf(r, ngx_http_fastcgi_module); - r->upstream->pipe->length = flcf->keep_conn ? - (off_t) sizeof(ngx_http_fastcgi_header_t) : -1; + u->pipe->length = flcf->keep_conn ? 
+ (off_t) sizeof(ngx_http_fastcgi_header_t) : -1; + + if (u->headers_in.status_n == NGX_HTTP_NO_CONTENT + || u->headers_in.status_n == NGX_HTTP_NOT_MODIFIED) + { + f->rest = 0; + + } else if (r->method == NGX_HTTP_HEAD) { + f->rest = -2; + + } else { + f->rest = u->headers_in.content_length_n; + } return NGX_OK; } @@ -2106,6 +2127,15 @@ ngx_http_fastcgi_input_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) f = ngx_http_get_module_ctx(r, ngx_http_fastcgi_module); flcf = ngx_http_get_module_loc_conf(r, ngx_http_fastcgi_module); + if (p->upstream_done || f->closed) { + r->upstream->keepalive = 0; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0, + "http fastcgi data after close"); + + return NGX_OK; + } + b = NULL; prev = &buf->shadow; @@ -2128,13 +2158,25 @@ ngx_http_fastcgi_input_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) if (f->type == NGX_HTTP_FASTCGI_STDOUT && f->length == 0) { f->state = ngx_http_fastcgi_st_padding; + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0, + "http fastcgi closed stdout"); + + if (f->rest > 0) { + ngx_log_error(NGX_LOG_ERR, p->log, 0, + "upstream prematurely closed " + "FastCGI stdout"); + + p->upstream_error = 1; + p->upstream_eof = 0; + f->closed = 1; + + break; + } + if (!flcf->keep_conn) { p->upstream_done = 1; } - ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0, - "http fastcgi closed stdout"); - continue; } @@ -2143,6 +2185,18 @@ ngx_http_fastcgi_input_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0, "http fastcgi sent end request"); + if (f->rest > 0) { + ngx_log_error(NGX_LOG_ERR, p->log, 0, + "upstream prematurely closed " + "FastCGI request"); + + p->upstream_error = 1; + p->upstream_eof = 0; + f->closed = 1; + + break; + } + if (!flcf->keep_conn) { p->upstream_done = 1; break; @@ -2289,15 +2343,31 @@ ngx_http_fastcgi_input_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) f->pos += f->length; b->last = f->pos; - continue; + } else { + f->length -= f->last - f->pos; + f->pos = f->last; + b->last = f->last; } - f->length -= f->last - f->pos; + if (f->rest == -2) { + f->rest = r->upstream->headers_in.content_length_n; + } - b->last = f->last; + if (f->rest >= 0) { - break; + if (b->last - b->pos > f->rest) { + ngx_log_error(NGX_LOG_WARN, p->log, 0, + "upstream sent more data than specified in " + "\"Content-Length\" header"); + + b->last = b->pos + f->rest; + p->upstream_done = 1; + + break; + } + f->rest -= b->last - b->pos; + } } if (flcf->keep_conn) { @@ -2391,6 +2461,14 @@ ngx_http_fastcgi_non_buffered_filter(void *data, ssize_t bytes) if (f->type == NGX_HTTP_FASTCGI_END_REQUEST) { + if (f->rest > 0) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "upstream prematurely closed " + "FastCGI request"); + u->error = 1; + break; + } + if (f->pos + f->padding < f->last) { u->length = 0; break; @@ -2510,13 +2588,27 @@ ngx_http_fastcgi_non_buffered_filter(void *data, ssize_t bytes) f->pos += f->length; b->last = f->pos; - continue; + } else { + f->length -= f->last - f->pos; + f->pos = f->last; + b->last = f->last; } - f->length -= f->last - f->pos; - b->last = f->last; + if (f->rest >= 0) { + + if (b->last - b->pos > f->rest) { + ngx_log_error(NGX_LOG_WARN, r->connection->log, 0, + "upstream sent more data than specified in " + "\"Content-Length\" header"); - break; + b->last = b->pos + f->rest; + u->length = 0; + + break; + } + + f->rest -= b->last - b->pos; + } } return NGX_OK; -- cgit From 5348706fe607c2b6704b52078cba77ee8fa298b8 Mon Sep 17 00:00:00 2001 From: Maxim Dounin Date: Mon, 6 Jul 2020 18:36:25 +0300 Subject: 
gRPC: generate error when response size is wrong. As long as the "Content-Length" header is given, we now make sure it exactly matches the size of the response. If it doesn't, the response is considered malformed and must not be forwarded (https://tools.ietf.org/html/rfc7540#section-8.1.2.6). While it is not really possible to "not forward" the response which is already being forwarded, we generate an error instead, which is the closest equivalent. Previous behaviour was to pass everything to the client, but this seems to be suboptimal and causes issues (ticket #1695). Also this directly contradicts HTTP/2 specification requirements. Note that the new behaviour for the gRPC proxy is more strict than that applied in other variants of proxying. This is intentional, as HTTP/2 specification requires us to do so, while in other types of proxying malformed responses from backends are well known and historically tolerated. --- src/http/modules/ngx_http_grpc_module.c | 39 ++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) (limited to 'src/http/modules') diff --git a/src/http/modules/ngx_http_grpc_module.c b/src/http/modules/ngx_http_grpc_module.c index 992211e73..ab4ad6be1 100644 --- a/src/http/modules/ngx_http_grpc_module.c +++ b/src/http/modules/ngx_http_grpc_module.c @@ -84,6 +84,8 @@ typedef struct { ngx_uint_t pings; ngx_uint_t settings; + off_t length; + ssize_t send_window; size_t recv_window; @@ -1953,10 +1955,28 @@ ngx_http_grpc_filter_init(void *data) r = ctx->request; u = r->upstream; - u->length = 1; + if (u->headers_in.status_n == NGX_HTTP_NO_CONTENT + || u->headers_in.status_n == NGX_HTTP_NOT_MODIFIED + || r->method == NGX_HTTP_HEAD) + { + ctx->length = 0; + + } else { + ctx->length = u->headers_in.content_length_n; + } if (ctx->end_stream) { + + if (ctx->length > 0) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "upstream prematurely closed stream"); + return NGX_ERROR; + } + u->length = 0; + + } else { + u->length = 1; } return NGX_OK; @@ -1999,6 +2019,12 @@ ngx_http_grpc_filter(void *data, ssize_t bytes) if (ctx->done) { + if (ctx->length > 0) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "upstream prematurely closed stream"); + return NGX_ERROR; + } + /* * We have finished parsing the response and the * remaining control frames. If there are unsent @@ -2052,6 +2078,17 @@ ngx_http_grpc_filter(void *data, ssize_t bytes) return NGX_ERROR; } + if (ctx->length != -1) { + if ((off_t) ctx->rest > ctx->length) { + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "upstream sent response body larger " + "than indicated content length"); + return NGX_ERROR; + } + + ctx->length -= ctx->rest; + } + if (ctx->rest > ctx->recv_window) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "upstream violated stream flow control, " -- cgit
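Taken together, the gRPC change amounts to two checks against the advertised Content-Length: no DATA payload may exceed what is still expected, and the stream must not end while bytes are still expected. A standalone sketch of that accounting, using invented demo_* names rather than the real ctx fields:

#include <stdio.h>

/* Consume one DATA payload of frame_size bytes against the remaining
 * expected length; -1 means no Content-Length was given. */
static int
demo_account_frame(long long *length, long long frame_size)
{
    if (*length == -1) {
        return 0;                        /* nothing to check */
    }

    if (frame_size > *length) {
        fprintf(stderr, "upstream sent response body larger "
                        "than indicated content length\n");
        return -1;                       /* malformed: do not forward */
    }

    *length -= frame_size;
    return 0;
}

/* Called when the stream is closed by the upstream. */
static int
demo_stream_closed(long long length)
{
    if (length > 0) {
        fprintf(stderr, "upstream prematurely closed stream\n");
        return -1;                       /* response is shorter than advertised */
    }

    return 0;
}

int
main(void)
{
    long long  length = 10;              /* Content-Length: 10 */

    demo_account_frame(&length, 4);      /* fine: 6 bytes still expected    */
    demo_account_frame(&length, 7);      /* error: larger than advertised   */
    demo_stream_closed(length);          /* error: closed with 6 bytes owed */

    return 0;
}

Unlike the proxy, FastCGI, scgi and uwsgi changes earlier in this series, both failure modes here produce a hard error rather than silent truncation, which is the stricter behaviour the commit message justifies by the HTTP/2 requirements.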