From 829bf07c7c7a4d31106c19c3ed2151ea883a55ce Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Thu, 28 May 2020 19:32:52 +0300 Subject: Adjusted tag 1.18.0 to include 9e14c63773be. --- .hgtags | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.hgtags b/.hgtags index 7bf00581..f123c0f6 100644 --- a/.hgtags +++ b/.hgtags @@ -24,4 +24,4 @@ b391df5f0102aa6afe660cfc863729c1b1111c9e 1.12.0 801ac82f80fb2b2333f2c03ac9c3df6b7cec130a 1.15.0 8bab088952dd9d7caa3d04fd4b3026cef26fcf7d 1.16.0 4b13438632bc37ca599113be90af64f6e2f09d83 1.17.0 -a34bc498d976affa5b50584d3d93a4a9a04d5c39 1.18.0 +9e14c63773be52613dd47dea9fd113037f15a3eb 1.18.0 -- cgit From 5fa3108e63ed0029daac34a3a24b45d4a8644add Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Mon, 8 Jun 2020 16:09:07 +0300 Subject: Version bump. --- version | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version b/version index f1cf1dc6..6b8e9c50 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ # Copyright (C) NGINX, Inc. -NXT_VERSION=1.18.0 -NXT_VERNUM=11800 +NXT_VERSION=1.19.0 +NXT_VERNUM=11900 -- cgit From 22c917bead38df560bb112006d0b4a28aa8919d3 Mon Sep 17 00:00:00 2001 From: Andrei Belov Date: Tue, 16 Jun 2020 15:02:29 +0300 Subject: Packages: added Fedora 32 support. --- pkg/rpm/Makefile | 6 ++- pkg/rpm/Makefile.python38 | 57 ++++++++++++++++++++++ .../rpmbuild/SOURCES/unit.example-python38-config | 17 +++++++ 3 files changed, 79 insertions(+), 1 deletion(-) create mode 100644 pkg/rpm/Makefile.python38 create mode 100644 pkg/rpm/rpmbuild/SOURCES/unit.example-python38-config diff --git a/pkg/rpm/Makefile b/pkg/rpm/Makefile index 8bc96d99..70100896 100644 --- a/pkg/rpm/Makefile +++ b/pkg/rpm/Makefile @@ -124,8 +124,12 @@ endif ifeq ($(OSVER), fedora) include Makefile.php +ifeq ($(shell test `rpm --eval '0%{?fedora} -lt 32'`; echo $$?),0) include Makefile.python27 -ifeq ($(shell test `rpm --eval '0%{?fedora} -ge 29'`; echo $$?),0) +endif +ifeq ($(shell test `rpm --eval '0%{?fedora} -ge 32'`; echo $$?),0) +include Makefile.python38 +else ifeq ($(shell test `rpm --eval '0%{?fedora} -ge 29'`; echo $$?),0) include Makefile.python37 else include Makefile.python36 diff --git a/pkg/rpm/Makefile.python38 b/pkg/rpm/Makefile.python38 new file mode 100644 index 00000000..ffcca07f --- /dev/null +++ b/pkg/rpm/Makefile.python38 @@ -0,0 +1,57 @@ +MODULES+= python38 +MODULE_SUFFIX_python38= python3.8 + +MODULE_SUMMARY_python38= Python 3.8 module for NGINX Unit + +MODULE_VERSION_python38= $(VERSION) +MODULE_RELEASE_python38= 1 + +MODULE_CONFARGS_python38= python --config=python3.8-config +MODULE_MAKEARGS_python38= python3.8 +MODULE_INSTARGS_python38= python3.8-install + +MODULE_SOURCES_python38= unit.example-python-app \ + unit.example-python38-config + +ifneq (,$(findstring $(OSVER),opensuse-tumbleweed sles fedora amazonlinux2)) +BUILD_DEPENDS_python38= python3-devel +else +BUILD_DEPENDS_python38= python38-devel +endif + +BUILD_DEPENDS+= $(BUILD_DEPENDS_python38) + +define MODULE_PREINSTALL_python38 +%{__mkdir} -p %{buildroot}%{_datadir}/doc/unit-python38/examples/python-app +%{__install} -m 644 -p %{SOURCE100} \ + %{buildroot}%{_datadir}/doc/unit-python38/examples/python-app/wsgi.py +%{__install} -m 644 -p %{SOURCE101} \ + %{buildroot}%{_datadir}/doc/unit-python38/examples/unit.config +endef +export MODULE_PREINSTALL_python38 + +define MODULE_FILES_python38 +%{_libdir}/unit/modules/* +%{_libdir}/unit/debug-modules/* +endef +export MODULE_FILES_python38 + +define MODULE_POST_python38 +cat < Date: Tue, 23 Jun 2020 11:01:20 
+0100 Subject: Python: fixed interpreter path in ./configure. --- auto/modules/python | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/auto/modules/python b/auto/modules/python index ad862f3c..ab314013 100644 --- a/auto/modules/python +++ b/auto/modules/python @@ -68,7 +68,6 @@ if /bin/sh -c "$NXT_PYTHON_CONFIG --prefix" >> $NXT_AUTOCONF_ERR 2>&1; then NXT_PYTHON_CONFIG="${NXT_PYTHON_CONFIG} --embed" fi - NXT_PYTHON_EXEC=`${NXT_PYTHON_CONFIG} --exec-prefix`/bin/${NXT_PYTHON} NXT_PYTHON_INCLUDE=`${NXT_PYTHON_CONFIG} --includes` NXT_PYTHON_LIBS=`${NXT_PYTHON_CONFIG} --ldflags` @@ -133,7 +132,7 @@ fi NXT_PYTHON_MOUNTS_HEADER=$NXT_BUILD_DIR/nxt_python_mounts.h -$NXT_PYTHON_EXEC -c 'import os.path +$NXT_PYTHON -c 'import os.path import sys pyver = "python" + str(sys.version_info[0]) + "." + str(sys.version_info[1]) -- cgit From f8ba5d6c0093090e81819481e523af5fd27ab1e3 Mon Sep 17 00:00:00 2001 From: Tiago Natel de Moura Date: Tue, 23 Jun 2020 12:11:27 +0100 Subject: Isolation: fixed build when features aren't detected. --- auto/isolation | 20 +---- src/nxt_application.c | 203 +++++++++++++++++++++++++---------------------- src/nxt_process.c | 16 +--- src/perl/nxt_perl_psgi.c | 4 - src/ruby/nxt_ruby.c | 2 - 5 files changed, 116 insertions(+), 129 deletions(-) diff --git a/auto/isolation b/auto/isolation index 4238b859..fd35f8ed 100644 --- a/auto/isolation +++ b/auto/isolation @@ -94,24 +94,8 @@ nxt_feature_libs= nxt_feature_test="#include int main() { - return mount((void*)0, (void*)0, (void*)0, 0, (void*)0); - }" -. auto/feature - -if [ $nxt_found = yes ]; then - NXT_HAVE_MOUNT=YES -fi - - -nxt_feature="Bind mount()" -nxt_feature_name=NXT_HAVE_BIND_MOUNT -nxt_feature_run=no -nxt_feature_incs= -nxt_feature_libs= -nxt_feature_test="#include - - int main() { - return MS_BIND | MS_REC + return mount(\"/\", \"/\", \"bind\", + MS_BIND | MS_REC, \"\"); }" . 
auto/feature diff --git a/src/nxt_application.c b/src/nxt_application.c index 566bf256..62167040 100644 --- a/src/nxt_application.c +++ b/src/nxt_application.c @@ -41,19 +41,21 @@ static void nxt_discovery_quit(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data); static nxt_app_module_t *nxt_app_module_load(nxt_task_t *task, const char *name); -static nxt_int_t nxt_app_prefork(nxt_task_t *task, nxt_process_t *process, +static nxt_int_t nxt_app_main_prefork(nxt_task_t *task, nxt_process_t *process, nxt_mp_t *mp); static nxt_int_t nxt_app_setup(nxt_task_t *task, nxt_process_t *process); static nxt_int_t nxt_app_set_environment(nxt_conf_value_t *environment); static u_char *nxt_cstr_dup(nxt_mp_t *mp, u_char *dst, u_char *src); #if (NXT_HAVE_ISOLATION_ROOTFS) -static nxt_int_t nxt_app_prepare_rootfs(nxt_task_t *task, - nxt_process_t *process); -static nxt_int_t nxt_app_prepare_lang_mounts(nxt_task_t *task, +static nxt_int_t nxt_app_set_isolation_mounts(nxt_task_t *task, + nxt_process_t *process, nxt_str_t *app_type); +static nxt_int_t nxt_app_set_lang_mounts(nxt_task_t *task, nxt_process_t *process, nxt_array_t *syspaths); static nxt_int_t nxt_app_set_isolation_rootfs(nxt_task_t *task, nxt_conf_value_t *isolation, nxt_process_t *process); +static nxt_int_t nxt_app_prepare_rootfs(nxt_task_t *task, + nxt_process_t *process); #endif static nxt_int_t nxt_app_set_isolation(nxt_task_t *task, @@ -124,7 +126,7 @@ const nxt_process_init_t nxt_discovery_process = { const nxt_process_init_t nxt_app_process = { .type = NXT_PROCESS_APP, .setup = nxt_app_setup, - .prefork = nxt_app_prefork, + .prefork = nxt_app_main_prefork, .restart = 0, .start = NULL, /* set to module->start */ .port_handlers = &nxt_app_process_port_handlers, @@ -472,22 +474,16 @@ nxt_discovery_quit(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data) static nxt_int_t -nxt_app_prefork(nxt_task_t *task, nxt_process_t *process, nxt_mp_t *mp) +nxt_app_main_prefork(nxt_task_t *task, nxt_process_t *process, nxt_mp_t *mp) { - nxt_int_t cap_setid, cap_chroot; + nxt_int_t cap_setid; nxt_int_t ret; nxt_runtime_t *rt; nxt_common_app_conf_t *app_conf; - nxt_app_lang_module_t *lang; rt = task->thread->runtime; app_conf = process->data.app; cap_setid = rt->capabilities.setid; - cap_chroot = rt->capabilities.chroot; - - lang = nxt_app_lang_module(rt, &app_conf->type); - - nxt_assert(lang != NULL); if (app_conf->isolation != NULL) { ret = nxt_app_set_isolation(task, app_conf->isolation, process); @@ -499,24 +495,14 @@ nxt_app_prefork(nxt_task_t *task, nxt_process_t *process, nxt_mp_t *mp) #if (NXT_HAVE_CLONE_NEWUSER) if (nxt_is_clone_flag_set(process->isolation.clone.flags, NEWUSER)) { cap_setid = 1; - cap_chroot = 1; } #endif #if (NXT_HAVE_ISOLATION_ROOTFS) if (process->isolation.rootfs != NULL) { - if (!cap_chroot) { - nxt_log(task, NXT_LOG_ERR, - "The \"rootfs\" field requires privileges"); - - return NXT_ERROR; - } - - if (lang->mounts != NULL && lang->mounts->nelts > 0) { - ret = nxt_app_prepare_lang_mounts(task, process, lang->mounts); - if (nxt_slow_path(ret != NXT_OK)) { - return NXT_ERROR; - } + ret = nxt_app_set_isolation_mounts(task, process, &app_conf->type); + if (nxt_slow_path(ret != NXT_OK)) { + return ret; } } #endif @@ -765,71 +751,6 @@ nxt_app_set_isolation_namespaces(nxt_task_t *task, nxt_conf_value_t *isolation, #endif -#if (NXT_HAVE_ISOLATION_ROOTFS) - -static nxt_int_t -nxt_app_set_isolation_rootfs(nxt_task_t *task, nxt_conf_value_t *isolation, - nxt_process_t *process) -{ - nxt_str_t str; - nxt_conf_value_t *obj; - - static 
nxt_str_t rootfs_name = nxt_string("rootfs"); - - obj = nxt_conf_get_object_member(isolation, &rootfs_name, NULL); - if (obj != NULL) { - nxt_conf_get_string(obj, &str); - - if (nxt_slow_path(str.length <= 1 || str.start[0] != '/')) { - nxt_log(task, NXT_LOG_ERR, "rootfs requires an absolute path other " - "than \"/\" but given \"%V\"", &str); - - return NXT_ERROR; - } - - if (str.start[str.length - 1] == '/') { - str.length--; - } - - process->isolation.rootfs = nxt_mp_alloc(process->mem_pool, - str.length + 1); - - if (nxt_slow_path(process->isolation.rootfs == NULL)) { - return NXT_ERROR; - } - - nxt_memcpy(process->isolation.rootfs, str.start, str.length); - - process->isolation.rootfs[str.length] = '\0'; - } - - return NXT_OK; -} - -#endif - - -#if (NXT_HAVE_PR_SET_NO_NEW_PRIVS) - -static nxt_int_t -nxt_app_set_isolation_new_privs(nxt_task_t *task, nxt_conf_value_t *isolation, - nxt_process_t *process) -{ - nxt_conf_value_t *obj; - - static nxt_str_t new_privs_name = nxt_string("new_privs"); - - obj = nxt_conf_get_object_member(isolation, &new_privs_name, NULL); - if (obj != NULL) { - process->isolation.new_privs = nxt_conf_get_boolean(obj); - } - - return NXT_OK; -} - -#endif - - #if (NXT_HAVE_CLONE_NEWUSER) static nxt_int_t @@ -1002,7 +923,83 @@ nxt_app_clone_flags(nxt_task_t *task, nxt_conf_value_t *namespaces, #if (NXT_HAVE_ISOLATION_ROOTFS) static nxt_int_t -nxt_app_prepare_lang_mounts(nxt_task_t *task, nxt_process_t *process, +nxt_app_set_isolation_rootfs(nxt_task_t *task, nxt_conf_value_t *isolation, + nxt_process_t *process) +{ + nxt_str_t str; + nxt_conf_value_t *obj; + + static nxt_str_t rootfs_name = nxt_string("rootfs"); + + obj = nxt_conf_get_object_member(isolation, &rootfs_name, NULL); + if (obj != NULL) { + nxt_conf_get_string(obj, &str); + + if (nxt_slow_path(str.length <= 1 || str.start[0] != '/')) { + nxt_log(task, NXT_LOG_ERR, "rootfs requires an absolute path other " + "than \"/\" but given \"%V\"", &str); + + return NXT_ERROR; + } + + if (str.start[str.length - 1] == '/') { + str.length--; + } + + process->isolation.rootfs = nxt_mp_alloc(process->mem_pool, + str.length + 1); + + if (nxt_slow_path(process->isolation.rootfs == NULL)) { + return NXT_ERROR; + } + + nxt_memcpy(process->isolation.rootfs, str.start, str.length); + + process->isolation.rootfs[str.length] = '\0'; + } + + return NXT_OK; +} + + +static nxt_int_t +nxt_app_set_isolation_mounts(nxt_task_t *task, nxt_process_t *process, + nxt_str_t *app_type) +{ + nxt_int_t ret, cap_chroot; + nxt_runtime_t *rt; + nxt_app_lang_module_t *lang; + + rt = task->thread->runtime; + cap_chroot = rt->capabilities.chroot; + lang = nxt_app_lang_module(rt, app_type); + + nxt_assert(lang != NULL); + +#if (NXT_HAVE_CLONE_NEWUSER) + if (nxt_is_clone_flag_set(process->isolation.clone.flags, NEWUSER)) { + cap_chroot = 1; + } +#endif + + if (!cap_chroot) { + nxt_log(task, NXT_LOG_ERR, "The \"rootfs\" field requires privileges"); + return NXT_ERROR; + } + + if (lang->mounts != NULL && lang->mounts->nelts > 0) { + ret = nxt_app_set_lang_mounts(task, process, lang->mounts); + if (nxt_slow_path(ret != NXT_OK)) { + return NXT_ERROR; + } + } + + return NXT_OK; +} + + +static nxt_int_t +nxt_app_set_lang_mounts(nxt_task_t *task, nxt_process_t *process, nxt_array_t *lang_mounts) { u_char *p; @@ -1045,7 +1042,6 @@ nxt_app_prepare_lang_mounts(nxt_task_t *task, nxt_process_t *process, } - static nxt_int_t nxt_app_prepare_rootfs(nxt_task_t *task, nxt_process_t *process) { @@ -1137,6 +1133,27 @@ undo: #endif +#if 
(NXT_HAVE_PR_SET_NO_NEW_PRIVS) + +static nxt_int_t +nxt_app_set_isolation_new_privs(nxt_task_t *task, nxt_conf_value_t *isolation, + nxt_process_t *process) +{ + nxt_conf_value_t *obj; + + static nxt_str_t new_privs_name = nxt_string("new_privs"); + + obj = nxt_conf_get_object_member(isolation, &new_privs_name, NULL); + if (obj != NULL) { + process->isolation.new_privs = nxt_conf_get_boolean(obj); + } + + return NXT_OK; +} + +#endif + + static u_char * nxt_cstr_dup(nxt_mp_t *mp, u_char *dst, u_char *src) { diff --git a/src/nxt_process.c b/src/nxt_process.c index c4c44d14..215c529c 100644 --- a/src/nxt_process.c +++ b/src/nxt_process.c @@ -35,17 +35,14 @@ static void nxt_process_created_error(nxt_task_t *task, #if (NXT_HAVE_ISOLATION_ROOTFS) static nxt_int_t nxt_process_chroot(nxt_task_t *task, const char *path); -#endif -#if (NXT_HAVE_PIVOT_ROOT) +#if (NXT_HAVE_PIVOT_ROOT) && (NXT_HAVE_CLONE_NEWNS) static nxt_int_t nxt_process_pivot_root(nxt_task_t *task, const char *rootfs); static nxt_int_t nxt_process_private_mount(nxt_task_t *task, const char *rootfs); -#endif - -#if (NXT_HAVE_PIVOT_ROOT) static int nxt_pivot_root(const char *new_root, const char *old_root); #endif +#endif /* A cached process pid. */ nxt_pid_t nxt_pid; @@ -590,11 +587,6 @@ nxt_process_change_root(nxt_task_t *task, nxt_process_t *process) #endif -#endif - - -#if (NXT_HAVE_ISOLATION_ROOTFS) - static nxt_int_t nxt_process_chroot(nxt_task_t *task, const char *path) { @@ -625,8 +617,6 @@ nxt_process_unmount_all(nxt_task_t *task, nxt_process_t *process) } } -#endif - #if (NXT_HAVE_PIVOT_ROOT) && (NXT_HAVE_CLONE_NEWNS) @@ -856,6 +846,8 @@ nxt_pivot_root(const char *new_root, const char *old_root) #endif +#endif + static nxt_int_t nxt_process_send_ready(nxt_task_t *task, nxt_process_t *process) diff --git a/src/perl/nxt_perl_psgi.c b/src/perl/nxt_perl_psgi.c index 14e107e4..16079a38 100644 --- a/src/perl/nxt_perl_psgi.c +++ b/src/perl/nxt_perl_psgi.c @@ -118,12 +118,8 @@ NXT_EXPORT nxt_app_module_t nxt_app_module = { nxt_perl_psgi_compat, nxt_string("perl"), PERL_VERSION_STRING, - -#if (NXT_HAVE_ISOLATION_ROOTFS) NULL, 0, -#endif - NULL, nxt_perl_psgi_start, }; diff --git a/src/ruby/nxt_ruby.c b/src/ruby/nxt_ruby.c index 489ddcf4..9c4126f6 100644 --- a/src/ruby/nxt_ruby.c +++ b/src/ruby/nxt_ruby.c @@ -79,10 +79,8 @@ NXT_EXPORT nxt_app_module_t nxt_app_module = { compat, nxt_string("ruby"), ruby_version, -#if (NXT_HAVE_ISOLATION_ROOTFS) nxt_ruby_mounts, nxt_nitems(nxt_ruby_mounts), -#endif NULL, nxt_ruby_start, }; -- cgit From f671d1bc54d6db164cf4b03a9ef0e1ddcdd39c72 Mon Sep 17 00:00:00 2001 From: Igor Sysoev Date: Tue, 23 Jun 2020 14:16:43 +0300 Subject: Decreased level of some socket close() errors. 
--- src/nxt_socket.c | 47 +++++++++++++++++++++++++++++++++++------------ src/nxt_socket.h | 2 +- 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/src/nxt_socket.c b/src/nxt_socket.c index cc3d7378..a8e0d514 100644 --- a/src/nxt_socket.c +++ b/src/nxt_socket.c @@ -50,18 +50,6 @@ nxt_socket_create(nxt_task_t *task, nxt_uint_t domain, nxt_uint_t type, } -void -nxt_socket_close(nxt_task_t *task, nxt_socket_t s) -{ - if (nxt_fast_path(close(s) == 0)) { - nxt_debug(task, "socket close(%d)", s); - - } else { - nxt_alert(task, "socket close(%d) failed %E", s, nxt_socket_errno); - } -} - - void nxt_socket_defer_accept(nxt_task_t *task, nxt_socket_t s, nxt_sockaddr_t *sa) { @@ -291,6 +279,41 @@ nxt_socket_shutdown(nxt_task_t *task, nxt_socket_t s, nxt_uint_t how) } +void +nxt_socket_close(nxt_task_t *task, nxt_socket_t s) +{ + nxt_err_t err; + nxt_uint_t level; + + if (nxt_fast_path(close(s) == 0)) { + nxt_debug(task, "socket close(%d)", s); + return; + } + + err = nxt_socket_errno; + + switch (err) { + + case NXT_ENOTCONN: + level = NXT_LOG_DEBUG; + break; + + case NXT_ECONNRESET: + case NXT_ENETDOWN: + case NXT_ENETUNREACH: + case NXT_EHOSTDOWN: + case NXT_EHOSTUNREACH: + level = NXT_LOG_ERR; + break; + + default: + level = NXT_LOG_ALERT; + } + + nxt_log(task, level, "socket close(%d) failed %E", s, err); +} + + nxt_err_t nxt_socket_error(nxt_socket_t s) { diff --git a/src/nxt_socket.h b/src/nxt_socket.h index e39d8e4d..718ad398 100644 --- a/src/nxt_socket.h +++ b/src/nxt_socket.h @@ -93,7 +93,6 @@ typedef union { NXT_EXPORT nxt_socket_t nxt_socket_create(nxt_task_t *task, nxt_uint_t family, nxt_uint_t type, nxt_uint_t protocol, nxt_uint_t flags); -NXT_EXPORT void nxt_socket_close(nxt_task_t *task, nxt_socket_t s); NXT_EXPORT void nxt_socket_defer_accept(nxt_task_t *task, nxt_socket_t s, nxt_sockaddr_t *sa); NXT_EXPORT nxt_int_t nxt_socket_getsockopt(nxt_task_t *task, nxt_socket_t s, @@ -106,6 +105,7 @@ NXT_EXPORT nxt_int_t nxt_socket_connect(nxt_task_t *task, nxt_socket_t s, nxt_sockaddr_t *sa); NXT_EXPORT void nxt_socket_shutdown(nxt_task_t *task, nxt_socket_t s, nxt_uint_t how); +NXT_EXPORT void nxt_socket_close(nxt_task_t *task, nxt_socket_t s); nxt_err_t nxt_socket_error(nxt_socket_t s); nxt_uint_t nxt_socket_error_level(nxt_err_t err); -- cgit From 65799c7252e56d287d967bf3f036a10d5764f82c Mon Sep 17 00:00:00 2001 From: Igor Sysoev Date: Tue, 23 Jun 2020 14:16:45 +0300 Subject: Upstream chunked transfer encoding support. 
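A brief aside for context, not part of the patch: the parser this change wires into the proxy, nxt_http_chunk_parse(), consumes standard HTTP/1.1 chunked framing -- a hexadecimal chunk-size line, CRLF, that many bytes of data, CRLF, repeated until a zero-size chunk ends the body. The Python below is only a minimal sketch of that framing; the helper names encode_chunked/decode_chunked are invented for illustration, and chunk extensions and trailers are ignored.

    # Illustrative sketch only; not part of the Unit sources.
    # Frames and parses the "<hex-size>\r\n<data>\r\n ... 0\r\n\r\n" layout
    # that the new upstream response parser has to handle.

    def encode_chunked(parts):
        # Each part becomes "<hex length>\r\n<part>\r\n"; a zero chunk ends the body.
        body = b''
        for part in parts:
            body += b'%x\r\n' % len(part) + part + b'\r\n'
        return body + b'0\r\n\r\n'

    def decode_chunked(data):
        # Walk the chunks until the zero-size chunk; raise on malformed framing.
        out, pos = b'', 0
        while True:
            eol = data.index(b'\r\n', pos)
            size = int(data[pos:eol], 16)           # chunk-size is hexadecimal
            pos = eol + 2
            if size == 0:
                return out                          # "0\r\n\r\n" terminates the body
            out += data[pos:pos + size]
            if data[pos + size:pos + size + 2] != b'\r\n':
                raise ValueError('missing CRLF after chunk data')
            pos += size + 2

    if __name__ == '__main__':
        framed = encode_chunked([b'Hello, ', b'world!'])
        assert decode_chunked(framed) == b'Hello, world!'

The test_proxy_chunked.py file added later in this series pushes exactly this kind of framing (including fragmented and malformed variants) through Unit's proxy.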
--- auto/sources | 2 +- src/nxt_h1proto.c | 101 ++++++++++++++++++++++++++++++++++------ src/nxt_h1proto.h | 2 + src/nxt_http.h | 1 - src/nxt_http_chunk_parse.c | 113 +++++++++++++++++++++++++-------------------- src/nxt_http_parse.h | 16 +++++++ src/nxt_http_proxy.c | 42 +++++------------ 7 files changed, 179 insertions(+), 98 deletions(-) diff --git a/auto/sources b/auto/sources index 2075ca0f..0dd2cbd6 100644 --- a/auto/sources +++ b/auto/sources @@ -90,6 +90,7 @@ NXT_LIB_SRCS=" \ src/nxt_http_return.c \ src/nxt_http_static.c \ src/nxt_http_proxy.c \ + src/nxt_http_chunk_parse.c \ src/nxt_application.c \ src/nxt_external.c \ src/nxt_port_hash.c \ @@ -107,7 +108,6 @@ NXT_LIB_SRC0=" \ src/nxt_stream_source.c \ src/nxt_upstream_source.c \ src/nxt_http_source.c \ - src/nxt_http_chunk_parse.c \ src/nxt_fastcgi_source.c \ src/nxt_fastcgi_record_parse.c \ \ diff --git a/src/nxt_h1proto.c b/src/nxt_h1proto.c index a139f611..859ed02f 100644 --- a/src/nxt_h1proto.c +++ b/src/nxt_h1proto.c @@ -99,6 +99,7 @@ static nxt_int_t nxt_h1p_peer_header_parse(nxt_http_peer_t *peer, nxt_buf_mem_t *bm); static void nxt_h1p_peer_read(nxt_task_t *task, nxt_http_peer_t *peer); static void nxt_h1p_peer_read_done(nxt_task_t *task, void *obj, void *data); +static void nxt_h1p_peer_body_process(nxt_task_t *task, nxt_http_peer_t *peer, nxt_buf_t *out); static void nxt_h1p_peer_closed(nxt_task_t *task, void *obj, void *data); static void nxt_h1p_peer_error(nxt_task_t *task, void *obj, void *data); static void nxt_h1p_peer_send_timeout(nxt_task_t *task, void *obj, void *data); @@ -106,6 +107,8 @@ static void nxt_h1p_peer_read_timeout(nxt_task_t *task, void *obj, void *data); static nxt_msec_t nxt_h1p_peer_timer_value(nxt_conn_t *c, uintptr_t data); static void nxt_h1p_peer_close(nxt_task_t *task, nxt_http_peer_t *peer); static void nxt_h1p_peer_free(nxt_task_t *task, void *obj, void *data); +static nxt_int_t nxt_h1p_peer_transfer_encoding(void *ctx, + nxt_http_field_t *field, uintptr_t data); #if (NXT_TLS) static const nxt_conn_state_t nxt_http_idle_state; @@ -178,7 +181,7 @@ static nxt_lvlhsh_t nxt_h1p_peer_fields_hash; static nxt_http_field_proc_t nxt_h1p_peer_fields[] = { { nxt_string("Connection"), &nxt_http_proxy_skip, 0 }, - { nxt_string("Transfer-Encoding"), &nxt_http_proxy_skip, 0 }, + { nxt_string("Transfer-Encoding"), &nxt_h1p_peer_transfer_encoding, 0 }, { nxt_string("Server"), &nxt_http_proxy_skip, 0 }, { nxt_string("Date"), &nxt_http_proxy_date, 0 }, { nxt_string("Content-Length"), &nxt_http_proxy_content_length, 0 }, @@ -2139,9 +2142,6 @@ nxt_h1p_peer_connect(nxt_task_t *task, nxt_http_peer_t *peer) peer->proto.h1 = h1p; h1p->request = r; - c->socket.task = task; - c->read_timer.task = task; - c->write_timer.task = task; c->socket.data = peer; c->remote = peer->server->sockaddr; @@ -2238,7 +2238,8 @@ nxt_h1p_peer_header_send(nxt_task_t *task, nxt_http_peer_t *peer) r = peer->request; size = r->method->length + sizeof(" ") + r->target.length - + sizeof(" HTTP/1.0\r\n") + + sizeof(" HTTP/1.1\r\n") + + sizeof("Connection: close\r\n") + sizeof("\r\n"); nxt_list_each(field, r->fields) { @@ -2261,7 +2262,8 @@ nxt_h1p_peer_header_send(nxt_task_t *task, nxt_http_peer_t *peer) p = nxt_cpymem(p, r->method->start, r->method->length); *p++ = ' '; p = nxt_cpymem(p, r->target.start, r->target.length); - p = nxt_cpymem(p, " HTTP/1.0\r\n", 11); + p = nxt_cpymem(p, " HTTP/1.1\r\n", 11); + p = nxt_cpymem(p, "Connection: close\r\n", 19); nxt_list_each(field, r->fields) { @@ -2466,6 +2468,7 @@ 
nxt_h1p_peer_header_read_done(nxt_task_t *task, void *obj, void *data) nxt_int_t ret; nxt_buf_t *b; nxt_conn_t *c; + nxt_h1proto_t *h1p; nxt_http_peer_t *peer; nxt_http_request_t *r; nxt_event_engine_t *engine; @@ -2503,11 +2506,26 @@ nxt_h1p_peer_header_read_done(nxt_task_t *task, void *obj, void *data) c->read = NULL; - if (nxt_buf_mem_used_size(&b->mem) != 0) { - peer->body = b; + peer->header_received = 1; + + h1p = peer->proto.h1; + + if (h1p->chunked) { + if (r->resp.content_length != NULL) { + peer->status = NXT_HTTP_BAD_GATEWAY; + break; + } + + h1p->chunked_parse.mem_pool = c->mem_pool; + + } else if (r->resp.content_length_n > 0) { + h1p->remainder = r->resp.content_length_n; } - peer->header_received = 1; + if (nxt_buf_mem_used_size(&b->mem) != 0) { + nxt_h1p_peer_body_process(task, peer, b); + return; + } r->state->ready_handler(task, r, peer); return; @@ -2613,18 +2631,54 @@ static const nxt_conn_state_t nxt_h1p_peer_read_state static void nxt_h1p_peer_read_done(nxt_task_t *task, void *obj, void *data) { - nxt_conn_t *c; - nxt_http_peer_t *peer; - nxt_http_request_t *r; + nxt_buf_t *out; + nxt_conn_t *c; + nxt_http_peer_t *peer; c = obj; peer = data; nxt_debug(task, "h1p peer read done"); - peer->body = c->read; + out = c->read; c->read = NULL; + nxt_h1p_peer_body_process(task, peer, out); +} + + +static void +nxt_h1p_peer_body_process(nxt_task_t *task, nxt_http_peer_t *peer, + nxt_buf_t *out) +{ + size_t length; + nxt_h1proto_t *h1p; + nxt_http_request_t *r; + + h1p = peer->proto.h1; + + if (h1p->chunked) { + out = nxt_http_chunk_parse(task, &h1p->chunked_parse, out); + + if (h1p->chunked_parse.chunk_error || h1p->chunked_parse.error) { + peer->status = NXT_HTTP_BAD_GATEWAY; + r = peer->request; + r->state->error_handler(task, r, peer); + return; + } + + if (h1p->chunked_parse.last) { + nxt_buf_chain_add(&out, nxt_http_buf_last(peer->request)); + peer->closed = 1; + } + + } else if (h1p->remainder > 0) { + length = nxt_buf_chain_length(out); + h1p->remainder -= length; + } + + peer->body = out; + r = peer->request; r->state->ready_handler(task, r, peer); } @@ -2644,8 +2698,8 @@ nxt_h1p_peer_closed(nxt_task_t *task, void *obj, void *data) if (peer->header_received) { peer->body = nxt_http_buf_last(r); - peer->closed = 1; + r->inconsistent = (peer->proto.h1->remainder != 0); r->state->ready_handler(task, r, peer); @@ -2777,3 +2831,22 @@ nxt_h1p_peer_free(nxt_task_t *task, void *obj, void *data) nxt_conn_free(task, c); } + + +static nxt_int_t +nxt_h1p_peer_transfer_encoding(void *ctx, nxt_http_field_t *field, + uintptr_t data) +{ + nxt_http_request_t *r; + + r = ctx; + field->skip = 1; + + if (field->value_length == 7 + && nxt_memcmp(field->value, "chunked", 7) == 0) + { + r->peer->proto.h1->chunked = 1; + } + + return NXT_OK; +} diff --git a/src/nxt_h1proto.h b/src/nxt_h1proto.h index 3294713f..f8500963 100644 --- a/src/nxt_h1proto.h +++ b/src/nxt_h1proto.h @@ -18,6 +18,8 @@ typedef struct nxt_h1p_websocket_timer_s nxt_h1p_websocket_timer_t; struct nxt_h1proto_s { nxt_http_request_parse_t parser; + nxt_http_chunk_parse_t chunked_parse; + nxt_off_t remainder; uint8_t nbuffers; uint8_t header_buffer_slot; diff --git a/src/nxt_http.h b/src/nxt_http.h index 68051e69..67ac00d8 100644 --- a/src/nxt_http.h +++ b/src/nxt_http.h @@ -119,7 +119,6 @@ typedef struct { nxt_upstream_server_t *server; nxt_list_t *fields; nxt_buf_t *body; - nxt_off_t remainder; nxt_http_status_t status:16; nxt_http_protocol_t protocol:8; /* 2 bits */ diff --git a/src/nxt_http_chunk_parse.c 
b/src/nxt_http_chunk_parse.c index 644b9805..2164524b 100644 --- a/src/nxt_http_chunk_parse.c +++ b/src/nxt_http_chunk_parse.c @@ -21,13 +21,17 @@ static nxt_int_t nxt_http_chunk_buffer(nxt_http_chunk_parse_t *hcp, nxt_buf_t ***tail, nxt_buf_t *in); +static void nxt_http_chunk_buf_completion(nxt_task_t *task, void *obj, + void *data); + + nxt_buf_t * nxt_http_chunk_parse(nxt_task_t *task, nxt_http_chunk_parse_t *hcp, nxt_buf_t *in) { u_char c, ch; nxt_int_t ret; - nxt_buf_t *b, *out, *nb, **tail; + nxt_buf_t *b, *out, *next, **tail; enum { sw_start = 0, sw_chunk_size, @@ -37,12 +41,13 @@ nxt_http_chunk_parse(nxt_task_t *task, nxt_http_chunk_parse_t *hcp, sw_chunk, } state; + next = NULL; out = NULL; tail = &out; state = hcp->state; - for (b = in; b != NULL; b = b->next) { + for (b = in; b != NULL; b = next) { hcp->pos = b->mem.pos; @@ -60,7 +65,7 @@ nxt_http_chunk_parse(nxt_task_t *task, nxt_http_chunk_parse_t *hcp, if (nxt_slow_path(ret == NXT_ERROR)) { hcp->error = 1; - goto done; + return out; } state = sw_chunk_end_newline; @@ -152,7 +157,7 @@ nxt_http_chunk_parse(nxt_task_t *task, nxt_http_chunk_parse_t *hcp, continue; } - goto done; + return out; } goto chunk_error; @@ -168,15 +173,15 @@ nxt_http_chunk_parse(nxt_task_t *task, nxt_http_chunk_parse_t *hcp, if (b->retain == 0) { /* No chunk data was found in a buffer. */ - nxt_thread_current_work_queue_add(task->thread, - b->completion_handler, - task, b, b->parent); + nxt_work_queue_add(&task->thread->engine->fast_work_queue, + b->completion_handler, task, b, b->parent); } next: - continue; + next = b->next; + b->next = NULL; } hcp->state = state; @@ -187,20 +192,6 @@ chunk_error: hcp->chunk_error = 1; -done: - - nb = nxt_buf_sync_alloc(hcp->mem_pool, NXT_BUF_SYNC_LAST); - - if (nxt_fast_path(nb != NULL)) { - *tail = nb; - - } else { - hcp->error = 1; - } - - // STUB: hcp->chunk_error = 1; - // STUB: hcp->error = 1; - return out; } @@ -216,43 +207,35 @@ nxt_http_chunk_buffer(nxt_http_chunk_parse_t *hcp, nxt_buf_t ***tail, p = hcp->pos; size = in->mem.free - p; - if (hcp->chunk_size >= size && in->retain == 0) { - /* - * Use original buffer if the buffer is lesser than or equal - * to a chunk size and this is the first chunk in the buffer. 
- */ - in->mem.pos = p; - **tail = in; - *tail = &in->next; - - } else { - b = nxt_buf_mem_alloc(hcp->mem_pool, 0, 0); - if (nxt_slow_path(b == NULL)) { - return NXT_ERROR; - } + b = nxt_buf_mem_alloc(hcp->mem_pool, 0, 0); + if (nxt_slow_path(b == NULL)) { + return NXT_ERROR; + } - **tail = b; - *tail = &b->next; + **tail = b; + *tail = &b->next; - b->parent = in; - in->retain++; - b->mem.pos = p; - b->mem.start = p; + nxt_mp_retain(hcp->mem_pool); + b->completion_handler = nxt_http_chunk_buf_completion; - if (hcp->chunk_size < size) { - p += hcp->chunk_size; - hcp->pos = p; + b->parent = in; + in->retain++; + b->mem.pos = p; + b->mem.start = p; - b->mem.free = p; - b->mem.end = p; + if (hcp->chunk_size < size) { + p += hcp->chunk_size; + hcp->pos = p; - return NXT_HTTP_CHUNK_END; - } + b->mem.free = p; + b->mem.end = p; - b->mem.free = in->mem.free; - b->mem.end = in->mem.free; + return NXT_HTTP_CHUNK_END; } + b->mem.free = in->mem.free; + b->mem.end = in->mem.free; + hcp->chunk_size -= size; if (hcp->chunk_size == 0) { @@ -261,3 +244,31 @@ nxt_http_chunk_buffer(nxt_http_chunk_parse_t *hcp, nxt_buf_t ***tail, return NXT_HTTP_CHUNK_MIDDLE; } + + +static void +nxt_http_chunk_buf_completion(nxt_task_t *task, void *obj, void *data) +{ + nxt_mp_t *mp; + nxt_buf_t *b, *next, *parent; + + b = obj; + parent = data; + + nxt_debug(task, "buf completion: %p %p", b, b->mem.start); + + nxt_assert(data == b->parent); + + do { + next = b->next; + parent = b->parent; + mp = b->data; + + nxt_mp_free(mp, b); + nxt_mp_release(mp); + + nxt_buf_parent_completion(task, parent); + + b = next; + } while (b != NULL); +} diff --git a/src/nxt_http_parse.h b/src/nxt_http_parse.h index 0f888949..cbfc8433 100644 --- a/src/nxt_http_parse.h +++ b/src/nxt_http_parse.h @@ -90,6 +90,19 @@ struct nxt_http_field_s { }; +typedef struct { + u_char *pos; + nxt_mp_t *mem_pool; + + uint64_t chunk_size; + + uint8_t state; + uint8_t last; /* 1 bit */ + uint8_t chunk_error; /* 1 bit */ + uint8_t error; /* 1 bit */ +} nxt_http_chunk_parse_t; + + #define NXT_HTTP_FIELD_HASH_INIT 159406U #define nxt_http_field_hash_char(h, c) (((h) << 4) + (h) + (c)) #define nxt_http_field_hash_end(h) (((h) >> 16) ^ (h)) @@ -109,6 +122,9 @@ nxt_uint_t nxt_http_fields_hash_collisions(nxt_lvlhsh_t *hash, nxt_int_t nxt_http_fields_process(nxt_list_t *fields, nxt_lvlhsh_t *hash, void *ctx); +nxt_buf_t *nxt_http_chunk_parse(nxt_task_t *task, nxt_http_chunk_parse_t *hcp, + nxt_buf_t *in); + extern const nxt_lvlhsh_proto_t nxt_http_fields_hash_proto; diff --git a/src/nxt_http_proxy.c b/src/nxt_http_proxy.c index 893e9303..34d0f36e 100644 --- a/src/nxt_http_proxy.c +++ b/src/nxt_http_proxy.c @@ -27,8 +27,6 @@ static void nxt_http_proxy_header_send(nxt_task_t *task, void *obj, void *data); static void nxt_http_proxy_header_sent(nxt_task_t *task, void *obj, void *data); static void nxt_http_proxy_header_read(nxt_task_t *task, void *obj, void *data); static void nxt_http_proxy_send_body(nxt_task_t *task, void *obj, void *data); -static void nxt_http_proxy_request_send(nxt_task_t *task, - nxt_http_request_t *r, nxt_buf_t *out); static void nxt_http_proxy_read(nxt_task_t *task, void *obj, void *data); static void nxt_http_proxy_buf_mem_completion(nxt_task_t *task, void *obj, void *data); @@ -253,10 +251,6 @@ nxt_http_proxy_header_read(nxt_task_t *task, void *obj, void *data) nxt_debug(task, "http proxy status: %d", peer->status); - if (r->resp.content_length_n > 0) { - peer->remainder = r->resp.content_length_n; - } - nxt_list_each(field, peer->fields) { 
nxt_debug(task, "http proxy header: \"%*s: %*s\"", @@ -275,6 +269,8 @@ nxt_http_proxy_header_read(nxt_task_t *task, void *obj, void *data) } nxt_list_loop; + r->state = &nxt_http_proxy_read_state; + nxt_http_request_header_send(task, r, nxt_http_proxy_send_body, peer); } @@ -292,27 +288,13 @@ nxt_http_proxy_send_body(nxt_task_t *task, void *obj, void *data) if (out != NULL) { peer->body = NULL; - nxt_http_proxy_request_send(task, r, out); - } - - r->state = &nxt_http_proxy_read_state; - - nxt_http_proto[peer->protocol].peer_read(task, peer); -} - + nxt_http_request_send(task, r, out); -static void -nxt_http_proxy_request_send(nxt_task_t *task, nxt_http_request_t *r, - nxt_buf_t *out) -{ - size_t length; - - if (r->peer->remainder > 0) { - length = nxt_buf_chain_length(out); - r->peer->remainder -= length; } - nxt_http_request_send(task, r, out); + if (!peer->closed) { + nxt_http_proto[peer->protocol].peer_read(task, peer); + } } @@ -328,7 +310,6 @@ static void nxt_http_proxy_read(nxt_task_t *task, void *obj, void *data) { nxt_buf_t *out; - nxt_bool_t last; nxt_http_peer_t *peer; nxt_http_request_t *r; @@ -336,16 +317,15 @@ nxt_http_proxy_read(nxt_task_t *task, void *obj, void *data) peer = data; out = peer->body; peer->body = NULL; - last = nxt_buf_is_last(out); - nxt_http_proxy_request_send(task, r, out); + if (out != NULL) { + nxt_http_request_send(task, r, out); + } - if (!last) { + if (!peer->closed) { nxt_http_proto[peer->protocol].peer_read(task, peer); } else { - r->inconsistent = (peer->remainder != 0); - nxt_http_proto[peer->protocol].peer_close(task, peer); nxt_mp_release(r->mem_pool); @@ -422,7 +402,7 @@ nxt_http_proxy_error(nxt_task_t *task, void *obj, void *data) nxt_mp_release(r->mem_pool); - nxt_http_request_error(task, r, peer->status); + nxt_http_request_error(&r->task, r, peer->status); } -- cgit From 6e55f03dcd006ae97fa994ef09ebe35a255ea606 Mon Sep 17 00:00:00 2001 From: Andrei Zeliankou Date: Wed, 24 Jun 2020 04:11:09 +0100 Subject: Tests: added chunked tests. 
--- test/test_proxy_chunked.py | 249 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 249 insertions(+) create mode 100644 test/test_proxy_chunked.py diff --git a/test/test_proxy_chunked.py b/test/test_proxy_chunked.py new file mode 100644 index 00000000..2d4f7b94 --- /dev/null +++ b/test/test_proxy_chunked.py @@ -0,0 +1,249 @@ +import os +import re +import socket +import select +import time + +from unit.applications.lang.python import TestApplicationPython + + +class TestProxyChunked(TestApplicationPython): + prerequisites = {'modules': {'python': 'any'}} + + SERVER_PORT = 7999 + + @staticmethod + def run_server(server_port, testdir): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + server_address = ('127.0.0.1', server_port) + sock.bind(server_address) + sock.listen(10) + + def recvall(sock): + buff_size = 4096 * 4096 + data = b'' + while True: + rlist = select.select([sock], [], [], 0.1) + + if not rlist[0]: + break + + part = sock.recv(buff_size) + data += part + + if not len(part): + break + + return data + + while True: + connection, client_address = sock.accept() + + req = """HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked""" + + data = recvall(connection).decode() + + m = re.search('\x0d\x0a\x0d\x0a(.*)', data, re.M | re.S) + if m is not None: + body = m.group(1) + + for line in re.split('\r\n', body): + add = '' + m1 = re.search('(.*)\sX\s(\d+)', line) + + if m1 is not None: + add = m1.group(1) * int(m1.group(2)) + else: + add = line + + req = req + add + '\r\n' + + for chunk in re.split(r'([@#])', req): + if chunk == '@' or chunk == '#': + if chunk == '#': + time.sleep(0.1) + continue + + connection.sendall(chunk.encode()) + + connection.close() + + def chunks(self, chunks): + body = '\r\n\r\n' + + for l, c in chunks: + body = body + l + '\r\n' + c + '\r\n' + + return body + '0\r\n\r\n' + + def get_http10(self, *args, **kwargs): + return self.get(*args, http_10=True, **kwargs) + + def setUp(self): + super().setUp() + + self.run_process(self.run_server, self.SERVER_PORT, self.testdir) + self.waitforsocket(self.SERVER_PORT) + + self.assertIn( + 'success', + self.conf( + { + "listeners": {"*:7080": {"pass": "routes"},}, + "routes": [ + { + "action": { + "proxy": "http://127.0.0.1:" + str(self.SERVER_PORT) + } + } + ], + } + ), + 'proxy initial configuration', + ) + + def test_proxy_chunked(self): + for _ in range(10): + self.assertEqual( + self.get_http10(body='\r\n\r\n0\r\n\r\n')['status'], 200 + ) + + def test_proxy_chunked_body(self): + part = '0123456789abcdef' + + self.assertEqual( + self.get_http10(body=self.chunks([('1000', part + ' X 256')]))[ + 'body' + ], + part * 256, + ) + self.assertEqual( + self.get_http10(body=self.chunks([('100000', part + ' X 65536')]))[ + 'body' + ], + part * 65536, + ) + self.assertEqual( + self.get_http10( + body=self.chunks([('1000000', part + ' X 1048576')]), + read_buffer_size=4096 * 4096, + )['body'], + part * 1048576, + ) + + self.assertEqual( + self.get_http10( + body=self.chunks( + [('1000', part + ' X 256'), ('1000', part + ' X 256')] + ) + )['body'], + part * 256 * 2, + ) + self.assertEqual( + self.get_http10( + body=self.chunks( + [ + ('100000', part + ' X 65536'), + ('100000', part + ' X 65536'), + ] + ) + )['body'], + part * 65536 * 2, + ) + self.assertEqual( + self.get_http10( + body=self.chunks( + [ + ('1000000', part + ' X 1048576'), + ('1000000', part + ' X 1048576'), + ] + ), + 
read_buffer_size=4096 * 4096, + )['body'], + part * 1048576 * 2, + ) + + def test_proxy_chunked_fragmented(self): + part = '0123456789abcdef' + + self.assertEqual( + self.get_http10( + body=self.chunks( + [('1', hex(i % 16)[2:]) for i in range(4096)] + ), + )['body'], + part * 256, + ) + + def test_proxy_chunked_send(self): + self.assertEqual( + self.get_http10(body='\r\n\r\n@0@\r\n\r\n')['status'], 200 + ) + self.assertEqual( + self.get_http10( + body='\r@\n\r\n2\r@\na@b\r\n2\r\ncd@\r\n0\r@\n\r\n' + )['body'], + 'abcd', + ) + self.assertEqual( + self.get_http10( + body='\r\n\r\n2\r#\na#b\r\n##2\r\n#cd\r\n0\r\n#\r#\n' + )['body'], + 'abcd', + ) + + def test_proxy_chunked_invalid(self): + def check_invalid(body): + self.assertNotEqual(self.get_http10(body=body)['status'], 200) + + check_invalid('\r\n\r0') + check_invalid('\r\n\r\n\r0') + check_invalid('\r\n\r\n\r\n0') + check_invalid('\r\nContent-Length: 5\r\n\r\n0\r\n\r\n') + check_invalid('\r\n\r\n1\r\nXX\r\n0\r\n\r\n') + check_invalid('\r\n\r\n2\r\nX\r\n0\r\n\r\n') + check_invalid('\r\n\r\nH\r\nXX\r\n0\r\n\r\n') + check_invalid('\r\n\r\n0\r\nX') + + resp = self.get_http10(body='\r\n\r\n65#\r\nA X 100') + self.assertEqual(resp['status'], 200, 'incomplete chunk status') + self.assertNotEqual(resp['body'][-5:], '0\r\n\r\n', 'incomplete chunk') + + resp = self.get_http10(body='\r\n\r\n64#\r\nA X 100') + self.assertEqual(resp['status'], 200, 'no zero chunk status') + self.assertNotEqual(resp['body'][-5:], '0\r\n\r\n', 'no zero chunk') + + self.assertEqual( + self.get_http10(body='\r\n\r\n80000000\r\nA X 100')['status'], 200, + ) + self.assertEqual( + self.get_http10(body='\r\n\r\n10000000000000000\r\nA X 100')[ + 'status' + ], + 502, + ) + self.assertGreaterEqual( + len( + self.get_http10( + body='\r\n\r\n1000000\r\nA X 1048576\r\n1000000\r\nA X 100', + read_buffer_size=4096 * 4096, + )['body'] + ), + 1048576, + ) + self.assertGreaterEqual( + len( + self.get_http10( + body='\r\n\r\n1000000\r\nA X 1048576\r\nXXX\r\nA X 100', + read_buffer_size=4096 * 4096, + )['body'] + ), + 1048576, + ) + + +if __name__ == '__main__': + TestProxyChunked.main() -- cgit From 9bd6baefaee3a3e9b8dfe6427404512c9a66c969 Mon Sep 17 00:00:00 2001 From: Andrei Zeliankou Date: Fri, 26 Jun 2020 04:23:47 +0100 Subject: Tests: fixed opcache detection. opcache_get_status() returns array, so square brackets should be used to access "opcache_enabled" value. --- test/php/cwd/index.php | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/php/cwd/index.php b/test/php/cwd/index.php index 24ae3a21..de3797e4 100644 --- a/test/php/cwd/index.php +++ b/test/php/cwd/index.php @@ -10,7 +10,8 @@ if (isset($_GET['chdir']) && $_GET['chdir'] != "") { $opcache = -1; if (function_exists('opcache_get_status')) { - $opcache = opcache_get_status()->opcache_enabled; + $status = opcache_get_status(); + $opcache = $status['opcache_enabled']; } header('X-OPcache: ' . $opcache); -- cgit From a9a21f6fe41f4da4c1dcf2d9b35287d67b1fa4d1 Mon Sep 17 00:00:00 2001 From: Axel Duch Date: Sat, 4 Jul 2020 03:24:07 +0100 Subject: Router: route patterns multi wildcards support. 
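A brief aside before the diff, not part of the patch: with this change a match pattern is stored as an array of ordered, typed slices (an optional fixed prefix, middle substrings that must occur in order, an optional fixed suffix) instead of at most two fixed parts, so any number of "*" markers is allowed, e.g. "*a*B*c*". The Python below is only a rough sketch of that matching idea; match_pattern is an invented name, and empty patterns, a bare "!", case-insensitive matching and %-decoding are handled separately in the real code.

    # Rough illustrative sketch only -- not the router's actual code or API.
    # A pattern such as "*a*B*c*" splits on '*' into an optional prefix,
    # middle substrings that must occur in order, and an optional suffix.

    def match_pattern(pattern, value):
        negative = pattern.startswith('!')
        if negative:
            pattern = pattern[1:]

        pieces = pattern.split('*')

        if len(pieces) == 1:                        # no '*': exact match
            matched = (value == pattern)
        else:
            prefix, suffix = pieces[0], pieces[-1]
            middles = [p for p in pieces[1:-1] if p]
            min_length = len(prefix) + len(suffix) + sum(len(m) for m in middles)

            matched = (len(value) >= min_length
                       and value.startswith(prefix)
                       and value.endswith(suffix))

            # Search the middle pieces left to right, strictly between the
            # prefix and the suffix, so pieces cannot overlap or reorder.
            start, end = len(prefix), len(value) - len(suffix)
            for piece in middles:
                if not matched:
                    break
                i = value.find(piece, start, end)
                matched = (i != -1)
                start = i + len(piece)

        return matched != negative                  # leading '!' inverts the result

    if __name__ == '__main__':
        assert match_pattern('*a*B*c*', '/blah-a-B-c-blah')
        assert not match_pattern('*a*B*c*', '/ABc')
        assert match_pattern('/a*x*y*', '/axy')
        assert not match_pattern('/a*x*y*', '/ayx')
        assert not match_pattern('/bla*bla', '/foo_bla')
        assert match_pattern('!/blah*1*', '/blah2')

These cases mirror the tests added to test_routing.py in this commit; the ordered left-to-right search with adjusted start and end positions corresponds to what the follow-up "Router: route patterns multi wildcards fix" commit later in this series does in the C matcher.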
--- src/nxt_conf_validation.c | 44 +------ src/nxt_http_route.c | 318 ++++++++++++++++++++++++++++++---------------- test/test_routing.py | 49 ++++++- 3 files changed, 254 insertions(+), 157 deletions(-) diff --git a/src/nxt_conf_validation.c b/src/nxt_conf_validation.c index c4f78608..f34712bd 100644 --- a/src/nxt_conf_validation.c +++ b/src/nxt_conf_validation.c @@ -1346,15 +1346,8 @@ static nxt_int_t nxt_conf_vldt_match_pattern(nxt_conf_validation_t *vldt, nxt_conf_value_t *value) { - u_char ch; nxt_str_t pattern; - nxt_uint_t i, first, last; - - enum { - sw_none, - sw_side, - sw_middle - } state; + nxt_uint_t i, first; if (nxt_conf_type(value) != NXT_CONF_STRING) { return nxt_conf_vldt_error(vldt, "The \"match\" patterns for \"host\", " @@ -1368,38 +1361,11 @@ nxt_conf_vldt_match_pattern(nxt_conf_validation_t *vldt, } first = (pattern.start[0] == '!'); - last = pattern.length - 1; - state = sw_none; - - for (i = first; i != pattern.length; i++) { - - ch = pattern.start[i]; - - if (ch != '*') { - continue; - } - - switch (state) { - case sw_none: - state = (i == first) ? sw_side : sw_middle; - break; - - case sw_side: - if (i == last) { - if (last - first != 1) { - break; - } - - return nxt_conf_vldt_error(vldt, "The \"match\" pattern must " - "not contain double \"*\" markers."); - } - - /* Fall through. */ - case sw_middle: - return nxt_conf_vldt_error(vldt, "The \"match\" patterns can " - "either contain \"*\" markers at " - "the sides or only one in the middle."); + for (i = first; i < pattern.length; i++) { + if (pattern.start[i] == '*' && pattern.start[i + 1] == '*') { + return nxt_conf_vldt_error(vldt, "The \"match\" pattern must " + "not contain double \"*\" markers."); } } diff --git a/src/nxt_http_route.c b/src/nxt_http_route.c index a8a6b181..66ab0fcd 100644 --- a/src/nxt_http_route.c +++ b/src/nxt_http_route.c @@ -26,7 +26,6 @@ typedef enum { typedef enum { NXT_HTTP_ROUTE_PATTERN_EXACT = 0, NXT_HTTP_ROUTE_PATTERN_BEGIN, - NXT_HTTP_ROUTE_PATTERN_MIDDLE, NXT_HTTP_ROUTE_PATTERN_END, NXT_HTTP_ROUTE_PATTERN_SUBSTRING, } nxt_http_route_pattern_type_t; @@ -70,13 +69,16 @@ typedef struct { typedef struct { - u_char *start1; - u_char *start2; - uint32_t length1; - uint32_t length2; + u_char *start; + uint32_t length; + nxt_http_route_pattern_type_t type:8; +} nxt_http_route_pattern_slice_t; + + +typedef struct { uint32_t min_length; + nxt_array_t *pattern_slices; - nxt_http_route_pattern_type_t type:8; uint8_t case_sensitive; /* 1 bit */ uint8_t negative; /* 1 bit */ uint8_t any; /* 1 bit */ @@ -209,7 +211,10 @@ static nxt_int_t nxt_http_route_pattern_create(nxt_task_t *task, nxt_mp_t *mp, nxt_http_route_encoding_t encoding); static nxt_int_t nxt_http_route_decode_str(nxt_str_t *str, nxt_http_route_encoding_t encoding); -static u_char *nxt_http_route_pattern_copy(nxt_mp_t *mp, nxt_str_t *test, +static nxt_int_t nxt_http_route_pattern_slice(nxt_array_t *slices, + nxt_str_t *test, + nxt_http_route_pattern_type_t type, + nxt_http_route_encoding_t encoding, nxt_http_route_pattern_case_t pattern_case); static nxt_int_t nxt_http_route_resolve(nxt_task_t *task, @@ -1044,103 +1049,163 @@ nxt_http_route_pattern_create(nxt_task_t *task, nxt_mp_t *mp, nxt_http_route_pattern_case_t pattern_case, nxt_http_route_encoding_t encoding) { - u_char *start; - nxt_str_t test, test2; - nxt_int_t ret; - nxt_uint_t n, length; - nxt_http_route_pattern_type_t type; + u_char c, *p, *end; + nxt_str_t test, tmp; + nxt_int_t ret; + nxt_array_t *slices; + nxt_http_route_pattern_type_t type; + + 
nxt_http_route_pattern_slice_t *slice; type = NXT_HTTP_ROUTE_PATTERN_EXACT; nxt_conf_get_string(cv, &test); + slices = nxt_array_create(mp, 1, sizeof(nxt_http_route_pattern_slice_t)); + if (nxt_slow_path(slices == NULL)) { + return NXT_ERROR; + } + + pattern->pattern_slices = slices; + pattern->negative = 0; pattern->any = 1; pattern->min_length = 0; - if (test.length != 0) { + if (test.length != 0 && test.start[0] == '!') { + test.start++; + test.length--; - if (test.start[0] == '!') { - test.start++; - test.length--; + pattern->negative = 1; + pattern->any = 0; - pattern->negative = 1; - pattern->any = 0; + if (test.length == 0) { + return NXT_OK; } + } - if (test.length != 0) { - if (test.start[0] == '*') { - test.start++; - test.length--; + if (test.length == 0) { + slice = nxt_array_add(slices); + if (nxt_slow_path(slice == NULL)) { + return NXT_ERROR; + } - if (test.length != 0) { - if (test.start[test.length - 1] == '*') { - test.length--; - type = NXT_HTTP_ROUTE_PATTERN_SUBSTRING; + slice->type = NXT_HTTP_ROUTE_PATTERN_EXACT; + slice->start = NULL; + slice->length = 0; - } else { - type = NXT_HTTP_ROUTE_PATTERN_END; - } + return NXT_OK; + } - } else { - type = NXT_HTTP_ROUTE_PATTERN_BEGIN; - } + if (test.start[0] == '*') { + /* 'type' is no longer 'EXACT', assume 'END'. */ + type = NXT_HTTP_ROUTE_PATTERN_END; + test.start++; + test.length--; + } - } else if (test.start[test.length - 1] == '*') { - test.length--; - type = NXT_HTTP_ROUTE_PATTERN_BEGIN; + if (type == NXT_HTTP_ROUTE_PATTERN_EXACT && test.length != 0) { + tmp.start = test.start; - } else { - length = test.length - 1; + p = nxt_memchr(test.start, '*', test.length); + + if (p == NULL) { + /* No '*' found - EXACT pattern. */ + tmp.length = test.length; + type = NXT_HTTP_ROUTE_PATTERN_EXACT; + + test.start += test.length; + test.length = 0; - for (n = 1; n < length; n++) { - if (test.start[n] != '*') { - continue; - } + } else { + /* '*' found - BEGIN pattern. 
*/ + tmp.length = p - test.start; + type = NXT_HTTP_ROUTE_PATTERN_BEGIN; - test.length = n; + test.start = p + 1; + test.length -= tmp.length + 1; + } - test2.start = &test.start[n + 1]; - test2.length = length - n; + ret = nxt_http_route_pattern_slice(slices, &tmp, type, encoding, + pattern_case); + if (nxt_slow_path(ret != NXT_OK)) { + return ret; + } - ret = nxt_http_route_decode_str(&test2, encoding); - if (nxt_slow_path(ret != NXT_OK)) { - return ret; - } + pattern->min_length += tmp.length; + } - type = NXT_HTTP_ROUTE_PATTERN_MIDDLE; + end = test.start + test.length; - pattern->length2 = test2.length; - pattern->min_length += test2.length; + if (test.length != 0 && end[-1] != '*') { + p = end - 1; - start = nxt_http_route_pattern_copy(mp, &test2, - pattern_case); - if (nxt_slow_path(start == NULL)) { - return NXT_ERROR; - } + while (p != test.start) { + c = *p--; - pattern->start2 = start; - break; - } + if (c == '*') { + p += 2; + break; } + } - ret = nxt_http_route_decode_str(&test, encoding); - if (nxt_slow_path(ret != NXT_OK)) { - return ret; - } + tmp.start = p; + tmp.length = end - p; + + test.length -= tmp.length; + end = p; + + ret = nxt_http_route_pattern_slice(slices, &tmp, + NXT_HTTP_ROUTE_PATTERN_END, + encoding, pattern_case); + if (nxt_slow_path(ret != NXT_OK)) { + return ret; } + + pattern->min_length += tmp.length; } - pattern->type = type; - pattern->min_length += test.length; - pattern->length1 = test.length; + tmp.start = test.start; + tmp.length = 0; - start = nxt_http_route_pattern_copy(mp, &test, pattern_case); - if (nxt_slow_path(start == NULL)) { - return NXT_ERROR; + p = tmp.start; + + while (p != end) { + c = *p++; + + if (c != '*') { + tmp.length++; + continue; + } + + if (tmp.length == 0) { + tmp.start = p; + continue; + } + + ret = nxt_http_route_pattern_slice(slices, &tmp, + NXT_HTTP_ROUTE_PATTERN_SUBSTRING, + encoding, pattern_case); + if (nxt_slow_path(ret != NXT_OK)) { + return ret; + } + + pattern->min_length += tmp.length; + + tmp.start = p; + tmp.length = 0; } - pattern->start1 = start; + if (tmp.length != 0) { + ret = nxt_http_route_pattern_slice(slices, &tmp, + NXT_HTTP_ROUTE_PATTERN_SUBSTRING, + encoding, pattern_case); + if (nxt_slow_path(ret != NXT_OK)) { + return ret; + } + + pattern->min_length += tmp.length; + } return NXT_OK; } @@ -1185,15 +1250,25 @@ nxt_http_route_decode_str(nxt_str_t *str, nxt_http_route_encoding_t encoding) } -static u_char * -nxt_http_route_pattern_copy(nxt_mp_t *mp, nxt_str_t *test, +static nxt_int_t +nxt_http_route_pattern_slice(nxt_array_t *slices, + nxt_str_t *test, + nxt_http_route_pattern_type_t type, + nxt_http_route_encoding_t encoding, nxt_http_route_pattern_case_t pattern_case) { - u_char *start; + u_char *start; + nxt_int_t ret; + nxt_http_route_pattern_slice_t *slice; - start = nxt_mp_nget(mp, test->length); + ret = nxt_http_route_decode_str(test, encoding); + if (nxt_slow_path(ret != NXT_OK)) { + return ret; + } + + start = nxt_mp_nget(slices->mem_pool, test->length); if (nxt_slow_path(start == NULL)) { - return start; + return NXT_ERROR; } switch (pattern_case) { @@ -1211,7 +1286,16 @@ nxt_http_route_pattern_copy(nxt_mp_t *mp, nxt_str_t *test, break; } - return start; + slice = nxt_array_add(slices); + if (nxt_slow_path(slices == NULL)) { + return NXT_ERROR; + } + + slice->type = type; + slice->start = start; + slice->length = test->length; + + return NXT_OK; } @@ -2037,9 +2121,11 @@ nxt_http_route_test_argument(nxt_http_request_t *r, static nxt_int_t nxt_http_route_scheme(nxt_http_request_t *r, 
nxt_http_route_rule_t *rule) { - nxt_bool_t tls, https; + nxt_bool_t tls, https; + nxt_http_route_pattern_slice_t *pattern_slice; - https = (rule->pattern[0].length1 == nxt_length("https")); + pattern_slice = rule->pattern[0].pattern_slices->elts; + https = (pattern_slice->length == nxt_length("https")); tls = (r->tls != NULL); return (tls == https); @@ -2246,60 +2332,66 @@ static nxt_int_t nxt_http_route_pattern(nxt_http_request_t *r, nxt_http_route_pattern_t *pattern, u_char *start, size_t length) { - u_char *p, *end, *test; - size_t test_length; - nxt_int_t ret; + u_char *p, *end, *test; + size_t test_length; + uint32_t i; + nxt_array_t *pattern_slices; + nxt_http_route_pattern_slice_t *pattern_slice; if (length < pattern->min_length) { return 0; } - test = pattern->start1; - test_length = pattern->length1; + pattern_slices = pattern->pattern_slices; + pattern_slice = pattern_slices->elts; - switch (pattern->type) { + for (i = 0; i < pattern_slices->nelts; i++, pattern_slice++) { + test = pattern_slice->start; + test_length = pattern_slice->length; - case NXT_HTTP_ROUTE_PATTERN_EXACT: - if (length != test_length) { - return 0; - } + switch (pattern_slice->type) { + case NXT_HTTP_ROUTE_PATTERN_EXACT: + return ((length == pattern->min_length) && + nxt_http_route_memcmp(start, test, test_length, + pattern->case_sensitive)); - break; + case NXT_HTTP_ROUTE_PATTERN_BEGIN: + if (nxt_http_route_memcmp(start, test, test_length, + pattern->case_sensitive)) + { + break; + } - case NXT_HTTP_ROUTE_PATTERN_BEGIN: - break; + return 0; - case NXT_HTTP_ROUTE_PATTERN_MIDDLE: - ret = nxt_http_route_memcmp(start, test, test_length, - pattern->case_sensitive); - if (!ret) { - return ret; - } + case NXT_HTTP_ROUTE_PATTERN_END: + p = start + length - test_length; - test = pattern->start2; - test_length = pattern->length2; + if (nxt_http_route_memcmp(p, test, test_length, + pattern->case_sensitive)) + { + break; + } - /* Fall through. 
*/ + return 0; - case NXT_HTTP_ROUTE_PATTERN_END: - start += length - test_length; - break; + case NXT_HTTP_ROUTE_PATTERN_SUBSTRING: + end = start + length; - case NXT_HTTP_ROUTE_PATTERN_SUBSTRING: - end = start + length; + if (pattern->case_sensitive) { + p = nxt_memstrn(start, end, (char *) test, test_length); - if (pattern->case_sensitive) { - p = nxt_memstrn(start, end, (char *) test, test_length); + } else { + p = nxt_memcasestrn(start, end, (char *) test, test_length); + } - } else { - p = nxt_memcasestrn(start, end, (char *) test, test_length); + if (p == NULL) { + return 0; + } } - - return (p != NULL); } - return nxt_http_route_memcmp(start, test, test_length, - pattern->case_sensitive); + return 1; } diff --git a/test/test_routing.py b/test/test_routing.py index 3cf4009c..8a196e88 100644 --- a/test/test_routing.py +++ b/test/test_routing.py @@ -115,10 +115,41 @@ class TestRouting(TestApplicationProto): def test_routes_match_invalid(self): self.route_match_invalid({"method": "**"}) - self.route_match_invalid({"method": "blah**"}) - self.route_match_invalid({"host": "*blah*blah"}) - self.route_match_invalid({"host": "blah*blah*blah"}) - self.route_match_invalid({"host": "blah*blah*"}) + + def test_routes_match_valid(self): + self.route_match({"method": "blah*"}) + self.route_match({"host": "*blah*blah"}) + self.route_match({"host": "blah*blah*blah"}) + self.route_match({"host": "blah*blah*"}) + + def test_routes_match_empty_exact(self): + self.route_match({"uri": ""}) + self.assertEqual(self.get()['status'], 404) + + self.route_match({"uri": "/"}) + self.assertEqual(self.get()['status'], 200) + self.assertEqual(self.get(url='/blah')['status'], 404) + + def test_routes_match_negative(self): + self.route_match({"uri": "!"}) + self.assertEqual(self.get()['status'], 404) + + self.route_match({"uri": "!/"}) + self.assertEqual(self.get()['status'], 404) + self.assertEqual(self.get(url='/blah')['status'], 200) + + self.route_match({"uri": "!*blah"}) + self.assertEqual(self.get()['status'], 200) + self.assertEqual(self.get(url='/bla')['status'], 200) + self.assertEqual(self.get(url='/blah')['status'], 404) + self.assertEqual(self.get(url='/blah1')['status'], 200) + + self.route_match({"uri": "!/blah*1*"}) + self.assertEqual(self.get()['status'], 200) + self.assertEqual(self.get(url='/blah')['status'], 200) + self.assertEqual(self.get(url='/blah1')['status'], 404) + self.assertEqual(self.get(url='/blah12')['status'], 404) + self.assertEqual(self.get(url='/blah2')['status'], 200) def test_routes_match_wildcard_middle(self): self.route_match({"host": "ex*le"}) @@ -181,6 +212,15 @@ class TestRouting(TestApplicationProto): self.assertEqual(self.get(url='/blah')['status'], 200, '/blah') self.assertEqual(self.get(url='/BLAH')['status'], 404, '/BLAH') + def test_routes_match_many_wildcard_substrings_case_sensitive(self): + self.route_match({"uri": "*a*B*c*"}) + + self.assertEqual(self.get(url='/blah-a-B-c-blah')['status'], 200) + self.assertEqual(self.get(url='/a-B-c')['status'], 200) + self.assertEqual(self.get(url='/aBc')['status'], 200) + self.assertEqual(self.get(url='/aBCaBbc')['status'], 200) + self.assertEqual(self.get(url='/ABc')['status'], 404) + def test_routes_pass_encode(self): def check_pass(path, name): self.assertIn( @@ -1362,7 +1402,6 @@ class TestRouting(TestApplicationProto): self.route_match_invalid({"arguments": ["var"]}) self.route_match_invalid({"arguments": [{"var1": {}}]}) self.route_match_invalid({"arguments": {"": "bar"}}) - self.route_match_invalid({"arguments": {"foo": 
"*ba*r"}}) self.route_match_invalid({"arguments": {"foo": "%"}}) self.route_match_invalid({"arguments": {"foo": "%1G"}}) self.route_match_invalid({"arguments": {"%": "bar"}}) -- cgit From 18fbfc3d5027df68b7696afb16323c66f2582100 Mon Sep 17 00:00:00 2001 From: Igor Sysoev Date: Mon, 6 Jul 2020 15:32:20 +0300 Subject: Destroying temporary router configuration. The lifespan of a listening socket is longer than both router configuration's and temporary router configuration's lifespan, so the sockets should be stored in persistent queues. Safety is ensured by the fact that the router processes only one new configuration at any time. --- src/nxt_router.c | 68 +++++++++++++++++++++++++++++++++----------------------- src/nxt_router.h | 6 ----- 2 files changed, 40 insertions(+), 34 deletions(-) diff --git a/src/nxt_router.c b/src/nxt_router.c index 788199c7..88b87323 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -297,6 +297,14 @@ const nxt_process_init_t nxt_router_process = { }; +/* Queues of nxt_socket_conf_t */ +nxt_queue_t creating_sockets; +nxt_queue_t pending_sockets; +nxt_queue_t updating_sockets; +nxt_queue_t keeping_sockets; +nxt_queue_t deleting_sockets; + + static nxt_int_t nxt_router_prefork(nxt_task_t *task, nxt_process_t *process, nxt_mp_t *mp) { @@ -1027,11 +1035,11 @@ nxt_router_temp_conf(nxt_task_t *task) goto temp_fail; } - nxt_queue_init(&tmcf->deleting); - nxt_queue_init(&tmcf->keeping); - nxt_queue_init(&tmcf->updating); - nxt_queue_init(&tmcf->pending); - nxt_queue_init(&tmcf->creating); + nxt_queue_init(&creating_sockets); + nxt_queue_init(&pending_sockets); + nxt_queue_init(&updating_sockets); + nxt_queue_init(&keeping_sockets); + nxt_queue_init(&deleting_sockets); #if (NXT_TLS) nxt_queue_init(&tmcf->tls); @@ -1088,11 +1096,11 @@ nxt_router_conf_apply(nxt_task_t *task, void *obj, void *data) tmcf = obj; - qlk = nxt_queue_first(&tmcf->pending); + qlk = nxt_queue_first(&pending_sockets); - if (qlk != nxt_queue_tail(&tmcf->pending)) { + if (qlk != nxt_queue_tail(&pending_sockets)) { nxt_queue_remove(qlk); - nxt_queue_insert_tail(&tmcf->creating, qlk); + nxt_queue_insert_tail(&creating_sockets, qlk); skcf = nxt_queue_link_data(qlk, nxt_socket_conf_t, link); @@ -1150,8 +1158,8 @@ nxt_router_conf_apply(nxt_task_t *task, void *obj, void *data) nxt_router_engines_post(router, tmcf); - nxt_queue_add(&router->sockets, &tmcf->updating); - nxt_queue_add(&router->sockets, &tmcf->creating); + nxt_queue_add(&router->sockets, &updating_sockets); + nxt_queue_add(&router->sockets, &creating_sockets); router->access_log = rtcf->access_log; @@ -1185,6 +1193,8 @@ nxt_router_conf_ready(nxt_task_t *task, nxt_router_temp_conf_t *tmcf) if (--tmcf->count == 0) { nxt_router_conf_send(task, tmcf, NXT_PORT_MSG_RPC_READY_LAST); + + nxt_mp_destroy(tmcf->mem_pool); } } @@ -1202,8 +1212,8 @@ nxt_router_conf_error(nxt_task_t *task, nxt_router_temp_conf_t *tmcf) nxt_alert(task, "failed to apply new conf"); - for (qlk = nxt_queue_first(&tmcf->creating); - qlk != nxt_queue_tail(&tmcf->creating); + for (qlk = nxt_queue_first(&creating_sockets); + qlk != nxt_queue_tail(&creating_sockets); qlk = nxt_queue_next(qlk)) { skcf = nxt_queue_link_data(qlk, nxt_socket_conf_t, link); @@ -1217,9 +1227,9 @@ nxt_router_conf_error(nxt_task_t *task, nxt_router_temp_conf_t *tmcf) } nxt_queue_init(&new_socket_confs); - nxt_queue_add(&new_socket_confs, &tmcf->updating); - nxt_queue_add(&new_socket_confs, &tmcf->pending); - nxt_queue_add(&new_socket_confs, &tmcf->creating); + nxt_queue_add(&new_socket_confs, 
&updating_sockets); + nxt_queue_add(&new_socket_confs, &pending_sockets); + nxt_queue_add(&new_socket_confs, &creating_sockets); rtcf = tmcf->router_conf; @@ -1241,8 +1251,8 @@ nxt_router_conf_error(nxt_task_t *task, nxt_router_temp_conf_t *tmcf) router = rtcf->router; - nxt_queue_add(&router->sockets, &tmcf->keeping); - nxt_queue_add(&router->sockets, &tmcf->deleting); + nxt_queue_add(&router->sockets, &keeping_sockets); + nxt_queue_add(&router->sockets, &deleting_sockets); nxt_queue_add(&router->apps, &tmcf->previous); @@ -1253,6 +1263,8 @@ nxt_router_conf_error(nxt_task_t *task, nxt_router_temp_conf_t *tmcf) nxt_mp_destroy(rtcf->mem_pool); nxt_router_conf_send(task, tmcf, NXT_PORT_MSG_RPC_ERROR); + + nxt_mp_destroy(tmcf->mem_pool); } @@ -1902,7 +1914,7 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, tmcf->router_conf->access_log = access_log; } - nxt_queue_add(&tmcf->deleting, &router->sockets); + nxt_queue_add(&deleting_sockets, &router->sockets); nxt_queue_init(&router->sockets); return NXT_OK; @@ -2141,15 +2153,15 @@ nxt_router_listen_socket_find(nxt_router_temp_conf_t *tmcf, nskcf->listen = skcf->listen; nxt_queue_remove(qlk); - nxt_queue_insert_tail(&tmcf->keeping, qlk); + nxt_queue_insert_tail(&keeping_sockets, qlk); - nxt_queue_insert_tail(&tmcf->updating, &nskcf->link); + nxt_queue_insert_tail(&updating_sockets, &nskcf->link); return NXT_OK; } } - nxt_queue_insert_tail(&tmcf->pending, &nskcf->link); + nxt_queue_insert_tail(&pending_sockets, &nskcf->link); return NXT_DECLINED; } @@ -2577,13 +2589,13 @@ nxt_router_engine_conf_create(nxt_router_temp_conf_t *tmcf, { nxt_int_t ret; - ret = nxt_router_engine_joints_create(tmcf, recf, &tmcf->creating, + ret = nxt_router_engine_joints_create(tmcf, recf, &creating_sockets, nxt_router_listen_socket_create); if (nxt_slow_path(ret != NXT_OK)) { return ret; } - ret = nxt_router_engine_joints_create(tmcf, recf, &tmcf->updating, + ret = nxt_router_engine_joints_create(tmcf, recf, &updating_sockets, nxt_router_listen_socket_create); if (nxt_slow_path(ret != NXT_OK)) { return ret; @@ -2599,19 +2611,19 @@ nxt_router_engine_conf_update(nxt_router_temp_conf_t *tmcf, { nxt_int_t ret; - ret = nxt_router_engine_joints_create(tmcf, recf, &tmcf->creating, + ret = nxt_router_engine_joints_create(tmcf, recf, &creating_sockets, nxt_router_listen_socket_create); if (nxt_slow_path(ret != NXT_OK)) { return ret; } - ret = nxt_router_engine_joints_create(tmcf, recf, &tmcf->updating, + ret = nxt_router_engine_joints_create(tmcf, recf, &updating_sockets, nxt_router_listen_socket_update); if (nxt_slow_path(ret != NXT_OK)) { return ret; } - ret = nxt_router_engine_joints_delete(tmcf, recf, &tmcf->deleting); + ret = nxt_router_engine_joints_delete(tmcf, recf, &deleting_sockets); if (nxt_slow_path(ret != NXT_OK)) { return ret; } @@ -2631,12 +2643,12 @@ nxt_router_engine_conf_delete(nxt_router_temp_conf_t *tmcf, return ret; } - ret = nxt_router_engine_joints_delete(tmcf, recf, &tmcf->updating); + ret = nxt_router_engine_joints_delete(tmcf, recf, &updating_sockets); if (nxt_slow_path(ret != NXT_OK)) { return ret; } - return nxt_router_engine_joints_delete(tmcf, recf, &tmcf->deleting); + return nxt_router_engine_joints_delete(tmcf, recf, &deleting_sockets); } diff --git a/src/nxt_router.h b/src/nxt_router.h index 6004a459..d8e93be6 100644 --- a/src/nxt_router.h +++ b/src/nxt_router.h @@ -66,12 +66,6 @@ typedef struct { typedef struct { - nxt_queue_t creating; /* of nxt_socket_conf_t */ - nxt_queue_t pending; /* of nxt_socket_conf_t */ - 
nxt_queue_t updating; /* of nxt_socket_conf_t */ - nxt_queue_t keeping; /* of nxt_socket_conf_t */ - nxt_queue_t deleting; /* of nxt_socket_conf_t */ - #if (NXT_TLS) nxt_queue_t tls; /* of nxt_router_tlssock_t */ #endif -- cgit From b6792b00aebbe08f0fa3a4cb7826075114f717fa Mon Sep 17 00:00:00 2001 From: Axel Duch Date: Fri, 10 Jul 2020 10:28:53 +0100 Subject: Router: route patterns multi wildcards fix. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Matching 'start' and 'end' position now adjusted to avoid false matching. This is related to #434 issue on Github. Thanks to 洪志道 (Hong Zhi Dao). --- src/nxt_http_route.c | 9 ++++++--- test/test_routing.py | 21 +++++++++++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/src/nxt_http_route.c b/src/nxt_http_route.c index 66ab0fcd..6bdf2937 100644 --- a/src/nxt_http_route.c +++ b/src/nxt_http_route.c @@ -2344,6 +2344,7 @@ nxt_http_route_pattern(nxt_http_request_t *r, nxt_http_route_pattern_t *pattern, pattern_slices = pattern->pattern_slices; pattern_slice = pattern_slices->elts; + end = start + length; for (i = 0; i < pattern_slices->nelts; i++, pattern_slice++) { test = pattern_slice->start; @@ -2359,25 +2360,25 @@ nxt_http_route_pattern(nxt_http_request_t *r, nxt_http_route_pattern_t *pattern, if (nxt_http_route_memcmp(start, test, test_length, pattern->case_sensitive)) { + start += test_length; break; } return 0; case NXT_HTTP_ROUTE_PATTERN_END: - p = start + length - test_length; + p = end - test_length; if (nxt_http_route_memcmp(p, test, test_length, pattern->case_sensitive)) { + end = p; break; } return 0; case NXT_HTTP_ROUTE_PATTERN_SUBSTRING: - end = start + length; - if (pattern->case_sensitive) { p = nxt_memstrn(start, end, (char *) test, test_length); @@ -2388,6 +2389,8 @@ nxt_http_route_pattern(nxt_http_request_t *r, nxt_http_route_pattern_t *pattern, if (p == NULL) { return 0; } + + start = p + test_length; } } diff --git a/test/test_routing.py b/test/test_routing.py index 8a196e88..269e8efc 100644 --- a/test/test_routing.py +++ b/test/test_routing.py @@ -200,6 +200,27 @@ class TestRouting(TestApplicationProto): self.assertEqual(self.get(url='/blah')['status'], 200, '/blah') self.assertEqual(self.get(url='/BLAH')['status'], 404, '/BLAH') + def test_route_match_wildcards_ordered(self): + self.route_match({"uri": "/a*x*y*"}) + + self.assertEqual(self.get(url='/axy')['status'], 200, '/axy') + self.assertEqual(self.get(url='/ayx')['status'], 404, '/ayx') + + def test_route_match_wildcards_adjust_start(self): + self.route_match({"uri": "/bla*bla*"}) + + self.assertEqual(self.get(url='/bla_foo')['status'], 404, '/bla_foo') + + def test_route_match_wildcards_adjust_start_substr(self): + self.route_match({"uri": "*bla*bla*"}) + + self.assertEqual(self.get(url='/bla_foo')['status'], 404, '/bla_foo') + + def test_route_match_wildcards_adjust_end(self): + self.route_match({"uri": "/bla*bla"}) + + self.assertEqual(self.get(url='/foo_bla')['status'], 404, '/foo_bla') + def test_routes_match_wildcard_right_case_sensitive(self): self.route_match({"uri": "/bla*"}) -- cgit From 55bac9153b5e5d62cf7beab4cf53b3cec33ea8df Mon Sep 17 00:00:00 2001 From: Konstantin Pavlov Date: Mon, 13 Jul 2020 15:06:26 +0300 Subject: Docker: run entrypoint scripts for unitd-debug as well. 
--- pkg/docker/docker-entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/docker/docker-entrypoint.sh b/pkg/docker/docker-entrypoint.sh index 4ad7cb9a..f455a958 100755 --- a/pkg/docker/docker-entrypoint.sh +++ b/pkg/docker/docker-entrypoint.sh @@ -18,7 +18,7 @@ curl_put() return 0 } -if [ "$1" = "unitd" ]; then +if [ "$1" = "unitd" -o "$1" = "unitd-debug" ]; then if /usr/bin/find "/var/lib/unit/" -mindepth 1 -print -quit 2>/dev/null | /bin/grep -q .; then echo "$0: /var/lib/unit/ is not empty, skipping initial configuration..." else -- cgit From f69d4707527da8c48e93cb49f85c71c890ae8edd Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Tue, 21 Jul 2020 20:27:37 +0300 Subject: Fixed non-debug log time format in libunit. This makes log format used in libunit consistent with the daemon, where milliseconds are printed only in the debug log level. Currently a compile time switch is used, since there's no support for runtime changing of a log level for now. But in the future this should be a runtime condition, similar to nxt_log_time_handler(). --- src/nxt_unit.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/nxt_unit.c b/src/nxt_unit.c index 9f6eab95..89998e3f 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -4977,11 +4977,18 @@ nxt_unit_snprint_prefix(char *p, char *end, pid_t pid, int level) tm = *localtime(&ts.tv_sec); #endif +#if (NXT_DEBUG) p += snprintf(p, end - p, "%4d/%02d/%02d %02d:%02d:%02d.%03d ", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, (int) ts.tv_nsec / 1000000); +#else + p += snprintf(p, end - p, + "%4d/%02d/%02d %02d:%02d:%02d ", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec); +#endif p += snprintf(p, end - p, "[%s] %d#%"PRIu64" [unit] ", nxt_unit_log_levels[level], -- cgit From f46ef1b121e421eb4b773dd9a3a951bb021b66ff Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Tue, 21 Jul 2020 20:27:37 +0300 Subject: PHP: fixed incorrect time in interpreter error log messages. Previously, the log message callback used a generic log function, that relied on the process time cache. Since there were no time update calls in the application processes, all log lines were printed with the same time, usually correlated with the process start. Now, a non-cached logging function from libunit is used. 
--- src/nxt_php_sapi.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/nxt_php_sapi.c b/src/nxt_php_sapi.c index 7ae8484d..ab888f57 100644 --- a/src/nxt_php_sapi.c +++ b/src/nxt_php_sapi.c @@ -251,9 +251,9 @@ NXT_EXPORT nxt_app_module_t nxt_app_module = { static nxt_php_target_t *nxt_php_targets; static nxt_int_t nxt_php_last_target = -1; -static nxt_task_t *nxt_php_task; +static nxt_unit_ctx_t *nxt_php_unit_ctx; #if defined(ZTS) && PHP_VERSION_ID < 70400 -static void ***tsrm_ls; +static void ***tsrm_ls; #endif @@ -277,8 +277,6 @@ nxt_php_start(nxt_task_t *task, nxt_process_data_t *data) static nxt_str_t user_str = nxt_string("user"); static nxt_str_t admin_str = nxt_string("admin"); - nxt_php_task = task; - conf = data->app; c = &conf->u.php; @@ -405,6 +403,8 @@ nxt_php_start(nxt_task_t *task, nxt_process_data_t *data) return NXT_ERROR; } + nxt_php_unit_ctx = unit_ctx; + nxt_unit_run(unit_ctx); nxt_unit_done(unit_ctx); @@ -1277,5 +1277,6 @@ static void nxt_php_log_message(char *message TSRMLS_DC) #endif { - nxt_log(nxt_php_task, NXT_LOG_NOTICE, "php message: %s", message); + nxt_unit_log(nxt_php_unit_ctx, NXT_UNIT_LOG_NOTICE, + "php message: %s", message); } -- cgit From d86e0a7aec86bf2ed472aec3a69dd17a34329301 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Tue, 21 Jul 2020 20:27:37 +0300 Subject: PHP: logging in request context when possible. --- src/nxt_php_sapi.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/nxt_php_sapi.c b/src/nxt_php_sapi.c index ab888f57..ee0976ce 100644 --- a/src/nxt_php_sapi.c +++ b/src/nxt_php_sapi.c @@ -1277,6 +1277,16 @@ static void nxt_php_log_message(char *message TSRMLS_DC) #endif { - nxt_unit_log(nxt_php_unit_ctx, NXT_UNIT_LOG_NOTICE, - "php message: %s", message); + nxt_php_run_ctx_t *ctx; + + ctx = SG(server_context); + + if (ctx != NULL) { + nxt_unit_req_log(ctx->req, NXT_UNIT_LOG_NOTICE, + "php message: %s", message); + + } else { + nxt_unit_log(nxt_php_unit_ctx, NXT_UNIT_LOG_NOTICE, + "php message: %s", message); + } } -- cgit From ef7194819662975f53822ac27a071bf00259e38e Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Wed, 22 Jul 2020 10:04:57 +0300 Subject: Fixing buffer overflow check in discovery. Incorrect check prevents Unit to start without modules. This issue was introduced in 4a3ec07f4b19. --- src/nxt_application.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nxt_application.c b/src/nxt_application.c index 62167040..834badf9 100644 --- a/src/nxt_application.c +++ b/src/nxt_application.c @@ -289,7 +289,7 @@ nxt_discovery_modules(nxt_task_t *task, const char *path) *p++ = ']'; - if (nxt_slow_path(p >= end)) { + if (nxt_slow_path(p > end)) { nxt_alert(task, "discovery write past the buffer"); goto fail; } -- cgit From 661c223eda641eeb2ee02db3d1e1cd4e5cd583f7 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Wed, 22 Jul 2020 10:05:10 +0300 Subject: Tests: switching stdout to blocking before printing log. 
This is another attempt to fix the following error message: BlockingIOError: [Errno 11] write could not complete without blocking --- test/unit/main.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/unit/main.py b/test/unit/main.py index 408cf31c..8bca888c 100644 --- a/test/unit/main.py +++ b/test/unit/main.py @@ -407,8 +407,11 @@ class TestUnit(unittest.TestCase): print('Path to unit.log:\n' + path + '\n') if TestUnit.print_log: + os.set_blocking(sys.stdout.fileno(), True) + sys.stdout.flush() + if data is None: with open(path, 'r', encoding='utf-8', errors='ignore') as f: - data = f.read() - - print(data) + shutil.copyfileobj(f, sys.stdout) + else: + sys.stdout.write(data) -- cgit From fa696569f9e2c8d3d286eb14891c0adc797200d8 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Thu, 23 Jul 2020 14:24:16 +0300 Subject: PHP: removing assertion to fix build on macOS. The nxt_assert macro uses nxt_thread_context, which caused the following linker error when using it in the library: ld: illegal thread local variable reference to regular symbol _nxt_thread_context for architecture x86_64 --- src/nxt_php_sapi.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/nxt_php_sapi.c b/src/nxt_php_sapi.c index ee0976ce..767dcaec 100644 --- a/src/nxt_php_sapi.c +++ b/src/nxt_php_sapi.c @@ -707,7 +707,11 @@ nxt_php_dirname(const nxt_str_t *file, nxt_str_t *dir) { size_t length; - nxt_assert(file->length > 0 && file->start[0] == '/'); + if (file->length == 0 || file->start[0] != '/') { + nxt_unit_alert(NULL, "php_dirname: invalid file name " + "(not starts from '/')"); + return NXT_ERROR; + } length = file->length; -- cgit From 137c1e736f4572198929ecd4f7e88a2586224650 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Thu, 23 Jul 2020 14:24:55 +0300 Subject: Fixing main and application port structs file descriptor init. Correct value for non-initialized file descriptor is -1, because most of the checks in libunit compares file descriptor with -1 before performing an action. Using 0 as default value, may cause to close file descriptor #0, this may affect application logic. It is not required to list this patch in changelog because impact is not seen by end users. --- src/nxt_application.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/nxt_application.c b/src/nxt_application.c index 834badf9..c331764f 100644 --- a/src/nxt_application.c +++ b/src/nxt_application.c @@ -1282,6 +1282,7 @@ nxt_unit_default_init(nxt_task_t *task, nxt_unit_init_t *init) init->ready_port.id.pid = main_port->pid; init->ready_port.id.id = main_port->id; + init->ready_port.in_fd = -1; init->ready_port.out_fd = main_port->pair[1]; nxt_fd_blocking(task, main_port->pair[1]); @@ -1291,6 +1292,7 @@ nxt_unit_default_init(nxt_task_t *task, nxt_unit_init_t *init) init->read_port.id.pid = my_port->pid; init->read_port.id.id = my_port->id; init->read_port.in_fd = my_port->pair[0]; + init->read_port.out_fd = -1; nxt_fd_blocking(task, my_port->pair[0]); -- cgit From d3c8d622807c058c0f8aff6714cf40585b08faa5 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Thu, 23 Jul 2020 14:25:12 +0300 Subject: PHP: using nxt_unit_default_init() for module structure init. Using this function in all language modules helps to avoid code duplication and reduce the size of future patches. 
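For reference, the initialization pattern that language modules converge on after this change, sketched for a hypothetical module: the libunit calls, the nxt_process_data_t signature and the field names are taken from the diffs in this series, while the module name, its request handler and the error paths are purely illustrative, and the usual includes of a language module are assumed.

static nxt_int_t
nxt_example_start(nxt_task_t *task, nxt_process_data_t *data)
{
    nxt_int_t        ret;
    nxt_unit_ctx_t   *unit_ctx;
    nxt_unit_init_t  init;

    /* Fills in the runtime-derived fields (ready/read ports, stream)
       that every module previously had to set up by hand. */
    ret = nxt_unit_default_init(task, &init);
    if (nxt_slow_path(ret != NXT_OK)) {
        return ret;
    }

    /* Only the module-specific pieces remain. */
    init.callbacks.request_handler = nxt_example_request_handler;
    init.shm_limit = data->app->shm_limit;

    unit_ctx = nxt_unit_init(&init);
    if (nxt_slow_path(unit_ctx == NULL)) {
        return NXT_ERROR;
    }

    nxt_unit_run(unit_ctx);
    nxt_unit_done(unit_ctx);

    return NXT_OK;
}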
--- src/nxt_php_sapi.c | 36 ++++-------------------------------- 1 file changed, 4 insertions(+), 32 deletions(-) diff --git a/src/nxt_php_sapi.c b/src/nxt_php_sapi.c index 767dcaec..a6c490bd 100644 --- a/src/nxt_php_sapi.c +++ b/src/nxt_php_sapi.c @@ -265,8 +265,6 @@ nxt_php_start(nxt_task_t *task, nxt_process_data_t *data) nxt_str_t ini_path, name; nxt_int_t ret; nxt_uint_t n; - nxt_port_t *my_port, *main_port; - nxt_runtime_t *rt; nxt_unit_ctx_t *unit_ctx; nxt_unit_init_t php_init; nxt_conf_value_t *value; @@ -363,39 +361,13 @@ nxt_php_start(nxt_task_t *task, nxt_process_data_t *data) nxt_php_set_options(task, value, ZEND_INI_USER); } - rt = task->thread->runtime; - - main_port = rt->port_by_type[NXT_PROCESS_MAIN]; - if (nxt_slow_path(main_port == NULL)) { - nxt_alert(task, "main process not found"); - return NXT_ERROR; - } - - my_port = nxt_runtime_port_find(rt, nxt_pid, 0); - if (nxt_slow_path(my_port == NULL)) { - nxt_alert(task, "my_port not found"); - return NXT_ERROR; + ret = nxt_unit_default_init(task, &php_init); + if (nxt_slow_path(ret != NXT_OK)) { + nxt_alert(task, "nxt_unit_default_init() failed"); + return ret; } - nxt_memzero(&php_init, sizeof(nxt_unit_init_t)); - php_init.callbacks.request_handler = nxt_php_request_handler; - - php_init.ready_port.id.pid = main_port->pid; - php_init.ready_port.id.id = main_port->id; - php_init.ready_port.out_fd = main_port->pair[1]; - - nxt_fd_blocking(task, main_port->pair[1]); - - php_init.ready_stream = my_port->process->stream; - - php_init.read_port.id.pid = my_port->pid; - php_init.read_port.id.id = my_port->id; - php_init.read_port.in_fd = my_port->pair[0]; - - nxt_fd_blocking(task, my_port->pair[0]); - - php_init.log_fd = 2; php_init.shm_limit = conf->shm_limit; unit_ctx = nxt_unit_init(&php_init); -- cgit From 9641fb0ef1d708bb9ec8c00ea5ec694829e4fd67 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Thu, 23 Jul 2020 14:25:21 +0300 Subject: Fixing various router crashes on exit caused by runtime pool free. Currently, the router exits without waiting for the worker threads to stop. There is a short gap between the runtime memory pool's free and the exit, during which a worker thread may try to access a runtime structure. In turn, this may cause a crash. For now, it is better to keep this memory allocated. 
--- src/nxt_runtime.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/src/nxt_runtime.c b/src/nxt_runtime.c index 5aa061dd..694ce74d 100644 --- a/src/nxt_runtime.c +++ b/src/nxt_runtime.c @@ -527,7 +527,7 @@ nxt_runtime_stop_all_processes(nxt_task_t *task, nxt_runtime_t *rt) static void nxt_runtime_exit(nxt_task_t *task, void *obj, void *data) { - int status; + int status, engine_count; nxt_runtime_t *rt; nxt_process_t *process; nxt_event_engine_t *engine; @@ -571,14 +571,25 @@ nxt_runtime_exit(nxt_task_t *task, void *obj, void *data) } nxt_runtime_process_loop; - if (rt->port_by_type[rt->type] != NULL) { - nxt_port_use(task, rt->port_by_type[rt->type], -1); - } + status = rt->status; - nxt_thread_mutex_destroy(&rt->processes_mutex); + engine_count = 0; - status = rt->status; - nxt_mp_destroy(rt->mem_pool); + nxt_queue_each(engine, &rt->engines, nxt_event_engine_t, link) { + + engine_count++; + + } nxt_queue_loop; + + if (engine_count <= 1) { + if (rt->port_by_type[rt->type] != NULL) { + nxt_port_use(task, rt->port_by_type[rt->type], -1); + } + + nxt_thread_mutex_destroy(&rt->processes_mutex); + + nxt_mp_destroy(rt->mem_pool); + } nxt_debug(task, "exit: %d", status); -- cgit From 762511c5105119c45c676578f45473c7f906de60 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Thu, 23 Jul 2020 14:25:46 +0300 Subject: Fixing request_app_link reference counting. Racing conditions reproduced periodically on test_python_process_switch. --- src/nxt_router.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/nxt_router.c b/src/nxt_router.c index 88b87323..bf82501c 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -731,9 +731,7 @@ nxt_request_app_link_release_handler(nxt_task_t *task, void *obj, void *data) nxt_assert(req_app_link->work.data == data); - nxt_atomic_fetch_add(&req_app_link->use_count, -1); - - nxt_request_app_link_release(task, req_app_link); + nxt_request_app_link_use(task, req_app_link, -1); } @@ -4695,7 +4693,7 @@ nxt_router_port_select(nxt_task_t *task, nxt_port_select_state_t *state) &req_app_link->link_app_requests); } - ra_use_delta++; + nxt_request_app_link_inc_use(req_app_link); nxt_debug(task, "req_app_link stream #%uD enqueue to app->requests", req_app_link->stream); -- cgit From 85a1e083af931e3179595bcf9d6ea32810813752 Mon Sep 17 00:00:00 2001 From: Axel Duch Date: Fri, 24 Jul 2020 13:10:24 +0100 Subject: Minor changes and renaming an NJS artifact to NXT. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is partially related to #434 issue on Github. Thanks to 洪志道 (Hong Zhi Dao). 
--- src/nxt_http_route.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/nxt_http_route.c b/src/nxt_http_route.c index 6bdf2937..ae745f11 100644 --- a/src/nxt_http_route.c +++ b/src/nxt_http_route.c @@ -171,7 +171,7 @@ struct nxt_http_routes_s { }; -#define NJS_COOKIE_HASH \ +#define NXT_COOKIE_HASH \ (nxt_http_field_hash_end( \ nxt_http_field_hash_char( \ nxt_http_field_hash_char( \ @@ -1104,7 +1104,7 @@ nxt_http_route_pattern_create(nxt_task_t *task, nxt_mp_t *mp, test.length--; } - if (type == NXT_HTTP_ROUTE_PATTERN_EXACT && test.length != 0) { + if (type == NXT_HTTP_ROUTE_PATTERN_EXACT) { tmp.start = test.start; p = nxt_memchr(test.start, '*', test.length); @@ -2164,7 +2164,7 @@ nxt_http_route_cookies_parse(nxt_http_request_t *r) nxt_list_each(f, r->fields) { - if (f->hash != NJS_COOKIE_HASH + if (f->hash != NXT_COOKIE_HASH || f->name_length != 6 || nxt_strncasecmp(f->name, (u_char *) "Cookie", 6) != 0) { -- cgit From 29cf3cc6c100af9eff79ff96e51e859a34fa159e Mon Sep 17 00:00:00 2001 From: Axel Duch Date: Fri, 24 Jul 2020 17:10:26 +0100 Subject: Configuration: removing redundant check. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thanks to 洪志道 (Hong Zhi Dao). --- src/nxt_conf_validation.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/nxt_conf_validation.c b/src/nxt_conf_validation.c index f34712bd..cdbcf858 100644 --- a/src/nxt_conf_validation.c +++ b/src/nxt_conf_validation.c @@ -861,7 +861,7 @@ nxt_conf_vldt_type(nxt_conf_validation_t *vldt, nxt_str_t *name, { u_char *p; nxt_str_t expected; - nxt_bool_t serial; + nxt_bool_t comma; nxt_uint_t value_type, n, t; u_char buf[nxt_length(NXT_CONF_VLDT_ANY_TYPE_STR)]; @@ -889,7 +889,7 @@ nxt_conf_vldt_type(nxt_conf_validation_t *vldt, nxt_str_t *name, p = nxt_cpymem(p, "either ", 7); } - serial = (n > 2); + comma = (n > 2); for ( ;; ) { t = __builtin_ffs(type) - 1; @@ -902,7 +902,7 @@ nxt_conf_vldt_type(nxt_conf_validation_t *vldt, nxt_str_t *name, break; } - if (n > 1 || serial) { + if (comma) { *p++ = ','; } -- cgit From 2a71a8a9f4fcd3aedc65fc0c7f33aacf79457492 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Fri, 24 Jul 2020 20:25:15 +0300 Subject: Added missing ending indicator in object members validation lists. This fixes undefined behaviour due to array over-read if an unknown parameter is specified in an uidmap, a gidmap, or a php target object. --- src/nxt_conf_validation.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/nxt_conf_validation.c b/src/nxt_conf_validation.c index cdbcf858..553cda19 100644 --- a/src/nxt_conf_validation.c +++ b/src/nxt_conf_validation.c @@ -550,6 +550,8 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_procmap_members[] = { NXT_CONF_VLDT_INTEGER, NULL, NULL }, + + NXT_CONF_VLDT_END }; #endif @@ -691,7 +693,9 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_php_target_members[] = { { nxt_string("index"), NXT_CONF_VLDT_STRING, NULL, - NULL } + NULL }, + + NXT_CONF_VLDT_END }; -- cgit From 10f90f0d483d1a46a58d7fd42fb406cd46a9c1a6 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Fri, 24 Jul 2020 20:25:20 +0300 Subject: Configuration: added checking for presence of mandatory fields. 
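In short, each entry in a members table gains a flags column, and setting NXT_CONF_VLDT_REQUIRED there makes validation of the whole object fail when that member is absent. A trimmed illustration, with the "module" entry copied from the Python table in the diff below and the rest of the table elided:

static nxt_conf_vldt_object_t  nxt_conf_vldt_python_members[] = {

    { nxt_string("module"),
      NXT_CONF_VLDT_STRING,
      NXT_CONF_VLDT_REQUIRED,   /* the new flags column */
      NULL,
      NULL },

    /* ... optional members keep 0 in the flags column ... */

    NXT_CONF_VLDT_END
};

With the check performed up front by nxt_conf_vldt_object(), which reports "Required parameter \"module\" is missing.", the ad-hoc runtime checks in the PHP and Python modules could be dropped, as the last two hunks of this patch show.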
--- src/nxt_conf_validation.c | 134 ++++++++++++++++++++++++++++++++++++++++++++-- src/nxt_php_sapi.c | 5 -- src/nxt_python_wsgi.c | 5 -- 3 files changed, 131 insertions(+), 13 deletions(-) diff --git a/src/nxt_conf_validation.c b/src/nxt_conf_validation.c index 553cda19..27a08861 100644 --- a/src/nxt_conf_validation.c +++ b/src/nxt_conf_validation.c @@ -31,6 +31,11 @@ typedef enum { |NXT_CONF_VLDT_OBJECT) +typedef enum { + NXT_CONF_VLDT_REQUIRED = 1, +} nxt_conf_vldt_flags_t; + + typedef nxt_int_t (*nxt_conf_vldt_handler_t)(nxt_conf_validation_t *vldt, nxt_conf_value_t *value, void *data); @@ -38,14 +43,15 @@ typedef nxt_int_t (*nxt_conf_vldt_handler_t)(nxt_conf_validation_t *vldt, typedef struct { nxt_str_t name; - nxt_conf_vldt_type_t type; + nxt_conf_vldt_type_t type:32; + nxt_conf_vldt_flags_t flags:32; nxt_conf_vldt_handler_t validator; void *data; } nxt_conf_vldt_object_t; -#define NXT_CONF_VLDT_NEXT(f) { nxt_null_string, 0, NULL, (f) } -#define NXT_CONF_VLDT_END { nxt_null_string, 0, NULL, NULL } +#define NXT_CONF_VLDT_NEXT(f) { nxt_null_string, 0, 0, NULL, (f) } +#define NXT_CONF_VLDT_END { nxt_null_string, 0, 0, NULL, NULL } typedef nxt_int_t (*nxt_conf_vldt_member_t)(nxt_conf_validation_t *vldt, @@ -165,16 +171,19 @@ static nxt_int_t nxt_conf_vldt_clone_gidmap(nxt_conf_validation_t *vldt, static nxt_conf_vldt_object_t nxt_conf_vldt_websocket_members[] = { { nxt_string("read_timeout"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("keepalive_interval"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("max_frame_size"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, @@ -185,6 +194,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_websocket_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_static_members[] = { { nxt_string("mime_types"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_mtypes, NULL }, @@ -195,46 +205,55 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_static_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_http_members[] = { { nxt_string("header_read_timeout"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("body_read_timeout"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("send_timeout"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("idle_timeout"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("body_buffer_size"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("max_body_size"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("body_temp_path"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("websocket"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object, (void *) &nxt_conf_vldt_websocket_members }, { nxt_string("static"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object, (void *) &nxt_conf_vldt_static_members }, @@ -245,6 +264,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_http_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_setting_members[] = { { nxt_string("http"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object, (void *) &nxt_conf_vldt_http_members }, @@ -255,31 +275,37 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_setting_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_root_members[] = { { nxt_string("settings"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object, (void *) &nxt_conf_vldt_setting_members }, { nxt_string("listeners"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object_iterator, (void *) &nxt_conf_vldt_listener }, { nxt_string("routes"), NXT_CONF_VLDT_ARRAY | NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_routes, NULL }, { nxt_string("applications"), 
NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object_iterator, (void *) &nxt_conf_vldt_app }, { nxt_string("upstreams"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object_iterator, (void *) &nxt_conf_vldt_upstream }, { nxt_string("access_log"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, @@ -292,6 +318,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_root_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_tls_members[] = { { nxt_string("certificate"), NXT_CONF_VLDT_STRING, + 0, &nxt_conf_vldt_certificate, NULL }, @@ -304,11 +331,13 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_tls_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_listener_members[] = { { nxt_string("pass"), NXT_CONF_VLDT_STRING, + 0, &nxt_conf_vldt_pass, NULL }, { nxt_string("application"), NXT_CONF_VLDT_STRING, + 0, &nxt_conf_vldt_app_name, NULL }, @@ -316,6 +345,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_listener_members[] = { { nxt_string("tls"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object, (void *) &nxt_conf_vldt_tls_members }, @@ -328,46 +358,55 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_listener_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_match_members[] = { { nxt_string("method"), NXT_CONF_VLDT_STRING | NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_match_patterns, NULL }, { nxt_string("scheme"), NXT_CONF_VLDT_STRING, + 0, &nxt_conf_vldt_match_scheme_pattern, NULL }, { nxt_string("host"), NXT_CONF_VLDT_STRING | NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_match_patterns, NULL }, { nxt_string("source"), NXT_CONF_VLDT_STRING | NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_match_addrs, NULL }, { nxt_string("destination"), NXT_CONF_VLDT_STRING | NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_match_addrs, NULL }, { nxt_string("uri"), NXT_CONF_VLDT_STRING | NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_match_encoded_patterns, NULL }, { nxt_string("arguments"), NXT_CONF_VLDT_OBJECT | NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_match_encoded_patterns_sets, NULL }, { nxt_string("headers"), NXT_CONF_VLDT_OBJECT | NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_match_patterns_sets, NULL }, { nxt_string("cookies"), NXT_CONF_VLDT_OBJECT | NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_match_patterns_sets, NULL }, @@ -378,6 +417,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_match_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_pass_action_members[] = { { nxt_string("pass"), NXT_CONF_VLDT_STRING, + 0, &nxt_conf_vldt_pass, NULL }, @@ -388,11 +428,13 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_pass_action_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_return_action_members[] = { { nxt_string("return"), NXT_CONF_VLDT_INTEGER, + 0, &nxt_conf_vldt_return, NULL }, { nxt_string("location"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, @@ -403,11 +445,13 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_return_action_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_share_action_members[] = { { nxt_string("share"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("fallback"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_action, NULL }, @@ -418,6 +462,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_share_action_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_proxy_action_members[] = { { nxt_string("proxy"), NXT_CONF_VLDT_STRING, + 0, &nxt_conf_vldt_proxy, NULL }, @@ -428,11 +473,13 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_proxy_action_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_route_members[] = { { nxt_string("match"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object, (void *) 
&nxt_conf_vldt_match_members }, { nxt_string("action"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_action, NULL }, @@ -443,21 +490,25 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_route_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_app_limits_members[] = { { nxt_string("timeout"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("reschedule_timeout"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("requests"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("shm"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, @@ -468,16 +519,19 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_limits_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_app_processes_members[] = { { nxt_string("spare"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("max"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("idle_timeout"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, @@ -490,6 +544,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_namespaces_members[] = { #if (NXT_HAVE_CLONE_NEWUSER) { nxt_string("credential"), NXT_CONF_VLDT_BOOLEAN, + 0, NULL, NULL }, #endif @@ -497,6 +552,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_namespaces_members[] = { #if (NXT_HAVE_CLONE_NEWPID) { nxt_string("pid"), NXT_CONF_VLDT_BOOLEAN, + 0, NULL, NULL }, #endif @@ -504,6 +560,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_namespaces_members[] = { #if (NXT_HAVE_CLONE_NEWNET) { nxt_string("network"), NXT_CONF_VLDT_BOOLEAN, + 0, NULL, NULL }, #endif @@ -511,6 +568,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_namespaces_members[] = { #if (NXT_HAVE_CLONE_NEWNS) { nxt_string("mount"), NXT_CONF_VLDT_BOOLEAN, + 0, NULL, NULL }, #endif @@ -518,6 +576,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_namespaces_members[] = { #if (NXT_HAVE_CLONE_NEWUTS) { nxt_string("uname"), NXT_CONF_VLDT_BOOLEAN, + 0, NULL, NULL }, #endif @@ -525,6 +584,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_namespaces_members[] = { #if (NXT_HAVE_CLONE_NEWCGROUP) { nxt_string("cgroup"), NXT_CONF_VLDT_BOOLEAN, + 0, NULL, NULL }, #endif @@ -538,16 +598,19 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_namespaces_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_app_procmap_members[] = { { nxt_string("container"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("host"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, { nxt_string("size"), NXT_CONF_VLDT_INTEGER, + 0, NULL, NULL }, @@ -560,6 +623,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_procmap_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_app_isolation_members[] = { { nxt_string("namespaces"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_clone_namespaces, (void *) &nxt_conf_vldt_app_namespaces_members }, @@ -567,11 +631,13 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_isolation_members[] = { { nxt_string("uidmap"), NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_array_iterator, (void *) &nxt_conf_vldt_clone_uidmap }, { nxt_string("gidmap"), NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_array_iterator, (void *) &nxt_conf_vldt_clone_gidmap }, @@ -581,6 +647,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_isolation_members[] = { { nxt_string("rootfs"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, @@ -590,6 +657,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_isolation_members[] = { { nxt_string("new_privs"), NXT_CONF_VLDT_BOOLEAN, + 0, NULL, NULL }, @@ -602,41 +670,49 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_app_isolation_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_common_members[] = { { 
nxt_string("type"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("limits"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object, (void *) &nxt_conf_vldt_app_limits_members }, { nxt_string("processes"), NXT_CONF_VLDT_INTEGER | NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_processes, (void *) &nxt_conf_vldt_app_processes_members }, { nxt_string("user"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("group"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("working_directory"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("environment"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object_iterator, (void *) &nxt_conf_vldt_environment }, { nxt_string("isolation"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_isolation, (void *) &nxt_conf_vldt_app_isolation_members }, @@ -647,11 +723,13 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_common_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_external_members[] = { { nxt_string("executable"), NXT_CONF_VLDT_STRING, + NXT_CONF_VLDT_REQUIRED, NULL, NULL }, { nxt_string("arguments"), NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_array_iterator, (void *) &nxt_conf_vldt_argument }, @@ -662,16 +740,19 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_external_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_python_members[] = { { nxt_string("home"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("path"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("module"), NXT_CONF_VLDT_STRING, + NXT_CONF_VLDT_REQUIRED, NULL, NULL }, @@ -682,16 +763,19 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_python_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_php_target_members[] = { { nxt_string("root"), NXT_CONF_VLDT_STRING, + NXT_CONF_VLDT_REQUIRED, NULL, NULL }, { nxt_string("script"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("index"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, @@ -702,16 +786,19 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_php_target_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_php_options_members[] = { { nxt_string("file"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("admin"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object_iterator, (void *) &nxt_conf_vldt_php_option }, { nxt_string("user"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object_iterator, (void *) &nxt_conf_vldt_php_option }, @@ -722,6 +809,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_php_options_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_php_common_members[] = { { nxt_string("options"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object, (void *) &nxt_conf_vldt_php_options_members }, @@ -732,16 +820,19 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_php_common_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_php_notargets_members[] = { { nxt_string("root"), NXT_CONF_VLDT_STRING, + NXT_CONF_VLDT_REQUIRED, NULL, NULL }, { nxt_string("script"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, { nxt_string("index"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, @@ -752,21 +843,25 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_php_notargets_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_php_members[] = { { nxt_string("root"), NXT_CONF_VLDT_ANY_TYPE, + 0, &nxt_conf_vldt_php_targets_exclusive, (void *) "root" }, { nxt_string("script"), NXT_CONF_VLDT_ANY_TYPE, + 0, &nxt_conf_vldt_php_targets_exclusive, (void *) "script" }, { nxt_string("index"), NXT_CONF_VLDT_ANY_TYPE, + 0, &nxt_conf_vldt_php_targets_exclusive, (void *) "index" }, { nxt_string("targets"), NXT_CONF_VLDT_OBJECT, + 0, 
&nxt_conf_vldt_php_targets, NULL }, @@ -777,6 +872,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_php_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_perl_members[] = { { nxt_string("script"), NXT_CONF_VLDT_STRING, + NXT_CONF_VLDT_REQUIRED, NULL, NULL }, @@ -787,6 +883,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_perl_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_ruby_members[] = { { nxt_string("script"), NXT_CONF_VLDT_STRING, + NXT_CONF_VLDT_REQUIRED, NULL, NULL }, @@ -797,21 +894,25 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_ruby_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_java_members[] = { { nxt_string("classpath"), NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_array_iterator, (void *) &nxt_conf_vldt_java_classpath }, { nxt_string("webapp"), NXT_CONF_VLDT_STRING, + NXT_CONF_VLDT_REQUIRED, NULL, NULL }, { nxt_string("options"), NXT_CONF_VLDT_ARRAY, + 0, &nxt_conf_vldt_array_iterator, (void *) &nxt_conf_vldt_java_option }, { nxt_string("unit_jars"), NXT_CONF_VLDT_STRING, + 0, NULL, NULL }, @@ -822,6 +923,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_java_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_upstream_members[] = { { nxt_string("servers"), NXT_CONF_VLDT_OBJECT, + 0, &nxt_conf_vldt_object_iterator, (void *) &nxt_conf_vldt_server }, @@ -832,6 +934,7 @@ static nxt_conf_vldt_object_t nxt_conf_vldt_upstream_members[] = { static nxt_conf_vldt_object_t nxt_conf_vldt_upstream_server_members[] = { { nxt_string("weight"), NXT_CONF_VLDT_NUMBER, + 0, &nxt_conf_vldt_server_weight, NULL }, @@ -1729,6 +1832,31 @@ nxt_conf_vldt_object(nxt_conf_validation_t *vldt, nxt_conf_value_t *value, nxt_conf_value_t *member; nxt_conf_vldt_object_t *vals; + vals = data; + + for ( ;; ) { + if (vals->name.length == 0) { + + if (vals->data != NULL) { + vals = vals->data; + continue; + } + + break; + } + + if (vals->flags & NXT_CONF_VLDT_REQUIRED) { + member = nxt_conf_get_object_member(value, &vals->name, NULL); + + if (member == NULL) { + return nxt_conf_vldt_error(vldt, "Required parameter \"%V\" " + "is missing.", &vals->name); + } + } + + vals++; + } + index = 0; for ( ;; ) { diff --git a/src/nxt_php_sapi.c b/src/nxt_php_sapi.c index a6c490bd..00671b4a 100644 --- a/src/nxt_php_sapi.c +++ b/src/nxt_php_sapi.c @@ -402,11 +402,6 @@ nxt_php_set_target(nxt_task_t *task, nxt_php_target_t *target, value = nxt_conf_get_object_member(conf, &root_str, NULL); - if (value == NULL) { - nxt_alert(task, "no php root specified"); - return NXT_ERROR; - } - nxt_conf_get_string(value, &str); tmp = nxt_malloc(str.length + 1); diff --git a/src/nxt_python_wsgi.c b/src/nxt_python_wsgi.c index b9033a75..7e8d1d79 100644 --- a/src/nxt_python_wsgi.c +++ b/src/nxt_python_wsgi.c @@ -243,11 +243,6 @@ nxt_python_start(nxt_task_t *task, nxt_process_data_t *data) app_conf = data->app; c = &app_conf->u.python; - if (c->module.length == 0) { - nxt_alert(task, "python module is empty"); - return NXT_ERROR; - } - if (c->home != NULL) { len = nxt_strlen(c->home); -- cgit From c617480eefc0822d52f9153906bb526ad483b9a3 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Sat, 25 Jul 2020 11:06:32 +0300 Subject: Using plain shared memory for configuration pass. There is no restrictions on configration size and using segmented shared memory only doubles memory usage because to parse configration on router side, it needs to be 'plain' e. g. located in single continous memory buffer. 
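Restated, since the message above is terse: the controller now serializes the configuration JSON into a single anonymous shared memory object and passes the router only the descriptor plus the byte count, instead of pushing the JSON through segmented port memory, which roughly doubled peak memory use for large configurations because the router had to reassemble the pieces into one plain buffer before parsing anyway. The essence of both sides, condensed from the hunks below with buffer allocation, error handling and RPC registration elided:

/* controller side, from nxt_controller_conf_send() */
size = nxt_conf_json_length(conf, NULL);
fd = nxt_shm_open(task, size);                    /* new helper */
mem = nxt_mem_mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

end = nxt_conf_json_print(mem, conf, NULL);       /* one plain buffer */
nxt_mem_munmap(mem, size);
size = end - (u_char *) mem;

b->mem.free = nxt_cpymem(b->mem.pos, &size, sizeof(size_t));

nxt_port_socket_write(task, router_port,          /* fd travels with the message */
                      NXT_PORT_MSG_DATA_LAST | NXT_PORT_MSG_CLOSE_FD,
                      fd, stream, controller_port->id, b);

/* router side, from nxt_router_conf_data_handler() */
nxt_memcpy(&size, msg->buf->mem.pos, sizeof(size_t));

p = nxt_mem_mmap(NULL, size, PROT_READ, MAP_SHARED, msg->fd, 0);
nxt_fd_close(msg->fd);

ret = nxt_router_conf_create(task, tmcf, p, nxt_pointer_to(p, size));
/* ... apply the new configuration or report the error ... */
nxt_mem_munmap(p, size);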
--- src/nxt_controller.c | 50 ++++++++++++++----- src/nxt_port_memory.c | 135 +++++++++++++++++++++++++++++--------------------- src/nxt_port_memory.h | 1 + src/nxt_router.c | 49 ++++++++++++------ 4 files changed, 152 insertions(+), 83 deletions(-) diff --git a/src/nxt_controller.c b/src/nxt_controller.c index a61c127d..8c9d4c53 100644 --- a/src/nxt_controller.c +++ b/src/nxt_controller.c @@ -54,7 +54,7 @@ static nxt_int_t nxt_controller_conf_default(void); static void nxt_controller_conf_init_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data); static void nxt_controller_flush_requests(nxt_task_t *task); -static nxt_int_t nxt_controller_conf_send(nxt_task_t *task, +static nxt_int_t nxt_controller_conf_send(nxt_task_t *task, nxt_mp_t *mp, nxt_conf_value_t *conf, nxt_port_rpc_handler_t handler, void *data); static void nxt_controller_conn_init(nxt_task_t *task, void *obj, void *data); @@ -344,7 +344,7 @@ nxt_controller_send_current_conf(nxt_task_t *task) conf = nxt_controller_conf.root; if (conf != NULL) { - rc = nxt_controller_conf_send(task, conf, + rc = nxt_controller_conf_send(task, nxt_controller_conf.pool, conf, nxt_controller_conf_init_handler, NULL); if (nxt_fast_path(rc == NXT_OK)) { @@ -497,11 +497,14 @@ nxt_controller_flush_requests(nxt_task_t *task) static nxt_int_t -nxt_controller_conf_send(nxt_task_t *task, nxt_conf_value_t *conf, +nxt_controller_conf_send(nxt_task_t *task, nxt_mp_t *mp, nxt_conf_value_t *conf, nxt_port_rpc_handler_t handler, void *data) { + void *mem; + u_char *end; size_t size; uint32_t stream; + nxt_fd_t fd; nxt_int_t rc; nxt_buf_t *b; nxt_port_t *router_port, *controller_port; @@ -518,30 +521,53 @@ nxt_controller_conf_send(nxt_task_t *task, nxt_conf_value_t *conf, size = nxt_conf_json_length(conf, NULL); - b = nxt_port_mmap_get_buf(task, router_port, size); + b = nxt_buf_mem_alloc(mp, sizeof(size_t), 0); if (nxt_slow_path(b == NULL)) { return NXT_ERROR; } - b->mem.free = nxt_conf_json_print(b->mem.free, conf, NULL); + fd = nxt_shm_open(task, size); + if (nxt_slow_path(fd == -1)) { + return NXT_ERROR; + } + + mem = nxt_mem_mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (nxt_slow_path(mem == MAP_FAILED)) { + goto fail; + } + + end = nxt_conf_json_print(mem, conf, NULL); + + nxt_mem_munmap(mem, size); + + size = end - (u_char *) mem; + + b->mem.free = nxt_cpymem(b->mem.pos, &size, sizeof(size_t)); stream = nxt_port_rpc_register_handler(task, controller_port, handler, handler, router_port->pid, data); - if (nxt_slow_path(stream == 0)) { - return NXT_ERROR; + goto fail; } - rc = nxt_port_socket_write(task, router_port, NXT_PORT_MSG_DATA_LAST, -1, - stream, controller_port->id, b); + rc = nxt_port_socket_write(task, router_port, + NXT_PORT_MSG_DATA_LAST | NXT_PORT_MSG_CLOSE_FD, + fd, stream, controller_port->id, b); if (nxt_slow_path(rc != NXT_OK)) { nxt_port_rpc_cancel(task, controller_port, stream); - return NXT_ERROR; + + goto fail; } return NXT_OK; + +fail: + + nxt_fd_close(fd); + + return NXT_ERROR; } @@ -1201,7 +1227,7 @@ nxt_controller_process_config(nxt_task_t *task, nxt_controller_request_t *req, goto alloc_fail; } - rc = nxt_controller_conf_send(task, value, + rc = nxt_controller_conf_send(task, mp, value, nxt_controller_conf_handler, req); if (nxt_slow_path(rc != NXT_OK)) { @@ -1282,7 +1308,7 @@ nxt_controller_process_config(nxt_task_t *task, nxt_controller_request_t *req, goto alloc_fail; } - rc = nxt_controller_conf_send(task, value, + rc = nxt_controller_conf_send(task, mp, value, nxt_controller_conf_handler, req); 
if (nxt_slow_path(rc != NXT_OK)) { diff --git a/src/nxt_port_memory.c b/src/nxt_port_memory.c index f4d2125c..fd472cc6 100644 --- a/src/nxt_port_memory.c +++ b/src/nxt_port_memory.c @@ -286,7 +286,6 @@ nxt_port_new_port_mmap(nxt_task_t *task, nxt_process_t *process, nxt_port_t *port, nxt_bool_t tracking, nxt_int_t n) { void *mem; - u_char *p, name[64]; nxt_fd_t fd; nxt_int_t i; nxt_free_map_t *free_map; @@ -310,63 +309,8 @@ nxt_port_new_port_mmap(nxt_task_t *task, nxt_process_t *process, return NULL; } - p = nxt_sprintf(name, name + sizeof(name), NXT_SHM_PREFIX "unit.%PI.%uxD", - nxt_pid, nxt_random(&task->thread->random)); - *p = '\0'; - -#if (NXT_HAVE_MEMFD_CREATE) - - fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC); - - if (nxt_slow_path(fd == -1)) { - nxt_alert(task, "memfd_create(%s) failed %E", name, nxt_errno); - - goto remove_fail; - } - - nxt_debug(task, "memfd_create(%s): %FD", name, fd); - -#elif (NXT_HAVE_SHM_OPEN_ANON) - - fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR); - - nxt_debug(task, "shm_open(SHM_ANON): %FD", fd); - - if (nxt_slow_path(fd == -1)) { - nxt_alert(task, "shm_open(SHM_ANON) failed %E", nxt_errno); - - goto remove_fail; - } - -#elif (NXT_HAVE_SHM_OPEN) - - /* Just in case. */ - shm_unlink((char *) name); - - fd = shm_open((char *) name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR); - - nxt_debug(task, "shm_open(%s): %FD", name, fd); - + fd = nxt_shm_open(task, PORT_MMAP_SIZE); if (nxt_slow_path(fd == -1)) { - nxt_alert(task, "shm_open(%s) failed %E", name, nxt_errno); - - goto remove_fail; - } - - if (nxt_slow_path(shm_unlink((char *) name) == -1)) { - nxt_log(task, NXT_LOG_WARN, "shm_unlink(%s) failed %E", name, - nxt_errno); - } - -#else - -#error No working shared memory implementation. - -#endif - - if (nxt_slow_path(ftruncate(fd, PORT_MMAP_SIZE) == -1)) { - nxt_log(task, NXT_LOG_WARN, "ftruncate() failed %E", nxt_errno); - goto remove_fail; } @@ -423,6 +367,83 @@ remove_fail: } +nxt_int_t +nxt_shm_open(nxt_task_t *task, size_t size) +{ + nxt_fd_t fd; + +#if (NXT_HAVE_MEMFD_CREATE || NXT_HAVE_SHM_OPEN) + + u_char *p, name[64]; + + p = nxt_sprintf(name, name + sizeof(name), NXT_SHM_PREFIX "unit.%PI.%uxD", + nxt_pid, nxt_random(&task->thread->random)); + *p = '\0'; + +#endif + +#if (NXT_HAVE_MEMFD_CREATE) + + fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC); + + if (nxt_slow_path(fd == -1)) { + nxt_alert(task, "memfd_create(%s) failed %E", name, nxt_errno); + + return -1; + } + + nxt_debug(task, "memfd_create(%s): %FD", name, fd); + +#elif (NXT_HAVE_SHM_OPEN_ANON) + + fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR); + + if (nxt_slow_path(fd == -1)) { + nxt_alert(task, "shm_open(SHM_ANON) failed %E", nxt_errno); + + return -1; + } + + nxt_debug(task, "shm_open(SHM_ANON): %FD", fd); + +#elif (NXT_HAVE_SHM_OPEN) + + /* Just in case. */ + shm_unlink((char *) name); + + fd = shm_open((char *) name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR); + + if (nxt_slow_path(fd == -1)) { + nxt_alert(task, "shm_open(%s) failed %E", name, nxt_errno); + + return -1; + } + + nxt_debug(task, "shm_open(%s): %FD", name, fd); + + if (nxt_slow_path(shm_unlink((char *) name) == -1)) { + nxt_log(task, NXT_LOG_WARN, "shm_unlink(%s) failed %E", name, + nxt_errno); + } + +#else + +#error No working shared memory implementation. 
+ +#endif + + if (nxt_slow_path(ftruncate(fd, size) == -1)) { + nxt_alert(task, "ftruncate() failed %E", nxt_errno); + + nxt_fd_close(fd); + + return -1; + } + + return fd; +} + + static nxt_port_mmap_handler_t * nxt_port_mmap_get(nxt_task_t *task, nxt_port_t *port, nxt_chunk_id_t *c, nxt_int_t n, nxt_bool_t tracking) diff --git a/src/nxt_port_memory.h b/src/nxt_port_memory.h index 748549b1..2cd4bd76 100644 --- a/src/nxt_port_memory.h +++ b/src/nxt_port_memory.h @@ -71,5 +71,6 @@ typedef enum nxt_port_method_e nxt_port_method_t; nxt_port_method_t nxt_port_mmap_get_method(nxt_task_t *task, nxt_port_t *port, nxt_buf_t *b); +nxt_int_t nxt_shm_open(nxt_task_t *task, size_t size); #endif /* _NXT_PORT_MEMORY_H_INCLUDED_ */ diff --git a/src/nxt_router.c b/src/nxt_router.c index bf82501c..d4d037e1 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -906,8 +906,9 @@ nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) void nxt_router_conf_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { + void *p; + size_t size; nxt_int_t ret; - nxt_buf_t *b; nxt_router_temp_conf_t *tmcf; tmcf = nxt_router_temp_conf(task); @@ -915,9 +916,33 @@ nxt_router_conf_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) return; } - nxt_debug(task, "nxt_router_conf_data_handler(%O): %*s", - nxt_buf_used_size(msg->buf), - (size_t) nxt_buf_used_size(msg->buf), msg->buf->mem.pos); + if (nxt_slow_path(msg->fd == -1)) { + nxt_alert(task, "conf_data_handler: invalid file shm fd"); + return; + } + + if (nxt_buf_mem_used_size(&msg->buf->mem) != sizeof(size_t)) { + nxt_alert(task, "conf_data_handler: unexpected buffer size (%d)", + (int) nxt_buf_mem_used_size(&msg->buf->mem)); + + nxt_fd_close(msg->fd); + msg->fd = -1; + + return; + } + + nxt_memcpy(&size, msg->buf->mem.pos, sizeof(size_t)); + + p = nxt_mem_mmap(NULL, size, PROT_READ, MAP_SHARED, msg->fd, 0); + + nxt_fd_close(msg->fd); + msg->fd = -1; + + if (nxt_slow_path(p == MAP_FAILED)) { + return; + } + + nxt_debug(task, "conf_data_handler(%uz): %*s", size, size, p); tmcf->router_conf->router = nxt_router; tmcf->stream = msg->port_msg.stream; @@ -928,20 +953,12 @@ nxt_router_conf_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) if (nxt_slow_path(tmcf->port == NULL)) { nxt_alert(task, "reply port not found"); - return; + goto fail; } nxt_port_use(task, tmcf->port, 1); - b = nxt_buf_chk_make_plain(tmcf->router_conf->mem_pool, - msg->buf, msg->size); - if (nxt_slow_path(b == NULL)) { - nxt_router_conf_error(task, tmcf); - - return; - } - - ret = nxt_router_conf_create(task, tmcf, b->mem.pos, b->mem.free); + ret = nxt_router_conf_create(task, tmcf, p, nxt_pointer_to(p, size)); if (nxt_fast_path(ret == NXT_OK)) { nxt_router_conf_apply(task, tmcf, NULL); @@ -949,6 +966,10 @@ nxt_router_conf_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) } else { nxt_router_conf_error(task, tmcf); } + +fail: + + nxt_mem_munmap(p, size); } -- cgit From dc1377dc489937abcc6a5d0dcbfa628e0e6bdf1c Mon Sep 17 00:00:00 2001 From: Andrei Zeliankou Date: Tue, 28 Jul 2020 03:09:50 +0100 Subject: Tests: style. 
--- test/test_php_targets.py | 1 - test/test_proxy_chunked.py | 6 +++--- test/unit/applications/lang/python.py | 2 +- test/unit/main.py | 3 +-- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/test/test_php_targets.py b/test/test_php_targets.py index 9c1ba2a6..0657554a 100644 --- a/test/test_php_targets.py +++ b/test/test_php_targets.py @@ -1,4 +1,3 @@ -import unittest from unit.applications.lang.php import TestApplicationPHP class TestPHPTargets(TestApplicationPHP): diff --git a/test/test_proxy_chunked.py b/test/test_proxy_chunked.py index 2d4f7b94..f344b69a 100644 --- a/test/test_proxy_chunked.py +++ b/test/test_proxy_chunked.py @@ -1,7 +1,6 @@ -import os import re -import socket import select +import socket import time from unit.applications.lang.python import TestApplicationPython @@ -96,7 +95,8 @@ class TestProxyChunked(TestApplicationPython): "routes": [ { "action": { - "proxy": "http://127.0.0.1:" + str(self.SERVER_PORT) + "proxy": "http://127.0.0.1:" + + str(self.SERVER_PORT) } } ], diff --git a/test/unit/applications/lang/python.py b/test/unit/applications/lang/python.py index 31a04107..91559f4b 100644 --- a/test/unit/applications/lang/python.py +++ b/test/unit/applications/lang/python.py @@ -1,5 +1,5 @@ -import shutil import os +import shutil from unit.applications.proto import TestApplicationProto diff --git a/test/unit/main.py b/test/unit/main.py index 8bca888c..253ddc71 100644 --- a/test/unit/main.py +++ b/test/unit/main.py @@ -58,7 +58,6 @@ class TestUnit(unittest.TestCase): if prereq_version == 'all': for version in available_versions: self.application_type = type + ' ' + version - self.application_version = version super().run(result) elif prereq_version == 'any': self.application_type = type + ' ' + available_versions[0] @@ -166,7 +165,7 @@ class TestUnit(unittest.TestCase): self._run() def _run(self): - build_dir = os.path.join(self.pardir, 'build') + build_dir = self.pardir + '/build' self.unitd = build_dir + '/unitd' if not os.path.isfile(self.unitd): -- cgit From 355ed9697d10f163f4b96bc459f9c402aefa5d55 Mon Sep 17 00:00:00 2001 From: Andrei Zeliankou Date: Tue, 28 Jul 2020 04:53:32 +0100 Subject: Tests: fixed double stop() call for some tests. 
--- test/unit/main.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/test/unit/main.py b/test/unit/main.py index 253ddc71..83aa9139 100644 --- a/test/unit/main.py +++ b/test/unit/main.py @@ -201,6 +201,8 @@ class TestUnit(unittest.TestCase): self._print_log() exit("Could not start unit") + self._started = True + self.skip_alerts = [ r'read signalfd\(4\) failed', r'sendmsg.+failed', @@ -209,7 +211,7 @@ class TestUnit(unittest.TestCase): self.skip_sanitizer = False def tearDown(self): - stop_errs = self.stop() + self.stop() # detect errors and failures for current test @@ -244,18 +246,21 @@ class TestUnit(unittest.TestCase): else: self._print_log() - self.assertListEqual(stop_errs, [None, None], 'stop errors') + self.assertListEqual(self.stop_errors, [None, None], 'stop errors') def stop(self): - errors = [] + if not self._started: + return + + self.stop_errors = [] - errors.append(self._stop()) + self.stop_errors.append(self._stop()) - errors.append(self.stop_processes()) + self.stop_errors.append(self.stop_processes()) atexit.unregister(self.stop) - return errors + self._started = False def _stop(self): if self._p.poll() is not None: -- cgit From f1e445bdef64ceba047d07b05d1b78137ddc2a7a Mon Sep 17 00:00:00 2001 From: Andrei Zeliankou Date: Tue, 28 Jul 2020 04:53:40 +0100 Subject: Tests: added PHP test with time check in error log messages. --- test/php/error_log/index.php | 3 +++ test/test_php_application.py | 26 ++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 test/php/error_log/index.php diff --git a/test/php/error_log/index.php b/test/php/error_log/index.php new file mode 100644 index 00000000..fd90adfe --- /dev/null +++ b/test/php/error_log/index.php @@ -0,0 +1,3 @@ + diff --git a/test/test_php_application.py b/test/test_php_application.py index 1259d22d..d8bfade2 100644 --- a/test/test_php_application.py +++ b/test/test_php_application.py @@ -1,5 +1,7 @@ import os +import re import shutil +import time import unittest from unit.applications.lang.php import TestApplicationPHP @@ -488,6 +490,30 @@ class TestPHPApplication(TestApplicationPHP): self.get()['body'], r'012345', 'disable_classes before' ) + def test_php_application_error_log(self): + self.load('error_log') + + self.assertEqual(self.get()['status'], 200, 'status') + + time.sleep(1) + + self.assertEqual(self.get()['status'], 200, 'status 2') + + self.stop() + + pattern = r'\d{4}\/\d\d\/\d\d\s\d\d:.+\[notice\].+Error in application' + + self.assertIsNotNone(self.wait_for_record(pattern), 'errors print') + + with open(self.testdir + '/unit.log', 'r', errors='ignore') as f: + errs = re.findall(pattern, f.read()) + + self.assertEqual(len(errs), 2, 'error_log count') + + date = errs[0].split('[')[0] + date2 = errs[1].split('[')[0] + self.assertNotEqual(date, date2, 'date diff') + def test_php_application_script(self): self.assertIn( 'success', -- cgit From c3e6901f5328ffaaf3201dc75262e21ee0eedc32 Mon Sep 17 00:00:00 2001 From: Axel Duch Date: Tue, 28 Jul 2020 14:51:33 +0100 Subject: Configuration: fixed buffer over-read in pattern validation. There was an undefined behavior in the validation function, caused by testing one character after the string if a wildcard was at the end. 
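A concrete trigger, for illustration only (the pattern value is an example, not taken from the change): with "uri": "/images*" the pattern length is 8 and the final character is the wildcard, so the old loop condition let the double-asterisk test read one byte past the string. Condensed from the hunk below:

/* before: at i == pattern.length - 1 the test below touches
   pattern.start[pattern.length], i.e. memory past the string */
for (i = first; i < pattern.length; i++) {
    if (pattern.start[i] == '*' && pattern.start[i + 1] == '*') {
        /* reject the "**" marker */
    }
}

/* after: the last character never needs a successor check */
last = pattern.length - 1;

for (i = first; i < last; i++) {
    if (pattern.start[i] == '*' && pattern.start[i + 1] == '*') {
        /* reject the "**" marker */
    }
}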
--- src/nxt_conf_validation.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/nxt_conf_validation.c b/src/nxt_conf_validation.c index 27a08861..a5e0663f 100644 --- a/src/nxt_conf_validation.c +++ b/src/nxt_conf_validation.c @@ -1454,7 +1454,7 @@ nxt_conf_vldt_match_pattern(nxt_conf_validation_t *vldt, nxt_conf_value_t *value) { nxt_str_t pattern; - nxt_uint_t i, first; + nxt_uint_t i, first, last; if (nxt_conf_type(value) != NXT_CONF_STRING) { return nxt_conf_vldt_error(vldt, "The \"match\" patterns for \"host\", " @@ -1468,8 +1468,9 @@ nxt_conf_vldt_match_pattern(nxt_conf_validation_t *vldt, } first = (pattern.start[0] == '!'); + last = pattern.length - 1; - for (i = first; i < pattern.length; i++) { + for (i = first; i < last; i++) { if (pattern.start[i] == '*' && pattern.start[i + 1] == '*') { return nxt_conf_vldt_error(vldt, "The \"match\" pattern must " "not contain double \"*\" markers."); -- cgit From f3471c29c0870cffff3d96c1c50c88c47a4d8009 Mon Sep 17 00:00:00 2001 From: Tiago Natel de Moura Date: Tue, 28 Jul 2020 16:17:18 +0100 Subject: PHP: fixed version comparison in configure script. Some PPAs for Ubuntu package PHP with versions like: 7.2.28-3+ubuntu18.04.1+deb.sury.org+1 But the script expected only "X.Y.Z". The issue was introduced in: http://hg.nginx.org/unit/rev/2ecb15904ba5 --- auto/modules/php | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/auto/modules/php b/auto/modules/php index 2cec2f44..75d60242 100644 --- a/auto/modules/php +++ b/auto/modules/php @@ -77,8 +77,8 @@ if /bin/sh -c "${NXT_PHP_CONFIG} --version" >> $NXT_AUTOCONF_ERR 2>&1; then $echo " + PHP SAPI: [`${NXT_PHP_CONFIG} --php-sapis`]" NXT_PHP_MAJOR_VERSION=${NXT_PHP_VERSION%%.*} - NXT_PHP_MINOR_VERSION=${NXT_PHP_VERSION#??} - NXT_PHP_MINOR_VERSION=${NXT_PHP_MINOR_VERSION%.*} + NXT_PHP_MINOR_VERSION=${NXT_PHP_VERSION#*.} + NXT_PHP_MINOR_VERSION=${NXT_PHP_MINOR_VERSION%%.*} if [ $NXT_PHP_MAJOR_VERSION = 5 -a $NXT_PHP_MINOR_VERSION -lt 4 ]; then NXT_PHP_ADDITIONAL_FLAGS=-Wno-write-strings -- cgit From b28b4459b0899cb8357df5f6c1e904fd1a34ebe3 Mon Sep 17 00:00:00 2001 From: Tiago Natel de Moura Date: Fri, 31 Jul 2020 12:21:21 +0100 Subject: Isolation: fixed the generation of mounts table. Since the introduction of rootfs feature, some language modules can't be configured multiple times. Now the configure generates a separate nxt__mounts.h for each module compiled. 
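The header name in the message above appears to have lost a placeholder in rendering; judging by the hunks below it is nxt_<module>_mounts.h, generated once per configured module. The header is selected at compile time through a computed include; a sketch for the Python module, where the concrete file name in the comment is only an example of what configure would derive from NXT_PYTHON_MODULE:

/*
 * configure emits nxt_${NXT_PYTHON_MODULE}_mounts.h and builds the
 * module's objects with roughly
 *
 *     -DNXT_PYTHON_MOUNTS_H="nxt_python3.8_mounts.h"
 *
 * so two Python modules configured side by side no longer overwrite
 * the single shared nxt_python_mounts.h used before this change.
 */
#include NXT_PYTHON_MOUNTS_H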
--- auto/modules/java | 9 ++++++--- auto/modules/python | 9 +++++---- auto/modules/ruby | 15 +++++++++------ src/nxt_java.c | 3 ++- src/nxt_python_wsgi.c | 12 ++++-------- src/ruby/nxt_ruby.c | 3 ++- 6 files changed, 28 insertions(+), 23 deletions(-) diff --git a/auto/modules/java b/auto/modules/java index a3b1b958..fa68f573 100644 --- a/auto/modules/java +++ b/auto/modules/java @@ -227,7 +227,6 @@ NXT_JAVA_INSTALL_JARS= NXT_JAVA_UNINSTALL_JARS= NXT_JAVA_JARS=$NXT_BUILD_DIR/$NXT_JAVA_MODULE/nxt_jars.h -NXT_JAVA_MOUNTS_HEADER=$NXT_BUILD_DIR/$NXT_JAVA_MODULE/nxt_java_mounts.h mkdir -p $NXT_BUILD_DIR/$NXT_JAVA_MODULE cat << END > $NXT_JAVA_JARS @@ -318,7 +317,10 @@ NXT_JAVA_LIBC_DIR=`ldd "$NXT_JAVA_LIBJVM" | grep libc.so | cut -d' ' -f3` NXT_JAVA_LIBC_DIR=`dirname $NXT_JAVA_LIBC_DIR` fi -cat << END > $NXT_JAVA_MOUNTS_HEADER + +NXT_JAVA_MOUNTS_HEADER=nxt_${NXT_JAVA_MODULE}_mounts.h + +cat << END > $NXT_BUILD_DIR/$NXT_JAVA_MOUNTS_HEADER #ifndef _NXT_JAVA_MOUNTS_H_INCLUDED_ #define _NXT_JAVA_MOUNTS_H_INCLUDED_ @@ -371,7 +373,8 @@ for nxt_src in $NXT_JAVA_MODULE_SRCS; do $NXT_BUILD_DIR/$nxt_obj: $nxt_src $NXT_VERSION_H mkdir -p $NXT_BUILD_DIR/src/java - \$(CC) -c \$(CFLAGS) \$(NXT_INCS) $NXT_JAVA_INCLUDE \\ + \$(CC) -c \$(CFLAGS) -DNXT_JAVA_MOUNTS_H=\"$NXT_JAVA_MOUNTS_HEADER\" \\ + \$(NXT_INCS) $NXT_JAVA_INCLUDE \\ $nxt_dep_flags \\ -o $NXT_BUILD_DIR/$nxt_obj $nxt_src $nxt_dep_post diff --git a/auto/modules/python b/auto/modules/python index ab314013..c14bf7e0 100644 --- a/auto/modules/python +++ b/auto/modules/python @@ -130,13 +130,13 @@ if grep ^$NXT_PYTHON_MODULE: $NXT_MAKEFILE 2>&1 > /dev/null; then fi -NXT_PYTHON_MOUNTS_HEADER=$NXT_BUILD_DIR/nxt_python_mounts.h +NXT_PYTHON_MOUNTS_HEADER=nxt_${NXT_PYTHON_MODULE}_mounts.h $NXT_PYTHON -c 'import os.path import sys pyver = "python" + str(sys.version_info[0]) + "." + str(sys.version_info[1]) -print("static const nxt_fs_mount_t nxt_python%d%d_mounts[] = {" % (sys.version_info[0], sys.version_info[1])) +print("static const nxt_fs_mount_t nxt_python_mounts[] = {") pattern = "{(u_char *) \"%s\", (u_char *) \"%s\", (u_char *) \"bind\", NXT_MS_BIND|NXT_MS_REC, NULL}," base = None @@ -157,7 +157,7 @@ for p in sys.path: print("};\n\n") -' >> $NXT_PYTHON_MOUNTS_HEADER +' > $NXT_BUILD_DIR/$NXT_PYTHON_MOUNTS_HEADER $echo " + Python module: ${NXT_PYTHON_MODULE}.unit.so" @@ -185,7 +185,8 @@ for nxt_src in $NXT_PYTHON_MODULE_SRCS; do cat << END >> $NXT_MAKEFILE $NXT_BUILD_DIR/$nxt_obj: $nxt_src $NXT_VERSION_H - \$(CC) -c \$(CFLAGS) \$(NXT_INCS) $NXT_PYTHON_INCLUDE \\ + \$(CC) -c \$(CFLAGS) -DNXT_PYTHON_MOUNTS_H=\"$NXT_PYTHON_MOUNTS_HEADER\" \\ + \$(NXT_INCS) $NXT_PYTHON_INCLUDE \\ $nxt_dep_flags \\ -o $NXT_BUILD_DIR/$nxt_obj $nxt_src $nxt_dep_post diff --git a/auto/modules/ruby b/auto/modules/ruby index f7334cc7..5cead26b 100644 --- a/auto/modules/ruby +++ b/auto/modules/ruby @@ -51,7 +51,6 @@ $echo "configuring Ruby module ..." 
>> $NXT_AUTOCONF_ERR NXT_RUBY=${NXT_RUBY=ruby} NXT_RUBY_MODULE=${NXT_RUBY_MODULE=${NXT_RUBY}} -NXT_RUBY_MOUNTS_HEADER=$NXT_BUILD_DIR/nxt_ruby_mounts.h nxt_found=no @@ -145,7 +144,10 @@ if grep ^$NXT_RUBY_MODULE: $NXT_MAKEFILE 2>&1 > /dev/null; then fi -cat << END > $NXT_RUBY_MOUNTS_HEADER +NXT_RUBY_MOUNTS_HEADER=nxt_${NXT_RUBY_MODULE}_mounts.h +NXT_RUBY_MOUNTS_PATH=$NXT_BUILD_DIR/$NXT_RUBY_MOUNTS_HEADER + +cat << END > $NXT_RUBY_MOUNTS_PATH static const nxt_fs_mount_t nxt_ruby_mounts[] = { {(u_char *) "$NXT_RUBY_RUBYHDRDIR", (u_char *) "$NXT_RUBY_RUBYHDRDIR", @@ -166,11 +168,11 @@ static const nxt_fs_mount_t nxt_ruby_mounts[] = { END for path in `echo $NXT_RUBY_GEMPATH | tr ':' '\n'`; do - $echo "{(u_char *) \"$path\", (u_char *) \"$path\"," >> $NXT_RUBY_MOUNTS_HEADER - $echo "(u_char *) \"bind\", NXT_MS_BIND | NXT_MS_REC, NULL}," >> $NXT_RUBY_MOUNTS_HEADER + $echo "{(u_char *) \"$path\", (u_char *) \"$path\"," >> $NXT_RUBY_MOUNTS_PATH + $echo "(u_char *) \"bind\", NXT_MS_BIND | NXT_MS_REC, NULL}," >> $NXT_RUBY_MOUNTS_PATH done -$echo "};" >> $NXT_RUBY_MOUNTS_HEADER +$echo "};" >> $NXT_RUBY_MOUNTS_PATH $echo " + Ruby module: ${NXT_RUBY_MODULE}.unit.so" @@ -200,7 +202,8 @@ for nxt_src in $NXT_RUBY_MODULE_SRCS; do $NXT_BUILD_DIR/$nxt_obj: $nxt_src $NXT_VERSION_H mkdir -p $NXT_BUILD_DIR/src/ruby - \$(CC) -c \$(CFLAGS) \$(NXT_INCS) $NXT_RUBY_INCPATH \\ + \$(CC) -c \$(CFLAGS) -DNXT_RUBY_MOUNTS_H=\"$NXT_RUBY_MOUNTS_HEADER\" \\ + \$(NXT_INCS) $NXT_RUBY_INCPATH \\ $nxt_dep_flags \\ -o $NXT_BUILD_DIR/$nxt_obj $nxt_src $nxt_dep_post diff --git a/src/nxt_java.c b/src/nxt_java.c index c7471509..1f8864bd 100644 --- a/src/nxt_java.c +++ b/src/nxt_java.c @@ -26,7 +26,8 @@ #include "java/nxt_jni_URLClassLoader.h" #include "nxt_jars.h" -#include "nxt_java_mounts.h" + +#include NXT_JAVA_MOUNTS_H static nxt_int_t nxt_java_setup(nxt_task_t *task, nxt_process_t *process, nxt_common_app_conf_t *conf); diff --git a/src/nxt_python_wsgi.c b/src/nxt_python_wsgi.c index 7e8d1d79..c4b7702e 100644 --- a/src/nxt_python_wsgi.c +++ b/src/nxt_python_wsgi.c @@ -18,7 +18,8 @@ #include #include #include -#include + +#include NXT_PYTHON_MOUNTS_H /* * According to "PEP 3333 / A Note On String Types" @@ -39,11 +40,6 @@ */ -#define _NXT_PYTHON_MOUNTS(major, minor) \ - nxt_python ## major ## minor ## _mounts - -#define NXT_PYTHON_MOUNTS(major, minor) _NXT_PYTHON_MOUNTS(major, minor) - #if PY_MAJOR_VERSION == 3 #define NXT_PYTHON_BYTES_TYPE "bytestring" @@ -123,8 +119,8 @@ NXT_EXPORT nxt_app_module_t nxt_app_module = { compat, nxt_string("python"), PY_VERSION, - NXT_PYTHON_MOUNTS(PY_MAJOR_VERSION, PY_MINOR_VERSION), - nxt_nitems(NXT_PYTHON_MOUNTS(PY_MAJOR_VERSION, PY_MINOR_VERSION)), + nxt_python_mounts, + nxt_nitems(nxt_python_mounts), NULL, nxt_python_start, }; diff --git a/src/ruby/nxt_ruby.c b/src/ruby/nxt_ruby.c index 9c4126f6..743bf646 100644 --- a/src/ruby/nxt_ruby.c +++ b/src/ruby/nxt_ruby.c @@ -7,7 +7,8 @@ #include #include -#include + +#include NXT_RUBY_MOUNTS_H #define NXT_RUBY_RACK_API_VERSION_MAJOR 1 -- cgit From 2b53c7bbbd518131b46867343caaad18534ebd8f Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Wed, 5 Aug 2020 14:55:34 +0300 Subject: Fixed nxt_conn_accept_alloc() behavior in low memory conditions. Earlier, if nxt_mp_create() failed to allocate memory while accepting a new connection, the resulting NULL was subsequently passed to nxt_mp_destroy(), crashing the process. 
More, if nxt_mp_create() was successful but nxt_sockaddr_cache_alloc() failed, the connection object wasn't destroyed properly, leaving the connection counter in an inconsistent state. Repeated, this condition lowered the connection capacity of the process and could eventually prevent it from accepting connections altogether. --- src/nxt_conn_accept.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/nxt_conn_accept.c b/src/nxt_conn_accept.c index 6a89840c..77c44c58 100644 --- a/src/nxt_conn_accept.c +++ b/src/nxt_conn_accept.c @@ -98,7 +98,9 @@ nxt_conn_accept_alloc(nxt_task_t *task, nxt_listen_event_t *lev) if (nxt_fast_path(mp != NULL)) { c = nxt_conn_create(mp, lev->socket.task); if (nxt_slow_path(c == NULL)) { - goto fail; + nxt_mp_destroy(mp); + + return NULL; } c->socket.read_work_queue = lev->socket.read_work_queue; @@ -109,11 +111,9 @@ nxt_conn_accept_alloc(nxt_task_t *task, nxt_listen_event_t *lev) lev->next = c; return c; } - } - fail: - - nxt_mp_destroy(mp); + nxt_conn_free(task, c); + } } return NULL; -- cgit From b0ff245ca8609686a42025af6241279e2b5d2163 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Wed, 5 Aug 2020 16:11:20 +0300 Subject: Improved mkstemp() error reporting. The invocation parameters should be logged as well, notably the path of the file that is failed to be created. Also, log level changed to ALERT as it's quite critical error. --- src/nxt_h1proto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nxt_h1proto.c b/src/nxt_h1proto.c index 859ed02f..b34be019 100644 --- a/src/nxt_h1proto.c +++ b/src/nxt_h1proto.c @@ -906,7 +906,7 @@ nxt_h1p_request_body_read(nxt_task_t *task, nxt_http_request_t *r) b->file->fd = mkstemp((char *) tmp_name.start); if (nxt_slow_path(b->file->fd == -1)) { - nxt_log(task, NXT_LOG_ERR, "mkstemp() failed %E", nxt_errno); + nxt_alert(task, "mkstemp(%s) failed %E", tmp_name.start, nxt_errno); status = NXT_HTTP_INTERNAL_SERVER_ERROR; goto error; -- cgit From 78fd04adcf398a00549c4912f68eff77c94ab6c0 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Fri, 7 Aug 2020 15:06:18 +0300 Subject: Fixing listen event connection leakage. A connection object is allocated in advance for each listen event object to be used for the established connection. This connection needs to be freed when the listen event is destroyed. --- src/nxt_router.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/nxt_router.c b/src/nxt_router.c index d4d037e1..b3e326d0 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -3178,6 +3178,10 @@ nxt_router_listen_event_release(nxt_task_t *task, nxt_listen_event_t *lev, nxt_debug(task, "listen event count: %D", lev->count); if (--lev->count == 0) { + if (lev->next != NULL) { + nxt_conn_free(task, lev->next); + } + nxt_free(lev); } -- cgit From 375cbc2cc4aa379727b7e0f02a257e1d8e35ce4f Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Fri, 7 Aug 2020 15:06:24 +0300 Subject: Node.js: correct port data memory release. According to libuv documentation, uv_poll_t memory should be released in a callback function passed to uv_close(). Otherwise, the Node.js application process may crash at exit. 
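The libuv contract in isolation: the handle's memory must stay valid past uv_close() and may only be reclaimed from the close callback. A minimal sketch with illustrative names, not Unit's types:

    #include <stdlib.h>
    #include <uv.h>

    typedef struct {
        uv_poll_t  poll;      /* handle embedded in the per-port context */
        int        app_data;
    } poll_ctx_t;

    static void
    on_poll_closed(uv_handle_t *handle)
    {
        /* Only here is it safe to free the memory embedding the handle. */
        free(handle->data);
    }

    static void
    stop_polling(poll_ctx_t *ctx)
    {
        uv_poll_stop(&ctx->poll);

        ctx->poll.data = ctx;
        uv_close((uv_handle_t *) &ctx->poll, on_poll_closed);
        /* Freeing ctx right here instead would let libuv touch freed
         * memory during loop teardown. */
    }
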
--- src/nodejs/unit-http/unit.cpp | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/nodejs/unit-http/unit.cpp b/src/nodejs/unit-http/unit.cpp index 975174d4..555b21fa 100644 --- a/src/nodejs/unit-http/unit.cpp +++ b/src/nodejs/unit-http/unit.cpp @@ -13,6 +13,8 @@ #include +static void delete_port_data(uv_handle_t* handle); + napi_ref Unit::constructor_; @@ -418,7 +420,8 @@ Unit::remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) if (node_ctx->port_id == *port_id) { uv_poll_stop(&node_ctx->poll); - delete node_ctx; + node_ctx->poll.data = node_ctx; + uv_close((uv_handle_t *) &node_ctx->poll, delete_port_data); ctx->data = NULL; } @@ -428,6 +431,17 @@ Unit::remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) } +static void +delete_port_data(uv_handle_t* handle) +{ + nxt_nodejs_ctx_t *node_ctx; + + node_ctx = (nxt_nodejs_ctx_t *) handle->data; + + delete node_ctx; +} + + void Unit::quit_cb(nxt_unit_ctx_t *ctx) { -- cgit From 0d7a6885a32e01a1d6986f9b77a57f101cd3e8d9 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Sat, 8 Aug 2020 23:52:31 +0300 Subject: Configure: verifying the Ruby library path. An attempt to build a Ruby module for a custom Ruby installation that has the same major version as the system Ruby may unexpectedly cause the use of the system Ruby library. This closes #449 issue on GitHub. --- auto/modules/ruby | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/auto/modules/ruby b/auto/modules/ruby index 5cead26b..f5384f98 100644 --- a/auto/modules/ruby +++ b/auto/modules/ruby @@ -70,25 +70,31 @@ if /bin/sh -c "$NXT_RUBY -v" >> $NXT_AUTOCONF_ERR 2>&1; then NXT_RUBY_LIBNAME=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["RUBY_SO_NAME"])'` NXT_RUBY_LIBSCONF=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["LIBS"])'` + NXT_RUBY_LIBPATH=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["libdir"])'` NXT_RUBY_LIBS="-l$NXT_RUBY_LIBNAME $NXT_RUBY_LIBSCONF" nxt_feature="Ruby library" nxt_feature_name="" - nxt_feature_run=no + nxt_feature_run=value nxt_feature_incs="${NXT_RUBY_INCPATH}" nxt_feature_libs="${NXT_RUBY_LIBS}" nxt_feature_test=" #include int main() { + static const char *argv[3] = { + \"NGINX_Unit\", \"-rrbconfig\", + \"-eprint RbConfig::CONFIG['libdir']\" + }; + + RUBY_INIT_STACK; ruby_init(); - return ruby_cleanup(0); + return ruby_run_node(ruby_options(3, (char **) argv)); }" . auto/feature - if [ $nxt_found = no ]; then - NXT_RUBY_LIBPATH=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["libdir"])'` + if [ "$nxt_feature_value" != "$NXT_RUBY_LIBPATH" ]; then NXT_RUBY_LIBS="-L$NXT_RUBY_LIBPATH -Wl,-rpath,${NXT_RUBY_LIBPATH} $NXT_RUBY_LIBS" nxt_feature="Ruby library in $NXT_RUBY_LIBPATH" -- cgit From 7e1d800f08f365dd3be7cd614d0fac01ca16ad28 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Sun, 9 Aug 2020 01:00:44 +0300 Subject: Ruby: removed unused variable from ./configure script. 
--- auto/modules/ruby | 1 - 1 file changed, 1 deletion(-) diff --git a/auto/modules/ruby b/auto/modules/ruby index f5384f98..7c3f8328 100644 --- a/auto/modules/ruby +++ b/auto/modules/ruby @@ -58,7 +58,6 @@ if /bin/sh -c "$NXT_RUBY -v" >> $NXT_AUTOCONF_ERR 2>&1; then NXT_RUBY_RUBYHDRDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["rubyhdrdir"])'` NXT_RUBY_ARCHHDRDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["rubyarchhdrdir"])'` - NXT_RUBY_SITEARCHDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["sitearchhdrdir"])'` NXT_RUBY_SITEDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["sitedir"])'` NXT_RUBY_LIBDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["rubylibdir"])'` NXT_RUBY_TOPDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["topdir"])'` -- cgit From 317fabc83e7ef7c423a64388de67ea851115b1b9 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Sun, 9 Aug 2020 01:00:44 +0300 Subject: Ruby: simplified commands in ./configure script. There is no reason to use printf instead of just print. No functional changes. --- auto/modules/ruby | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/auto/modules/ruby b/auto/modules/ruby index 7c3f8328..bf628935 100644 --- a/auto/modules/ruby +++ b/auto/modules/ruby @@ -56,20 +56,20 @@ nxt_found=no if /bin/sh -c "$NXT_RUBY -v" >> $NXT_AUTOCONF_ERR 2>&1; then - NXT_RUBY_RUBYHDRDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["rubyhdrdir"])'` - NXT_RUBY_ARCHHDRDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["rubyarchhdrdir"])'` - NXT_RUBY_SITEDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["sitedir"])'` - NXT_RUBY_LIBDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["rubylibdir"])'` - NXT_RUBY_TOPDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["topdir"])'` - NXT_RUBY_PREFIXDIR=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["rubylibprefix"])'` + NXT_RUBY_RUBYHDRDIR=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["rubyhdrdir"]'` + NXT_RUBY_ARCHHDRDIR=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["rubyarchhdrdir"]'` + NXT_RUBY_SITEDIR=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["sitedir"]'` + NXT_RUBY_LIBDIR=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["rubylibdir"]'` + NXT_RUBY_TOPDIR=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["topdir"]'` + NXT_RUBY_PREFIXDIR=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["rubylibprefix"]'` NXT_RUBY_GEMDIR=`gem environment gemdir` NXT_RUBY_GEMPATH=`gem environment gempath` NXT_RUBY_INCPATH="-I$NXT_RUBY_ARCHHDRDIR -I$NXT_RUBY_RUBYHDRDIR" - NXT_RUBY_LIBNAME=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["RUBY_SO_NAME"])'` - NXT_RUBY_LIBSCONF=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["LIBS"])'` - NXT_RUBY_LIBPATH=`$NXT_RUBY -r rbconfig -e 'printf("%s",RbConfig::CONFIG["libdir"])'` + NXT_RUBY_LIBNAME=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["RUBY_SO_NAME"]'` + NXT_RUBY_LIBSCONF=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["LIBS"]'` + NXT_RUBY_LIBPATH=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["libdir"]'` NXT_RUBY_LIBS="-l$NXT_RUBY_LIBNAME $NXT_RUBY_LIBSCONF" nxt_feature="Ruby library" -- cgit From 91280b4c0b3ec0721a8f3ba09a1cc9f5ca56c238 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Sun, 9 Aug 2020 01:00:44 +0300 Subject: Ruby: fixed gem mount paths. The gem paths must depend on the specified interpreter. Also, gemdir looks redundant as it's already included in Gem.default_path(). 
--- auto/modules/ruby | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/auto/modules/ruby b/auto/modules/ruby index bf628935..c1444f07 100644 --- a/auto/modules/ruby +++ b/auto/modules/ruby @@ -62,8 +62,8 @@ if /bin/sh -c "$NXT_RUBY -v" >> $NXT_AUTOCONF_ERR 2>&1; then NXT_RUBY_LIBDIR=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["rubylibdir"]'` NXT_RUBY_TOPDIR=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["topdir"]'` NXT_RUBY_PREFIXDIR=`$NXT_RUBY -rrbconfig -e 'print RbConfig::CONFIG["rubylibprefix"]'` - NXT_RUBY_GEMDIR=`gem environment gemdir` - NXT_RUBY_GEMPATH=`gem environment gempath` + + NXT_RUBY_GEMPATH=`$NXT_RUBY -rrubygems -e 'print Gem.default_path().join(":")'` NXT_RUBY_INCPATH="-I$NXT_RUBY_ARCHHDRDIR -I$NXT_RUBY_RUBYHDRDIR" @@ -163,8 +163,6 @@ static const nxt_fs_mount_t nxt_ruby_mounts[] = { (u_char *) "bind", NXT_MS_BIND | NXT_MS_REC, NULL}, {(u_char *) "$NXT_RUBY_LIBDIR", (u_char *) "$NXT_RUBY_LIBDIR", (u_char *) "bind", NXT_MS_BIND | NXT_MS_REC, NULL}, - {(u_char *) "$NXT_RUBY_GEMDIR", (u_char *) "$NXT_RUBY_GEMDIR", - (u_char *) "bind", NXT_MS_BIND | NXT_MS_REC, NULL}, {(u_char *) "$NXT_RUBY_TOPDIR", (u_char *) "$NXT_RUBY_TOPDIR", (u_char *) "bind", NXT_MS_BIND | NXT_MS_REC, NULL}, {(u_char *) "$NXT_RUBY_PREFIXDIR", (u_char *) "$NXT_RUBY_PREFIXDIR", -- cgit From 0f3abebd019130a6e4e69e53345f403ba802edfb Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Sun, 9 Aug 2020 10:22:05 +0300 Subject: Fixing connection remote sockaddr leakage. Earlier patch 1bf971f83571 fixes connection leakage. But connection free requires separate remote sockaddr release. --- src/nxt_router.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/nxt_router.c b/src/nxt_router.c index b3e326d0..8b3f3daf 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -3177,8 +3177,12 @@ nxt_router_listen_event_release(nxt_task_t *task, nxt_listen_event_t *lev, nxt_debug(task, "listen event count: %D", lev->count); + engine = task->thread->engine; + if (--lev->count == 0) { if (lev->next != NULL) { + nxt_sockaddr_cache_free(engine, lev->next); + nxt_conn_free(task, lev->next); } @@ -3189,8 +3193,6 @@ nxt_router_listen_event_release(nxt_task_t *task, nxt_listen_event_t *lev, nxt_router_conf_release(task, joint); } - engine = task->thread->engine; - if (engine->shutdown && nxt_queue_is_empty(&engine->joints)) { nxt_thread_exit(task->thread); } -- cgit From 3a721e1d96720505d4d6638e77d2c296d962519c Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Sun, 9 Aug 2020 10:26:19 +0300 Subject: Fixing leaked configuration objects. If there are no listen sockets, the router configuration usage counter remains 0 and never decreases. The only moment to release a configuration is right after a configuration update. 
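The shape of the fix, reduced to its essentials with hypothetical names (the real code reads the counter under the router spinlock and then releases the routes, the access log, and the memory pool):

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct {
        unsigned   count;       /* listen sockets using this config */
        void      *resources;
    } conf_t;

    static pthread_mutex_t  router_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    conf_update_done(conf_t *conf)
    {
        unsigned  count;

        pthread_mutex_lock(&router_lock);
        count = conf->count;
        pthread_mutex_unlock(&router_lock);

        /* With no listen sockets the counter never moves, so the update
         * path itself is the only place the configuration can be freed. */
        if (count == 0) {
            free(conf->resources);
            free(conf);
        }
    }
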
--- src/nxt_router.c | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/src/nxt_router.c b/src/nxt_router.c index 8b3f3daf..758310a9 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -1208,13 +1208,39 @@ nxt_router_conf_wait(nxt_task_t *task, void *obj, void *data) static void nxt_router_conf_ready(nxt_task_t *task, nxt_router_temp_conf_t *tmcf) { - nxt_debug(task, "temp conf count:%D", tmcf->count); + uint32_t count; + nxt_router_conf_t *rtcf; + nxt_thread_spinlock_t *lock; - if (--tmcf->count == 0) { - nxt_router_conf_send(task, tmcf, NXT_PORT_MSG_RPC_READY_LAST); + nxt_debug(task, "temp conf %p count: %D", tmcf, tmcf->count); - nxt_mp_destroy(tmcf->mem_pool); + if (--tmcf->count > 0) { + return; } + + nxt_router_conf_send(task, tmcf, NXT_PORT_MSG_RPC_READY_LAST); + + rtcf = tmcf->router_conf; + + lock = &rtcf->router->lock; + + nxt_thread_spin_lock(lock); + + count = rtcf->count; + + nxt_thread_spin_unlock(lock); + + nxt_debug(task, "rtcf %p: %D", rtcf, count); + + if (count == 0) { + nxt_http_routes_cleanup(task, rtcf->routes); + + nxt_router_access_log_release(task, lock, rtcf->access_log); + + nxt_mp_destroy(rtcf->mem_pool); + } + + nxt_mp_destroy(tmcf->mem_pool); } -- cgit From ec3389b63bd7a9159d2be4a2863140f75095c7d3 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:19:55 +0300 Subject: Libunit refactoring: port management. - Changed the port management callbacks to notifications, which e. g. avoids the need to call the libunit function - Added context and library instance reference counts for a safer resource release - Added the router main port initialization --- go/nxt_cgo_lib.c | 13 +- src/nodejs/unit-http/unit.cpp | 46 ++-- src/nodejs/unit-http/unit.h | 2 +- src/nxt_application.c | 14 +- src/nxt_external.c | 14 +- src/nxt_process.c | 2 +- src/nxt_unit.c | 580 +++++++++++++++++++++++------------------- src/nxt_unit.h | 44 +--- 8 files changed, 379 insertions(+), 336 deletions(-) diff --git a/go/nxt_cgo_lib.c b/go/nxt_cgo_lib.c index a4fef9ea..1bb38f3c 100644 --- a/go/nxt_cgo_lib.c +++ b/go/nxt_cgo_lib.c @@ -14,7 +14,7 @@ static void nxt_cgo_request_handler(nxt_unit_request_info_t *req); static nxt_cgo_str_t *nxt_cgo_str_init(nxt_cgo_str_t *dst, nxt_unit_sptr_t *sptr, uint32_t length); static int nxt_cgo_add_port(nxt_unit_ctx_t *, nxt_unit_port_t *port); -static void nxt_cgo_remove_port(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id); +static void nxt_cgo_remove_port(nxt_unit_t *, nxt_unit_port_t *port); static ssize_t nxt_cgo_port_send(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id, const void *buf, size_t buf_size, const void *oob, size_t oob_size); static ssize_t nxt_cgo_port_recv(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id, @@ -108,16 +108,17 @@ nxt_cgo_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) nxt_go_add_port(port->id.pid, port->id.id, port->in_fd, port->out_fd); - return nxt_unit_add_port(ctx, port); + port->in_fd = -1; + port->out_fd = -1; + + return NXT_UNIT_OK; } static void -nxt_cgo_remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) +nxt_cgo_remove_port(nxt_unit_t *unit, nxt_unit_port_t *port) { - nxt_go_remove_port(port_id->pid, port_id->id); - - nxt_unit_remove_port(ctx, port_id); + nxt_go_remove_port(port->id.pid, port->id.id); } diff --git a/src/nodejs/unit-http/unit.cpp b/src/nodejs/unit-http/unit.cpp index 555b21fa..468acf96 100644 --- a/src/nodejs/unit-http/unit.cpp +++ b/src/nodejs/unit-http/unit.cpp @@ -18,7 +18,8 @@ static void delete_port_data(uv_handle_t* 
handle); napi_ref Unit::constructor_; -struct nxt_nodejs_ctx_t { +struct port_data_t { + nxt_unit_ctx_t *ctx; nxt_unit_port_id_t port_id; uv_poll_t poll; }; @@ -360,8 +361,8 @@ Unit::add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) int err; Unit *obj; uv_loop_t *loop; + port_data_t *data; napi_status status; - nxt_nodejs_ctx_t *node_ctx; if (port->in_fd != -1) { obj = reinterpret_cast(ctx->unit->data); @@ -378,27 +379,28 @@ Unit::add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) return NXT_UNIT_ERROR; } - node_ctx = new nxt_nodejs_ctx_t; + data = new port_data_t; - err = uv_poll_init(loop, &node_ctx->poll, port->in_fd); + err = uv_poll_init(loop, &data->poll, port->in_fd); if (err < 0) { nxt_unit_warn(ctx, "Failed to init uv.poll"); return NXT_UNIT_ERROR; } - err = uv_poll_start(&node_ctx->poll, UV_READABLE, nxt_uv_read_callback); + err = uv_poll_start(&data->poll, UV_READABLE, nxt_uv_read_callback); if (err < 0) { nxt_unit_warn(ctx, "Failed to start uv.poll"); return NXT_UNIT_ERROR; } - ctx->data = node_ctx; + port->data = data; - node_ctx->port_id = port->id; - node_ctx->poll.data = ctx; + data->ctx = ctx; + data->port_id = port->id; + data->poll.data = ctx; } - return nxt_unit_add_port(ctx, port); + return NXT_UNIT_OK; } @@ -410,35 +412,31 @@ operator == (const nxt_unit_port_id_t &p1, const nxt_unit_port_id_t &p2) void -Unit::remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) +Unit::remove_port(nxt_unit_t *unit, nxt_unit_port_t *port) { - nxt_nodejs_ctx_t *node_ctx; + port_data_t *data; - if (ctx->data != NULL) { - node_ctx = (nxt_nodejs_ctx_t *) ctx->data; + if (port->data != NULL) { + data = (port_data_t *) port->data; - if (node_ctx->port_id == *port_id) { - uv_poll_stop(&node_ctx->poll); + if (data->port_id == port->id) { + uv_poll_stop(&data->poll); - node_ctx->poll.data = node_ctx; - uv_close((uv_handle_t *) &node_ctx->poll, delete_port_data); - - ctx->data = NULL; + data->poll.data = data; + uv_close((uv_handle_t *) &data->poll, delete_port_data); } } - - nxt_unit_remove_port(ctx, port_id); } static void delete_port_data(uv_handle_t* handle) { - nxt_nodejs_ctx_t *node_ctx; + port_data_t *data; - node_ctx = (nxt_nodejs_ctx_t *) handle->data; + data = (port_data_t *) handle->data; - delete node_ctx; + delete data; } diff --git a/src/nodejs/unit-http/unit.h b/src/nodejs/unit-http/unit.h index 18359118..07823c26 100644 --- a/src/nodejs/unit-http/unit.h +++ b/src/nodejs/unit-http/unit.h @@ -40,7 +40,7 @@ private: void shm_ack_handler(nxt_unit_ctx_t *ctx); static int add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port); - static void remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id); + static void remove_port(nxt_unit_t *unit, nxt_unit_port_t *port); static void quit_cb(nxt_unit_ctx_t *ctx); void quit(nxt_unit_ctx_t *ctx); diff --git a/src/nxt_application.c b/src/nxt_application.c index c331764f..372a88b4 100644 --- a/src/nxt_application.c +++ b/src/nxt_application.c @@ -1263,7 +1263,7 @@ nxt_app_parse_type(u_char *p, size_t length) nxt_int_t nxt_unit_default_init(nxt_task_t *task, nxt_unit_init_t *init) { - nxt_port_t *my_port, *main_port; + nxt_port_t *my_port, *main_port, *router_port; nxt_runtime_t *rt; nxt_memzero(init, sizeof(nxt_unit_init_t)); @@ -1275,6 +1275,11 @@ nxt_unit_default_init(nxt_task_t *task, nxt_unit_init_t *init) return NXT_ERROR; } + router_port = rt->port_by_type[NXT_PROCESS_ROUTER]; + if (nxt_slow_path(router_port == NULL)) { + return NXT_ERROR; + } + my_port = nxt_runtime_port_find(rt, nxt_pid, 0); if (nxt_slow_path(my_port == 
NULL)) { return NXT_ERROR; @@ -1289,6 +1294,13 @@ nxt_unit_default_init(nxt_task_t *task, nxt_unit_init_t *init) init->ready_stream = my_port->process->stream; + init->router_port.id.pid = router_port->pid; + init->router_port.id.id = router_port->id; + init->router_port.in_fd = -1; + init->router_port.out_fd = router_port->pair[1]; + + nxt_fd_blocking(task, router_port->pair[1]); + init->read_port.id.pid = my_port->pid; init->read_port.id.id = my_port->id; init->read_port.in_fd = my_port->pair[0]; diff --git a/src/nxt_external.c b/src/nxt_external.c index 6370a9c4..2471c812 100644 --- a/src/nxt_external.c +++ b/src/nxt_external.c @@ -69,7 +69,7 @@ nxt_external_start(nxt_task_t *task, nxt_process_data_t *data) nxt_str_t str; nxt_int_t rc; nxt_uint_t i, argc; - nxt_port_t *my_port, *main_port; + nxt_port_t *my_port, *main_port, *router_port; nxt_runtime_t *rt; nxt_conf_value_t *value; nxt_common_app_conf_t *conf; @@ -79,9 +79,12 @@ nxt_external_start(nxt_task_t *task, nxt_process_data_t *data) conf = data->app; main_port = rt->port_by_type[NXT_PROCESS_MAIN]; + router_port = rt->port_by_type[NXT_PROCESS_ROUTER]; my_port = nxt_runtime_port_find(rt, nxt_pid, 0); - if (nxt_slow_path(main_port == NULL || my_port == NULL)) { + if (nxt_slow_path(main_port == NULL || my_port == NULL + || router_port == NULL)) + { return NXT_ERROR; } @@ -90,6 +93,11 @@ nxt_external_start(nxt_task_t *task, nxt_process_data_t *data) return NXT_ERROR; } + rc = nxt_external_fd_no_cloexec(task, router_port->pair[1]); + if (nxt_slow_path(rc != NXT_OK)) { + return NXT_ERROR; + } + rc = nxt_external_fd_no_cloexec(task, my_port->pair[0]); if (nxt_slow_path(rc != NXT_OK)) { return NXT_ERROR; @@ -101,9 +109,11 @@ nxt_external_start(nxt_task_t *task, nxt_process_data_t *data) "%s;%uD;" "%PI,%ud,%d;" "%PI,%ud,%d;" + "%PI,%ud,%d;" "%d,%z,%Z", NXT_VERSION, my_port->process->stream, main_port->pid, main_port->id, main_port->pair[1], + router_port->pid, router_port->id, router_port->pair[1], my_port->pid, my_port->id, my_port->pair[0], 2, conf->shm_limit); diff --git a/src/nxt_process.c b/src/nxt_process.c index 215c529c..5a01c21e 100644 --- a/src/nxt_process.c +++ b/src/nxt_process.c @@ -61,7 +61,7 @@ nxt_bool_t nxt_proc_conn_matrix[NXT_PROCESS_MAX][NXT_PROCESS_MAX] = { { 1, 0, 0, 0, 0 }, { 1, 0, 0, 1, 0 }, { 1, 0, 1, 0, 1 }, - { 1, 0, 0, 0, 0 }, + { 1, 0, 0, 1, 0 }, }; nxt_bool_t nxt_proc_remove_notify_matrix[NXT_PROCESS_MAX][NXT_PROCESS_MAX] = { diff --git a/src/nxt_unit.c b/src/nxt_unit.c index 89998e3f..8c964c7a 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -38,16 +38,19 @@ typedef struct nxt_unit_websocket_frame_impl_s nxt_unit_websocket_frame_impl_t; static nxt_unit_impl_t *nxt_unit_create(nxt_unit_init_t *init); static int nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, void *data); +nxt_inline void nxt_unit_ctx_use(nxt_unit_ctx_impl_t *ctx_impl); +nxt_inline void nxt_unit_ctx_release(nxt_unit_ctx_impl_t *ctx_impl); +nxt_inline void nxt_unit_lib_use(nxt_unit_impl_t *lib); +nxt_inline void nxt_unit_lib_release(nxt_unit_impl_t *lib); nxt_inline void nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head, nxt_unit_mmap_buf_t *mmap_buf); nxt_inline void nxt_unit_mmap_buf_insert_tail(nxt_unit_mmap_buf_t **prev, nxt_unit_mmap_buf_t *mmap_buf); nxt_inline void nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf); static int nxt_unit_read_env(nxt_unit_port_t *ready_port, - nxt_unit_port_t *read_port, int *log_fd, uint32_t *stream, - uint32_t *shm_limit); -static int nxt_unit_ready(nxt_unit_ctx_t *ctx, 
nxt_unit_port_id_t *port_id, - uint32_t stream); + nxt_unit_port_t *router_port, nxt_unit_port_t *read_port, + int *log_fd, uint32_t *stream, uint32_t *shm_limit); +static int nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream); static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, @@ -96,8 +99,8 @@ static int nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, static int nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd); static void nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps); -static void nxt_unit_process_use(nxt_unit_ctx_t *ctx, - nxt_unit_process_t *process, int i); +nxt_inline void nxt_unit_process_use(nxt_unit_process_t *process); +nxt_inline void nxt_unit_process_release(nxt_unit_process_t *process); static void nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps); static nxt_port_mmap_header_t *nxt_unit_get_incoming_mmap(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, uint32_t id); @@ -110,28 +113,35 @@ static void nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, nxt_port_mmap_header_t *hdr, void *start, uint32_t size); static int nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid); -static nxt_unit_process_t *nxt_unit_process_get(nxt_unit_ctx_t *ctx, +static nxt_unit_process_t *nxt_unit_process_get(nxt_unit_impl_t *lib, pid_t pid); -static nxt_unit_process_t *nxt_unit_process_find(nxt_unit_ctx_t *ctx, +static nxt_unit_process_t *nxt_unit_process_find(nxt_unit_impl_t *lib, pid_t pid, int remove); static nxt_unit_process_t *nxt_unit_process_pop_first(nxt_unit_impl_t *lib); static void nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf); +static void nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl); static int nxt_unit_create_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int *fd); static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, nxt_unit_port_id_t *new_port, int fd); -static void nxt_unit_remove_port_unsafe(nxt_unit_ctx_t *ctx, - nxt_unit_port_id_t *port_id, nxt_unit_port_t *r_port, +static int nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port); +static int nxt_unit_remove_port(nxt_unit_impl_t *lib, + nxt_unit_port_id_t *port_id); +static int nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, + nxt_unit_port_id_t *port_id, nxt_unit_port_t **r_port, nxt_unit_process_t **process); -static void nxt_unit_remove_process(nxt_unit_ctx_t *ctx, +static void nxt_unit_remove_pid(nxt_unit_impl_t *lib, pid_t pid); +static void nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process); - -static ssize_t nxt_unit_port_send_default(nxt_unit_ctx_t *ctx, +static void nxt_unit_quit(nxt_unit_ctx_t *ctx); +static ssize_t nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, const void *buf, size_t buf_size, const void *oob, size_t oob_size); +static ssize_t nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd, + const void *buf, size_t buf_size, const void *oob, size_t oob_size); static ssize_t nxt_unit_port_recv_default(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, void *buf, size_t buf_size, void *oob, size_t oob_size); @@ -233,6 +243,8 @@ struct nxt_unit_read_buf_s { struct nxt_unit_ctx_impl_s { nxt_unit_ctx_t ctx; + nxt_atomic_t use_count; + pthread_mutex_t mutex; nxt_unit_port_id_t read_port_id; @@ -269,6 +281,8 @@ struct nxt_unit_impl_s { nxt_unit_t unit; nxt_unit_callbacks_t callbacks; + nxt_atomic_t use_count; + uint32_t request_data_size; uint32_t shm_mmap_limit; @@ -277,7 +291,7 @@ struct 
nxt_unit_impl_s { nxt_lvlhsh_t processes; /* of nxt_unit_process_t */ nxt_lvlhsh_t ports; /* of nxt_unit_port_impl_t */ - nxt_unit_port_id_t ready_port_id; + nxt_unit_port_id_t router_port_id; nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */ @@ -341,7 +355,7 @@ nxt_unit_init(nxt_unit_init_t *init) uint32_t ready_stream, shm_limit; nxt_unit_ctx_t *ctx; nxt_unit_impl_t *lib; - nxt_unit_port_t ready_port, read_port; + nxt_unit_port_t ready_port, router_port, read_port; lib = nxt_unit_create(init); if (nxt_slow_path(lib == NULL)) { @@ -354,17 +368,20 @@ nxt_unit_init(nxt_unit_init_t *init) { ready_port = init->ready_port; ready_stream = init->ready_stream; + router_port = init->router_port; read_port = init->read_port; lib->log_fd = init->log_fd; nxt_unit_port_id_init(&ready_port.id, ready_port.id.pid, ready_port.id.id); + nxt_unit_port_id_init(&router_port.id, router_port.id.pid, + router_port.id.id); nxt_unit_port_id_init(&read_port.id, read_port.id.pid, read_port.id.id); } else { - rc = nxt_unit_read_env(&ready_port, &read_port, &lib->log_fd, - &ready_stream, &shm_limit); + rc = nxt_unit_read_env(&ready_port, &router_port, &read_port, + &lib->log_fd, &ready_stream, &shm_limit); if (nxt_slow_path(rc != NXT_UNIT_OK)) { goto fail; } @@ -380,14 +397,16 @@ nxt_unit_init(nxt_unit_init_t *init) lib->pid = read_port.id.pid; ctx = &lib->main_ctx.ctx; - rc = lib->callbacks.add_port(ctx, &ready_port); - if (rc != NXT_UNIT_OK) { - nxt_unit_alert(NULL, "failed to add ready_port"); + rc = nxt_unit_add_port(ctx, &router_port); + if (nxt_slow_path(rc != NXT_UNIT_OK)) { + nxt_unit_alert(NULL, "failed to add router_port"); goto fail; } - rc = lib->callbacks.add_port(ctx, &read_port); + lib->router_port_id = router_port.id; + + rc = nxt_unit_add_port(ctx, &read_port); if (nxt_slow_path(rc != NXT_UNIT_OK)) { nxt_unit_alert(NULL, "failed to add read_port"); @@ -395,15 +414,16 @@ nxt_unit_init(nxt_unit_init_t *init) } lib->main_ctx.read_port_id = read_port.id; - lib->ready_port_id = ready_port.id; - rc = nxt_unit_ready(ctx, &ready_port.id, ready_stream); + rc = nxt_unit_ready(ctx, ready_port.out_fd, ready_stream); if (nxt_slow_path(rc != NXT_UNIT_OK)) { nxt_unit_alert(NULL, "failed to send READY message"); goto fail; } + close(ready_port.out_fd); + return ctx; fail: @@ -450,6 +470,8 @@ nxt_unit_create(nxt_unit_init_t *init) nxt_queue_init(&lib->contexts); + lib->use_count = 0; + rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data); if (nxt_slow_path(rc != NXT_UNIT_OK)) { goto fail; @@ -463,26 +485,6 @@ nxt_unit_create(nxt_unit_init_t *init) goto fail; } - if (cb->add_port == NULL) { - cb->add_port = nxt_unit_add_port; - } - - if (cb->remove_port == NULL) { - cb->remove_port = nxt_unit_remove_port; - } - - if (cb->remove_pid == NULL) { - cb->remove_pid = nxt_unit_remove_pid; - } - - if (cb->quit == NULL) { - cb->quit = nxt_unit_quit; - } - - if (cb->port_send == NULL) { - cb->port_send = nxt_unit_port_send_default; - } - if (cb->port_recv == NULL) { cb->port_recv = nxt_unit_port_recv_default; } @@ -506,8 +508,6 @@ nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, ctx_impl->ctx.data = data; ctx_impl->ctx.unit = &lib->unit; - nxt_queue_insert_tail(&lib->contexts, &ctx_impl->link); - rc = pthread_mutex_init(&ctx_impl->mutex, NULL); if (nxt_slow_path(rc != 0)) { nxt_unit_alert(NULL, "failed to initialize mutex (%d)", rc); @@ -515,6 +515,12 @@ nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, return NXT_UNIT_ERROR; } + nxt_unit_lib_use(lib); + + 
nxt_queue_insert_tail(&lib->contexts, &ctx_impl->link); + + ctx_impl->use_count = 1; + nxt_queue_init(&ctx_impl->free_req); nxt_queue_init(&ctx_impl->free_ws); nxt_queue_init(&ctx_impl->active_req); @@ -540,6 +546,62 @@ nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, } +nxt_inline void +nxt_unit_ctx_use(nxt_unit_ctx_impl_t *ctx_impl) +{ + nxt_atomic_fetch_add(&ctx_impl->use_count, 1); +} + + +nxt_inline void +nxt_unit_ctx_release(nxt_unit_ctx_impl_t *ctx_impl) +{ + long c; + + c = nxt_atomic_fetch_add(&ctx_impl->use_count, -1); + + if (c == 1) { + nxt_unit_ctx_free(ctx_impl); + } +} + + +nxt_inline void +nxt_unit_lib_use(nxt_unit_impl_t *lib) +{ + nxt_atomic_fetch_add(&lib->use_count, 1); +} + + +nxt_inline void +nxt_unit_lib_release(nxt_unit_impl_t *lib) +{ + long c; + nxt_unit_process_t *process; + + c = nxt_atomic_fetch_add(&lib->use_count, -1); + + if (c == 1) { + for ( ;; ) { + pthread_mutex_lock(&lib->mutex); + + process = nxt_unit_process_pop_first(lib); + if (process == NULL) { + pthread_mutex_unlock(&lib->mutex); + + break; + } + + nxt_unit_remove_process(lib, process); + } + + pthread_mutex_destroy(&lib->mutex); + + free(lib); + } +} + + nxt_inline void nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head, nxt_unit_mmap_buf_t *mmap_buf) @@ -585,15 +647,16 @@ nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf) static int -nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *read_port, - int *log_fd, uint32_t *stream, uint32_t *shm_limit) +nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *router_port, + nxt_unit_port_t *read_port, int *log_fd, uint32_t *stream, + uint32_t *shm_limit) { int rc; - int ready_fd, read_fd; + int ready_fd, router_fd, read_fd; char *unit_init, *version_end; long version_length; - int64_t ready_pid, read_pid; - uint32_t ready_stream, ready_id, read_id; + int64_t ready_pid, router_pid, read_pid; + uint32_t ready_stream, router_id, ready_id, read_id; unit_init = getenv(NXT_UNIT_INIT_ENV); if (nxt_slow_path(unit_init == NULL)) { @@ -621,13 +684,15 @@ nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *read_port, "%"PRIu32";" "%"PRId64",%"PRIu32",%d;" "%"PRId64",%"PRIu32",%d;" + "%"PRId64",%"PRIu32",%d;" "%d,%"PRIu32, &ready_stream, &ready_pid, &ready_id, &ready_fd, + &router_pid, &router_id, &router_fd, &read_pid, &read_id, &read_fd, log_fd, shm_limit); - if (nxt_slow_path(rc != 9)) { + if (nxt_slow_path(rc != 12)) { nxt_unit_alert(NULL, "failed to scan variables: %d", rc); return NXT_UNIT_ERROR; @@ -639,6 +704,12 @@ nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *read_port, ready_port->out_fd = ready_fd; ready_port->data = NULL; + nxt_unit_port_id_init(&router_port->id, (pid_t) router_pid, router_id); + + router_port->in_fd = -1; + router_port->out_fd = router_fd; + router_port->data = NULL; + nxt_unit_port_id_init(&read_port->id, (pid_t) read_pid, read_id); read_port->in_fd = read_fd; @@ -652,8 +723,7 @@ nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *read_port, static int -nxt_unit_ready(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, - uint32_t stream) +nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream) { ssize_t res; nxt_port_msg_t msg; @@ -671,7 +741,7 @@ nxt_unit_ready(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, msg.mf = 0; msg.tracking = 0; - res = lib->callbacks.port_send(ctx, port_id, &msg, sizeof(msg), NULL, 0); + res = nxt_unit_sendmsg(ctx, ready_fd, &msg, sizeof(msg), NULL, 0); if (res != sizeof(msg)) { return NXT_UNIT_ERROR; } 
@@ -684,13 +754,12 @@ int nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, void *buf, size_t buf_size, void *oob, size_t oob_size) { - int rc; - pid_t pid; - struct cmsghdr *cm; - nxt_port_msg_t *port_msg; - nxt_unit_impl_t *lib; - nxt_unit_recv_msg_t recv_msg; - nxt_unit_callbacks_t *cb; + int rc; + pid_t pid; + struct cmsghdr *cm; + nxt_port_msg_t *port_msg; + nxt_unit_impl_t *lib; + nxt_unit_recv_msg_t recv_msg; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); @@ -749,14 +818,12 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, } } - cb = &lib->callbacks; - switch (port_msg->type) { case _NXT_PORT_MSG_QUIT: nxt_unit_debug(ctx, "#%"PRIu32": quit", port_msg->stream); - cb->quit(ctx); + nxt_unit_quit(ctx); rc = NXT_UNIT_OK; break; @@ -812,7 +879,7 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, nxt_unit_debug(ctx, "#%"PRIu32": remove_pid: %d", port_msg->stream, (int) pid); - cb->remove_pid(ctx, pid); + nxt_unit_remove_pid(lib, pid); rc = NXT_UNIT_OK; break; @@ -839,7 +906,7 @@ fail: } if (recv_msg.process != NULL) { - nxt_unit_process_use(ctx, recv_msg.process, -1); + nxt_unit_process_release(recv_msg.process); } return rc; @@ -850,7 +917,6 @@ static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) { int nb; - nxt_unit_impl_t *lib; nxt_unit_port_t new_port; nxt_port_msg_new_port_t *new_port_msg; @@ -894,9 +960,7 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) recv_msg->fd = -1; - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - - return lib->callbacks.add_port(ctx, &new_port); + return nxt_unit_add_port(ctx, &new_port); } @@ -1206,7 +1270,7 @@ nxt_unit_request_info_release(nxt_unit_request_info_t *req) * existence. 
*/ if (req_impl->process != NULL) { - nxt_unit_process_use(req->ctx, req_impl->process, -1); + nxt_unit_process_release(req_impl->process); req_impl->process = NULL; } @@ -1808,7 +1872,7 @@ nxt_unit_msg_get_process(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) pthread_mutex_lock(&lib->mutex); - recv_msg->process = nxt_unit_process_find(ctx, recv_msg->pid, 0); + recv_msg->process = nxt_unit_process_find(lib, recv_msg->pid, 0); pthread_mutex_unlock(&lib->mutex); @@ -1869,15 +1933,6 @@ nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf) } -typedef struct { - size_t len; - const char *str; -} nxt_unit_str_t; - - -#define nxt_unit_str(str) { nxt_length(str), str } - - int nxt_unit_request_is_websocket_handshake(nxt_unit_request_info_t *req) { @@ -2064,8 +2119,8 @@ nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream, (int) m.mmap_msg.chunk_id, (int) m.mmap_msg.size); - res = lib->callbacks.port_send(ctx, &mmap_buf->port_id, &m, sizeof(m), - NULL, 0); + res = nxt_unit_port_send(ctx, &mmap_buf->port_id, &m, sizeof(m), + NULL, 0); if (nxt_slow_path(res != sizeof(m))) { goto free_buf; } @@ -2114,10 +2169,10 @@ nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream, stream, (int) (sizeof(m.msg) + m.mmap_msg.size)); - res = lib->callbacks.port_send(ctx, &mmap_buf->port_id, - buf->start - sizeof(m.msg), - m.mmap_msg.size + sizeof(m.msg), - NULL, 0); + res = nxt_unit_port_send(ctx, &mmap_buf->port_id, + buf->start - sizeof(m.msg), + m.mmap_msg.size + sizeof(m.msg), + NULL, 0); if (nxt_slow_path(res != (ssize_t) (m.mmap_msg.size + sizeof(m.msg)))) { goto free_buf; } @@ -2689,8 +2744,8 @@ skip_response_send: msg.mf = 0; msg.tracking = 0; - (void) lib->callbacks.port_send(req->ctx, &req->response_port, - &msg, sizeof(msg), NULL, 0); + (void) nxt_unit_port_send(req->ctx, &req->response_port, + &msg, sizeof(msg), NULL, 0); nxt_unit_request_info_release(req); } @@ -3006,7 +3061,7 @@ nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) msg.mf = 0; msg.tracking = 0; - res = lib->callbacks.port_send(ctx, port_id, &msg, sizeof(msg), NULL, 0); + res = nxt_unit_port_send(ctx, port_id, &msg, sizeof(msg), NULL, 0); if (nxt_slow_path(res != sizeof(msg))) { return NXT_UNIT_ERROR; } @@ -3284,8 +3339,8 @@ nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int fd) */ memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int)); - res = lib->callbacks.port_send(ctx, port_id, &msg, sizeof(msg), - &cmsg, sizeof(cmsg)); + res = nxt_unit_port_send(ctx, port_id, &msg, sizeof(msg), + &cmsg, sizeof(cmsg)); if (nxt_slow_path(res != sizeof(msg))) { return NXT_UNIT_ERROR; } @@ -3382,7 +3437,7 @@ nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) pthread_mutex_lock(&lib->mutex); - process = nxt_unit_process_find(ctx, pid, 0); + process = nxt_unit_process_find(lib, pid, 0); pthread_mutex_unlock(&lib->mutex); @@ -3444,7 +3499,7 @@ nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) fail: - nxt_unit_process_use(ctx, process, -1); + nxt_unit_process_release(process); return rc; } @@ -3462,15 +3517,22 @@ nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps) } -static void -nxt_unit_process_use(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, int i) +nxt_inline void +nxt_unit_process_use(nxt_unit_process_t *process) +{ + nxt_atomic_fetch_add(&process->use_count, 1); +} + + +nxt_inline void +nxt_unit_process_release(nxt_unit_process_t *process) { long c; - c = nxt_atomic_fetch_add(&process->use_count, i); + c = nxt_atomic_fetch_add(&process->use_count, -1); - if (i < 0 && c == -i) { 
- nxt_unit_debug(ctx, "destroy process #%d", (int) process->pid); + if (c == 1) { + nxt_unit_debug(NULL, "destroy process #%d", (int) process->pid); nxt_unit_mmaps_destroy(&process->incoming); nxt_unit_mmaps_destroy(&process->outgoing); @@ -3727,7 +3789,7 @@ nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid) msg.mf = 0; msg.tracking = 0; - res = lib->callbacks.port_send(ctx, &port_id, &msg, sizeof(msg), NULL, 0); + res = nxt_unit_port_send(ctx, &port_id, &msg, sizeof(msg), NULL, 0); if (nxt_slow_path(res != sizeof(msg))) { return NXT_UNIT_ERROR; } @@ -3772,26 +3834,23 @@ nxt_unit_process_lhq_pid(nxt_lvlhsh_query_t *lhq, pid_t *pid) static nxt_unit_process_t * -nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid) +nxt_unit_process_get(nxt_unit_impl_t *lib, pid_t pid) { - nxt_unit_impl_t *lib; nxt_unit_process_t *process; nxt_lvlhsh_query_t lhq; - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - nxt_unit_process_lhq_pid(&lhq, &pid); if (nxt_lvlhsh_find(&lib->processes, &lhq) == NXT_OK) { process = lhq.value; - nxt_unit_process_use(ctx, process, 1); + nxt_unit_process_use(process); return process; } process = malloc(sizeof(nxt_unit_process_t)); if (nxt_slow_path(process == NULL)) { - nxt_unit_warn(ctx, "failed to allocate process for #%d", (int) pid); + nxt_unit_alert(NULL, "failed to allocate process for #%d", (int) pid); return NULL; } @@ -3815,7 +3874,7 @@ nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid) break; default: - nxt_unit_warn(ctx, "process %d insert failed", (int) pid); + nxt_unit_alert(NULL, "process %d insert failed", (int) pid); pthread_mutex_destroy(&process->outgoing.mutex); pthread_mutex_destroy(&process->incoming.mutex); @@ -3824,22 +3883,19 @@ nxt_unit_process_get(nxt_unit_ctx_t *ctx, pid_t pid) break; } - nxt_unit_process_use(ctx, process, 1); + nxt_unit_process_use(process); return process; } static nxt_unit_process_t * -nxt_unit_process_find(nxt_unit_ctx_t *ctx, pid_t pid, int remove) +nxt_unit_process_find(nxt_unit_impl_t *lib, pid_t pid, int remove) { int rc; - nxt_unit_impl_t *lib; nxt_unit_process_t *process; nxt_lvlhsh_query_t lhq; - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - nxt_unit_process_lhq_pid(&lhq, &pid); if (remove) { @@ -3853,7 +3909,7 @@ nxt_unit_process_find(nxt_unit_ctx_t *ctx, pid_t pid, int remove) process = lhq.value; if (!remove) { - nxt_unit_process_use(ctx, process, 1); + nxt_unit_process_use(process); } return process; @@ -3873,8 +3929,13 @@ nxt_unit_process_pop_first(nxt_unit_impl_t *lib) int nxt_unit_run(nxt_unit_ctx_t *ctx) { - int rc; - nxt_unit_impl_t *lib; + int rc; + nxt_unit_impl_t *lib; + nxt_unit_ctx_impl_t *ctx_impl; + + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + + nxt_unit_ctx_use(ctx_impl); lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); rc = NXT_UNIT_OK; @@ -3887,6 +3948,8 @@ nxt_unit_run(nxt_unit_ctx_t *ctx) } } + nxt_unit_ctx_release(ctx_impl); + return rc; } @@ -3900,6 +3963,8 @@ nxt_unit_run_once(nxt_unit_ctx_t *ctx) ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + nxt_unit_ctx_use(ctx_impl); + pthread_mutex_lock(&ctx_impl->mutex); if (ctx_impl->pending_read_head != NULL) { @@ -3915,6 +3980,9 @@ nxt_unit_run_once(nxt_unit_ctx_t *ctx) } else { rbuf = nxt_unit_read_buf_get_impl(ctx_impl); if (nxt_slow_path(rbuf == NULL)) { + + nxt_unit_ctx_release(ctx_impl); + return NXT_UNIT_ERROR; } @@ -3936,6 +4004,8 @@ nxt_unit_run_once(nxt_unit_ctx_t *ctx) nxt_unit_read_buf_release(ctx, rbuf); + nxt_unit_ctx_release(ctx_impl); + return rc; } @@ -3968,34 +4038,11 @@ 
nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) void nxt_unit_done(nxt_unit_ctx_t *ctx) { - nxt_unit_impl_t *lib; - nxt_unit_process_t *process; nxt_unit_ctx_impl_t *ctx_impl; - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - - nxt_queue_each(ctx_impl, &lib->contexts, nxt_unit_ctx_impl_t, link) { - - nxt_unit_ctx_free(&ctx_impl->ctx); - - } nxt_queue_loop; - - for ( ;; ) { - pthread_mutex_lock(&lib->mutex); - - process = nxt_unit_process_pop_first(lib); - if (process == NULL) { - pthread_mutex_unlock(&lib->mutex); - - break; - } - - nxt_unit_remove_process(ctx, process); - } - - pthread_mutex_destroy(&lib->mutex); + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); - free(lib); + nxt_unit_ctx_release(ctx_impl); } @@ -4023,9 +4070,9 @@ nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data) return NULL; } - rc = nxt_unit_send_port(ctx, &lib->ready_port_id, &new_port_id, fd); + rc = nxt_unit_send_port(ctx, &lib->router_port_id, &new_port_id, fd); if (nxt_slow_path(rc != NXT_UNIT_OK)) { - lib->callbacks.remove_port(ctx, &new_port_id); + nxt_unit_remove_port(lib, &new_port_id); close(fd); @@ -4038,7 +4085,7 @@ nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data) rc = nxt_unit_ctx_init(lib, new_ctx, data); if (nxt_slow_path(rc != NXT_UNIT_OK)) { - lib->callbacks.remove_port(ctx, &new_port_id); + nxt_unit_remove_port(lib, &new_port_id); free(new_ctx); @@ -4051,17 +4098,15 @@ nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data) } -void -nxt_unit_ctx_free(nxt_unit_ctx_t *ctx) +static void +nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl) { nxt_unit_impl_t *lib; - nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_mmap_buf_t *mmap_buf; nxt_unit_request_info_impl_t *req_impl; nxt_unit_websocket_frame_impl_t *ws_impl; - ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit); nxt_queue_each(req_impl, &ctx_impl->active_req, nxt_unit_request_info_impl_t, link) @@ -4102,6 +4147,8 @@ nxt_unit_ctx_free(nxt_unit_ctx_t *ctx) if (ctx_impl != &lib->main_ctx) { free(ctx_impl); } + + nxt_unit_lib_release(lib); } @@ -4127,36 +4174,6 @@ nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id) } -int -nxt_unit_create_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, - nxt_unit_port_id_t *port_id) -{ - int rc, fd; - nxt_unit_impl_t *lib; - nxt_unit_port_id_t new_port_id; - - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - - rc = nxt_unit_create_port(ctx, &new_port_id, &fd); - if (nxt_slow_path(rc != NXT_UNIT_OK)) { - return rc; - } - - rc = nxt_unit_send_port(ctx, dst, &new_port_id, fd); - - if (nxt_fast_path(rc == NXT_UNIT_OK)) { - *port_id = new_port_id; - - } else { - lib->callbacks.remove_port(ctx, &new_port_id); - } - - close(fd); - - return rc; -} - - static int nxt_unit_create_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int *fd) { @@ -4180,7 +4197,7 @@ nxt_unit_create_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int *fd) pthread_mutex_lock(&lib->mutex); - process = nxt_unit_process_get(ctx, lib->pid); + process = nxt_unit_process_get(lib, lib->pid); if (nxt_slow_path(process == NULL)) { pthread_mutex_unlock(&lib->mutex); @@ -4198,9 +4215,9 @@ nxt_unit_create_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int *fd) pthread_mutex_unlock(&lib->mutex); - nxt_unit_process_use(ctx, process, -1); + nxt_unit_process_release(process); - rc = lib->callbacks.add_port(ctx, &new_port); + rc = 
nxt_unit_add_port(ctx, &new_port); if (nxt_slow_path(rc != NXT_UNIT_OK)) { nxt_unit_warn(ctx, "create_port: add_port() failed"); @@ -4269,14 +4286,13 @@ nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, */ memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int)); - res = lib->callbacks.port_send(ctx, dst, &m, sizeof(m), - &cmsg, sizeof(cmsg)); + res = nxt_unit_port_send(ctx, dst, &m, sizeof(m), &cmsg, sizeof(cmsg)); - return res == sizeof(m) ? NXT_UNIT_OK : NXT_UNIT_ERROR; + return (res == sizeof(m)) ? NXT_UNIT_OK : NXT_UNIT_ERROR; } -int +static int nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) { int rc; @@ -4295,18 +4311,41 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) port->id.pid, port->id.id, port->in_fd, port->out_fd); + if (old_port->port.data == NULL) { + old_port->port.data = port->data; + port->data = NULL; + } + + if (old_port->port.in_fd == -1) { + old_port->port.in_fd = port->in_fd; + port->in_fd = -1; + } + if (port->in_fd != -1) { close(port->in_fd); port->in_fd = -1; } + if (old_port->port.out_fd == -1) { + old_port->port.out_fd = port->out_fd; + port->out_fd = -1; + } + if (port->out_fd != -1) { close(port->out_fd); port->out_fd = -1; } + *port = old_port->port; + pthread_mutex_unlock(&lib->mutex); + if (lib->callbacks.add_port != NULL + && (port->in_fd != -1 || port->out_fd != -1)) + { + lib->callbacks.add_port(ctx, &old_port->port); + } + return NXT_UNIT_OK; } @@ -4314,7 +4353,7 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) port->id.pid, port->id.id, port->in_fd, port->out_fd); - process = nxt_unit_process_get(ctx, port->id.pid); + process = nxt_unit_process_get(lib, port->id.pid); if (nxt_slow_path(process == NULL)) { rc = NXT_UNIT_ERROR; goto unlock; @@ -4351,72 +4390,80 @@ unlock: pthread_mutex_unlock(&lib->mutex); if (nxt_slow_path(process != NULL && rc != NXT_UNIT_OK)) { - nxt_unit_process_use(ctx, process, -1); + nxt_unit_process_release(process); } - return rc; -} - + if (lib->callbacks.add_port != NULL + && rc == NXT_UNIT_OK + && (port->in_fd != -1 || port->out_fd != -1)) + { + lib->callbacks.add_port(ctx, &new_port->port); + } -void -nxt_unit_remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) -{ - nxt_unit_find_remove_port(ctx, port_id, NULL); + return rc; } -void -nxt_unit_find_remove_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, - nxt_unit_port_t *r_port) +static int +nxt_unit_remove_port(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id) { - nxt_unit_impl_t *lib; + int res; + nxt_unit_port_t *port; nxt_unit_process_t *process; - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + port = NULL; + process = NULL; pthread_mutex_lock(&lib->mutex); - process = NULL; - - nxt_unit_remove_port_unsafe(ctx, port_id, r_port, &process); + res = nxt_unit_remove_port_unsafe(lib, port_id, &port, &process); pthread_mutex_unlock(&lib->mutex); + if (lib->callbacks.remove_port != NULL && res == NXT_UNIT_OK) { + lib->callbacks.remove_port(&lib->unit, port); + } + + if (nxt_fast_path(port != NULL)) { + if (port->in_fd != -1) { + close(port->in_fd); + } + + if (port->out_fd != -1) { + close(port->out_fd); + } + } + if (nxt_slow_path(process != NULL)) { - nxt_unit_process_use(ctx, process, -1); + nxt_unit_process_release(process); + } + + if (nxt_fast_path(port != NULL)) { + free(port); } + + return res; } -static void -nxt_unit_remove_port_unsafe(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, - nxt_unit_port_t *r_port, nxt_unit_process_t **process) +static int 
+nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id, + nxt_unit_port_t **r_port, nxt_unit_process_t **process) { - nxt_unit_impl_t *lib; nxt_unit_port_impl_t *port; - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - port = nxt_unit_port_hash_find(&lib->ports, port_id, 1); if (nxt_slow_path(port == NULL)) { - nxt_unit_debug(ctx, "remove_port: port %d,%d not found", + nxt_unit_debug(NULL, "remove_port: port %d,%d not found", (int) port_id->pid, (int) port_id->id); - return; + return NXT_UNIT_ERROR; } - nxt_unit_debug(ctx, "remove_port: port %d,%d, fds %d,%d, data %p", + nxt_unit_debug(NULL, "remove_port: port %d,%d, fds %d,%d, data %p", (int) port_id->pid, (int) port_id->id, port->port.in_fd, port->port.out_fd, port->port.data); - if (port->port.in_fd != -1) { - close(port->port.in_fd); - } - - if (port->port.out_fd != -1) { - close(port->port.out_fd); - } - if (port->process != NULL) { nxt_queue_remove(&port->link); } @@ -4426,60 +4473,55 @@ nxt_unit_remove_port_unsafe(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, } if (r_port != NULL) { - *r_port = port->port; + *r_port = &port->port; } - free(port); + return NXT_UNIT_OK; } -void -nxt_unit_remove_pid(nxt_unit_ctx_t *ctx, pid_t pid) +static void +nxt_unit_remove_pid(nxt_unit_impl_t *lib, pid_t pid) { - nxt_unit_impl_t *lib; nxt_unit_process_t *process; - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - pthread_mutex_lock(&lib->mutex); - process = nxt_unit_process_find(ctx, pid, 1); + process = nxt_unit_process_find(lib, pid, 1); if (nxt_slow_path(process == NULL)) { - nxt_unit_debug(ctx, "remove_pid: process %d not found", (int) pid); + nxt_unit_debug(NULL, "remove_pid: process %d not found", (int) pid); pthread_mutex_unlock(&lib->mutex); return; } - nxt_unit_remove_process(ctx, process); + nxt_unit_remove_process(lib, process); + + if (lib->callbacks.remove_pid != NULL) { + lib->callbacks.remove_pid(&lib->unit, pid); + } } static void -nxt_unit_remove_process(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process) +nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process) { nxt_queue_t ports; - nxt_unit_impl_t *lib; nxt_unit_port_impl_t *port; - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - nxt_queue_init(&ports); nxt_queue_add(&ports, &process->ports); nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) { - nxt_unit_process_use(ctx, process, -1); - port->process = NULL; + nxt_unit_process_release(process); - /* Shortcut for default callback. */ - if (lib->callbacks.remove_port == nxt_unit_remove_port) { - nxt_queue_remove(&port->link); + /* To avoid unlink port. 
*/ + port->process = NULL; - nxt_unit_remove_port_unsafe(ctx, &port->port.id, NULL, NULL); - } + nxt_unit_remove_port_unsafe(lib, &port->port.id, NULL, NULL); } nxt_queue_loop; @@ -4489,15 +4531,27 @@ nxt_unit_remove_process(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process) nxt_queue_remove(&port->link); - lib->callbacks.remove_port(ctx, &port->port.id); + if (lib->callbacks.remove_port != NULL) { + lib->callbacks.remove_port(&lib->unit, &port->port); + } + + if (port->port.in_fd != -1) { + close(port->port.in_fd); + } + + if (port->port.out_fd != -1) { + close(port->port.out_fd); + } + + free(port); } nxt_queue_loop; - nxt_unit_process_use(ctx, process, -1); + nxt_unit_process_release(process); } -void +static void nxt_unit_quit(nxt_unit_ctx_t *ctx) { nxt_unit_impl_t *lib; @@ -4505,11 +4559,15 @@ nxt_unit_quit(nxt_unit_ctx_t *ctx) lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); lib->online = 0; + + if (lib->callbacks.quit != NULL) { + lib->callbacks.quit(ctx); + } } static ssize_t -nxt_unit_port_send_default(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, +nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, const void *buf, size_t buf_size, const void *oob, size_t oob_size) { int fd; @@ -4522,35 +4580,35 @@ nxt_unit_port_send_default(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, port = nxt_unit_port_hash_find(&lib->ports, port_id, 0); - if (nxt_fast_path(port != NULL)) { + if (nxt_fast_path(port != NULL && port->port.out_fd != -1)) { fd = port->port.out_fd; - } else { - nxt_unit_warn(ctx, "port_send: port %d,%d not found", - (int) port_id->pid, (int) port_id->id); - fd = -1; - } + pthread_mutex_unlock(&lib->mutex); - pthread_mutex_unlock(&lib->mutex); + } else { + pthread_mutex_unlock(&lib->mutex); - if (nxt_slow_path(fd == -1)) { - if (port != NULL) { - nxt_unit_warn(ctx, "port_send: port %d,%d: fd == -1", - (int) port_id->pid, (int) port_id->id); - } + nxt_unit_alert(ctx, "port_send: port %d,%d not found", + (int) port_id->pid, (int) port_id->id); - return -1; + return -NXT_UNIT_ERROR; } nxt_unit_debug(ctx, "port_send: found port %d,%d fd %d", (int) port_id->pid, (int) port_id->id, fd); - return nxt_unit_port_send(ctx, fd, buf, buf_size, oob, oob_size); + if (lib->callbacks.port_send == NULL) { + return nxt_unit_sendmsg(ctx, fd, buf, buf_size, oob, oob_size); + + } else { + return lib->callbacks.port_send(ctx, port_id, buf, buf_size, + oob, oob_size); + } } -ssize_t -nxt_unit_port_send(nxt_unit_ctx_t *ctx, int fd, +static ssize_t +nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd, const void *buf, size_t buf_size, const void *oob, size_t oob_size) { ssize_t res; diff --git a/src/nxt_unit.h b/src/nxt_unit.h index 596dd8b6..fa1fa843 100644 --- a/src/nxt_unit.h +++ b/src/nxt_unit.h @@ -130,15 +130,15 @@ struct nxt_unit_callbacks_s { int (*add_port)(nxt_unit_ctx_t *, nxt_unit_port_t *port); /* Remove previously added port. Optional. */ - void (*remove_port)(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id); + void (*remove_port)(nxt_unit_t *, nxt_unit_port_t *port); /* Remove all data associated with process pid including ports. Optional. */ - void (*remove_pid)(nxt_unit_ctx_t *, pid_t pid); + void (*remove_pid)(nxt_unit_t *, pid_t pid); /* Gracefully quit the application. Optional. */ void (*quit)(nxt_unit_ctx_t *); - /* Shared memory release acknowledgement. */ + /* Shared memory release acknowledgement. Optional. */ void (*shm_ack_handler)(nxt_unit_ctx_t *); /* Send data and control to process pid using port id. Optional. 
*/ @@ -149,7 +149,6 @@ struct nxt_unit_callbacks_s { /* Receive data on port id. Optional. */ ssize_t (*port_recv)(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id, void *buf, size_t buf_size, void *oob, size_t oob_size); - }; @@ -165,6 +164,7 @@ struct nxt_unit_init_s { nxt_unit_port_t ready_port; uint32_t ready_stream; + nxt_unit_port_t router_port; nxt_unit_port_t read_port; int log_fd; }; @@ -222,45 +222,9 @@ void nxt_unit_done(nxt_unit_ctx_t *); */ nxt_unit_ctx_t *nxt_unit_ctx_alloc(nxt_unit_ctx_t *, void *); -/* Free unused context. It is not required to free main context. */ -void nxt_unit_ctx_free(nxt_unit_ctx_t *); - /* Initialize port_id, calculate hash. */ void nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id); -/* - * Create extra incoming port, perform all required actions to propogate - * the port to Unit server. Fills structure referenced by port_id with - * current pid and new port id. - */ -int nxt_unit_create_send_port(nxt_unit_ctx_t *, nxt_unit_port_id_t *dst, - nxt_unit_port_id_t *port_id); - -/* Default 'add_port' implementation. */ -int nxt_unit_add_port(nxt_unit_ctx_t *, nxt_unit_port_t *port); - -/* Find previously added port. */ -nxt_unit_port_t *nxt_unit_find_port(nxt_unit_ctx_t *, - nxt_unit_port_id_t *port_id); - -/* Find, fill output 'port' and remove port from storage. */ -void nxt_unit_find_remove_port(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id, - nxt_unit_port_t *port); - -/* Default 'remove_port' implementation. */ -void nxt_unit_remove_port(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id); - -/* Default 'remove_pid' implementation. */ -void nxt_unit_remove_pid(nxt_unit_ctx_t *, pid_t pid); - -/* Default 'quit' implementation. */ -void nxt_unit_quit(nxt_unit_ctx_t *); - -/* Default 'port_send' implementation. */ -ssize_t nxt_unit_port_send(nxt_unit_ctx_t *, int fd, - const void *buf, size_t buf_size, - const void *oob, size_t oob_size); - /* Default 'port_recv' implementation. */ ssize_t nxt_unit_port_recv(nxt_unit_ctx_t *, int fd, void *buf, size_t buf_size, void *oob, size_t oob_size); -- cgit From bf647588ff781e606651f001b53a4e83bb34c000 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:06 +0300 Subject: Adding a reference counter to the libunit port structure. The goal is to minimize the number of (pid, id) to port hash lookups which require a library mutex lock. The response port is found once per request, while the read port is initialized at startup. 
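
The reference-counting scheme is easiest to see in miniature. The sketch below is a simplified illustration only, not the libunit code: it uses C11 <stdatomic.h> and hypothetical port_ref_t/port_use()/port_release() names, whereas the patch itself adds a use_count field of type nxt_atomic_t to nxt_unit_port_impl_t and manipulates it with nxt_atomic_fetch_add(). The idea is the same: every holder of a port pointer (the library port hash, each in-flight request's response_port, each context's read_port) owns one reference, so the (pid, id) hash lookup under lib->mutex is needed only when a new reference is first obtained.

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <unistd.h>

    typedef struct {
        int          in_fd;
        int          out_fd;
        atomic_long  use_count;    /* one count per holder of the pointer */
    } port_ref_t;

    /* Take an extra reference before handing the port to another holder. */
    static void
    port_use(port_ref_t *port)
    {
        atomic_fetch_add(&port->use_count, 1);
    }

    /* Drop a reference; the last holder closes the fds and frees the port. */
    static void
    port_release(port_ref_t *port)
    {
        if (atomic_fetch_sub(&port->use_count, 1) == 1) {
            if (port->in_fd != -1) {
                close(port->in_fd);
            }

            if (port->out_fd != -1) {
                close(port->out_fd);
            }

            free(port);
        }
    }

atomic_fetch_sub() returns the counter value before the decrement, so a result of 1 means this caller held the last reference and is responsible for cleanup; the patched nxt_unit_port_release() below applies the same check to the old value returned by nxt_atomic_fetch_add().
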
--- go/nxt_cgo_lib.c | 12 +- src/nxt_unit.c | 678 +++++++++++++++++++++++++++---------------------------- src/nxt_unit.h | 13 +- 3 files changed, 346 insertions(+), 357 deletions(-) diff --git a/go/nxt_cgo_lib.c b/go/nxt_cgo_lib.c index 1bb38f3c..937996b0 100644 --- a/go/nxt_cgo_lib.c +++ b/go/nxt_cgo_lib.c @@ -15,9 +15,9 @@ static nxt_cgo_str_t *nxt_cgo_str_init(nxt_cgo_str_t *dst, nxt_unit_sptr_t *sptr, uint32_t length); static int nxt_cgo_add_port(nxt_unit_ctx_t *, nxt_unit_port_t *port); static void nxt_cgo_remove_port(nxt_unit_t *, nxt_unit_port_t *port); -static ssize_t nxt_cgo_port_send(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id, +static ssize_t nxt_cgo_port_send(nxt_unit_ctx_t *, nxt_unit_port_t *port, const void *buf, size_t buf_size, const void *oob, size_t oob_size); -static ssize_t nxt_cgo_port_recv(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id, +static ssize_t nxt_cgo_port_recv(nxt_unit_ctx_t *, nxt_unit_port_t *port, void *buf, size_t buf_size, void *oob, size_t oob_size); static void nxt_cgo_shm_ack_handler(nxt_unit_ctx_t *ctx); @@ -123,19 +123,19 @@ nxt_cgo_remove_port(nxt_unit_t *unit, nxt_unit_port_t *port) static ssize_t -nxt_cgo_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, +nxt_cgo_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, const void *buf, size_t buf_size, const void *oob, size_t oob_size) { - return nxt_go_port_send(port_id->pid, port_id->id, + return nxt_go_port_send(port->id.pid, port->id.id, (void *) buf, buf_size, (void *) oob, oob_size); } static ssize_t -nxt_cgo_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, +nxt_cgo_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, void *buf, size_t buf_size, void *oob, size_t oob_size) { - return nxt_go_port_recv(port_id->pid, port_id->id, + return nxt_go_port_recv(port->id.pid, port->id.id, buf, buf_size, oob, oob_size); } diff --git a/src/nxt_unit.c b/src/nxt_unit.c index 8c964c7a..ddfd9c80 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -70,7 +70,7 @@ static nxt_unit_process_t *nxt_unit_msg_get_process(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); static nxt_unit_mmap_buf_t *nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx); static void nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf); -static int nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream, +static int nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req, nxt_unit_mmap_buf_t *mmap_buf, int last); static void nxt_unit_mmap_buf_free(nxt_unit_mmap_buf_t *mmap_buf); static void nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf); @@ -84,17 +84,16 @@ static nxt_unit_mmap_buf_t *nxt_unit_request_preread( static ssize_t nxt_unit_buf_read(nxt_unit_buf_t **b, uint64_t *len, void *dst, size_t size); static nxt_port_mmap_header_t *nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, - nxt_unit_process_t *process, nxt_unit_port_id_t *port_id, - nxt_chunk_id_t *c, int *n, int min_n); -static int nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id); + nxt_unit_port_t *port, nxt_chunk_id_t *c, int *n, int min_n); +static int nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port); static int nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx); static nxt_unit_mmap_t *nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i); static nxt_port_mmap_header_t *nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, - nxt_unit_process_t *process, nxt_unit_port_id_t *port_id, int n); -static int nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, + nxt_unit_port_t *port, int n); +static int 
nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int fd); static int nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, - nxt_unit_process_t *process, nxt_unit_port_id_t *port_id, uint32_t size, + nxt_unit_port_t *port, uint32_t size, uint32_t min_size, nxt_unit_mmap_buf_t *mmap_buf, char *local_buf); static int nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd); @@ -121,34 +120,36 @@ static nxt_unit_process_t *nxt_unit_process_pop_first(nxt_unit_impl_t *lib); static void nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf); static void nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl); -static int nxt_unit_create_port(nxt_unit_ctx_t *ctx, - nxt_unit_port_id_t *port_id, int *fd); +static nxt_unit_port_t *nxt_unit_create_port(nxt_unit_ctx_t *ctx); -static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, - nxt_unit_port_id_t *new_port, int fd); +static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst, + nxt_unit_port_t *port); -static int nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port); -static int nxt_unit_remove_port(nxt_unit_impl_t *lib, +nxt_inline void nxt_unit_port_use(nxt_unit_port_t *port); +nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port); +nxt_inline nxt_unit_process_t *nxt_unit_port_process(nxt_unit_port_t *port); +static nxt_unit_port_t *nxt_unit_add_port(nxt_unit_ctx_t *ctx, + nxt_unit_port_t *port); +static void nxt_unit_remove_port(nxt_unit_impl_t *lib, + nxt_unit_port_id_t *port_id); +static nxt_unit_port_t *nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id); -static int nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, - nxt_unit_port_id_t *port_id, nxt_unit_port_t **r_port, - nxt_unit_process_t **process); static void nxt_unit_remove_pid(nxt_unit_impl_t *lib, pid_t pid); static void nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process); static void nxt_unit_quit(nxt_unit_ctx_t *ctx); static ssize_t nxt_unit_port_send(nxt_unit_ctx_t *ctx, - nxt_unit_port_id_t *port_id, const void *buf, size_t buf_size, + nxt_unit_port_t *port, const void *buf, size_t buf_size, const void *oob, size_t oob_size); static ssize_t nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd, const void *buf, size_t buf_size, const void *oob, size_t oob_size); -static ssize_t nxt_unit_port_recv_default(nxt_unit_ctx_t *ctx, - nxt_unit_port_id_t *port_id, void *buf, size_t buf_size, +static ssize_t nxt_unit_port_recv(nxt_unit_ctx_t *ctx, + nxt_unit_port_t *port, void *buf, size_t buf_size, void *oob, size_t oob_size); static int nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port); -static nxt_unit_port_impl_t *nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, +static nxt_unit_port_t *nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, nxt_unit_port_id_t *port_id, int remove); static int nxt_unit_request_hash_add(nxt_lvlhsh_t *request_hash, @@ -166,7 +167,6 @@ struct nxt_unit_mmap_buf_s { nxt_unit_mmap_buf_t **prev; nxt_port_mmap_header_t *hdr; - nxt_unit_port_id_t port_id; nxt_unit_request_info_t *req; nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_process_t *process; @@ -247,8 +247,7 @@ struct nxt_unit_ctx_impl_s { pthread_mutex_t mutex; - nxt_unit_port_id_t read_port_id; - int read_port_fd; + nxt_unit_port_t *read_port; nxt_queue_link_t link; @@ -291,7 +290,7 @@ struct nxt_unit_impl_s { nxt_lvlhsh_t processes; /* of nxt_unit_process_t */ nxt_lvlhsh_t ports; /* of nxt_unit_port_impl_t */ - nxt_unit_port_id_t router_port_id; + nxt_unit_port_t *router_port; 
nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */ @@ -306,6 +305,8 @@ struct nxt_unit_impl_s { struct nxt_unit_port_impl_s { nxt_unit_port_t port; + nxt_atomic_t use_count; + nxt_queue_link_t link; nxt_unit_process_t *process; }; @@ -395,26 +396,23 @@ nxt_unit_init(nxt_unit_init_t *init) } lib->pid = read_port.id.pid; + ctx = &lib->main_ctx.ctx; - rc = nxt_unit_add_port(ctx, &router_port); - if (nxt_slow_path(rc != NXT_UNIT_OK)) { + lib->router_port = nxt_unit_add_port(ctx, &router_port); + if (nxt_slow_path(lib->router_port == NULL)) { nxt_unit_alert(NULL, "failed to add router_port"); goto fail; } - lib->router_port_id = router_port.id; - - rc = nxt_unit_add_port(ctx, &read_port); - if (nxt_slow_path(rc != NXT_UNIT_OK)) { + lib->main_ctx.read_port = nxt_unit_add_port(ctx, &read_port); + if (nxt_slow_path(lib->main_ctx.read_port == NULL)) { nxt_unit_alert(NULL, "failed to add read_port"); goto fail; } - lib->main_ctx.read_port_id = read_port.id; - rc = nxt_unit_ready(ctx, ready_port.out_fd, ready_stream); if (nxt_slow_path(rc != NXT_UNIT_OK)) { nxt_unit_alert(NULL, "failed to send READY message"); @@ -428,7 +426,7 @@ nxt_unit_init(nxt_unit_init_t *init) fail: - free(lib); + nxt_unit_ctx_release(&lib->main_ctx); return NULL; } @@ -471,6 +469,7 @@ nxt_unit_create(nxt_unit_init_t *init) nxt_queue_init(&lib->contexts); lib->use_count = 0; + lib->router_port = NULL; rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data); if (nxt_slow_path(rc != NXT_UNIT_OK)) { @@ -485,10 +484,6 @@ nxt_unit_create(nxt_unit_init_t *init) goto fail; } - if (cb->port_recv == NULL) { - cb->port_recv = nxt_unit_port_recv_default; - } - return lib; fail: @@ -539,7 +534,7 @@ nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, ctx_impl->req.req.ctx = &ctx_impl->ctx; ctx_impl->req.req.unit = &lib->unit; - ctx_impl->read_port_fd = -1; + ctx_impl->read_port = NULL; ctx_impl->requests.slot = 0; return NXT_UNIT_OK; @@ -597,6 +592,10 @@ nxt_unit_lib_release(nxt_unit_impl_t *lib) pthread_mutex_destroy(&lib->mutex); + if (nxt_fast_path(lib->router_port != NULL)) { + nxt_unit_port_release(lib->router_port); + } + free(lib); } } @@ -751,7 +750,7 @@ nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream) int -nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, +nxt_unit_process_msg(nxt_unit_ctx_t *ctx, void *buf, size_t buf_size, void *oob, size_t oob_size) { int rc; @@ -917,7 +916,7 @@ static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) { int nb; - nxt_unit_port_t new_port; + nxt_unit_port_t new_port, *port; nxt_port_msg_new_port_t *new_port_msg; if (nxt_slow_path(recv_msg->size != sizeof(nxt_port_msg_new_port_t))) { @@ -960,7 +959,14 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) recv_msg->fd = -1; - return nxt_unit_add_port(ctx, &new_port); + port = nxt_unit_add_port(ctx, &new_port); + if (nxt_slow_path(port == NULL)) { + return NXT_UNIT_ERROR; + } + + nxt_unit_port_release(port); + + return NXT_UNIT_OK; } @@ -968,6 +974,8 @@ static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) { nxt_unit_impl_t *lib; + nxt_unit_port_t *port; + nxt_unit_port_id_t port_id; nxt_unit_request_t *r; nxt_unit_mmap_buf_t *b; nxt_unit_request_info_t *req; @@ -996,10 +1004,27 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) return NXT_UNIT_ERROR; } + nxt_unit_port_id_init(&port_id, recv_msg->pid, recv_msg->reply_port); + + lib = nxt_container_of(ctx->unit, 
nxt_unit_impl_t, unit); + + pthread_mutex_lock(&lib->mutex); + + port = nxt_unit_port_hash_find(&lib->ports, &port_id, 0); + + pthread_mutex_unlock(&lib->mutex); + + if (nxt_slow_path(port == NULL)) { + nxt_unit_alert(ctx, "#%"PRIu32": response port %d,%d not found", + recv_msg->stream, + (int) recv_msg->pid, (int) recv_msg->reply_port); + + return NXT_UNIT_ERROR; + } + req = &req_impl->req; - nxt_unit_port_id_init(&req->response_port, recv_msg->pid, - recv_msg->reply_port); + req->response_port = port; req->request = recv_msg->start; @@ -1051,8 +1076,6 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) (char *) nxt_unit_sptr_get(&r->target), (int) r->content_length); - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - lib->callbacks.request_handler(req); return NXT_UNIT_OK; @@ -1275,6 +1298,12 @@ nxt_unit_request_info_release(nxt_unit_request_info_t *req) req_impl->process = NULL; } + if (req->response_port != NULL) { + nxt_unit_port_release(req->response_port); + + req->response_port = NULL; + } + pthread_mutex_lock(&ctx_impl->mutex); nxt_queue_remove(&req_impl->link); @@ -1793,7 +1822,7 @@ nxt_unit_response_send(nxt_unit_request_info_t *req) mmap_buf = nxt_container_of(req->response_buf, nxt_unit_mmap_buf_t, buf); - rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, mmap_buf, 0); + rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0); if (nxt_fast_path(rc == NXT_UNIT_OK)) { req->response = NULL; req->response_buf = NULL; @@ -1846,8 +1875,8 @@ nxt_unit_response_buf_alloc(nxt_unit_request_info_t *req, uint32_t size) nxt_unit_mmap_buf_insert_tail(&req_impl->outgoing_buf, mmap_buf); - rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process, - &req->response_port, size, size, mmap_buf, + rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, + size, size, mmap_buf, NULL); if (nxt_slow_path(rc != NXT_UNIT_OK)) { nxt_unit_mmap_buf_release(mmap_buf); @@ -2035,7 +2064,7 @@ nxt_unit_buf_send(nxt_unit_buf_t *buf) } if (nxt_fast_path(buf->free > buf->start)) { - rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, mmap_buf, 0); + rc = nxt_unit_mmap_buf_send(req, mmap_buf, 0); if (nxt_slow_path(rc != NXT_UNIT_OK)) { return rc; } @@ -2050,17 +2079,15 @@ nxt_unit_buf_send(nxt_unit_buf_t *buf) static void nxt_unit_buf_send_done(nxt_unit_buf_t *buf) { - int rc; - nxt_unit_mmap_buf_t *mmap_buf; - nxt_unit_request_info_t *req; - nxt_unit_request_info_impl_t *req_impl; + int rc; + nxt_unit_mmap_buf_t *mmap_buf; + nxt_unit_request_info_t *req; mmap_buf = nxt_container_of(buf, nxt_unit_mmap_buf_t, buf); req = mmap_buf->req; - req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); - rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, mmap_buf, 1); + rc = nxt_unit_mmap_buf_send(req, mmap_buf, 1); if (nxt_slow_path(rc == NXT_UNIT_OK)) { nxt_unit_mmap_buf_free(mmap_buf); @@ -2073,7 +2100,7 @@ nxt_unit_buf_send_done(nxt_unit_buf_t *buf) static int -nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream, +nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req, nxt_unit_mmap_buf_t *mmap_buf, int last) { struct { @@ -2081,22 +2108,24 @@ nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream, nxt_port_mmap_msg_t mmap_msg; } m; - int rc; - u_char *last_used, *first_free; - ssize_t res; - nxt_chunk_id_t first_free_chunk; - nxt_unit_buf_t *buf; - nxt_unit_impl_t *lib; - nxt_port_mmap_header_t *hdr; + int rc; + u_char *last_used, *first_free; + ssize_t res; + nxt_chunk_id_t first_free_chunk; + nxt_unit_buf_t *buf; + nxt_unit_impl_t *lib; + 
nxt_port_mmap_header_t *hdr; + nxt_unit_request_info_impl_t *req_impl; - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit); + req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); buf = &mmap_buf->buf; hdr = mmap_buf->hdr; m.mmap_msg.size = buf->free - buf->start; - m.msg.stream = stream; + m.msg.stream = req_impl->stream; m.msg.pid = lib->pid; m.msg.reply_port = 0; m.msg.type = _NXT_PORT_MSG_DATA; @@ -2113,13 +2142,13 @@ nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream, m.mmap_msg.chunk_id = nxt_port_mmap_chunk_id(hdr, (u_char *) buf->start); - nxt_unit_debug(ctx, "#%"PRIu32": send mmap: (%d,%d,%d)", - stream, + nxt_unit_debug(req->ctx, "#%"PRIu32": send mmap: (%d,%d,%d)", + req_impl->stream, (int) m.mmap_msg.mmap_id, (int) m.mmap_msg.chunk_id, (int) m.mmap_msg.size); - res = nxt_unit_port_send(ctx, &mmap_buf->port_id, &m, sizeof(m), + res = nxt_unit_port_send(req->ctx, req->response_port, &m, sizeof(m), NULL, 0); if (nxt_slow_path(res != sizeof(m))) { goto free_buf; @@ -2149,7 +2178,7 @@ nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream, nxt_atomic_fetch_add(&mmap_buf->process->outgoing.allocated_chunks, (int) m.mmap_msg.chunk_id - (int) first_free_chunk); - nxt_unit_debug(ctx, "process %d allocated_chunks %d", + nxt_unit_debug(req->ctx, "process %d allocated_chunks %d", mmap_buf->process->pid, (int) mmap_buf->process->outgoing.allocated_chunks); @@ -2157,19 +2186,21 @@ nxt_unit_mmap_buf_send(nxt_unit_ctx_t *ctx, uint32_t stream, if (nxt_slow_path(mmap_buf->plain_ptr == NULL || mmap_buf->plain_ptr > buf->start - sizeof(m.msg))) { - nxt_unit_warn(ctx, "#%"PRIu32": failed to send plain memory buffer" - ": no space reserved for message header", stream); + nxt_unit_alert(req->ctx, + "#%"PRIu32": failed to send plain memory buffer" + ": no space reserved for message header", + req_impl->stream); goto free_buf; } memcpy(buf->start - sizeof(m.msg), &m.msg, sizeof(m.msg)); - nxt_unit_debug(ctx, "#%"PRIu32": send plain: %d", - stream, + nxt_unit_debug(req->ctx, "#%"PRIu32": send plain: %d", + req_impl->stream, (int) (sizeof(m.msg) + m.mmap_msg.size)); - res = nxt_unit_port_send(ctx, &mmap_buf->port_id, + res = nxt_unit_port_send(req->ctx, req->response_port, buf->start - sizeof(m.msg), m.mmap_msg.size + sizeof(m.msg), NULL, 0); @@ -2337,7 +2368,7 @@ nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start, sent = 0; if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { - nxt_unit_req_warn(req, "write: response not initialized yet"); + nxt_unit_req_alert(req, "write: response not initialized yet"); return -NXT_UNIT_ERROR; } @@ -2369,8 +2400,7 @@ nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start, min_part_size = nxt_min(min_size, part_size); min_part_size = nxt_min(min_part_size, PORT_MMAP_CHUNK_SIZE); - rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process, - &req->response_port, part_size, + rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, part_size, min_part_size, &mmap_buf, local_buf); if (nxt_slow_path(rc != NXT_UNIT_OK)) { return -rc; @@ -2385,7 +2415,7 @@ nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start, mmap_buf.buf.free = nxt_cpymem(mmap_buf.buf.free, part_start, part_size); - rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, &mmap_buf, 0); + rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0); if (nxt_slow_path(rc != NXT_UNIT_OK)) { return -rc; } @@ -2415,8 +2445,14 @@ 
nxt_unit_response_write_cb(nxt_unit_request_info_t *req, req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); + if (nxt_slow_path(req_impl->state < NXT_UNIT_RS_RESPONSE_INIT)) { + nxt_unit_req_alert(req, "write: response not initialized yet"); + + return NXT_UNIT_ERROR; + } + /* Check if response is not send yet. */ - if (nxt_slow_path(req->response_buf)) { + if (nxt_slow_path(req->response_buf != NULL)) { /* Enable content in headers buf. */ rc = nxt_unit_response_add_content(req, "", 0); @@ -2463,8 +2499,7 @@ nxt_unit_response_write_cb(nxt_unit_request_info_t *req, buf_size = nxt_min(read_info->buf_size, PORT_MMAP_DATA_SIZE); - rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process, - &req->response_port, + rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, buf_size, buf_size, &mmap_buf, local_buf); if (nxt_slow_path(rc != NXT_UNIT_OK)) { @@ -2486,7 +2521,7 @@ nxt_unit_response_write_cb(nxt_unit_request_info_t *req, buf->free += n; } - rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, &mmap_buf, 0); + rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0); if (nxt_slow_path(rc != NXT_UNIT_OK)) { nxt_unit_req_error(req, "Failed to send content"); @@ -2744,7 +2779,7 @@ skip_response_send: msg.mf = 0; msg.tracking = 0; - (void) nxt_unit_port_send(req->ctx, &req->response_port, + (void) nxt_unit_port_send(req->ctx, req->response_port, &msg, sizeof(msg), NULL, 0); nxt_unit_request_info_release(req); @@ -2765,17 +2800,14 @@ int nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode, uint8_t last, const struct iovec *iov, int iovcnt) { - int i, rc; - size_t l, copy; - uint32_t payload_len, buf_size, alloc_size; - const uint8_t *b; - nxt_unit_buf_t *buf; - nxt_unit_mmap_buf_t mmap_buf; - nxt_websocket_header_t *wh; - nxt_unit_request_info_impl_t *req_impl; - char local_buf[NXT_UNIT_LOCAL_BUF_SIZE]; - - req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); + int i, rc; + size_t l, copy; + uint32_t payload_len, buf_size, alloc_size; + const uint8_t *b; + nxt_unit_buf_t *buf; + nxt_unit_mmap_buf_t mmap_buf; + nxt_websocket_header_t *wh; + char local_buf[NXT_UNIT_LOCAL_BUF_SIZE]; payload_len = 0; @@ -2786,8 +2818,7 @@ nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode, buf_size = 10 + payload_len; alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE); - rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process, - &req->response_port, + rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, alloc_size, alloc_size, &mmap_buf, local_buf); if (nxt_slow_path(rc != NXT_UNIT_OK)) { @@ -2821,8 +2852,7 @@ nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode, if (l > 0) { if (nxt_fast_path(buf->free > buf->start)) { - rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, - &mmap_buf, 0); + rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0); if (nxt_slow_path(rc != NXT_UNIT_OK)) { return rc; @@ -2831,8 +2861,7 @@ nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode, alloc_size = nxt_min(buf_size, PORT_MMAP_DATA_SIZE); - rc = nxt_unit_get_outgoing_buf(req->ctx, req_impl->process, - &req->response_port, + rc = nxt_unit_get_outgoing_buf(req->ctx, req->response_port, alloc_size, alloc_size, &mmap_buf, local_buf); if (nxt_slow_path(rc != NXT_UNIT_OK)) { @@ -2845,8 +2874,7 @@ nxt_unit_websocket_sendv(nxt_unit_request_info_t *req, uint8_t opcode, } if (buf->free > buf->start) { - rc = nxt_unit_mmap_buf_send(req->ctx, req_impl->stream, - &mmap_buf, 0); + rc = nxt_unit_mmap_buf_send(req, &mmap_buf, 0); } 
return rc; @@ -2919,17 +2947,26 @@ nxt_unit_websocket_done(nxt_unit_websocket_frame_t *ws) static nxt_port_mmap_header_t * -nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, - nxt_unit_port_id_t *port_id, nxt_chunk_id_t *c, int *n, int min_n) +nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, + nxt_chunk_id_t *c, int *n, int min_n) { int res, nchunks, i; uint32_t outgoing_size; nxt_unit_mmap_t *mm, *mm_end; nxt_unit_impl_t *lib; + nxt_unit_process_t *process; nxt_port_mmap_header_t *hdr; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + process = nxt_unit_port_process(port); + if (nxt_slow_path(process == NULL)) { + nxt_unit_alert(ctx, "mmap_get: port %d,%d already closed", + (int) port->id.pid, (int) port->id.id); + + return NULL; + } + pthread_mutex_lock(&process->outgoing.mutex); retry: @@ -2941,7 +2978,7 @@ retry: for (mm = process->outgoing.elts; mm < mm_end; mm++) { hdr = mm->hdr; - if (hdr->sent_over != 0xFFFFu && hdr->sent_over != port_id->id) { + if (hdr->sent_over != 0xFFFFu && hdr->sent_over != port->id.id) { continue; } @@ -3000,7 +3037,7 @@ retry: /* Notify router about OOSM condition. */ - res = nxt_unit_send_oosm(ctx, port_id); + res = nxt_unit_send_oosm(ctx, port); if (nxt_slow_path(res != NXT_UNIT_OK)) { return NULL; } @@ -3026,7 +3063,7 @@ retry: } *c = 0; - hdr = nxt_unit_new_mmap(ctx, process, port_id, *n); + hdr = nxt_unit_new_mmap(ctx, port, *n); unlock: @@ -3043,7 +3080,7 @@ unlock: static int -nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) +nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) { ssize_t res; nxt_port_msg_t msg; @@ -3061,7 +3098,7 @@ nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) msg.mf = 0; msg.tracking = 0; - res = nxt_unit_port_send(ctx, port_id, &msg, sizeof(msg), NULL, 0); + res = nxt_unit_port_send(ctx, lib->router_port, &msg, sizeof(msg), NULL, 0); if (nxt_slow_path(res != sizeof(msg))) { return NXT_UNIT_ERROR; } @@ -3163,21 +3200,29 @@ nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i) static nxt_port_mmap_header_t * -nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, - nxt_unit_port_id_t *port_id, int n) +nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) { int i, fd, rc; void *mem; char name[64]; nxt_unit_mmap_t *mm; nxt_unit_impl_t *lib; + nxt_unit_process_t *process; nxt_port_mmap_header_t *hdr; - lib = process->lib; + process = nxt_unit_port_process(port); + if (nxt_slow_path(process == NULL)) { + nxt_unit_alert(ctx, "new_mmap: port %d,%d already closed", + (int) port->id.pid, (int) port->id.id); + + return NULL; + } + + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); mm = nxt_unit_mmap_at(&process->outgoing, process->outgoing.size); if (nxt_slow_path(mm == NULL)) { - nxt_unit_warn(ctx, "failed to add mmap to outgoing array"); + nxt_unit_alert(ctx, "failed to add mmap to outgoing array"); return NULL; } @@ -3255,7 +3300,7 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, hdr->id = process->outgoing.size - 1; hdr->src_pid = lib->pid; hdr->dst_pid = process->pid; - hdr->sent_over = port_id->id; + hdr->sent_over = port->id.id; /* Mark first n chunk(s) as busy */ for (i = 0; i < n; i++) { @@ -3268,7 +3313,7 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, pthread_mutex_unlock(&process->outgoing.mutex); - rc = nxt_unit_send_mmap(ctx, port_id, fd); + rc = nxt_unit_send_mmap(ctx, port, fd); if (nxt_slow_path(rc != NXT_UNIT_OK)) { munmap(mem, PORT_MMAP_SIZE); hdr = 
NULL; @@ -3295,7 +3340,7 @@ remove_fail: static int -nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int fd) +nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int fd) { ssize_t res; nxt_port_msg_t msg; @@ -3339,7 +3384,7 @@ nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int fd) */ memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int)); - res = nxt_unit_port_send(ctx, port_id, &msg, sizeof(msg), + res = nxt_unit_port_send(ctx, port, &msg, sizeof(msg), &cmsg, sizeof(cmsg)); if (nxt_slow_path(res != sizeof(msg))) { return NXT_UNIT_ERROR; @@ -3350,8 +3395,8 @@ nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int fd) static int -nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, - nxt_unit_port_id_t *port_id, uint32_t size, uint32_t min_size, +nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, + uint32_t size, uint32_t min_size, nxt_unit_mmap_buf_t *mmap_buf, char *local_buf) { int nchunks, min_nchunks; @@ -3376,8 +3421,7 @@ nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, mmap_buf->buf.start = mmap_buf->plain_ptr + sizeof(nxt_port_msg_t); mmap_buf->buf.free = mmap_buf->buf.start; mmap_buf->buf.end = mmap_buf->buf.start + size; - mmap_buf->port_id = *port_id; - mmap_buf->process = process; + mmap_buf->process = nxt_unit_port_process(port); nxt_unit_debug(ctx, "outgoing plain buffer allocation: (%p, %d)", mmap_buf->buf.start, (int) size); @@ -3388,7 +3432,7 @@ nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, nchunks = (size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE; min_nchunks = (min_size + PORT_MMAP_CHUNK_SIZE - 1) / PORT_MMAP_CHUNK_SIZE; - hdr = nxt_unit_mmap_get(ctx, process, port_id, &c, &nchunks, min_nchunks); + hdr = nxt_unit_mmap_get(ctx, port, &c, &nchunks, min_nchunks); if (nxt_slow_path(hdr == NULL)) { if (nxt_fast_path(min_nchunks == 0 && nchunks == 0)) { mmap_buf->hdr = NULL; @@ -3407,8 +3451,7 @@ nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, mmap_buf->buf.start = (char *) nxt_port_mmap_chunk_start(hdr, c); mmap_buf->buf.free = mmap_buf->buf.start; mmap_buf->buf.end = mmap_buf->buf.start + nchunks * PORT_MMAP_CHUNK_SIZE; - mmap_buf->port_id = *port_id; - mmap_buf->process = process; + mmap_buf->process = nxt_unit_port_process(port); mmap_buf->free_ptr = NULL; mmap_buf->ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); @@ -3770,15 +3813,12 @@ nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, static int nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid) { - ssize_t res; - nxt_port_msg_t msg; - nxt_unit_impl_t *lib; - nxt_unit_port_id_t port_id; + ssize_t res; + nxt_port_msg_t msg; + nxt_unit_impl_t *lib; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - nxt_unit_port_id_init(&port_id, pid, 0); - msg.stream = 0; msg.pid = lib->pid; msg.reply_port = 0; @@ -3789,7 +3829,7 @@ nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid) msg.mf = 0; msg.tracking = 0; - res = nxt_unit_port_send(ctx, &port_id, &msg, sizeof(msg), NULL, 0); + res = nxt_unit_port_send(ctx, lib->router_port, &msg, sizeof(msg), NULL, 0); if (nxt_slow_path(res != sizeof(msg))) { return NXT_UNIT_ERROR; } @@ -3893,7 +3933,6 @@ static nxt_unit_process_t * nxt_unit_process_find(nxt_unit_impl_t *lib, pid_t pid, int remove) { int rc; - nxt_unit_process_t *process; nxt_lvlhsh_query_t lhq; nxt_unit_process_lhq_pid(&lhq, &pid); @@ -3906,13 +3945,11 @@ nxt_unit_process_find(nxt_unit_impl_t *lib, pid_t pid, int 
remove) } if (rc == NXT_OK) { - process = lhq.value; - if (!remove) { - nxt_unit_process_use(process); + nxt_unit_process_use(lhq.value); } - return process; + return lhq.value; } return NULL; @@ -3990,7 +4027,7 @@ nxt_unit_run_once(nxt_unit_ctx_t *ctx) } if (nxt_fast_path(rbuf->size > 0)) { - rc = nxt_unit_process_msg(ctx, &ctx_impl->read_port_id, + rc = nxt_unit_process_msg(ctx, rbuf->buf, rbuf->size, rbuf->oob, sizeof(rbuf->oob)); @@ -4013,25 +4050,15 @@ nxt_unit_run_once(nxt_unit_ctx_t *ctx) static void nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) { - nxt_unit_impl_t *lib; nxt_unit_ctx_impl_t *ctx_impl; ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); memset(rbuf->oob, 0, sizeof(struct cmsghdr)); - if (ctx_impl->read_port_fd != -1) { - rbuf->size = nxt_unit_port_recv(ctx, ctx_impl->read_port_fd, - rbuf->buf, sizeof(rbuf->buf), - rbuf->oob, sizeof(rbuf->oob)); - - } else { - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - - rbuf->size = lib->callbacks.port_recv(ctx, &ctx_impl->read_port_id, - rbuf->buf, sizeof(rbuf->buf), - rbuf->oob, sizeof(rbuf->oob)); - } + rbuf->size = nxt_unit_port_recv(ctx, ctx_impl->read_port, + rbuf->buf, sizeof(rbuf->buf), + rbuf->oob, sizeof(rbuf->oob)); } @@ -4049,52 +4076,49 @@ nxt_unit_done(nxt_unit_ctx_t *ctx) nxt_unit_ctx_t * nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data) { - int rc, fd; + int rc; nxt_unit_impl_t *lib; - nxt_unit_port_id_t new_port_id; + nxt_unit_port_t *port; nxt_unit_ctx_impl_t *new_ctx; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); new_ctx = malloc(sizeof(nxt_unit_ctx_impl_t) + lib->request_data_size); if (nxt_slow_path(new_ctx == NULL)) { - nxt_unit_warn(ctx, "failed to allocate context"); + nxt_unit_alert(ctx, "failed to allocate context"); return NULL; } - rc = nxt_unit_create_port(ctx, &new_port_id, &fd); - if (nxt_slow_path(rc != NXT_UNIT_OK)) { + port = nxt_unit_create_port(ctx); + if (nxt_slow_path(port == NULL)) { free(new_ctx); return NULL; } - rc = nxt_unit_send_port(ctx, &lib->router_port_id, &new_port_id, fd); + rc = nxt_unit_send_port(ctx, lib->router_port, port); if (nxt_slow_path(rc != NXT_UNIT_OK)) { - nxt_unit_remove_port(lib, &new_port_id); - - close(fd); - - free(new_ctx); - - return NULL; + goto fail; } - close(fd); - rc = nxt_unit_ctx_init(lib, new_ctx, data); if (nxt_slow_path(rc != NXT_UNIT_OK)) { - nxt_unit_remove_port(lib, &new_port_id); - - free(new_ctx); - - return NULL; + goto fail; } - new_ctx->read_port_id = new_port_id; + new_ctx->read_port = port; return &new_ctx->ctx; + +fail: + + nxt_unit_remove_port(lib, &port->id); + nxt_unit_port_release(port); + + free(new_ctx); + + return NULL; } @@ -4144,6 +4168,10 @@ nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl) nxt_queue_remove(&ctx_impl->link); + if (nxt_fast_path(ctx_impl->read_port != NULL)) { + nxt_unit_port_release(ctx_impl->read_port); + } + if (ctx_impl != &lib->main_ctx) { free(ctx_impl); } @@ -4174,12 +4202,12 @@ nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id) } -static int -nxt_unit_create_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int *fd) +static nxt_unit_port_t * +nxt_unit_create_port(nxt_unit_ctx_t *ctx) { int rc, port_sockets[2]; nxt_unit_impl_t *lib; - nxt_unit_port_t new_port; + nxt_unit_port_t new_port, *port; nxt_unit_process_t *process; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); @@ -4189,7 +4217,7 @@ nxt_unit_create_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int *fd) nxt_unit_warn(ctx, "create_port: socketpair() 
failed: %s (%d)", strerror(errno), errno); - return NXT_UNIT_ERROR; + return NULL; } nxt_unit_debug(ctx, "create_port: new socketpair: %d->%d", @@ -4204,39 +4232,34 @@ nxt_unit_create_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int *fd) close(port_sockets[0]); close(port_sockets[1]); - return NXT_UNIT_ERROR; + return NULL; } nxt_unit_port_id_init(&new_port.id, lib->pid, process->next_port_id++); new_port.in_fd = port_sockets[0]; - new_port.out_fd = -1; + new_port.out_fd = port_sockets[1]; new_port.data = NULL; pthread_mutex_unlock(&lib->mutex); nxt_unit_process_release(process); - rc = nxt_unit_add_port(ctx, &new_port); - if (nxt_slow_path(rc != NXT_UNIT_OK)) { - nxt_unit_warn(ctx, "create_port: add_port() failed"); + port = nxt_unit_add_port(ctx, &new_port); + if (nxt_slow_path(port == NULL)) { + nxt_unit_alert(ctx, "create_port: add_port() failed"); close(port_sockets[0]); close(port_sockets[1]); - - return rc; } - *port_id = new_port.id; - *fd = port_sockets[1]; - - return rc; + return port; } static int -nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, - nxt_unit_port_id_t *new_port, int fd) +nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst, + nxt_unit_port_t *port) { ssize_t res; nxt_unit_impl_t *lib; @@ -4263,8 +4286,8 @@ nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, m.msg.mf = 0; m.msg.tracking = 0; - m.new_port.id = new_port->id; - m.new_port.pid = new_port->pid; + m.new_port.id = port->id.id; + m.new_port.pid = port->id.pid; m.new_port.type = NXT_PROCESS_APP; m.new_port.max_size = 16 * 1024; m.new_port.max_share = 64 * 1024; @@ -4284,7 +4307,7 @@ nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, * Fortunately, GCC with -O1 compiles this nxt_memcpy() * in the same simple assignment as in the code above. 
*/ - memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int)); + memcpy(CMSG_DATA(&cmsg.cm), &port->out_fd, sizeof(int)); res = nxt_unit_port_send(ctx, dst, &m, sizeof(m), &cmsg, sizeof(cmsg)); @@ -4292,13 +4315,67 @@ nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, } -static int +nxt_inline void nxt_unit_port_use(nxt_unit_port_t *port) +{ + nxt_unit_port_impl_t *port_impl; + + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); + + nxt_atomic_fetch_add(&port_impl->use_count, 1); +} + + +nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port) +{ + long c; + nxt_unit_port_impl_t *port_impl; + + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); + + c = nxt_atomic_fetch_add(&port_impl->use_count, -1); + + if (c == 1) { + nxt_unit_debug(NULL, "destroy port %d,%d", + (int) port->id.pid, (int) port->id.id); + + nxt_unit_process_release(port_impl->process); + + if (port->in_fd != -1) { + close(port->in_fd); + + port->in_fd = -1; + } + + if (port->out_fd != -1) { + close(port->out_fd); + + port->out_fd = -1; + } + + free(port_impl); + } +} + + +nxt_inline nxt_unit_process_t * +nxt_unit_port_process(nxt_unit_port_t *port) +{ + nxt_unit_port_impl_t *port_impl; + + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); + + return port_impl->process; +} + + +static nxt_unit_port_t * nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) { int rc; nxt_unit_impl_t *lib; + nxt_unit_port_t *old_port; nxt_unit_process_t *process; - nxt_unit_port_impl_t *new_port, *old_port; + nxt_unit_port_impl_t *new_port; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); @@ -4311,13 +4388,13 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) port->id.pid, port->id.id, port->in_fd, port->out_fd); - if (old_port->port.data == NULL) { - old_port->port.data = port->data; + if (old_port->data == NULL) { + old_port->data = port->data; port->data = NULL; } - if (old_port->port.in_fd == -1) { - old_port->port.in_fd = port->in_fd; + if (old_port->in_fd == -1) { + old_port->in_fd = port->in_fd; port->in_fd = -1; } @@ -4326,8 +4403,8 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) port->in_fd = -1; } - if (old_port->port.out_fd == -1) { - old_port->port.out_fd = port->out_fd; + if (old_port->out_fd == -1) { + old_port->out_fd = port->out_fd; port->out_fd = -1; } @@ -4336,26 +4413,27 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) port->out_fd = -1; } - *port = old_port->port; + *port = *old_port; pthread_mutex_unlock(&lib->mutex); if (lib->callbacks.add_port != NULL && (port->in_fd != -1 || port->out_fd != -1)) { - lib->callbacks.add_port(ctx, &old_port->port); + lib->callbacks.add_port(ctx, old_port); } - return NXT_UNIT_OK; + return old_port; } + new_port = NULL; + nxt_unit_debug(ctx, "add_port: %d,%d in_fd %d out_fd %d", port->id.pid, port->id.id, port->in_fd, port->out_fd); process = nxt_unit_process_get(lib, port->id.pid); if (nxt_slow_path(process == NULL)) { - rc = NXT_UNIT_ERROR; goto unlock; } @@ -4365,7 +4443,6 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) new_port = malloc(sizeof(nxt_unit_port_impl_t)); if (nxt_slow_path(new_port == NULL)) { - rc = NXT_UNIT_ERROR; goto unlock; } @@ -4376,107 +4453,85 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) nxt_unit_alert(ctx, "add_port: %d,%d hash_add failed", port->id.pid, port->id.id); + free(new_port); + + new_port = NULL; + goto unlock; } nxt_queue_insert_tail(&process->ports, &new_port->link); - rc = NXT_UNIT_OK; - + 
new_port->use_count = 2; new_port->process = process; + process = NULL; + unlock: pthread_mutex_unlock(&lib->mutex); - if (nxt_slow_path(process != NULL && rc != NXT_UNIT_OK)) { + if (nxt_slow_path(process != NULL)) { nxt_unit_process_release(process); } if (lib->callbacks.add_port != NULL - && rc == NXT_UNIT_OK + && new_port != NULL && (port->in_fd != -1 || port->out_fd != -1)) { lib->callbacks.add_port(ctx, &new_port->port); } - return rc; + return &new_port->port; } -static int +static void nxt_unit_remove_port(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id) { - int res; - nxt_unit_port_t *port; - nxt_unit_process_t *process; - - port = NULL; - process = NULL; + nxt_unit_port_t *port; + nxt_unit_port_impl_t *port_impl; pthread_mutex_lock(&lib->mutex); - res = nxt_unit_remove_port_unsafe(lib, port_id, &port, &process); - - pthread_mutex_unlock(&lib->mutex); - - if (lib->callbacks.remove_port != NULL && res == NXT_UNIT_OK) { - lib->callbacks.remove_port(&lib->unit, port); - } + port = nxt_unit_remove_port_unsafe(lib, port_id); if (nxt_fast_path(port != NULL)) { - if (port->in_fd != -1) { - close(port->in_fd); - } + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); - if (port->out_fd != -1) { - close(port->out_fd); - } + nxt_queue_remove(&port_impl->link); } - if (nxt_slow_path(process != NULL)) { - nxt_unit_process_release(process); + pthread_mutex_unlock(&lib->mutex); + + if (lib->callbacks.remove_port != NULL && port != NULL) { + lib->callbacks.remove_port(&lib->unit, port); } if (nxt_fast_path(port != NULL)) { - free(port); + nxt_unit_port_release(port); } - - return res; } -static int -nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id, - nxt_unit_port_t **r_port, nxt_unit_process_t **process) +static nxt_unit_port_t * +nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id) { - nxt_unit_port_impl_t *port; + nxt_unit_port_t *port; port = nxt_unit_port_hash_find(&lib->ports, port_id, 1); if (nxt_slow_path(port == NULL)) { nxt_unit_debug(NULL, "remove_port: port %d,%d not found", (int) port_id->pid, (int) port_id->id); - return NXT_UNIT_ERROR; + return NULL; } nxt_unit_debug(NULL, "remove_port: port %d,%d, fds %d,%d, data %p", (int) port_id->pid, (int) port_id->id, - port->port.in_fd, port->port.out_fd, port->port.data); - - if (port->process != NULL) { - nxt_queue_remove(&port->link); - } + port->in_fd, port->out_fd, port->data); - if (process != NULL) { - *process = port->process; - } - - if (r_port != NULL) { - *r_port = &port->port; - } - - return NXT_UNIT_OK; + return port; } @@ -4516,12 +4571,7 @@ nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process) nxt_queue_each(port, &ports, nxt_unit_port_impl_t, link) { - nxt_unit_process_release(process); - - /* To avoid unlink port. 
*/ - port->process = NULL; - - nxt_unit_remove_port_unsafe(lib, &port->port.id, NULL, NULL); + nxt_unit_remove_port_unsafe(lib, &port->port.id); } nxt_queue_loop; @@ -4535,15 +4585,7 @@ nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process) lib->callbacks.remove_port(&lib->unit, &port->port); } - if (port->port.in_fd != -1) { - close(port->port.in_fd); - } - - if (port->port.out_fd != -1) { - close(port->port.out_fd); - } - - free(port); + nxt_unit_port_release(&port->port); } nxt_queue_loop; @@ -4567,43 +4609,23 @@ nxt_unit_quit(nxt_unit_ctx_t *ctx) static ssize_t -nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, +nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, const void *buf, size_t buf_size, const void *oob, size_t oob_size) { - int fd; - nxt_unit_impl_t *lib; - nxt_unit_port_impl_t *port; - - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - - pthread_mutex_lock(&lib->mutex); - - port = nxt_unit_port_hash_find(&lib->ports, port_id, 0); - - if (nxt_fast_path(port != NULL && port->port.out_fd != -1)) { - fd = port->port.out_fd; - - pthread_mutex_unlock(&lib->mutex); - - } else { - pthread_mutex_unlock(&lib->mutex); - - nxt_unit_alert(ctx, "port_send: port %d,%d not found", - (int) port_id->pid, (int) port_id->id); - - return -NXT_UNIT_ERROR; - } + nxt_unit_impl_t *lib; - nxt_unit_debug(ctx, "port_send: found port %d,%d fd %d", - (int) port_id->pid, (int) port_id->id, fd); + nxt_unit_debug(ctx, "port_send: port %d,%d fd %d", + (int) port->id.pid, (int) port->id.id, port->out_fd); - if (lib->callbacks.port_send == NULL) { - return nxt_unit_sendmsg(ctx, fd, buf, buf_size, oob, oob_size); + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - } else { - return lib->callbacks.port_send(ctx, port_id, buf, buf_size, + if (lib->callbacks.port_send != NULL) { + return lib->callbacks.port_send(ctx, port, buf, buf_size, oob, oob_size); } + + return nxt_unit_sendmsg(ctx, port->out_fd, buf, buf_size, + oob, oob_size); } @@ -4652,56 +4674,22 @@ retry: static ssize_t -nxt_unit_port_recv_default(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, +nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, void *buf, size_t buf_size, void *oob, size_t oob_size) { - int fd; - nxt_unit_impl_t *lib; - nxt_unit_ctx_impl_t *ctx_impl; - nxt_unit_port_impl_t *port; + int fd; + ssize_t res; + struct iovec iov[1]; + struct msghdr msg; + nxt_unit_impl_t *lib; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - pthread_mutex_lock(&lib->mutex); - - port = nxt_unit_port_hash_find(&lib->ports, port_id, 0); - - if (nxt_fast_path(port != NULL)) { - fd = port->port.in_fd; - - } else { - nxt_unit_debug(ctx, "port_recv: port %d,%d not found", - (int) port_id->pid, (int) port_id->id); - fd = -1; + if (lib->callbacks.port_recv != NULL) { + return lib->callbacks.port_recv(ctx, port, + buf, buf_size, oob, oob_size); } - pthread_mutex_unlock(&lib->mutex); - - if (nxt_slow_path(fd == -1)) { - return -1; - } - - nxt_unit_debug(ctx, "port_recv: found port %d,%d, fd %d", - (int) port_id->pid, (int) port_id->id, fd); - - ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); - - if (nxt_fast_path(port_id == &ctx_impl->read_port_id)) { - ctx_impl->read_port_fd = fd; - } - - return nxt_unit_port_recv(ctx, fd, buf, buf_size, oob, oob_size); -} - - -ssize_t -nxt_unit_port_recv(nxt_unit_ctx_t *ctx, int fd, void *buf, size_t buf_size, - void *oob, size_t oob_size) -{ - ssize_t res; - struct iovec iov[1]; - struct msghdr msg; - iov[0].iov_base = buf; 
iov[0].iov_len = buf_size; @@ -4713,6 +4701,8 @@ nxt_unit_port_recv(nxt_unit_ctx_t *ctx, int fd, void *buf, size_t buf_size, msg.msg_control = oob; msg.msg_controllen = oob_size; + fd = port->in_fd; + retry: res = recvmsg(fd, &msg, 0); @@ -4813,7 +4803,7 @@ nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port) } -static nxt_unit_port_impl_t * +static nxt_unit_port_t * nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, nxt_unit_port_id_t *port_id, int remove) { @@ -4833,6 +4823,10 @@ nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, nxt_unit_port_id_t *port_id, switch (res) { case NXT_OK: + if (!remove) { + nxt_unit_port_use(lhq.value); + } + return lhq.value; default: diff --git a/src/nxt_unit.h b/src/nxt_unit.h index fa1fa843..6723026f 100644 --- a/src/nxt_unit.h +++ b/src/nxt_unit.h @@ -91,8 +91,7 @@ struct nxt_unit_request_info_s { nxt_unit_t *unit; nxt_unit_ctx_t *ctx; - nxt_unit_port_id_t request_port; - nxt_unit_port_id_t response_port; + nxt_unit_port_t *response_port; nxt_unit_request_t *request; nxt_unit_buf_t *request_buf; @@ -142,12 +141,12 @@ struct nxt_unit_callbacks_s { void (*shm_ack_handler)(nxt_unit_ctx_t *); /* Send data and control to process pid using port id. Optional. */ - ssize_t (*port_send)(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id, + ssize_t (*port_send)(nxt_unit_ctx_t *, nxt_unit_port_t *port, const void *buf, size_t buf_size, const void *oob, size_t oob_size); /* Receive data on port id. Optional. */ - ssize_t (*port_recv)(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id, + ssize_t (*port_recv)(nxt_unit_ctx_t *, nxt_unit_port_t *port, void *buf, size_t buf_size, void *oob, size_t oob_size); }; @@ -195,7 +194,7 @@ nxt_unit_ctx_t *nxt_unit_init(nxt_unit_init_t *); * from port socket should be initially processed by unit. This function * may invoke other application-defined callback for message processing. */ -int nxt_unit_process_msg(nxt_unit_ctx_t *, nxt_unit_port_id_t *port_id, +int nxt_unit_process_msg(nxt_unit_ctx_t *, void *buf, size_t buf_size, void *oob, size_t oob_size); /* @@ -225,10 +224,6 @@ nxt_unit_ctx_t *nxt_unit_ctx_alloc(nxt_unit_ctx_t *, void *); /* Initialize port_id, calculate hash. */ void nxt_unit_port_id_init(nxt_unit_port_id_t *port_id, pid_t pid, uint16_t id); -/* Default 'port_recv' implementation. */ -ssize_t nxt_unit_port_recv(nxt_unit_ctx_t *, int fd, void *buf, size_t buf_size, - void *oob, size_t oob_size); - /* Calculates hash for given field name. */ uint16_t nxt_unit_field_hash(const char* name, size_t name_length); -- cgit From 3cbc22a6dc45abdeade4deb364601230ddca02c1 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:10 +0300 Subject: Changing router to application port exchange protocol. The application process needs to request the port from the router instead of the latter pushing the port before sending a request to the application. This is required to simplify the communication between the router and the application and to prepare the router to use the application shared port and then the queue. --- src/nxt_port.h | 9 ++ src/nxt_process.c | 37 -------- src/nxt_process.h | 9 -- src/nxt_router.c | 100 ++++++++++++++++---- src/nxt_runtime.c | 7 -- src/nxt_unit.c | 276 ++++++++++++++++++++++++++++++++++++++++++++++++------ src/nxt_unit.h | 1 + 7 files changed, 342 insertions(+), 97 deletions(-) diff --git a/src/nxt_port.h b/src/nxt_port.h index 0e8707f3..838a7ffe 100644 --- a/src/nxt_port.h +++ b/src/nxt_port.h @@ -25,6 +25,7 @@ struct nxt_port_handlers_s { /* File descriptor exchange. 
*/ nxt_port_handler_t change_file; nxt_port_handler_t new_port; + nxt_port_handler_t get_port; nxt_port_handler_t mmap; /* New process */ @@ -77,6 +78,7 @@ typedef enum { _NXT_PORT_MSG_CHANGE_FILE = nxt_port_handler_idx(change_file), _NXT_PORT_MSG_NEW_PORT = nxt_port_handler_idx(new_port), + _NXT_PORT_MSG_GET_PORT = nxt_port_handler_idx(get_port), _NXT_PORT_MSG_MMAP = nxt_port_handler_idx(mmap), _NXT_PORT_MSG_PROCESS_CREATED = nxt_port_handler_idx(process_created), @@ -107,6 +109,7 @@ typedef enum { NXT_PORT_MSG_ACCESS_LOG = nxt_msg_last(_NXT_PORT_MSG_ACCESS_LOG), NXT_PORT_MSG_CHANGE_FILE = nxt_msg_last(_NXT_PORT_MSG_CHANGE_FILE), NXT_PORT_MSG_NEW_PORT = nxt_msg_last(_NXT_PORT_MSG_NEW_PORT), + NXT_PORT_MSG_GET_PORT = nxt_msg_last(_NXT_PORT_MSG_GET_PORT), NXT_PORT_MSG_MMAP = nxt_msg_last(_NXT_PORT_MSG_MMAP) | NXT_PORT_MSG_CLOSE_FD | NXT_PORT_MSG_SYNC, @@ -238,6 +241,12 @@ typedef struct { } nxt_port_msg_new_port_t; +typedef struct { + nxt_port_id_t id; + nxt_pid_t pid; +} nxt_port_msg_get_port_t; + + /* * nxt_port_data_t size is allocation size * which enables effective reuse of memory pool cache. diff --git a/src/nxt_process.c b/src/nxt_process.c index 5a01c21e..0b3aa40f 100644 --- a/src/nxt_process.c +++ b/src/nxt_process.c @@ -1107,43 +1107,6 @@ nxt_process_close_ports(nxt_task_t *task, nxt_process_t *process) } -void -nxt_process_connected_port_add(nxt_process_t *process, nxt_port_t *port) -{ - nxt_thread_mutex_lock(&process->cp_mutex); - - nxt_port_hash_add(&process->connected_ports, port); - - nxt_thread_mutex_unlock(&process->cp_mutex); -} - - -void -nxt_process_connected_port_remove(nxt_process_t *process, nxt_port_t *port) -{ - nxt_thread_mutex_lock(&process->cp_mutex); - - nxt_port_hash_remove(&process->connected_ports, port); - - nxt_thread_mutex_unlock(&process->cp_mutex); -} - - -nxt_port_t * -nxt_process_connected_port_find(nxt_process_t *process, nxt_port_t *port) -{ - nxt_port_t *res; - - nxt_thread_mutex_lock(&process->cp_mutex); - - res = nxt_port_hash_find(&process->connected_ports, port->pid, port->id); - - nxt_thread_mutex_unlock(&process->cp_mutex); - - return res; -} - - void nxt_process_quit(nxt_task_t *task, nxt_uint_t exit_status) { diff --git a/src/nxt_process.h b/src/nxt_process.h index d3311722..4076cefc 100644 --- a/src/nxt_process.h +++ b/src/nxt_process.h @@ -95,7 +95,6 @@ typedef struct { nxt_port_mmaps_t outgoing; nxt_thread_mutex_t cp_mutex; - nxt_lvlhsh_t connected_ports; /* of nxt_port_t */ uint32_t stream; @@ -172,14 +171,6 @@ void nxt_process_use(nxt_task_t *task, nxt_process_t *process, int i); void nxt_process_close_ports(nxt_task_t *task, nxt_process_t *process); -void nxt_process_connected_port_add(nxt_process_t *process, nxt_port_t *port); - -void nxt_process_connected_port_remove(nxt_process_t *process, - nxt_port_t *port); - -nxt_port_t *nxt_process_connected_port_find(nxt_process_t *process, - nxt_port_t *port); - void nxt_process_quit(nxt_task_t *task, nxt_uint_t exit_status); void nxt_signal_quit_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg); diff --git a/src/nxt_router.c b/src/nxt_router.c index 758310a9..3380e133 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -182,6 +182,8 @@ static void nxt_router_engine_post(nxt_event_engine_t *engine, nxt_work_t *jobs); static void nxt_router_thread_start(void *data); +static void nxt_router_rt_add_port(nxt_task_t *task, void *obj, + void *data); static void nxt_router_listen_socket_create(nxt_task_t *task, void *obj, void *data); static void nxt_router_listen_socket_update(nxt_task_t 
*task, void *obj, @@ -253,6 +255,8 @@ static nxt_int_t nxt_router_http_request_done(nxt_task_t *task, static void nxt_router_http_request_release(nxt_task_t *task, void *obj, void *data); static void nxt_router_oosm_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg); +static void nxt_router_get_port_handler(nxt_task_t *task, + nxt_port_recv_msg_t *msg); extern const nxt_http_request_state_t nxt_http_websocket; @@ -274,6 +278,7 @@ static const nxt_str_t *nxt_app_msg_prefix[] = { static const nxt_port_handlers_t nxt_router_process_port_handlers = { .quit = nxt_signal_quit_handler, .new_port = nxt_router_new_port_handler, + .get_port = nxt_router_get_port_handler, .change_file = nxt_port_change_log_file_handler, .mmap = nxt_port_mmap_handler, .data = nxt_router_conf_data_handler, @@ -2944,6 +2949,7 @@ nxt_router_thread_start(void *data) nxt_int_t ret; nxt_port_t *port; nxt_task_t *task; + nxt_work_t *work; nxt_thread_t *thread; nxt_thread_link_t *link; nxt_event_engine_t *engine; @@ -2988,10 +2994,42 @@ nxt_router_thread_start(void *data) nxt_port_enable(task, port, &nxt_router_app_port_handlers); + work = nxt_zalloc(sizeof(nxt_work_t)); + if (nxt_slow_path(work == NULL)) { + return; + } + + work->handler = nxt_router_rt_add_port; + work->task = link->work.task; + work->obj = work; + work->data = port; + + nxt_event_engine_post(link->work.task->thread->engine, work); + nxt_event_engine_start(engine); } +static void +nxt_router_rt_add_port(nxt_task_t *task, void *obj, void *data) +{ + nxt_int_t res; + nxt_port_t *port; + nxt_runtime_t *rt; + + rt = task->thread->runtime; + port = data; + + nxt_free(obj); + + res = nxt_port_hash_add(&rt->ports, port); + + if (nxt_fast_path(res == NXT_OK)) { + nxt_port_use(task, port, 1); + } +} + + static void nxt_router_listen_socket_create(nxt_task_t *task, void *obj, void *data) { @@ -3281,7 +3319,6 @@ nxt_router_conf_release(nxt_task_t *task, nxt_socket_conf_joint_t *joint) } /* TODO remove engine->port */ - /* TODO excude from connected ports */ if (rtcf != NULL) { nxt_debug(task, "old router conf is destroyed"); @@ -4937,7 +4974,7 @@ nxt_router_app_prepare_request(nxt_task_t *task, { nxt_buf_t *buf; nxt_int_t res; - nxt_port_t *port, *c_port, *reply_port; + nxt_port_t *port, *reply_port; nxt_apr_action_t apr_action; nxt_assert(req_app_link->app_port != NULL); @@ -4947,21 +4984,6 @@ nxt_router_app_prepare_request(nxt_task_t *task, apr_action = NXT_APR_REQUEST_FAILED; - c_port = nxt_process_connected_port_find(port->process, reply_port); - - if (nxt_slow_path(c_port != reply_port)) { - res = nxt_port_send_port(task, port, reply_port, 0); - - if (nxt_slow_path(res != NXT_OK)) { - nxt_request_app_link_error(task, port->app, req_app_link, - "Failed to send reply port to application"); - - goto release_port; - } - - nxt_process_connected_port_add(port->process, reply_port); - } - buf = nxt_router_prepare_msg(task, req_app_link->request, port, nxt_app_msg_prefix[port->app->type]); @@ -5531,3 +5553,47 @@ nxt_router_oosm_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) -1, 0, 0, NULL); } } + + +static void +nxt_router_get_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) +{ + nxt_port_t *port, *reply_port; + nxt_runtime_t *rt; + nxt_port_msg_get_port_t *get_port_msg; + + rt = task->thread->runtime; + + reply_port = nxt_runtime_port_find(rt, msg->port_msg.pid, + msg->port_msg.reply_port); + if (nxt_slow_path(reply_port == NULL)) { + nxt_alert(task, "get_port_handler: reply_port %PI:%d not found", + msg->port_msg.pid, msg->port_msg.reply_port); + + 
return; + } + + if (nxt_slow_path(nxt_buf_used_size(msg->buf) + < (int) sizeof(nxt_port_msg_get_port_t))) + { + nxt_alert(task, "get_port_handler: message buffer too small (%d)", + (int) nxt_buf_used_size(msg->buf)); + + return; + } + + get_port_msg = (nxt_port_msg_get_port_t *) msg->buf->mem.pos; + + port = nxt_runtime_port_find(rt, get_port_msg->pid, get_port_msg->id); + if (nxt_slow_path(port == NULL)) { + nxt_alert(task, "get_port_handler: port %PI:%d not found", + get_port_msg->pid, get_port_msg->id); + + return; + } + + nxt_debug(task, "get port %PI:%d found", get_port_msg->pid, + get_port_msg->id); + + (void) nxt_port_send_port(task, reply_port, port, msg->port_msg.stream); +} diff --git a/src/nxt_runtime.c b/src/nxt_runtime.c index 694ce74d..c25b93cc 100644 --- a/src/nxt_runtime.c +++ b/src/nxt_runtime.c @@ -1389,8 +1389,6 @@ nxt_runtime_process_new(nxt_runtime_t *rt) void nxt_runtime_process_release(nxt_runtime_t *rt, nxt_process_t *process) { - nxt_port_t *port; - if (process->registered == 1) { nxt_runtime_process_remove(rt, process); } @@ -1401,11 +1399,6 @@ nxt_runtime_process_release(nxt_runtime_t *rt, nxt_process_t *process) nxt_port_mmaps_destroy(&process->incoming, 1); nxt_port_mmaps_destroy(&process->outgoing, 1); - do { - port = nxt_port_hash_retrieve(&process->connected_ports); - - } while (port != NULL); - nxt_thread_mutex_destroy(&process->incoming.mutex); nxt_thread_mutex_destroy(&process->outgoing.mutex); nxt_thread_mutex_destroy(&process->cp_mutex); diff --git a/src/nxt_unit.c b/src/nxt_unit.c index ddfd9c80..c1ef977f 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -55,6 +55,8 @@ static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); +static int nxt_unit_request_check_response_port(nxt_unit_request_info_t *req, + nxt_unit_port_id_t *port_id); static int nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); static int nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx); @@ -119,6 +121,7 @@ static nxt_unit_process_t *nxt_unit_process_find(nxt_unit_impl_t *lib, static nxt_unit_process_t *nxt_unit_process_pop_first(nxt_unit_impl_t *lib); static void nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf); +static void nxt_unit_process_ready_req(nxt_unit_ctx_impl_t *ctx_impl); static void nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl); static nxt_unit_port_t *nxt_unit_create_port(nxt_unit_ctx_t *ctx); @@ -138,6 +141,7 @@ static void nxt_unit_remove_pid(nxt_unit_impl_t *lib, pid_t pid); static void nxt_unit_remove_process(nxt_unit_impl_t *lib, nxt_unit_process_t *process); static void nxt_unit_quit(nxt_unit_ctx_t *ctx); +static int nxt_unit_get_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id); static ssize_t nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, const void *buf, size_t buf_size, const void *oob, size_t oob_size); @@ -215,7 +219,10 @@ struct nxt_unit_request_info_impl_s { nxt_unit_req_state_t state; uint8_t websocket; + /* for nxt_unit_ctx_impl_t.free_req or active_req */ nxt_queue_link_t link; + /* for nxt_unit_port_impl_t.awaiting_req */ + nxt_queue_link_t port_wait_link; char extra_data[]; }; @@ -244,6 +251,7 @@ struct nxt_unit_ctx_impl_s { nxt_unit_ctx_t ctx; nxt_atomic_t use_count; + nxt_atomic_t wait_items; pthread_mutex_t mutex; @@ -265,6 +273,9 @@ struct nxt_unit_ctx_impl_s { /* of nxt_unit_request_info_impl_t */ nxt_lvlhsh_t requests; + /* of nxt_unit_request_info_impl_t 
*/ + nxt_queue_t ready_req; + nxt_unit_read_buf_t *pending_read_head; nxt_unit_read_buf_t **pending_read_tail; nxt_unit_read_buf_t *free_read_buf; @@ -309,6 +320,11 @@ struct nxt_unit_port_impl_s { nxt_queue_link_t link; nxt_unit_process_t *process; + + /* of nxt_unit_request_info_impl_t */ + nxt_queue_t awaiting_req; + + int ready; }; @@ -515,10 +531,12 @@ nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, nxt_queue_insert_tail(&lib->contexts, &ctx_impl->link); ctx_impl->use_count = 1; + ctx_impl->wait_items = 0; nxt_queue_init(&ctx_impl->free_req); nxt_queue_init(&ctx_impl->free_ws); nxt_queue_init(&ctx_impl->active_req); + nxt_queue_init(&ctx_impl->ready_req); ctx_impl->free_buf = NULL; nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[1]); @@ -973,8 +991,8 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) { + int res; nxt_unit_impl_t *lib; - nxt_unit_port_t *port; nxt_unit_port_id_t port_id; nxt_unit_request_t *r; nxt_unit_mmap_buf_t *b; @@ -1004,28 +1022,8 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) return NXT_UNIT_ERROR; } - nxt_unit_port_id_init(&port_id, recv_msg->pid, recv_msg->reply_port); - - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - - pthread_mutex_lock(&lib->mutex); - - port = nxt_unit_port_hash_find(&lib->ports, &port_id, 0); - - pthread_mutex_unlock(&lib->mutex); - - if (nxt_slow_path(port == NULL)) { - nxt_unit_alert(ctx, "#%"PRIu32": response port %d,%d not found", - recv_msg->stream, - (int) recv_msg->pid, (int) recv_msg->reply_port); - - return NXT_UNIT_ERROR; - } - req = &req_impl->req; - req->response_port = port; - req->request = recv_msg->start; b = recv_msg->incoming_buf; @@ -1076,12 +1074,129 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) (char *) nxt_unit_sptr_get(&r->target), (int) r->content_length); - lib->callbacks.request_handler(req); + nxt_unit_port_id_init(&port_id, recv_msg->pid, recv_msg->reply_port); + + res = nxt_unit_request_check_response_port(req, &port_id); + + if (nxt_fast_path(res == NXT_UNIT_OK)) { + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + + lib->callbacks.request_handler(req); + } return NXT_UNIT_OK; } +static int +nxt_unit_request_check_response_port(nxt_unit_request_info_t *req, + nxt_unit_port_id_t *port_id) +{ + int res; + nxt_unit_ctx_t *ctx; + nxt_unit_impl_t *lib; + nxt_unit_port_t *port; + nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_port_impl_t *port_impl; + nxt_unit_request_info_impl_t *req_impl; + + ctx = req->ctx; + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + + pthread_mutex_lock(&lib->mutex); + + port = nxt_unit_port_hash_find(&lib->ports, port_id, 0); + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); + + if (nxt_fast_path(port != NULL)) { + req->response_port = port; + + if (nxt_fast_path(port_impl->ready)) { + pthread_mutex_unlock(&lib->mutex); + + nxt_unit_debug(ctx, "check_response_port: found port{%d,%d}", + (int) port->id.pid, (int) port->id.id); + + return NXT_UNIT_OK; + } + + nxt_unit_debug(ctx, "check_response_port: " + "port{%d,%d} already requested", + (int) port->id.pid, (int) port->id.id); + + req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); + + nxt_queue_insert_tail(&port_impl->awaiting_req, + &req_impl->port_wait_link); + + 
pthread_mutex_unlock(&lib->mutex); + + nxt_atomic_fetch_add(&ctx_impl->wait_items, 1); + + return NXT_UNIT_AGAIN; + } + + port_impl = malloc(sizeof(nxt_unit_port_impl_t)); + if (nxt_slow_path(port_impl == NULL)) { + nxt_unit_alert(ctx, "check_response_port: malloc(%d) failed", + (int) sizeof(nxt_unit_port_impl_t)); + + pthread_mutex_unlock(&lib->mutex); + + return NXT_UNIT_ERROR; + } + + port = &port_impl->port; + + port->id = *port_id; + port->in_fd = -1; + port->out_fd = -1; + port->data = NULL; + + res = nxt_unit_port_hash_add(&lib->ports, port); + if (nxt_slow_path(res != NXT_UNIT_OK)) { + nxt_unit_alert(ctx, "check_response_port: %d,%d hash_add failed", + port->id.pid, port->id.id); + + pthread_mutex_unlock(&lib->mutex); + + free(port); + + return NXT_UNIT_ERROR; + } + + req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); + + nxt_queue_insert_tail(&req_impl->process->ports, &port_impl->link); + + port_impl->process = req_impl->process; + + + nxt_queue_init(&port_impl->awaiting_req); + + nxt_queue_insert_tail(&port_impl->awaiting_req, &req_impl->port_wait_link); + + port_impl->use_count = 2; + port_impl->ready = 0; + + req->response_port = port; + + pthread_mutex_unlock(&lib->mutex); + + nxt_unit_process_use(port_impl->process); + + res = nxt_unit_get_port(ctx, port_id); + if (nxt_slow_path(res == NXT_UNIT_ERROR)) { + return NXT_UNIT_ERROR; + } + + nxt_atomic_fetch_add(&ctx_impl->wait_items, 1); + + return NXT_UNIT_AGAIN; +} + + static int nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) { @@ -4041,6 +4156,8 @@ nxt_unit_run_once(nxt_unit_ctx_t *ctx) nxt_unit_read_buf_release(ctx, rbuf); + nxt_unit_process_ready_req(ctx_impl); + nxt_unit_ctx_release(ctx_impl); return rc; @@ -4062,6 +4179,39 @@ nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) } +static void +nxt_unit_process_ready_req(nxt_unit_ctx_impl_t *ctx_impl) +{ + nxt_queue_t ready_req; + nxt_unit_impl_t *lib; + nxt_unit_request_info_impl_t *req_impl; + + nxt_queue_init(&ready_req); + + pthread_mutex_lock(&ctx_impl->mutex); + + if (nxt_queue_is_empty(&ctx_impl->ready_req)) { + pthread_mutex_unlock(&ctx_impl->mutex); + + return; + } + + nxt_queue_add(&ready_req, &ctx_impl->ready_req); + nxt_queue_init(&ctx_impl->ready_req); + + pthread_mutex_unlock(&ctx_impl->mutex); + + nxt_queue_each(req_impl, &ready_req, + nxt_unit_request_info_impl_t, port_wait_link) + { + lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit); + + lib->callbacks.request_handler(&req_impl->req); + + } nxt_queue_loop; +} + + void nxt_unit_done(nxt_unit_ctx_t *ctx) { @@ -4371,11 +4521,14 @@ nxt_unit_port_process(nxt_unit_port_t *port) static nxt_unit_port_t * nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) { - int rc; - nxt_unit_impl_t *lib; - nxt_unit_port_t *old_port; - nxt_unit_process_t *process; - nxt_unit_port_impl_t *new_port; + int rc; + nxt_queue_t awaiting_req; + nxt_unit_impl_t *lib; + nxt_unit_port_t *old_port; + nxt_unit_process_t *process; + nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_port_impl_t *new_port, *old_port_impl; + nxt_unit_request_info_impl_t *req_impl; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); @@ -4415,6 +4568,17 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) *port = *old_port; + nxt_queue_init(&awaiting_req); + + old_port_impl = nxt_container_of(old_port, nxt_unit_port_impl_t, port); + + if (!nxt_queue_is_empty(&old_port_impl->awaiting_req)) { + nxt_queue_add(&awaiting_req, &old_port_impl->awaiting_req); + 
nxt_queue_init(&old_port_impl->awaiting_req); + } + + old_port_impl->ready = (port->in_fd != -1 || port->out_fd != -1); + pthread_mutex_unlock(&lib->mutex); if (lib->callbacks.add_port != NULL @@ -4423,6 +4587,25 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) lib->callbacks.add_port(ctx, old_port); } + nxt_queue_each(req_impl, &awaiting_req, + nxt_unit_request_info_impl_t, port_wait_link) + { + nxt_queue_remove(&req_impl->port_wait_link); + + ctx_impl = nxt_container_of(req_impl->req.ctx, nxt_unit_ctx_impl_t, + ctx); + + pthread_mutex_lock(&ctx_impl->mutex); + + nxt_queue_insert_tail(&ctx_impl->ready_req, + &req_impl->port_wait_link); + + pthread_mutex_unlock(&ctx_impl->mutex); + + nxt_atomic_fetch_add(&ctx_impl->wait_items, -1); + + } nxt_queue_loop; + return old_port; } @@ -4464,6 +4647,9 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) new_port->use_count = 2; new_port->process = process; + new_port->ready = (port->in_fd != -1 || port->out_fd != -1); + + nxt_queue_init(&new_port->awaiting_req); process = NULL; @@ -4608,6 +4794,42 @@ nxt_unit_quit(nxt_unit_ctx_t *ctx) } +static int +nxt_unit_get_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id) +{ + ssize_t res; + nxt_unit_impl_t *lib; + nxt_unit_ctx_impl_t *ctx_impl; + + struct { + nxt_port_msg_t msg; + nxt_port_msg_get_port_t get_port; + } m; + + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + + memset(&m.msg, 0, sizeof(nxt_port_msg_t)); + + m.msg.pid = lib->pid; + m.msg.reply_port = ctx_impl->read_port->id.id; + m.msg.type = _NXT_PORT_MSG_GET_PORT; + + m.get_port.id = port_id->id; + m.get_port.pid = port_id->pid; + + nxt_unit_debug(ctx, "get_port: %d %d", (int) port_id->pid, + (int) port_id->id); + + res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL, 0); + if (nxt_slow_path(res != sizeof(m))) { + return NXT_UNIT_ERROR; + } + + return NXT_UNIT_OK; +} + + static ssize_t nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, const void *buf, size_t buf_size, const void *oob, size_t oob_size) diff --git a/src/nxt_unit.h b/src/nxt_unit.h index 6723026f..8fa64f4e 100644 --- a/src/nxt_unit.h +++ b/src/nxt_unit.h @@ -19,6 +19,7 @@ enum { NXT_UNIT_OK = 0, NXT_UNIT_ERROR = 1, + NXT_UNIT_AGAIN = 2, }; enum { -- cgit From 6e31d6cd39be9d3f4ee680fc13c3fe42f5cd39e7 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:13 +0300 Subject: Changing router to application shared memory exchange protocol. The application process needs to request the shared memory segment from the router instead of the latter pushing the segment before sending a request to the application. This is required to simplify the communication between the router and the application and to prepare the router for using the application shared port and then the queue. 
--- src/nxt_http_websocket.c | 3 +- src/nxt_port.h | 7 + src/nxt_port_memory.c | 73 +++------ src/nxt_port_memory.h | 12 +- src/nxt_port_memory_int.h | 1 + src/nxt_router.c | 69 ++++++++- src/nxt_unit.c | 377 +++++++++++++++++++++++++++++++++------------- src/nxt_unit.h | 11 +- 8 files changed, 374 insertions(+), 179 deletions(-) diff --git a/src/nxt_http_websocket.c b/src/nxt_http_websocket.c index fb888f5d..4d31b320 100644 --- a/src/nxt_http_websocket.c +++ b/src/nxt_http_websocket.c @@ -69,7 +69,8 @@ nxt_http_websocket_client(nxt_task_t *task, void *obj, void *data) if (buf == NULL || buf_free_size == 0) { buf_free_size = nxt_min(frame_size, PORT_MMAP_DATA_SIZE); - buf = nxt_port_mmap_get_buf(task, req_app_link->app_port, + buf = nxt_port_mmap_get_buf(task, + &req_app_link->app_port->process->outgoing, buf_free_size); *out_tail = buf; diff --git a/src/nxt_port.h b/src/nxt_port.h index 838a7ffe..3a8a200a 100644 --- a/src/nxt_port.h +++ b/src/nxt_port.h @@ -27,6 +27,7 @@ struct nxt_port_handlers_s { nxt_port_handler_t new_port; nxt_port_handler_t get_port; nxt_port_handler_t mmap; + nxt_port_handler_t get_mmap; /* New process */ nxt_port_handler_t process_created; @@ -80,6 +81,7 @@ typedef enum { _NXT_PORT_MSG_NEW_PORT = nxt_port_handler_idx(new_port), _NXT_PORT_MSG_GET_PORT = nxt_port_handler_idx(get_port), _NXT_PORT_MSG_MMAP = nxt_port_handler_idx(mmap), + _NXT_PORT_MSG_GET_MMAP = nxt_port_handler_idx(get_mmap), _NXT_PORT_MSG_PROCESS_CREATED = nxt_port_handler_idx(process_created), _NXT_PORT_MSG_PROCESS_READY = nxt_port_handler_idx(process_ready), @@ -247,6 +249,11 @@ typedef struct { } nxt_port_msg_get_port_t; +typedef struct { + uint32_t id; +} nxt_port_msg_get_mmap_t; + + /* * nxt_port_data_t size is allocation size * which enables effective reuse of memory pool cache. 
diff --git a/src/nxt_port_memory.c b/src/nxt_port_memory.c index fd472cc6..1e01629e 100644 --- a/src/nxt_port_memory.c +++ b/src/nxt_port_memory.c @@ -282,8 +282,8 @@ fail: static nxt_port_mmap_handler_t * -nxt_port_new_port_mmap(nxt_task_t *task, nxt_process_t *process, - nxt_port_t *port, nxt_bool_t tracking, nxt_int_t n) +nxt_port_new_port_mmap(nxt_task_t *task, nxt_port_mmaps_t *mmaps, + nxt_bool_t tracking, nxt_int_t n) { void *mem; nxt_fd_t fd; @@ -295,15 +295,14 @@ nxt_port_new_port_mmap(nxt_task_t *task, nxt_process_t *process, mmap_handler = nxt_zalloc(sizeof(nxt_port_mmap_handler_t)); if (nxt_slow_path(mmap_handler == NULL)) { - nxt_log(task, NXT_LOG_WARN, "failed to allocate mmap_handler"); + nxt_alert(task, "failed to allocate mmap_handler"); return NULL; } - port_mmap = nxt_port_mmap_at(&process->outgoing, process->outgoing.size); + port_mmap = nxt_port_mmap_at(mmaps, mmaps->size); if (nxt_slow_path(port_mmap == NULL)) { - nxt_log(task, NXT_LOG_WARN, - "failed to add port mmap to outgoing array"); + nxt_alert(task, "failed to add port mmap to mmaps array"); nxt_free(mmap_handler); return NULL; @@ -322,6 +321,7 @@ nxt_port_new_port_mmap(nxt_task_t *task, nxt_process_t *process, } mmap_handler->hdr = mem; + mmap_handler->fd = fd; port_mmap->mmap_handler = mmap_handler; nxt_port_mmap_handler_use(mmap_handler, 1); @@ -331,10 +331,9 @@ nxt_port_new_port_mmap(nxt_task_t *task, nxt_process_t *process, nxt_memset(hdr->free_map, 0xFFU, sizeof(hdr->free_map)); nxt_memset(hdr->free_tracking_map, 0xFFU, sizeof(hdr->free_tracking_map)); - hdr->id = process->outgoing.size - 1; + hdr->id = mmaps->size - 1; hdr->src_pid = nxt_pid; - hdr->dst_pid = process->pid; - hdr->sent_over = port->id; + hdr->sent_over = 0xFFFFu; /* Mark first chunk as busy */ free_map = tracking ? 
hdr->free_tracking_map : hdr->free_map; @@ -347,13 +346,8 @@ nxt_port_new_port_mmap(nxt_task_t *task, nxt_process_t *process, nxt_port_mmap_set_chunk_busy(hdr->free_map, PORT_MMAP_CHUNK_COUNT); nxt_port_mmap_set_chunk_busy(hdr->free_tracking_map, PORT_MMAP_CHUNK_COUNT); - nxt_debug(task, "send mmap fd %FD to process %PI", fd, port->pid); - - /* TODO handle error */ - (void) nxt_port_socket_write(task, port, NXT_PORT_MSG_MMAP, fd, 0, 0, NULL); - - nxt_log(task, NXT_LOG_DEBUG, "new mmap #%D created for %PI -> %PI", - hdr->id, nxt_pid, process->pid); + nxt_log(task, NXT_LOG_DEBUG, "new mmap #%D created for %PI -> ...", + hdr->id, nxt_pid); return mmap_handler; @@ -361,7 +355,7 @@ remove_fail: nxt_free(mmap_handler); - process->outgoing.size--; + mmaps->size--; return NULL; } @@ -445,34 +439,28 @@ nxt_shm_open(nxt_task_t *task, size_t size) static nxt_port_mmap_handler_t * -nxt_port_mmap_get(nxt_task_t *task, nxt_port_t *port, nxt_chunk_id_t *c, +nxt_port_mmap_get(nxt_task_t *task, nxt_port_mmaps_t *mmaps, nxt_chunk_id_t *c, nxt_int_t n, nxt_bool_t tracking) { nxt_int_t i, res, nchunks; - nxt_process_t *process; nxt_free_map_t *free_map; nxt_port_mmap_t *port_mmap; nxt_port_mmap_t *end_port_mmap; nxt_port_mmap_header_t *hdr; nxt_port_mmap_handler_t *mmap_handler; - process = port->process; - if (nxt_slow_path(process == NULL)) { - return NULL; - } - - nxt_thread_mutex_lock(&process->outgoing.mutex); + nxt_thread_mutex_lock(&mmaps->mutex); - end_port_mmap = process->outgoing.elts + process->outgoing.size; + end_port_mmap = mmaps->elts + mmaps->size; - for (port_mmap = process->outgoing.elts; + for (port_mmap = mmaps->elts; port_mmap < end_port_mmap; port_mmap++) { mmap_handler = port_mmap->mmap_handler; hdr = mmap_handler->hdr; - if (hdr->sent_over != 0xFFFFu && hdr->sent_over != port->id) { + if (hdr->sent_over != 0xFFFFu) { continue; } @@ -510,11 +498,11 @@ nxt_port_mmap_get(nxt_task_t *task, nxt_port_t *port, nxt_chunk_id_t *c, /* TODO introduce port_mmap limit and release wait. 
*/ *c = 0; - mmap_handler = nxt_port_new_port_mmap(task, process, port, tracking, n); + mmap_handler = nxt_port_new_port_mmap(task, mmaps, tracking, n); unlock_return: - nxt_thread_mutex_unlock(&process->outgoing.mutex); + nxt_thread_mutex_unlock(&mmaps->mutex); return mmap_handler; } @@ -549,7 +537,7 @@ nxt_port_get_port_incoming_mmap(nxt_task_t *task, nxt_pid_t spid, uint32_t id) nxt_int_t -nxt_port_mmap_get_tracking(nxt_task_t *task, nxt_port_t *port, +nxt_port_mmap_get_tracking(nxt_task_t *task, nxt_port_mmaps_t *mmaps, nxt_port_mmap_tracking_t *tracking, uint32_t stream) { nxt_chunk_id_t c; @@ -558,7 +546,7 @@ nxt_port_mmap_get_tracking(nxt_task_t *task, nxt_port_t *port, nxt_debug(task, "request tracking for stream #%uD", stream); - mmap_handler = nxt_port_mmap_get(task, port, &c, 1, 1); + mmap_handler = nxt_port_mmap_get(task, mmaps, &c, 1, 1); if (nxt_slow_path(mmap_handler == NULL)) { return NXT_ERROR; } @@ -680,7 +668,7 @@ nxt_port_mmap_tracking_read(nxt_task_t *task, nxt_port_recv_msg_t *msg) nxt_buf_t * -nxt_port_mmap_get_buf(nxt_task_t *task, nxt_port_t *port, size_t size) +nxt_port_mmap_get_buf(nxt_task_t *task, nxt_port_mmaps_t *mmaps, size_t size) { nxt_mp_t *mp; nxt_buf_t *b; @@ -707,7 +695,7 @@ nxt_port_mmap_get_buf(nxt_task_t *task, nxt_port_t *port, size_t size) b->completion_handler = nxt_port_mmap_buf_completion; nxt_buf_set_port_mmap(b); - mmap_handler = nxt_port_mmap_get(task, port, &c, nchunks, 0); + mmap_handler = nxt_port_mmap_get(task, mmaps, &c, nchunks, 0); if (nxt_slow_path(mmap_handler == NULL)) { mp = task->thread->engine->mem_pool; nxt_mp_free(mp, b); @@ -943,9 +931,7 @@ nxt_port_mmap_read(nxt_task_t *task, nxt_port_recv_msg_t *msg) nxt_port_method_t nxt_port_mmap_get_method(nxt_task_t *task, nxt_port_t *port, nxt_buf_t *b) { - nxt_port_method_t m; - nxt_port_mmap_header_t *hdr; - nxt_port_mmap_handler_t *mmap_handler; + nxt_port_method_t m; m = NXT_PORT_METHOD_ANY; @@ -956,9 +942,6 @@ nxt_port_mmap_get_method(nxt_task_t *task, nxt_port_t *port, nxt_buf_t *b) } if (nxt_buf_is_port_mmap(b)) { - mmap_handler = b->parent; - hdr = mmap_handler->hdr; - if (m == NXT_PORT_METHOD_PLAIN) { nxt_log_error(NXT_LOG_ERR, task->log, "mixing plain and mmap buffers, " @@ -967,16 +950,6 @@ nxt_port_mmap_get_method(nxt_task_t *task, nxt_port_t *port, nxt_buf_t *b) break; } - if (port->pid != hdr->dst_pid) { - nxt_log_error(NXT_LOG_ERR, task->log, - "send mmap buffer for %PI to %PI, " - "using plain mode", hdr->dst_pid, port->pid); - - m = NXT_PORT_METHOD_PLAIN; - - break; - } - if (m == NXT_PORT_METHOD_ANY) { nxt_debug(task, "using mmap mode"); diff --git a/src/nxt_port_memory.h b/src/nxt_port_memory.h index 2cd4bd76..8e71af3d 100644 --- a/src/nxt_port_memory.h +++ b/src/nxt_port_memory.h @@ -23,7 +23,7 @@ struct nxt_port_mmap_tracking_s { }; nxt_int_t -nxt_port_mmap_get_tracking(nxt_task_t *task, nxt_port_t *port, +nxt_port_mmap_get_tracking(nxt_task_t *task, nxt_port_mmaps_t *mmaps, nxt_port_mmap_tracking_t *tracking, uint32_t stream); nxt_bool_t @@ -37,14 +37,12 @@ nxt_bool_t nxt_port_mmap_tracking_read(nxt_task_t *task, nxt_port_recv_msg_t *msg); /* - * Allocates nxt_but_t structure from port's mem_pool, assigns this buf 'mem' - * pointers to first available shared mem bucket(s). 'size' used as a hint to - * acquire several successive buckets if possible. - * - * This function assumes that current thread operates the 'port' exclusively. 
+ * Allocates nxt_but_t structure from task's thread engine mem_pool, assigns + * this buf 'mem' pointers to first available shared mem bucket(s). 'size' + * used as a hint to acquire several successive buckets if possible. */ nxt_buf_t * -nxt_port_mmap_get_buf(nxt_task_t *task, nxt_port_t *port, size_t size); +nxt_port_mmap_get_buf(nxt_task_t *task, nxt_port_mmaps_t *mmaps, size_t size); nxt_int_t nxt_port_mmap_increase_buf(nxt_task_t *task, nxt_buf_t *b, size_t size, size_t min_size); diff --git a/src/nxt_port_memory_int.h b/src/nxt_port_memory_int.h index 87c3d833..d2524ee4 100644 --- a/src/nxt_port_memory_int.h +++ b/src/nxt_port_memory_int.h @@ -63,6 +63,7 @@ struct nxt_port_mmap_header_s { struct nxt_port_mmap_handler_s { nxt_port_mmap_header_t *hdr; nxt_atomic_t use_count; + nxt_fd_t fd; }; /* diff --git a/src/nxt_router.c b/src/nxt_router.c index 3380e133..4df1489d 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -257,6 +257,8 @@ static void nxt_router_http_request_release(nxt_task_t *task, void *obj, static void nxt_router_oosm_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg); static void nxt_router_get_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg); +static void nxt_router_get_mmap_handler(nxt_task_t *task, + nxt_port_recv_msg_t *msg); extern const nxt_http_request_state_t nxt_http_websocket; @@ -281,6 +283,7 @@ static const nxt_port_handlers_t nxt_router_process_port_handlers = { .get_port = nxt_router_get_port_handler, .change_file = nxt_port_change_log_file_handler, .mmap = nxt_port_mmap_handler, + .get_mmap = nxt_router_get_mmap_handler, .data = nxt_router_conf_data_handler, .remove_pid = nxt_router_remove_pid_handler, .access_log = nxt_router_access_log_reopen_handler, @@ -5008,7 +5011,7 @@ nxt_router_app_prepare_request(nxt_task_t *task, buf = req_app_link->msg_info.buf; - res = nxt_port_mmap_get_tracking(task, port, + res = nxt_port_mmap_get_tracking(task, &port->process->outgoing, &req_app_link->msg_info.tracking, req_app_link->stream); if (nxt_slow_path(res != NXT_OK)) { @@ -5138,7 +5141,7 @@ nxt_router_prepare_msg(nxt_task_t *task, nxt_http_request_t *r, return NULL; } - out = nxt_port_mmap_get_buf(task, port, + out = nxt_port_mmap_get_buf(task, &port->process->outgoing, nxt_min(req_size + content_length, PORT_MMAP_DATA_SIZE)); if (nxt_slow_path(out == NULL)) { return NULL; @@ -5320,7 +5323,8 @@ nxt_router_prepare_msg(nxt_task_t *task, nxt_http_request_t *r, if (buf == NULL) { free_size = nxt_min(size, PORT_MMAP_DATA_SIZE); - buf = nxt_port_mmap_get_buf(task, port, free_size); + buf = nxt_port_mmap_get_buf(task, &port->process->outgoing, + free_size); if (nxt_slow_path(buf == NULL)) { while (out != NULL) { buf = out->next; @@ -5555,6 +5559,65 @@ nxt_router_oosm_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) } +static void +nxt_router_get_mmap_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) +{ + nxt_fd_t fd; + nxt_port_t *port; + nxt_runtime_t *rt; + nxt_port_mmaps_t *mmaps; + nxt_port_msg_get_mmap_t *get_mmap_msg; + nxt_port_mmap_handler_t *mmap_handler; + + rt = task->thread->runtime; + + port = nxt_runtime_port_find(rt, msg->port_msg.pid, + msg->port_msg.reply_port); + if (nxt_slow_path(port == NULL)) { + nxt_alert(task, "get_mmap_handler: reply_port %PI:%d not found", + msg->port_msg.pid, msg->port_msg.reply_port); + + return; + } + + if (nxt_slow_path(nxt_buf_used_size(msg->buf) + < (int) sizeof(nxt_port_msg_get_mmap_t))) + { + nxt_alert(task, "get_mmap_handler: message buffer too small (%d)", + (int) nxt_buf_used_size(msg->buf)); + + 
return; + } + + get_mmap_msg = (nxt_port_msg_get_mmap_t *) msg->buf->mem.pos; + + nxt_assert(port->type == NXT_PROCESS_APP); + + mmaps = &port->process->outgoing; + nxt_thread_mutex_lock(&mmaps->mutex); + + if (nxt_slow_path(get_mmap_msg->id >= mmaps->size)) { + nxt_thread_mutex_unlock(&mmaps->mutex); + + nxt_alert(task, "get_mmap_handler: mmap id is too big (%d)", + (int) get_mmap_msg->id); + + return; + } + + mmap_handler = mmaps->elts[get_mmap_msg->id].mmap_handler; + + fd = mmap_handler->fd; + + nxt_thread_mutex_unlock(&mmaps->mutex); + + nxt_debug(task, "get mmap %PI:%d found", + msg->port_msg.pid, (int) get_mmap_msg->id); + + (void) nxt_port_socket_write(task, port, NXT_PORT_MSG_MMAP, fd, 0, 0, NULL); +} + + static void nxt_router_get_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { diff --git a/src/nxt_unit.c b/src/nxt_unit.c index c1ef977f..b321a0d4 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -51,6 +51,7 @@ static int nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *router_port, nxt_unit_port_t *read_port, int *log_fd, uint32_t *stream, uint32_t *shm_limit); static int nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream); +static int nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf); static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, @@ -103,12 +104,14 @@ static void nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps); nxt_inline void nxt_unit_process_use(nxt_unit_process_t *process); nxt_inline void nxt_unit_process_release(nxt_unit_process_t *process); static void nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps); -static nxt_port_mmap_header_t *nxt_unit_get_incoming_mmap(nxt_unit_ctx_t *ctx, - nxt_unit_process_t *process, uint32_t id); static int nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, - nxt_unit_recv_msg_t *recv_msg); + nxt_unit_recv_msg_t *recv_msg, nxt_unit_read_buf_t *rbuf); +static int nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx, + nxt_unit_mmaps_t *mmaps, pid_t pid, uint32_t id, + nxt_port_mmap_header_t **hdr, nxt_unit_read_buf_t *rbuf); static int nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, - nxt_unit_recv_msg_t *recv_msg); + nxt_unit_recv_msg_t *recv_msg, nxt_unit_read_buf_t *rbuf); +static int nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id); static void nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, nxt_port_mmap_header_t *hdr, void *start, uint32_t size); @@ -240,7 +243,8 @@ struct nxt_unit_websocket_frame_impl_s { struct nxt_unit_read_buf_s { - nxt_unit_read_buf_t *next; + nxt_queue_link_t link; + nxt_unit_ctx_impl_t *ctx_impl; ssize_t size; char buf[16384]; char oob[256]; @@ -276,9 +280,11 @@ struct nxt_unit_ctx_impl_s { /* of nxt_unit_request_info_impl_t */ nxt_queue_t ready_req; - nxt_unit_read_buf_t *pending_read_head; - nxt_unit_read_buf_t **pending_read_tail; - nxt_unit_read_buf_t *free_read_buf; + /* of nxt_unit_read_buf_t */ + nxt_queue_t pending_rbuf; + + /* of nxt_unit_read_buf_t */ + nxt_queue_t free_rbuf; nxt_unit_mmap_buf_t ctx_buf[2]; nxt_unit_read_buf_t ctx_read_buf; @@ -318,6 +324,7 @@ struct nxt_unit_port_impl_s { nxt_atomic_t use_count; + /* for nxt_unit_process_t.ports */ nxt_queue_link_t link; nxt_unit_process_t *process; @@ -330,6 +337,9 @@ struct nxt_unit_port_impl_s { struct nxt_unit_mmap_s { nxt_port_mmap_header_t *hdr; + + /* of nxt_unit_read_buf_t */ + nxt_queue_t awaiting_rbuf; }; @@ -345,7 +355,7 @@ struct nxt_unit_mmaps_s { struct 
nxt_unit_process_s { pid_t pid; - nxt_queue_t ports; + nxt_queue_t ports; /* of nxt_unit_port_impl_t */ nxt_unit_mmaps_t incoming; nxt_unit_mmaps_t outgoing; @@ -537,17 +547,17 @@ nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, nxt_queue_init(&ctx_impl->free_ws); nxt_queue_init(&ctx_impl->active_req); nxt_queue_init(&ctx_impl->ready_req); + nxt_queue_init(&ctx_impl->pending_rbuf); + nxt_queue_init(&ctx_impl->free_rbuf); ctx_impl->free_buf = NULL; nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[1]); nxt_unit_mmap_buf_insert(&ctx_impl->free_buf, &ctx_impl->ctx_buf[0]); nxt_queue_insert_tail(&ctx_impl->free_req, &ctx_impl->req.link); + nxt_queue_insert_tail(&ctx_impl->free_rbuf, &ctx_impl->ctx_read_buf.link); - ctx_impl->pending_read_head = NULL; - ctx_impl->pending_read_tail = &ctx_impl->pending_read_head; - ctx_impl->free_read_buf = &ctx_impl->ctx_read_buf; - ctx_impl->ctx_read_buf.next = NULL; + ctx_impl->ctx_read_buf.ctx_impl = ctx_impl; ctx_impl->req.req.ctx = &ctx_impl->ctx; ctx_impl->req.req.unit = &lib->unit; @@ -767,9 +777,8 @@ nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream) } -int -nxt_unit_process_msg(nxt_unit_ctx_t *ctx, - void *buf, size_t buf_size, void *oob, size_t oob_size) +static int +nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) { int rc; pid_t pid; @@ -783,11 +792,10 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, rc = NXT_UNIT_ERROR; recv_msg.fd = -1; recv_msg.process = NULL; - port_msg = buf; - cm = oob; + port_msg = (nxt_port_msg_t *) rbuf->buf; + cm = (struct cmsghdr *) rbuf->oob; - if (oob_size >= CMSG_SPACE(sizeof(int)) - && cm->cmsg_len == CMSG_LEN(sizeof(int)) + if (cm->cmsg_len == CMSG_LEN(sizeof(int)) && cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) { @@ -796,8 +804,8 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, recv_msg.incoming_buf = NULL; - if (nxt_slow_path(buf_size < sizeof(nxt_port_msg_t))) { - nxt_unit_warn(ctx, "message too small (%d bytes)", (int) buf_size); + if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) { + nxt_unit_alert(ctx, "message too small (%d bytes)", (int) rbuf->size); goto fail; } @@ -808,7 +816,7 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, recv_msg.mmap = port_msg->mmap; recv_msg.start = port_msg + 1; - recv_msg.size = buf_size - sizeof(nxt_port_msg_t); + recv_msg.size = rbuf->size - sizeof(nxt_port_msg_t); if (nxt_slow_path(port_msg->type >= NXT_PORT_MSG_MAX)) { nxt_unit_warn(ctx, "#%"PRIu32": unknown message type (%d)", @@ -816,10 +824,16 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, goto fail; } - if (port_msg->tracking && nxt_unit_tracking_read(ctx, &recv_msg) == 0) { - rc = NXT_UNIT_OK; + if (port_msg->tracking) { + rc = nxt_unit_tracking_read(ctx, &recv_msg, rbuf); - goto fail; + if (nxt_slow_path(rc != NXT_UNIT_OK)) { + if (rc == NXT_UNIT_AGAIN) { + recv_msg.fd = -1; + } + + goto fail; + } } /* Fragmentation is unsupported. 
*/ @@ -830,7 +844,13 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, } if (port_msg->mmap) { - if (nxt_unit_mmap_read(ctx, &recv_msg) != NXT_UNIT_OK) { + rc = nxt_unit_mmap_read(ctx, &recv_msg, rbuf); + + if (nxt_slow_path(rc != NXT_UNIT_OK)) { + if (rc == NXT_UNIT_AGAIN) { + recv_msg.fd = -1; + } + goto fail; } } @@ -1077,6 +1097,9 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) nxt_unit_port_id_init(&port_id, recv_msg->pid, recv_msg->reply_port); res = nxt_unit_request_check_response_port(req, &port_id); + if (nxt_slow_path(res == NXT_UNIT_ERROR)) { + return NXT_UNIT_ERROR; + } if (nxt_fast_path(res == NXT_UNIT_OK)) { lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); @@ -2376,33 +2399,41 @@ static nxt_unit_read_buf_t * nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx) { nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_read_buf_t *rbuf; ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); pthread_mutex_lock(&ctx_impl->mutex); - return nxt_unit_read_buf_get_impl(ctx_impl); + rbuf = nxt_unit_read_buf_get_impl(ctx_impl); + + pthread_mutex_unlock(&ctx_impl->mutex); + + return rbuf; } static nxt_unit_read_buf_t * nxt_unit_read_buf_get_impl(nxt_unit_ctx_impl_t *ctx_impl) { + nxt_queue_link_t *link; nxt_unit_read_buf_t *rbuf; - if (ctx_impl->free_read_buf != NULL) { - rbuf = ctx_impl->free_read_buf; - ctx_impl->free_read_buf = rbuf->next; + if (!nxt_queue_is_empty(&ctx_impl->free_rbuf)) { + link = nxt_queue_first(&ctx_impl->free_rbuf); + nxt_queue_remove(link); - pthread_mutex_unlock(&ctx_impl->mutex); + rbuf = nxt_container_of(link, nxt_unit_read_buf_t, link); return rbuf; } - pthread_mutex_unlock(&ctx_impl->mutex); - rbuf = malloc(sizeof(nxt_unit_read_buf_t)); + if (nxt_fast_path(rbuf != NULL)) { + rbuf->ctx_impl = ctx_impl; + } + return rbuf; } @@ -2417,8 +2448,7 @@ nxt_unit_read_buf_release(nxt_unit_ctx_t *ctx, pthread_mutex_lock(&ctx_impl->mutex); - rbuf->next = ctx_impl->free_read_buf; - ctx_impl->free_read_buf = rbuf; + nxt_queue_insert_head(&ctx_impl->free_rbuf, &rbuf->link); pthread_mutex_unlock(&ctx_impl->mutex); } @@ -3255,9 +3285,7 @@ nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx) pthread_mutex_lock(&ctx_impl->mutex); - *ctx_impl->pending_read_tail = rbuf; - ctx_impl->pending_read_tail = &rbuf->next; - rbuf->next = NULL; + nxt_queue_insert_tail(&ctx_impl->pending_rbuf, &rbuf->link); pthread_mutex_unlock(&ctx_impl->mutex); @@ -3275,7 +3303,12 @@ nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx) static nxt_unit_mmap_t * nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i) { - uint32_t cap; + uint32_t cap, n; + nxt_unit_mmap_t *e; + + if (nxt_fast_path(mmaps->size > i)) { + return mmaps->elts + i; + } cap = mmaps->cap; @@ -3295,13 +3328,19 @@ nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i) if (cap != mmaps->cap) { - mmaps->elts = realloc(mmaps->elts, cap * sizeof(*mmaps->elts)); - if (nxt_slow_path(mmaps->elts == NULL)) { + e = realloc(mmaps->elts, cap * sizeof(nxt_unit_mmap_t)); + if (nxt_slow_path(e == NULL)) { return NULL; } - memset(mmaps->elts + mmaps->cap, 0, - sizeof(*mmaps->elts) * (cap - mmaps->cap)); + mmaps->elts = e; + + for (n = mmaps->cap; n < cap; n++) { + e = mmaps->elts + n; + + e->hdr = NULL; + nxt_queue_init(&e->awaiting_rbuf); + } mmaps->cap = cap; } @@ -3581,13 +3620,16 @@ nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, static int nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) { - int rc; - void *mem; - struct stat mmap_stat; - nxt_unit_mmap_t *mm; - nxt_unit_impl_t *lib; - nxt_unit_process_t 
*process; - nxt_port_mmap_header_t *hdr; + int rc; + void *mem; + nxt_queue_t awaiting_rbuf; + struct stat mmap_stat; + nxt_unit_mmap_t *mm; + nxt_unit_impl_t *lib; + nxt_unit_process_t *process; + nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_read_buf_t *rbuf; + nxt_port_mmap_header_t *hdr; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); @@ -3626,7 +3668,7 @@ nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) hdr = mem; - if (nxt_slow_path(hdr->src_pid != pid || hdr->dst_pid != lib->pid)) { + if (nxt_slow_path(hdr->src_pid != pid)) { nxt_unit_warn(ctx, "incoming_mmap: unexpected pid in mmap header " "detected: %d != %d or %d != %d", (int) hdr->src_pid, @@ -3637,6 +3679,8 @@ nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) goto fail; } + nxt_queue_init(&awaiting_rbuf); + pthread_mutex_lock(&process->incoming.mutex); mm = nxt_unit_mmap_at(&process->incoming, hdr->id); @@ -3650,11 +3694,28 @@ nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) hdr->sent_over = 0xFFFFu; + nxt_queue_add(&awaiting_rbuf, &mm->awaiting_rbuf); + nxt_queue_init(&mm->awaiting_rbuf); + rc = NXT_UNIT_OK; } pthread_mutex_unlock(&process->incoming.mutex); + nxt_queue_each(rbuf, &awaiting_rbuf, nxt_unit_read_buf_t, link) { + + ctx_impl = rbuf->ctx_impl; + + pthread_mutex_lock(&ctx_impl->mutex); + + nxt_queue_insert_head(&ctx_impl->pending_rbuf, &rbuf->link); + + pthread_mutex_unlock(&ctx_impl->mutex); + + nxt_atomic_fetch_add(&ctx_impl->wait_items, -1); + + } nxt_queue_loop; + fail: nxt_unit_process_release(process); @@ -3719,27 +3780,11 @@ nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps) } -static nxt_port_mmap_header_t * -nxt_unit_get_incoming_mmap(nxt_unit_ctx_t *ctx, nxt_unit_process_t *process, - uint32_t id) -{ - nxt_port_mmap_header_t *hdr; - - if (nxt_fast_path(process->incoming.size > id)) { - hdr = process->incoming.elts[id].hdr; - - } else { - hdr = NULL; - } - - return hdr; -} - - static int -nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) +nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, + nxt_unit_read_buf_t *rbuf) { - int rc; + int res; nxt_chunk_id_t c; nxt_unit_process_t *process; nxt_port_mmap_header_t *hdr; @@ -3749,7 +3794,7 @@ nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) nxt_unit_warn(ctx, "#%"PRIu32": tracking_read: too small message (%d)", recv_msg->stream, (int) recv_msg->size); - return 0; + return NXT_UNIT_ERROR; } tracking_msg = recv_msg->start; @@ -3759,44 +3804,95 @@ nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) process = nxt_unit_msg_get_process(ctx, recv_msg); if (nxt_slow_path(process == NULL)) { - return 0; + return NXT_UNIT_ERROR; } pthread_mutex_lock(&process->incoming.mutex); - hdr = nxt_unit_get_incoming_mmap(ctx, process, tracking_msg->mmap_id); - if (nxt_slow_path(hdr == NULL)) { - pthread_mutex_unlock(&process->incoming.mutex); - - nxt_unit_warn(ctx, "#%"PRIu32": tracking_read: " - "invalid mmap id %d,%"PRIu32, - recv_msg->stream, (int) process->pid, - tracking_msg->mmap_id); + res = nxt_unit_check_rbuf_mmap(ctx, &process->incoming, + recv_msg->pid, tracking_msg->mmap_id, + &hdr, rbuf); - return 0; + if (nxt_slow_path(res != NXT_UNIT_OK)) { + return res; } c = tracking_msg->tracking_id; - rc = nxt_atomic_cmp_set(hdr->tracking + c, recv_msg->stream, 0); + res = nxt_atomic_cmp_set(hdr->tracking + c, recv_msg->stream, 0); - if (rc == 0) { + if (res == 0) { nxt_unit_debug(ctx, "#%"PRIu32": tracking cancelled", recv_msg->stream); 
nxt_port_mmap_set_chunk_free(hdr->free_tracking_map, c); + + res = NXT_UNIT_CANCELLED; + + } else { + res = NXT_UNIT_OK; } pthread_mutex_unlock(&process->incoming.mutex); - return rc; + return res; } static int -nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) +nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx, nxt_unit_mmaps_t *mmaps, + pid_t pid, uint32_t id, nxt_port_mmap_header_t **hdr, + nxt_unit_read_buf_t *rbuf) { + int res, need_rbuf; + nxt_unit_mmap_t *mm; + nxt_unit_ctx_impl_t *ctx_impl; + + mm = nxt_unit_mmap_at(mmaps, id); + if (nxt_slow_path(mm == NULL)) { + nxt_unit_alert(ctx, "failed to allocate mmap"); + + pthread_mutex_unlock(&mmaps->mutex); + + *hdr = NULL; + + return NXT_UNIT_ERROR; + } + + *hdr = mm->hdr; + + if (nxt_fast_path(*hdr != NULL)) { + return NXT_UNIT_OK; + } + + need_rbuf = nxt_queue_is_empty(&mm->awaiting_rbuf); + + nxt_queue_insert_tail(&mm->awaiting_rbuf, &rbuf->link); + + pthread_mutex_unlock(&mmaps->mutex); + + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + + nxt_atomic_fetch_add(&ctx_impl->wait_items, 1); + + if (need_rbuf) { + res = nxt_unit_get_mmap(ctx, pid, id); + if (nxt_slow_path(res == NXT_UNIT_ERROR)) { + return NXT_UNIT_ERROR; + } + } + + return NXT_UNIT_AGAIN; +} + + +static int +nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, + nxt_unit_read_buf_t *rbuf) +{ + int res; void *start; uint32_t size; + nxt_unit_mmaps_t *mmaps; nxt_unit_process_t *process; nxt_unit_mmap_buf_t *b, **incoming_tail; nxt_port_mmap_msg_t *mmap_msg, *end; @@ -3819,12 +3915,17 @@ nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) incoming_tail = &recv_msg->incoming_buf; + /* Allocating buffer structures. */ for (; mmap_msg < end; mmap_msg++) { b = nxt_unit_mmap_buf_get(ctx); if (nxt_slow_path(b == NULL)) { nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: failed to allocate buf", recv_msg->stream); + while (recv_msg->incoming_buf != NULL) { + nxt_unit_mmap_buf_release(recv_msg->incoming_buf); + } + return NXT_UNIT_ERROR; } @@ -3835,19 +3936,21 @@ nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) b = recv_msg->incoming_buf; mmap_msg = recv_msg->start; - pthread_mutex_lock(&process->incoming.mutex); + mmaps = &process->incoming; + + pthread_mutex_lock(&mmaps->mutex); for (; mmap_msg < end; mmap_msg++) { - hdr = nxt_unit_get_incoming_mmap(ctx, process, mmap_msg->mmap_id); - if (nxt_slow_path(hdr == NULL)) { - pthread_mutex_unlock(&process->incoming.mutex); + res = nxt_unit_check_rbuf_mmap(ctx, mmaps, + recv_msg->pid, mmap_msg->mmap_id, + &hdr, rbuf); - nxt_unit_warn(ctx, "#%"PRIu32": mmap_read: " - "invalid mmap id %d,%"PRIu32, - recv_msg->stream, (int) process->pid, - mmap_msg->mmap_id); + if (nxt_slow_path(res != NXT_UNIT_OK)) { + while (recv_msg->incoming_buf != NULL) { + nxt_unit_mmap_buf_release(recv_msg->incoming_buf); + } - return NXT_UNIT_ERROR; + return res; } start = nxt_port_mmap_chunk_start(hdr, mmap_msg->chunk_id); @@ -3874,7 +3977,41 @@ nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) (int) mmap_msg->size); } - pthread_mutex_unlock(&process->incoming.mutex); + pthread_mutex_unlock(&mmaps->mutex); + + return NXT_UNIT_OK; +} + + +static int +nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id) +{ + ssize_t res; + nxt_unit_impl_t *lib; + nxt_unit_ctx_impl_t *ctx_impl; + + struct { + nxt_port_msg_t msg; + nxt_port_msg_get_mmap_t get_mmap; + } m; + + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + ctx_impl = nxt_container_of(ctx, 
nxt_unit_ctx_impl_t, ctx); + + memset(&m.msg, 0, sizeof(nxt_port_msg_t)); + + m.msg.pid = lib->pid; + m.msg.reply_port = ctx_impl->read_port->id.id; + m.msg.type = _NXT_PORT_MSG_GET_MMAP; + + m.get_mmap.id = id; + + nxt_unit_debug(ctx, "get_mmap: %d %d", (int) pid, (int) id); + + res = nxt_unit_port_send(ctx, lib->router_port, &m, sizeof(m), NULL, 0); + if (nxt_slow_path(res != sizeof(m))) { + return NXT_UNIT_ERROR; + } return NXT_UNIT_OK; } @@ -4110,6 +4247,7 @@ int nxt_unit_run_once(nxt_unit_ctx_t *ctx) { int rc; + nxt_queue_link_t *link; nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_read_buf_t *rbuf; @@ -4119,18 +4257,22 @@ nxt_unit_run_once(nxt_unit_ctx_t *ctx) pthread_mutex_lock(&ctx_impl->mutex); - if (ctx_impl->pending_read_head != NULL) { - rbuf = ctx_impl->pending_read_head; - ctx_impl->pending_read_head = rbuf->next; + if (!nxt_queue_is_empty(&ctx_impl->pending_rbuf)) { - if (ctx_impl->pending_read_tail == &rbuf->next) { - ctx_impl->pending_read_tail = &ctx_impl->pending_read_head; - } +next_pending: + + link = nxt_queue_first(&ctx_impl->pending_rbuf); + nxt_queue_remove(link); + + rbuf = nxt_container_of(link, nxt_unit_read_buf_t, link); pthread_mutex_unlock(&ctx_impl->mutex); } else { rbuf = nxt_unit_read_buf_get_impl(ctx_impl); + + pthread_mutex_unlock(&ctx_impl->mutex); + if (nxt_slow_path(rbuf == NULL)) { nxt_unit_ctx_release(ctx_impl); @@ -4142,21 +4284,40 @@ nxt_unit_run_once(nxt_unit_ctx_t *ctx) } if (nxt_fast_path(rbuf->size > 0)) { - rc = nxt_unit_process_msg(ctx, - rbuf->buf, rbuf->size, - rbuf->oob, sizeof(rbuf->oob)); + rc = nxt_unit_process_msg(ctx, rbuf); #if (NXT_DEBUG) - memset(rbuf->buf, 0xAC, rbuf->size); + if (nxt_fast_path(rc != NXT_UNIT_AGAIN)) { + memset(rbuf->buf, 0xAC, rbuf->size); + } #endif } else { rc = NXT_UNIT_ERROR; } - nxt_unit_read_buf_release(ctx, rbuf); + if (nxt_slow_path(rc == NXT_UNIT_AGAIN)) { + rc = NXT_UNIT_OK; + + } else { + nxt_unit_read_buf_release(ctx, rbuf); + } + + if (nxt_slow_path(rc == NXT_UNIT_CANCELLED)) { + rc = NXT_UNIT_OK; + } + + if (nxt_fast_path(rc == NXT_UNIT_OK)) { + pthread_mutex_lock(&ctx_impl->mutex); - nxt_unit_process_ready_req(ctx_impl); + if (!nxt_queue_is_empty(&ctx_impl->pending_rbuf)) { + goto next_pending; + } + + pthread_mutex_unlock(&ctx_impl->mutex); + + nxt_unit_process_ready_req(ctx_impl); + } nxt_unit_ctx_release(ctx_impl); diff --git a/src/nxt_unit.h b/src/nxt_unit.h index 8fa64f4e..79157f5f 100644 --- a/src/nxt_unit.h +++ b/src/nxt_unit.h @@ -20,6 +20,7 @@ enum { NXT_UNIT_OK = 0, NXT_UNIT_ERROR = 1, NXT_UNIT_AGAIN = 2, + NXT_UNIT_CANCELLED = 3, }; enum { @@ -188,16 +189,6 @@ struct nxt_unit_read_info_s { */ nxt_unit_ctx_t *nxt_unit_init(nxt_unit_init_t *); -/* - * Process received message, invoke configured callbacks. - * - * If application implements it's own event loop, each datagram received - * from port socket should be initially processed by unit. This function - * may invoke other application-defined callback for message processing. - */ -int nxt_unit_process_msg(nxt_unit_ctx_t *, - void *buf, size_t buf_size, void *oob, size_t oob_size); - /* * Main function useful in case when application does not have it's own * event loop. nxt_unit_run() starts infinite message wait and process loop. -- cgit From 83595606121a821f9e3cef0f0b7e7fe87eb1e50a Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:15 +0300 Subject: Introducing the shared application port. This is the port shared between all application processes which use it to pass requests for processing. 
Using it significantly simplifies the request processing code in the router. The drawback is 2 more file descriptors per each configured application and more complex libunit message wait/read code. --- go/nxt_cgo_lib.c | 11 +- go/nxt_cgo_lib.h | 2 + go/port.go | 8 +- src/nodejs/unit-http/unit.cpp | 22 +- src/nxt_http_websocket.c | 32 +- src/nxt_port.c | 2 - src/nxt_port.h | 11 +- src/nxt_router.c | 1508 +++++++++++++---------------------------- src/nxt_router.h | 14 +- src/nxt_router_request.h | 37 +- src/nxt_unit.c | 467 ++++++++++--- src/nxt_unit.h | 13 + 12 files changed, 896 insertions(+), 1231 deletions(-) diff --git a/go/nxt_cgo_lib.c b/go/nxt_cgo_lib.c index 937996b0..f7171f55 100644 --- a/go/nxt_cgo_lib.c +++ b/go/nxt_cgo_lib.c @@ -44,7 +44,7 @@ nxt_cgo_run(uintptr_t handler) return NXT_UNIT_ERROR; } - rc = nxt_unit_run(ctx); + rc = nxt_unit_run_ctx(ctx); nxt_unit_done(ctx); @@ -105,7 +105,7 @@ nxt_cgo_str_init(nxt_cgo_str_t *dst, nxt_unit_sptr_t *sptr, uint32_t length) static int nxt_cgo_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) { - nxt_go_add_port(port->id.pid, port->id.id, + nxt_go_add_port((uintptr_t) ctx, port->id.pid, port->id.id, port->in_fd, port->out_fd); port->in_fd = -1; @@ -203,6 +203,13 @@ nxt_cgo_request_done(uintptr_t req, int res) } +void +nxt_cgo_unit_run_shared(uintptr_t ctx) +{ + nxt_unit_run_shared((nxt_unit_ctx_t *) ctx); +} + + void nxt_cgo_warn(uintptr_t msg, uint32_t msg_len) { diff --git a/go/nxt_cgo_lib.h b/go/nxt_cgo_lib.h index 5317380b..fa515be5 100644 --- a/go/nxt_cgo_lib.h +++ b/go/nxt_cgo_lib.h @@ -35,6 +35,8 @@ int nxt_cgo_request_close(uintptr_t req); void nxt_cgo_request_done(uintptr_t req, int res); +void nxt_cgo_unit_run_shared(uintptr_t ctx); + void nxt_cgo_warn(uintptr_t msg, uint32_t msg_len); #endif /* _NXT_CGO_LIB_H_INCLUDED_ */ diff --git a/go/port.go b/go/port.go index 59a13f8b..64004d91 100644 --- a/go/port.go +++ b/go/port.go @@ -93,7 +93,7 @@ func getUnixConn(fd int) *net.UnixConn { } //export nxt_go_add_port -func nxt_go_add_port(pid C.int, id C.int, rcv C.int, snd C.int) { +func nxt_go_add_port(ctx C.uintptr_t, pid C.int, id C.int, rcv C.int, snd C.int) { p := &port{ key: port_key{ pid: int(pid), @@ -104,6 +104,12 @@ func nxt_go_add_port(pid C.int, id C.int, rcv C.int, snd C.int) { } add_port(p) + + if id == 65535 { + go func(ctx C.uintptr_t) { + C.nxt_cgo_unit_run_shared(ctx); + }(ctx) + } } //export nxt_go_remove_port diff --git a/src/nodejs/unit-http/unit.cpp b/src/nodejs/unit-http/unit.cpp index 468acf96..1ee5b742 100644 --- a/src/nodejs/unit-http/unit.cpp +++ b/src/nodejs/unit-http/unit.cpp @@ -20,7 +20,7 @@ napi_ref Unit::constructor_; struct port_data_t { nxt_unit_ctx_t *ctx; - nxt_unit_port_id_t port_id; + nxt_unit_port_t *port; uv_poll_t poll; }; @@ -351,7 +351,11 @@ Unit::shm_ack_handler(nxt_unit_ctx_t *ctx) static void nxt_uv_read_callback(uv_poll_t *handle, int status, int events) { - nxt_unit_run_once((nxt_unit_ctx_t *) handle->data); + port_data_t *data; + + data = (port_data_t *) handle->data; + + nxt_unit_process_port_msg(data->ctx, data->port); } @@ -396,21 +400,14 @@ Unit::add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) port->data = data; data->ctx = ctx; - data->port_id = port->id; - data->poll.data = ctx; + data->port = port; + data->poll.data = data; } return NXT_UNIT_OK; } -inline bool -operator == (const nxt_unit_port_id_t &p1, const nxt_unit_port_id_t &p2) -{ - return p1.pid == p2.pid && p1.id == p2.id; -} - - void Unit::remove_port(nxt_unit_t *unit, nxt_unit_port_t *port) { @@ -419,10 +416,9 
@@ Unit::remove_port(nxt_unit_t *unit, nxt_unit_port_t *port) if (port->data != NULL) { data = (port_data_t *) port->data; - if (data->port_id == port->id) { + if (data->port == port) { uv_poll_stop(&data->poll); - data->poll.data = data; uv_close((uv_handle_t *) &data->poll, delete_port_data); } } diff --git a/src/nxt_http_websocket.c b/src/nxt_http_websocket.c index 4d31b320..393c20ac 100644 --- a/src/nxt_http_websocket.c +++ b/src/nxt_http_websocket.c @@ -33,15 +33,13 @@ nxt_http_websocket_client(nxt_task_t *task, void *obj, void *data) nxt_buf_t *out, *buf, **out_tail, *b, *next; nxt_int_t res; nxt_http_request_t *r; - nxt_request_app_link_t *req_app_link; nxt_request_rpc_data_t *req_rpc_data; nxt_websocket_header_t *wsh; r = obj; + req_rpc_data = r->req_rpc_data; - if (nxt_slow_path((req_rpc_data = r->req_rpc_data) == NULL - || (req_app_link = req_rpc_data->req_app_link) == NULL)) - { + if (nxt_slow_path(req_rpc_data == NULL)) { nxt_debug(task, "websocket client frame for destroyed request"); return; @@ -69,8 +67,7 @@ nxt_http_websocket_client(nxt_task_t *task, void *obj, void *data) if (buf == NULL || buf_free_size == 0) { buf_free_size = nxt_min(frame_size, PORT_MMAP_DATA_SIZE); - buf = nxt_port_mmap_get_buf(task, - &req_app_link->app_port->process->outgoing, + buf = nxt_port_mmap_get_buf(task, &req_rpc_data->app->outgoing, buf_free_size); *out_tail = buf; @@ -101,10 +98,10 @@ nxt_http_websocket_client(nxt_task_t *task, void *obj, void *data) b = next; } - res = nxt_port_socket_twrite(task, req_app_link->app_port, + res = nxt_port_socket_twrite(task, req_rpc_data->app_port, NXT_PORT_MSG_WEBSOCKET, -1, - req_app_link->stream, - req_app_link->reply_port->id, out, NULL); + req_rpc_data->stream, + task->thread->engine->port->id, out, NULL); if (nxt_slow_path(res != NXT_OK)) { // TODO: handle } @@ -130,32 +127,27 @@ static void nxt_http_websocket_error_handler(nxt_task_t *task, void *obj, void *data) { nxt_http_request_t *r; - nxt_request_app_link_t *req_app_link; nxt_request_rpc_data_t *req_rpc_data; nxt_debug(task, "http websocket error handler"); r = obj; + req_rpc_data = r->req_rpc_data; - if ((req_rpc_data = r->req_rpc_data) == NULL) { + if (req_rpc_data == NULL) { nxt_debug(task, " req_rpc_data is NULL"); goto close_handler; } - if ((req_app_link = req_rpc_data->req_app_link) == NULL) { - nxt_debug(task, " req_app_link is NULL"); - goto close_handler; - } - - if (req_app_link->app_port == NULL) { + if (req_rpc_data->app_port == NULL) { nxt_debug(task, " app_port is NULL"); goto close_handler; } - (void) nxt_port_socket_twrite(task, req_app_link->app_port, + (void) nxt_port_socket_twrite(task, req_rpc_data->app_port, NXT_PORT_MSG_WEBSOCKET_LAST, - -1, req_app_link->stream, - req_app_link->reply_port->id, NULL, NULL); + -1, req_rpc_data->stream, + task->thread->engine->port->id, NULL, NULL); close_handler: diff --git a/src/nxt_port.c b/src/nxt_port.c index 7232c465..54434d70 100644 --- a/src/nxt_port.c +++ b/src/nxt_port.c @@ -67,8 +67,6 @@ nxt_port_new(nxt_task_t *task, nxt_port_id_t id, nxt_pid_t pid, nxt_queue_init(&port->messages); nxt_thread_mutex_create(&port->write_mutex); - nxt_queue_init(&port->pending_requests); - nxt_queue_init(&port->active_websockets); } else { nxt_mp_destroy(mp); diff --git a/src/nxt_port.h b/src/nxt_port.h index 3a8a200a..9a933e75 100644 --- a/src/nxt_port.h +++ b/src/nxt_port.h @@ -41,6 +41,7 @@ struct nxt_port_handlers_s { /* Request headers. */ nxt_port_handler_t req_headers; + nxt_port_handler_t req_headers_ack; /* Websocket frame. 
*/ nxt_port_handler_t websocket_frame; @@ -89,6 +90,7 @@ typedef enum { _NXT_PORT_MSG_QUIT = nxt_port_handler_idx(quit), _NXT_PORT_MSG_REQ_HEADERS = nxt_port_handler_idx(req_headers), + _NXT_PORT_MSG_REQ_HEADERS_ACK = nxt_port_handler_idx(req_headers_ack), _NXT_PORT_MSG_WEBSOCKET = nxt_port_handler_idx(websocket_frame), _NXT_PORT_MSG_DATA = nxt_port_handler_idx(data), @@ -113,7 +115,8 @@ typedef enum { NXT_PORT_MSG_NEW_PORT = nxt_msg_last(_NXT_PORT_MSG_NEW_PORT), NXT_PORT_MSG_GET_PORT = nxt_msg_last(_NXT_PORT_MSG_GET_PORT), NXT_PORT_MSG_MMAP = nxt_msg_last(_NXT_PORT_MSG_MMAP) - | NXT_PORT_MSG_CLOSE_FD | NXT_PORT_MSG_SYNC, + | NXT_PORT_MSG_SYNC, + NXT_PORT_MSG_GET_MMAP = nxt_msg_last(_NXT_PORT_MSG_GET_MMAP), NXT_PORT_MSG_PROCESS_CREATED = nxt_msg_last(_NXT_PORT_MSG_PROCESS_CREATED), NXT_PORT_MSG_PROCESS_READY = nxt_msg_last(_NXT_PORT_MSG_PROCESS_READY), @@ -193,6 +196,7 @@ struct nxt_port_s { nxt_queue_link_t app_link; /* for nxt_app_t.ports */ nxt_app_t *app; + nxt_port_t *main_app_port; nxt_queue_link_t idle_link; /* for nxt_app_t.idle_ports */ nxt_msec_t idle_start; @@ -205,11 +209,10 @@ struct nxt_port_s { /* Maximum interleave of message parts. */ uint32_t max_share; - uint32_t app_pending_responses; uint32_t app_responses; - nxt_queue_t pending_requests; - nxt_queue_t active_websockets; + uint32_t active_websockets; + uint32_t active_requests; nxt_port_handler_t handler; nxt_port_handler_t *data; diff --git a/src/nxt_router.c b/src/nxt_router.c index 4df1489d..44b303e4 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -61,59 +61,13 @@ typedef struct { } nxt_app_rpc_t; -struct nxt_port_select_state_s { - nxt_app_t *app; - nxt_request_app_link_t *req_app_link; - - nxt_port_t *failed_port; - int failed_port_use_delta; - - uint8_t start_process; /* 1 bit */ - nxt_request_app_link_t *shared_ra; - nxt_port_t *port; -}; - -typedef struct nxt_port_select_state_s nxt_port_select_state_t; - static nxt_int_t nxt_router_prefork(nxt_task_t *task, nxt_process_t *process, nxt_mp_t *mp); static nxt_int_t nxt_router_start(nxt_task_t *task, nxt_process_data_t *data); static void nxt_router_greet_controller(nxt_task_t *task, nxt_port_t *controller_port); -static void nxt_router_port_select(nxt_task_t *task, - nxt_port_select_state_t *state); - -static nxt_int_t nxt_router_port_post_select(nxt_task_t *task, - nxt_port_select_state_t *state); - static nxt_int_t nxt_router_start_app_process(nxt_task_t *task, nxt_app_t *app); -static void nxt_request_app_link_update_peer(nxt_task_t *task, - nxt_request_app_link_t *req_app_link); - - -nxt_inline void -nxt_request_app_link_inc_use(nxt_request_app_link_t *req_app_link) -{ - nxt_atomic_fetch_add(&req_app_link->use_count, 1); -} - -nxt_inline void -nxt_request_app_link_chk_use(nxt_request_app_link_t *req_app_link, int i) -{ -#if (NXT_DEBUG) - int c; - - c = nxt_atomic_fetch_add(&req_app_link->use_count, i); - - nxt_assert((c + i) > 0); -#else - (void) nxt_atomic_fetch_add(&req_app_link->use_count, i); -#endif -} - -static void nxt_request_app_link_use(nxt_task_t *task, - nxt_request_app_link_t *req_app_link, int i); static nxt_router_temp_conf_t *nxt_router_temp_conf(nxt_task_t *task); static void nxt_router_conf_apply(nxt_task_t *task, void *obj, void *data); @@ -196,6 +150,8 @@ static void nxt_router_listen_socket_close(nxt_task_t *task, void *obj, void *data); static void nxt_router_thread_exit_handler(nxt_task_t *task, void *obj, void *data); +static void nxt_router_req_headers_ack_handler(nxt_task_t *task, + nxt_port_recv_msg_t *msg, 
nxt_request_rpc_data_t *req_rpc_data); static void nxt_router_listen_socket_release(nxt_task_t *task, nxt_socket_conf_t *skcf); @@ -220,6 +176,8 @@ static void nxt_router_access_log_reopen_error(nxt_task_t *task, static void nxt_router_app_port_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data); +static nxt_int_t nxt_router_app_shared_port_send(nxt_task_t *task, + nxt_port_t *app_port); static void nxt_router_app_port_error(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data); @@ -227,13 +185,15 @@ static void nxt_router_app_unlink(nxt_task_t *task, nxt_app_t *app); static void nxt_router_app_port_release(nxt_task_t *task, nxt_port_t *port, nxt_apr_action_t action); -static nxt_int_t nxt_router_app_port(nxt_task_t *task, nxt_app_t *app, - nxt_request_app_link_t *req_app_link); +static void nxt_router_app_port_get(nxt_task_t *task, nxt_app_t *app, + nxt_request_rpc_data_t *req_rpc_data); +static void nxt_router_http_request_done(nxt_task_t *task, void *obj, + void *data); static void nxt_router_app_prepare_request(nxt_task_t *task, - nxt_request_app_link_t *req_app_link); + nxt_request_rpc_data_t *req_rpc_data); static nxt_buf_t *nxt_router_prepare_msg(nxt_task_t *task, - nxt_http_request_t *r, nxt_port_t *port, const nxt_str_t *prefix); + nxt_http_request_t *r, nxt_app_t *app, const nxt_str_t *prefix); static void nxt_router_app_timeout(nxt_task_t *task, void *obj, void *data); static void nxt_router_adjust_idle_timer(nxt_task_t *task, void *obj, @@ -250,7 +210,7 @@ static void nxt_http_request_send_body(nxt_task_t *task, void *obj, void *data); static void nxt_router_app_joint_use(nxt_task_t *task, nxt_app_joint_t *app_joint, int i); -static nxt_int_t nxt_router_http_request_done(nxt_task_t *task, +static void nxt_router_http_request_release_post(nxt_task_t *task, nxt_http_request_t *r); static void nxt_router_http_request_release(nxt_task_t *task, void *obj, void *data); @@ -501,83 +461,6 @@ nxt_router_start_app_process(nxt_task_t *task, nxt_app_t *app) } -nxt_inline void -nxt_request_app_link_init(nxt_task_t *task, - nxt_request_app_link_t *req_app_link, nxt_request_rpc_data_t *req_rpc_data) -{ - nxt_buf_t *body; - nxt_event_engine_t *engine; - - engine = task->thread->engine; - - nxt_memzero(req_app_link, sizeof(nxt_request_app_link_t)); - - req_app_link->stream = req_rpc_data->stream; - req_app_link->use_count = 1; - req_app_link->req_rpc_data = req_rpc_data; - req_rpc_data->req_app_link = req_app_link; - req_app_link->reply_port = engine->port; - req_app_link->request = req_rpc_data->request; - req_app_link->apr_action = NXT_APR_GOT_RESPONSE; - - req_app_link->work.handler = NULL; - req_app_link->work.task = &engine->task; - req_app_link->work.obj = req_app_link; - req_app_link->work.data = engine; - - body = req_rpc_data->request->body; - - if (body != NULL && nxt_buf_is_file(body)) { - req_app_link->body_fd = body->file->fd; - - body->file->fd = -1; - - } else { - req_app_link->body_fd = -1; - } -} - - -nxt_inline nxt_request_app_link_t * -nxt_request_app_link_alloc(nxt_task_t *task, - nxt_request_app_link_t *ra_src, nxt_request_rpc_data_t *req_rpc_data) -{ - nxt_mp_t *mp; - nxt_request_app_link_t *req_app_link; - - if (ra_src != NULL && ra_src->mem_pool != NULL) { - return ra_src; - } - - mp = req_rpc_data->request->mem_pool; - - req_app_link = nxt_mp_alloc(mp, sizeof(nxt_request_app_link_t)); - - if (nxt_slow_path(req_app_link == NULL)) { - - req_rpc_data->req_app_link = NULL; - - if (ra_src != NULL) { - ra_src->req_rpc_data = NULL; - } - - return NULL; - } - - 
nxt_mp_retain(mp); - - nxt_request_app_link_init(task, req_app_link, req_rpc_data); - - if (ra_src != NULL) { - req_app_link->body_fd = ra_src->body_fd; - } - - req_app_link->mem_pool = mp; - - return req_app_link; -} - - nxt_inline nxt_bool_t nxt_router_msg_cancel(nxt_task_t *task, nxt_msg_info_t *msg_info, uint32_t stream) @@ -614,198 +497,6 @@ nxt_router_msg_cancel(nxt_task_t *task, nxt_msg_info_t *msg_info, } -static void -nxt_request_app_link_update_peer_handler(nxt_task_t *task, void *obj, - void *data) -{ - nxt_request_app_link_t *req_app_link; - - req_app_link = obj; - - nxt_request_app_link_update_peer(task, req_app_link); - - nxt_request_app_link_use(task, req_app_link, -1); -} - - -static void -nxt_request_app_link_update_peer(nxt_task_t *task, - nxt_request_app_link_t *req_app_link) -{ - nxt_event_engine_t *engine; - nxt_request_rpc_data_t *req_rpc_data; - - engine = req_app_link->work.data; - - if (task->thread->engine != engine) { - nxt_request_app_link_inc_use(req_app_link); - - req_app_link->work.handler = nxt_request_app_link_update_peer_handler; - req_app_link->work.task = &engine->task; - req_app_link->work.next = NULL; - - nxt_debug(task, "req_app_link stream #%uD post update peer to %p", - req_app_link->stream, engine); - - nxt_event_engine_post(engine, &req_app_link->work); - - return; - } - - nxt_debug(task, "req_app_link stream #%uD update peer", - req_app_link->stream); - - req_rpc_data = req_app_link->req_rpc_data; - - if (req_rpc_data != NULL && req_app_link->app_port != NULL) { - nxt_port_rpc_ex_set_peer(task, engine->port, req_rpc_data, - req_app_link->app_port->pid); - } -} - - -static void -nxt_request_app_link_release(nxt_task_t *task, - nxt_request_app_link_t *req_app_link) -{ - nxt_mp_t *mp; - nxt_http_request_t *r; - nxt_request_rpc_data_t *req_rpc_data; - - nxt_assert(task->thread->engine == req_app_link->work.data); - nxt_assert(req_app_link->use_count == 0); - - nxt_debug(task, "req_app_link stream #%uD release", req_app_link->stream); - - req_rpc_data = req_app_link->req_rpc_data; - - if (req_rpc_data != NULL) { - if (nxt_slow_path(req_app_link->err_code != 0)) { - nxt_http_request_error(task, req_rpc_data->request, - req_app_link->err_code); - - } else { - req_rpc_data->app_port = req_app_link->app_port; - req_rpc_data->apr_action = req_app_link->apr_action; - req_rpc_data->msg_info = req_app_link->msg_info; - - if (req_rpc_data->app->timeout != 0) { - r = req_rpc_data->request; - - r->timer.handler = nxt_router_app_timeout; - r->timer_data = req_rpc_data; - nxt_timer_add(task->thread->engine, &r->timer, - req_rpc_data->app->timeout); - } - - req_app_link->app_port = NULL; - req_app_link->msg_info.buf = NULL; - } - - req_rpc_data->req_app_link = NULL; - req_app_link->req_rpc_data = NULL; - } - - if (req_app_link->app_port != NULL) { - nxt_router_app_port_release(task, req_app_link->app_port, - req_app_link->apr_action); - - req_app_link->app_port = NULL; - } - - if (req_app_link->body_fd != -1) { - nxt_fd_close(req_app_link->body_fd); - - req_app_link->body_fd = -1; - } - - nxt_router_msg_cancel(task, &req_app_link->msg_info, req_app_link->stream); - - mp = req_app_link->mem_pool; - - if (mp != NULL) { - nxt_mp_free(mp, req_app_link); - nxt_mp_release(mp); - } -} - - -static void -nxt_request_app_link_release_handler(nxt_task_t *task, void *obj, void *data) -{ - nxt_request_app_link_t *req_app_link; - - req_app_link = obj; - - nxt_assert(req_app_link->work.data == data); - - nxt_request_app_link_use(task, req_app_link, -1); -} - - -static void 
-nxt_request_app_link_use(nxt_task_t *task, nxt_request_app_link_t *req_app_link, - int i) -{ - int c; - nxt_event_engine_t *engine; - - c = nxt_atomic_fetch_add(&req_app_link->use_count, i); - - if (i < 0 && c == -i) { - engine = req_app_link->work.data; - - if (task->thread->engine == engine) { - nxt_request_app_link_release(task, req_app_link); - - return; - } - - nxt_request_app_link_inc_use(req_app_link); - - req_app_link->work.handler = nxt_request_app_link_release_handler; - req_app_link->work.task = &engine->task; - req_app_link->work.next = NULL; - - nxt_debug(task, "req_app_link stream #%uD post release to %p", - req_app_link->stream, engine); - - nxt_event_engine_post(engine, &req_app_link->work); - } -} - - -nxt_inline void -nxt_request_app_link_error(nxt_task_t *task, nxt_app_t *app, - nxt_request_app_link_t *req_app_link, const char *str) -{ - req_app_link->app_port = NULL; - req_app_link->err_code = 500; - req_app_link->err_str = str; - - nxt_alert(task, "app \"%V\" internal error: %s on #%uD", - &app->name, str, req_app_link->stream); -} - - -nxt_inline void -nxt_request_app_link_pending(nxt_task_t *task, nxt_app_t *app, - nxt_request_app_link_t *req_app_link) -{ - nxt_queue_insert_tail(&req_app_link->app_port->pending_requests, - &req_app_link->link_port_pending); - nxt_queue_insert_tail(&app->pending, &req_app_link->link_app_pending); - - nxt_request_app_link_inc_use(req_app_link); - - req_app_link->res_time = nxt_thread_monotonic_time(task->thread) - + app->res_timeout; - - nxt_debug(task, "req_app_link stream #%uD enqueue to pending_requests", - req_app_link->stream); -} - - nxt_inline nxt_bool_t nxt_queue_chk_remove(nxt_queue_link_t *lnk) { @@ -825,8 +516,9 @@ nxt_inline void nxt_request_rpc_data_unlink(nxt_task_t *task, nxt_request_rpc_data_t *req_rpc_data) { - int ra_use_delta; - nxt_request_app_link_t *req_app_link; + nxt_http_request_t *r; + + nxt_router_msg_cancel(task, &req_rpc_data->msg_info, req_rpc_data->stream); if (req_rpc_data->app_port != NULL) { nxt_router_app_port_release(task, req_rpc_data->app_port, @@ -835,53 +527,34 @@ nxt_request_rpc_data_unlink(nxt_task_t *task, req_rpc_data->app_port = NULL; } - nxt_router_msg_cancel(task, &req_rpc_data->msg_info, req_rpc_data->stream); - - req_app_link = req_rpc_data->req_app_link; - if (req_app_link != NULL) { - req_rpc_data->req_app_link = NULL; - req_app_link->req_rpc_data = NULL; - - ra_use_delta = 0; - - nxt_thread_mutex_lock(&req_rpc_data->app->mutex); + if (req_rpc_data->app != NULL) { + nxt_router_app_use(task, req_rpc_data->app, -1); - if (req_app_link->link_app_requests.next == NULL - && req_app_link->link_port_pending.next == NULL - && req_app_link->link_app_pending.next == NULL - && req_app_link->link_port_websockets.next == NULL) - { - req_app_link = NULL; + req_rpc_data->app = NULL; + } - } else { - ra_use_delta -= - nxt_queue_chk_remove(&req_app_link->link_app_requests) - + nxt_queue_chk_remove(&req_app_link->link_port_pending) - + nxt_queue_chk_remove(&req_app_link->link_port_websockets); + r = req_rpc_data->request; - nxt_queue_chk_remove(&req_app_link->link_app_pending); - } + if (r != NULL) { + r->timer_data = NULL; - nxt_thread_mutex_unlock(&req_rpc_data->app->mutex); + nxt_router_http_request_release_post(task, r); - if (req_app_link != NULL) { - nxt_request_app_link_use(task, req_app_link, ra_use_delta); - } + r->req_rpc_data = NULL; + req_rpc_data->request = NULL; } - if (req_rpc_data->app != NULL) { - nxt_router_app_use(task, req_rpc_data->app, -1); + if (req_rpc_data->msg_info.body_fd != 
-1) { + nxt_fd_close(req_rpc_data->msg_info.body_fd); - req_rpc_data->app = NULL; + req_rpc_data->msg_info.body_fd = -1; } - if (req_rpc_data->request != NULL) { - req_rpc_data->request->timer_data = NULL; - - nxt_router_http_request_done(task, req_rpc_data->request); + if (req_rpc_data->rpc_cancel) { + req_rpc_data->rpc_cancel = 0; - req_rpc_data->request->req_rpc_data = NULL; - req_rpc_data->request = NULL; + nxt_port_rpc_cancel(task, task->thread->engine->port, + req_rpc_data->stream); } } @@ -889,25 +562,62 @@ nxt_request_rpc_data_unlink(nxt_task_t *task, void nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { + nxt_app_t *app; + nxt_port_t *port, *main_app_port; + nxt_runtime_t *rt; + nxt_port_new_port_handler(task, msg); - if (msg->u.new_port != NULL - && msg->u.new_port->type == NXT_PROCESS_CONTROLLER) - { + port = msg->u.new_port; + + if (port != NULL && port->type == NXT_PROCESS_CONTROLLER) { nxt_router_greet_controller(task, msg->u.new_port); } - if (msg->port_msg.stream == 0) { - return; - } + if (port == NULL || port->type != NXT_PROCESS_APP) { + + if (msg->port_msg.stream == 0) { + return; + } - if (msg->u.new_port == NULL - || msg->u.new_port->type != NXT_PROCESS_APP) - { msg->port_msg.type = _NXT_PORT_MSG_RPC_ERROR; } - nxt_port_rpc_handler(task, msg); + if (msg->port_msg.stream != 0) { + nxt_port_rpc_handler(task, msg); + return; + } + + /* + * Port with "id == 0" is application 'main' port and it always + * should come with non-zero stream. + */ + nxt_assert(port->id != 0); + + /* Find 'main' app port and get app reference. */ + rt = task->thread->runtime; + + /* + * It is safe to access 'runtime->ports' hash because 'NEW_PORT' + * sent to main port (with id == 0) and processed in main thread. + */ + main_app_port = nxt_port_hash_find(&rt->ports, port->pid, 0); + nxt_assert(main_app_port != NULL); + + app = main_app_port->app; + nxt_assert(app != NULL); + + nxt_thread_mutex_lock(&app->mutex); + + /* TODO here should be find-and-add code because there can be + port waiters in port_hash */ + nxt_port_hash_add(&app->port_hash, port); + app->port_hash_count++; + + nxt_thread_mutex_unlock(&app->mutex); + + port->app = app; + port->main_app_port = main_app_port; } @@ -1100,8 +810,10 @@ nxt_router_app_can_start(nxt_app_t *app) nxt_inline nxt_bool_t nxt_router_app_need_start(nxt_app_t *app) { - return app->idle_processes + app->pending_processes - < app->spare_processes; + return (app->active_requests + > app->port_hash_count + app->pending_processes) + || (app->spare_processes + > app->idle_processes + app->pending_processes); } @@ -1530,6 +1242,7 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_app_t *app, *prev; nxt_str_t *t, *s, *targets; nxt_uint_t n, i; + nxt_port_t *port; nxt_router_t *router; nxt_app_joint_t *app_joint; nxt_conf_value_t *conf, *http, *value, *websocket; @@ -1744,8 +1457,6 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_queue_init(&app->ports); nxt_queue_init(&app->spare_ports); nxt_queue_init(&app->idle_ports); - nxt_queue_init(&app->requests); - nxt_queue_init(&app->pending); app->name.length = name.length; nxt_memcpy(app->name.start, name.start, name.length); @@ -1758,7 +1469,6 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, app->timeout = apcf.timeout; app->res_timeout = apcf.res_timeout * 1000000; app->idle_timeout = apcf.idle_timeout; - app->max_pending_responses = 2; app->max_requests = apcf.requests; app->targets = targets; @@ -1789,6 +1499,25 
@@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, app_joint->free_app_work.handler = nxt_router_free_app; app_joint->free_app_work.task = &engine->task; app_joint->free_app_work.obj = app_joint; + + port = nxt_port_new(task, (nxt_port_id_t) -1, nxt_pid, + NXT_PROCESS_APP); + if (nxt_slow_path(port == NULL)) { + return NXT_ERROR; + } + + ret = nxt_port_socket_init(task, port, 0); + if (nxt_slow_path(ret != NXT_OK)) { + nxt_port_use(task, port, -1); + return NXT_ERROR; + } + + nxt_port_write_enable(task, port); + port->app = app; + + app->shared_port = port; + + nxt_thread_mutex_create(&app->outgoing.mutex); } } @@ -2522,7 +2251,13 @@ nxt_router_app_prefork_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, app = rpc->app; port = msg->u.new_port; + + nxt_assert(port != NULL); + nxt_assert(port->type == NXT_PROCESS_APP); + nxt_assert(port->id == 0); + port->app = app; + port->main_app_port = port; app->pending_processes--; app->processes++; @@ -2532,11 +2267,15 @@ nxt_router_app_prefork_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, nxt_queue_insert_tail(&app->ports, &port->app_link); nxt_queue_insert_tail(&app->spare_ports, &port->idle_link); + nxt_port_hash_add(&app->port_hash, port); + app->port_hash_count++; port->idle_start = 0; nxt_port_inc_use(port); + nxt_router_app_shared_port_send(task, port); + nxt_work_queue_add(&engine->fast_work_queue, nxt_router_conf_apply, task, rpc->temp_conf, NULL); } @@ -2939,10 +2678,11 @@ nxt_router_engine_post(nxt_event_engine_t *engine, nxt_work_t *jobs) static nxt_port_handlers_t nxt_router_app_port_handlers = { - .rpc_error = nxt_port_rpc_handler, - .mmap = nxt_port_mmap_handler, - .data = nxt_port_rpc_handler, - .oosm = nxt_router_oosm_handler, + .rpc_error = nxt_port_rpc_handler, + .mmap = nxt_port_mmap_handler, + .data = nxt_port_rpc_handler, + .oosm = nxt_router_oosm_handler, + .req_headers_ack = nxt_port_rpc_handler, }; @@ -3736,22 +3476,17 @@ nxt_router_response_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data) { nxt_int_t ret; + nxt_app_t *app; nxt_buf_t *b, *next; nxt_port_t *app_port; nxt_unit_field_t *f; nxt_http_field_t *field; nxt_http_request_t *r; nxt_unit_response_t *resp; - nxt_request_app_link_t *req_app_link; nxt_request_rpc_data_t *req_rpc_data; - b = msg->buf; req_rpc_data = data; - if (msg->size == 0) { - b = NULL; - } - r = req_rpc_data->request; if (nxt_slow_path(r == NULL)) { return; @@ -3762,19 +3497,32 @@ nxt_router_response_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg, return; } + app = req_rpc_data->app; + nxt_assert(app != NULL); + + if (msg->port_msg.type == _NXT_PORT_MSG_REQ_HEADERS_ACK) { + nxt_router_req_headers_ack_handler(task, msg, req_rpc_data); + + return; + } + + b = (msg->size == 0) ? 
NULL : msg->buf; + if (msg->port_msg.last != 0) { nxt_debug(task, "router data create last buf"); nxt_buf_chain_add(&b, nxt_http_buf_last(r)); + req_rpc_data->rpc_cancel = 0; + req_rpc_data->apr_action = NXT_APR_GOT_RESPONSE; + nxt_request_rpc_data_unlink(task, req_rpc_data); } else { - if (req_rpc_data->app != NULL && req_rpc_data->app->timeout != 0) { + if (app->timeout != 0) { r->timer.handler = nxt_router_app_timeout; r->timer_data = req_rpc_data; - nxt_timer_add(task->thread->engine, &r->timer, - req_rpc_data->app->timeout); + nxt_timer_add(task->thread->engine, &r->timer, app->timeout); } } @@ -3870,39 +3618,21 @@ nxt_router_response_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg, if (r->websocket_handshake && r->status == NXT_HTTP_SWITCHING_PROTOCOLS) { - req_app_link = nxt_request_app_link_alloc(task, - req_rpc_data->req_app_link, - req_rpc_data); - if (nxt_slow_path(req_app_link == NULL)) { - goto fail; - } - - app_port = req_app_link->app_port; - - if (app_port == NULL && req_rpc_data->app_port != NULL) { - req_app_link->app_port = req_rpc_data->app_port; - app_port = req_app_link->app_port; - req_app_link->apr_action = req_rpc_data->apr_action; - - req_rpc_data->app_port = NULL; - } - + app_port = req_rpc_data->app_port; if (nxt_slow_path(app_port == NULL)) { goto fail; } - nxt_thread_mutex_lock(&req_rpc_data->app->mutex); + nxt_thread_mutex_lock(&app->mutex); - nxt_queue_insert_tail(&app_port->active_websockets, - &req_app_link->link_port_websockets); + app_port->main_app_port->active_websockets++; - nxt_thread_mutex_unlock(&req_rpc_data->app->mutex); + nxt_thread_mutex_unlock(&app->mutex); nxt_router_app_port_release(task, app_port, NXT_APR_UPGRADE); - req_app_link->apr_action = NXT_APR_CLOSE; + req_rpc_data->apr_action = NXT_APR_CLOSE; - nxt_debug(task, "req_app_link stream #%uD upgrade", - req_app_link->stream); + nxt_debug(task, "stream #%uD upgrade", req_rpc_data->stream); r->state = &nxt_http_websocket; @@ -3921,8 +3651,96 @@ fail: } -static const nxt_http_request_state_t nxt_http_request_send_state - nxt_aligned(64) = +static void +nxt_router_req_headers_ack_handler(nxt_task_t *task, + nxt_port_recv_msg_t *msg, nxt_request_rpc_data_t *req_rpc_data) +{ + nxt_app_t *app; + nxt_bool_t start_process; + nxt_port_t *app_port, *main_app_port, *idle_port; + nxt_queue_link_t *idle_lnk; + nxt_http_request_t *r; + + nxt_debug(task, "stream #%uD: got ack from %PI:%d", + req_rpc_data->stream, + msg->port_msg.pid, msg->port_msg.reply_port); + + nxt_port_rpc_ex_set_peer(task, msg->port, req_rpc_data, + msg->port_msg.pid); + + app = req_rpc_data->app; + + start_process = 0; + + nxt_thread_mutex_lock(&app->mutex); + + app_port = nxt_port_hash_find(&app->port_hash, msg->port_msg.pid, + msg->port_msg.reply_port); + if (nxt_slow_path(app_port == NULL)) { + nxt_thread_mutex_unlock(&app->mutex); + + r = req_rpc_data->request; + nxt_http_request_error(task, r, NXT_HTTP_INTERNAL_SERVER_ERROR); + + return; + } + + main_app_port = app_port->main_app_port; + + if (nxt_queue_chk_remove(&main_app_port->idle_link)) { + app->idle_processes--; + + /* Check port was in 'spare_ports' using idle_start field. */ + if (main_app_port->idle_start == 0 + && app->idle_processes >= app->spare_processes) + { + /* + * If there is a vacant space in spare ports, + * move the last idle to spare_ports. 
+ */ + nxt_assert(!nxt_queue_is_empty(&app->idle_ports)); + + idle_lnk = nxt_queue_last(&app->idle_ports); + idle_port = nxt_queue_link_data(idle_lnk, nxt_port_t, idle_link); + nxt_queue_remove(idle_lnk); + + nxt_queue_insert_tail(&app->spare_ports, idle_lnk); + + idle_port->idle_start = 0; + } + + if (nxt_router_app_can_start(app) && nxt_router_app_need_start(app)) { + app->pending_processes++; + start_process = 1; + } + } + + main_app_port->active_requests++; + + nxt_port_inc_use(app_port); + + nxt_thread_mutex_unlock(&app->mutex); + + if (start_process) { + nxt_router_start_app_process(task, app); + } + + nxt_port_use(task, req_rpc_data->app_port, -1); + + req_rpc_data->app_port = app_port; + + if (app->timeout != 0) { + r = req_rpc_data->request; + + r->timer.handler = nxt_router_app_timeout; + r->timer_data = req_rpc_data; + nxt_timer_add(task->thread->engine, &r->timer, app->timeout); + } +} + + +static const nxt_http_request_state_t nxt_http_request_send_state + nxt_aligned(64) = { .error_handler = nxt_http_request_error_handler, }; @@ -3949,42 +3767,14 @@ static void nxt_router_response_error_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data) { - nxt_int_t res; - nxt_port_t *port; - nxt_bool_t cancelled; - nxt_request_app_link_t *req_app_link; nxt_request_rpc_data_t *req_rpc_data; req_rpc_data = data; - req_app_link = req_rpc_data->req_app_link; - - if (req_app_link != NULL) { - cancelled = nxt_router_msg_cancel(task, &req_app_link->msg_info, - req_app_link->stream); - if (cancelled) { - res = nxt_router_app_port(task, req_rpc_data->app, req_app_link); - - if (res == NXT_OK) { - port = req_app_link->app_port; - - if (nxt_slow_path(port == NULL)) { - nxt_log(task, NXT_LOG_ERR, - "port is NULL in cancelled req_app_link"); - return; - } - - nxt_port_rpc_ex_set_peer(task, task->thread->engine->port, - req_rpc_data, port->pid); - - nxt_router_app_prepare_request(task, req_app_link); - } + req_rpc_data->rpc_cancel = 0; - msg->port_msg.last = 0; - - return; - } - } + /* TODO cancel message and return if cancelled. 
*/ + // nxt_router_msg_cancel(task, &req_rpc_data->msg_info, req_rpc_data->stream); if (req_rpc_data->request != NULL) { nxt_http_request_error(task, req_rpc_data->request, @@ -4008,6 +3798,8 @@ nxt_router_app_port_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, nxt_assert(app_joint != NULL); nxt_assert(port != NULL); + nxt_assert(port->type == NXT_PROCESS_APP); + nxt_assert(port->id == 0); app = app_joint->app; @@ -4022,6 +3814,7 @@ nxt_router_app_port_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, } port->app = app; + port->main_app_port = port; nxt_thread_mutex_lock(&app->mutex); @@ -4029,24 +3822,60 @@ nxt_router_app_port_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, app->pending_processes--; app->processes++; + nxt_port_hash_add(&app->port_hash, port); + app->port_hash_count++; nxt_thread_mutex_unlock(&app->mutex); nxt_debug(task, "app '%V' new port ready, pid %PI, %d/%d", &app->name, port->pid, app->processes, app->pending_processes); + nxt_router_app_shared_port_send(task, port); + nxt_router_app_port_release(task, port, NXT_APR_NEW_PORT); } +static nxt_int_t +nxt_router_app_shared_port_send(nxt_task_t *task, nxt_port_t *app_port) +{ + nxt_buf_t *b; + nxt_port_t *port; + nxt_port_msg_new_port_t *msg; + + b = nxt_buf_mem_ts_alloc(task, task->thread->engine->mem_pool, + sizeof(nxt_port_data_t)); + if (nxt_slow_path(b == NULL)) { + return NXT_ERROR; + } + + port = app_port->app->shared_port; + + nxt_debug(task, "send port %FD to process %PI", + port->pair[0], app_port->pid); + + b->mem.free += sizeof(nxt_port_msg_new_port_t); + msg = (nxt_port_msg_new_port_t *) b->mem.pos; + + msg->id = port->id; + msg->pid = port->pid; + msg->max_size = port->max_size; + msg->max_share = port->max_share; + msg->type = port->type; + + return nxt_port_socket_twrite(task, app_port, + NXT_PORT_MSG_NEW_PORT, + port->pair[0], + 0, 0, b, NULL); +} + + static void nxt_router_app_port_error(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data) { - nxt_app_t *app; - nxt_app_joint_t *app_joint; - nxt_queue_link_t *lnk; - nxt_request_app_link_t *req_app_link; + nxt_app_t *app; + nxt_app_joint_t *app_joint; app_joint = data; @@ -4070,32 +3899,11 @@ nxt_router_app_port_error(nxt_task_t *task, nxt_port_recv_msg_t *msg, app->pending_processes--; - if (!nxt_queue_is_empty(&app->requests)) { - lnk = nxt_queue_last(&app->requests); - nxt_queue_remove(lnk); - lnk->next = NULL; - - req_app_link = nxt_queue_link_data(lnk, nxt_request_app_link_t, - link_app_requests); - - } else { - req_app_link = NULL; - } - nxt_thread_mutex_unlock(&app->mutex); - if (req_app_link != NULL) { - nxt_debug(task, "app '%V' %p abort next stream #%uD", - &app->name, app, req_app_link->stream); - - nxt_request_app_link_error(task, app, req_app_link, - "Failed to start application process"); - nxt_request_app_link_use(task, req_app_link, -1); - } + /* TODO req_app_link to cancel first pending message */ } -nxt_inline nxt_port_t * -nxt_router_app_get_port_for_quit(nxt_app_t *app); void nxt_router_app_use(nxt_task_t *task, nxt_app_t *app, int i) @@ -4116,63 +3924,6 @@ nxt_router_app_use(nxt_task_t *task, nxt_app_t *app, int i) } -nxt_inline nxt_bool_t -nxt_router_app_first_port_busy(nxt_app_t *app) -{ - nxt_port_t *port; - nxt_queue_link_t *lnk; - - lnk = nxt_queue_first(&app->ports); - port = nxt_queue_link_data(lnk, nxt_port_t, app_link); - - return port->app_pending_responses > 0; -} - - -nxt_inline nxt_port_t * -nxt_router_pop_first_port(nxt_app_t *app) -{ - nxt_port_t *port; - nxt_queue_link_t *lnk; - - lnk = 
nxt_queue_first(&app->ports); - nxt_queue_remove(lnk); - - port = nxt_queue_link_data(lnk, nxt_port_t, app_link); - - port->app_pending_responses++; - - if (nxt_queue_chk_remove(&port->idle_link)) { - app->idle_processes--; - - if (port->idle_start == 0) { - nxt_assert(app->idle_processes < app->spare_processes); - - } else { - nxt_assert(app->idle_processes >= app->spare_processes); - - port->idle_start = 0; - } - } - - if ((app->max_pending_responses == 0 - || port->app_pending_responses < app->max_pending_responses) - && (app->max_requests == 0 - || port->app_responses + port->app_pending_responses - < app->max_requests)) - { - nxt_queue_insert_tail(&app->ports, lnk); - - nxt_port_inc_use(port); - - } else { - lnk->next = NULL; - } - - return port; -} - - nxt_inline nxt_port_t * nxt_router_app_get_port_for_quit(nxt_app_t *app) { @@ -4184,12 +3935,6 @@ nxt_router_app_get_port_for_quit(nxt_app_t *app) nxt_queue_each(port, &app->ports, nxt_port_t, app_link) { - if (port->app_pending_responses > 0) { - port = NULL; - - continue; - } - /* Caller is responsible to decrease port use count. */ nxt_queue_chk_remove(&port->app_link); @@ -4197,6 +3942,9 @@ nxt_router_app_get_port_for_quit(nxt_app_t *app) app->idle_processes--; } + nxt_port_hash_remove(&app->port_hash, port); + app->port_hash_count--; + port->app = NULL; app->processes--; @@ -4221,72 +3969,37 @@ nxt_router_app_unlink(nxt_task_t *task, nxt_app_t *app) } -static void -nxt_router_app_process_request(nxt_task_t *task, void *obj, void *data) -{ - nxt_request_app_link_t *req_app_link; - - req_app_link = data; - -#if (NXT_DEBUG) - { - nxt_app_t *app; - - app = obj; - - nxt_assert(app != NULL); - nxt_assert(req_app_link != NULL); - nxt_assert(req_app_link->app_port != NULL); - - nxt_debug(task, "app '%V' %p process next stream #%uD", - &app->name, app, req_app_link->stream); - } -#endif - - nxt_router_app_prepare_request(task, req_app_link); - - nxt_request_app_link_use(task, req_app_link, -1); -} - - static void nxt_router_app_port_release(nxt_task_t *task, nxt_port_t *port, nxt_apr_action_t action) { - int inc_use; - uint32_t dec_pending, got_response; - nxt_app_t *app; - nxt_bool_t port_unchained; - nxt_bool_t send_quit, cancelled, adjust_idle_timer; - nxt_queue_link_t *lnk; - nxt_request_app_link_t *req_app_link, *pending_ra, *re_ra; - nxt_port_select_state_t state; + int inc_use; + uint32_t got_response, dec_requests; + nxt_app_t *app; + nxt_bool_t port_unchained, send_quit, adjust_idle_timer; + nxt_port_t *main_app_port; nxt_assert(port != NULL); nxt_assert(port->app != NULL); - req_app_link = NULL; - app = port->app; inc_use = 0; - dec_pending = 0; got_response = 0; + dec_requests = 0; switch (action) { case NXT_APR_NEW_PORT: break; case NXT_APR_REQUEST_FAILED: - dec_pending = 1; + dec_requests = 1; inc_use = -1; break; case NXT_APR_GOT_RESPONSE: - dec_pending = 1; got_response = 1; inc_use = -1; break; case NXT_APR_UPGRADE: - dec_pending = 1; got_response = 1; break; case NXT_APR_CLOSE: @@ -4294,120 +4007,49 @@ nxt_router_app_port_release(nxt_task_t *task, nxt_port_t *port, break; } - nxt_thread_mutex_lock(&app->mutex); - - port->app_pending_responses -= dec_pending; - port->app_responses += got_response; + nxt_debug(task, "app '%V' release port %PI:%d: %d %d", &app->name, + port->pid, port->id, + (int) inc_use, (int) got_response); - if (port->pair[1] != -1 - && (app->max_pending_responses == 0 - || port->app_pending_responses < app->max_pending_responses) - && (app->max_requests == 0 - || port->app_responses + 
port->app_pending_responses - < app->max_requests)) - { - if (port->app_link.next == NULL) { - if (port->app_pending_responses > 0) { - nxt_queue_insert_tail(&app->ports, &port->app_link); + if (port == app->shared_port) { + nxt_thread_mutex_lock(&app->mutex); - } else { - nxt_queue_insert_head(&app->ports, &port->app_link); - } + app->active_requests -= got_response + dec_requests; - nxt_port_inc_use(port); + nxt_thread_mutex_unlock(&app->mutex); - } else { - if (port->app_pending_responses == 0 - && nxt_queue_first(&app->ports) != &port->app_link) - { - nxt_queue_remove(&port->app_link); - nxt_queue_insert_head(&app->ports, &port->app_link); - } - } + goto adjust_use; } - if (!nxt_queue_is_empty(&app->ports) - && !nxt_queue_is_empty(&app->requests)) - { - lnk = nxt_queue_first(&app->requests); - nxt_queue_remove(lnk); - lnk->next = NULL; - - req_app_link = nxt_queue_link_data(lnk, nxt_request_app_link_t, - link_app_requests); + main_app_port = port->main_app_port; - req_app_link->app_port = nxt_router_pop_first_port(app); + nxt_thread_mutex_lock(&app->mutex); - if (req_app_link->app_port->app_pending_responses > 1) { - nxt_request_app_link_pending(task, app, req_app_link); - } - } + main_app_port->app_responses += got_response; + main_app_port->active_requests -= got_response + dec_requests; + app->active_requests -= got_response + dec_requests; - /* Pop first pending request for this port. */ - if (dec_pending > 0 - && !nxt_queue_is_empty(&port->pending_requests)) + if (main_app_port->pair[1] != -1 + && (app->max_requests == 0 + || main_app_port->app_responses < app->max_requests)) { - lnk = nxt_queue_first(&port->pending_requests); - nxt_queue_remove(lnk); - lnk->next = NULL; - - pending_ra = nxt_queue_link_data(lnk, nxt_request_app_link_t, - link_port_pending); - - nxt_assert(pending_ra->link_app_pending.next != NULL); - - nxt_queue_remove(&pending_ra->link_app_pending); - pending_ra->link_app_pending.next = NULL; - - } else { - pending_ra = NULL; - } - - /* Try to cancel and re-schedule first stalled request for this app. */ - if (got_response > 0 && !nxt_queue_is_empty(&app->pending)) { - lnk = nxt_queue_first(&app->pending); - - re_ra = nxt_queue_link_data(lnk, nxt_request_app_link_t, - link_app_pending); - - if (re_ra->res_time <= nxt_thread_monotonic_time(task->thread)) { - - nxt_debug(task, "app '%V' stalled request #%uD detected", - &app->name, re_ra->stream); + if (main_app_port->app_link.next == NULL) { + nxt_queue_insert_tail(&app->ports, &main_app_port->app_link); - cancelled = nxt_router_msg_cancel(task, &re_ra->msg_info, - re_ra->stream); - - if (cancelled) { - state.req_app_link = re_ra; - state.app = app; - - /* - * Need to increment use count "in advance" because - * nxt_router_port_select() will remove re_ra from lists - * and decrement use count. 
- */ - nxt_request_app_link_inc_use(re_ra); - - nxt_router_port_select(task, &state); - - goto re_ra_cancelled; - } + nxt_port_inc_use(main_app_port); } } - re_ra = NULL; - -re_ra_cancelled: - send_quit = (app->max_requests > 0 - && port->app_pending_responses == 0 - && port->app_responses >= app->max_requests); + && main_app_port->app_responses >= app->max_requests); if (send_quit) { - port_unchained = nxt_queue_chk_remove(&port->app_link); + port_unchained = nxt_queue_chk_remove(&main_app_port->app_link); - port->app = NULL; + nxt_port_hash_remove(&app->port_hash, main_app_port); + app->port_hash_count--; + + main_app_port->app = NULL; app->processes--; } else { @@ -4416,9 +4058,10 @@ re_ra_cancelled: adjust_idle_timer = 0; - if (port->pair[1] != -1 && !send_quit && port->app_pending_responses == 0 - && nxt_queue_is_empty(&port->active_websockets) - && port->idle_link.next == NULL) + if (main_app_port->pair[1] != -1 && !send_quit + && main_app_port->active_requests == 0 + && main_app_port->active_websockets == 0 + && main_app_port->idle_link.next == NULL) { if (app->idle_processes == app->spare_processes && app->adjust_idle_work.data == NULL) @@ -4429,12 +4072,12 @@ re_ra_cancelled: } if (app->idle_processes < app->spare_processes) { - nxt_queue_insert_tail(&app->spare_ports, &port->idle_link); + nxt_queue_insert_tail(&app->spare_ports, &main_app_port->idle_link); } else { - nxt_queue_insert_tail(&app->idle_ports, &port->idle_link); + nxt_queue_insert_tail(&app->idle_ports, &main_app_port->idle_link); - port->idle_start = task->thread->engine->timers.now; + main_app_port->idle_start = task->thread->engine->timers.now; } app->idle_processes++; @@ -4447,60 +4090,22 @@ re_ra_cancelled: nxt_event_engine_post(app->engine, &app->adjust_idle_work); } - if (pending_ra != NULL) { - nxt_request_app_link_use(task, pending_ra, -1); - } - - if (re_ra != NULL) { - if (nxt_router_port_post_select(task, &state) == NXT_OK) { - /* - * Reference counter already incremented above, this will - * keep re_ra while nxt_router_app_process_request() - * task is in queue. Reference counter decreased in - * nxt_router_app_process_request() after processing. - */ - - nxt_work_queue_add(&task->thread->engine->fast_work_queue, - nxt_router_app_process_request, - &task->thread->engine->task, app, re_ra); - - } else { - nxt_request_app_link_use(task, re_ra, -1); - } - } - - if (req_app_link != NULL) { - /* - * There should be call nxt_request_app_link_inc_use(req_app_link), - * because of one more link in the queue. But one link was - * recently removed from app->requests linked list. - * Corresponding decrement is in nxt_router_app_process_request(). - */ - - nxt_work_queue_add(&task->thread->engine->fast_work_queue, - nxt_router_app_process_request, - &task->thread->engine->task, app, req_app_link); - - goto adjust_use; - } - /* ? 
*/ - if (port->pair[1] == -1) { + if (main_app_port->pair[1] == -1) { nxt_debug(task, "app '%V' %p port %p already closed (pid %PI dead?)", - &app->name, app, port, port->pid); + &app->name, app, main_app_port, main_app_port->pid); goto adjust_use; } if (send_quit) { - nxt_debug(task, "app '%V' %p send QUIT to port", - &app->name, app); + nxt_debug(task, "app '%V' %p send QUIT to port", &app->name, app); - nxt_port_socket_write(task, port, NXT_PORT_MSG_QUIT, - -1, 0, 0, NULL); + nxt_port_socket_write(task, main_app_port, NXT_PORT_MSG_QUIT, -1, 0, 0, + NULL); if (port_unchained) { - nxt_port_use(task, port, -1); + nxt_port_use(task, main_app_port, -1); } goto adjust_use; @@ -4529,6 +4134,18 @@ nxt_router_app_port_close(nxt_task_t *task, nxt_port_t *port) nxt_thread_mutex_lock(&app->mutex); + nxt_port_hash_remove(&app->port_hash, port); + app->port_hash_count--; + + if (port->id != 0) { + nxt_thread_mutex_unlock(&app->mutex); + + nxt_debug(task, "app '%V' port (%PI, %d) closed", &app->name, + port->pid, port->id); + + return; + } + unchain = nxt_queue_chk_remove(&port->app_link); if (nxt_queue_chk_remove(&port->idle_link)) { @@ -4553,8 +4170,7 @@ nxt_router_app_port_close(nxt_task_t *task, nxt_port_t *port) start_process = !task->thread->engine->shutdown && nxt_router_app_can_start(app) - && (!nxt_queue_is_empty(&app->requests) - || nxt_router_app_need_start(app)); + && nxt_router_app_need_start(app); if (start_process) { app->pending_processes++; @@ -4603,6 +4219,10 @@ nxt_router_adjust_idle_timer(nxt_task_t *task, void *obj, void *data) app->adjust_idle_work.data = NULL; } + nxt_debug(task, "app '%V' idle_processes %d, spare_processes %d", + &app->name, + (int) app->idle_processes, (int) app->spare_processes); + while (app->idle_processes > app->spare_processes) { nxt_assert(!nxt_queue_is_empty(&app->idle_ports)); @@ -4612,6 +4232,10 @@ nxt_router_adjust_idle_timer(nxt_task_t *task, void *obj, void *data) timeout = port->idle_start + app->idle_timeout; + nxt_debug(task, "app '%V' pid %PI, start %M, timeout %M, threshold %M", + &app->name, port->pid, + port->idle_start, timeout, threshold); + if (timeout > threshold) { break; } @@ -4621,6 +4245,9 @@ nxt_router_adjust_idle_timer(nxt_task_t *task, void *obj, void *data) nxt_queue_chk_remove(&port->app_link); + nxt_port_hash_remove(&app->port_hash, port); + app->port_hash_count--; + app->idle_processes--; app->processes--; port->app = NULL; @@ -4704,12 +4331,23 @@ nxt_router_free_app(nxt_task_t *task, void *obj, void *data) } nxt_assert(app->processes == 0); + nxt_assert(app->active_requests == 0); + nxt_assert(app->port_hash_count == 0); nxt_assert(app->idle_processes == 0); - nxt_assert(nxt_queue_is_empty(&app->requests)); nxt_assert(nxt_queue_is_empty(&app->ports)); nxt_assert(nxt_queue_is_empty(&app->spare_ports)); nxt_assert(nxt_queue_is_empty(&app->idle_ports)); + nxt_port_mmaps_destroy(&app->outgoing, 1); + + nxt_thread_mutex_destroy(&app->outgoing.mutex); + + if (app->shared_port != NULL) { + app->shared_port->app = NULL; + nxt_port_close(task, app->shared_port); + nxt_port_use(task, app->shared_port, -1); + } + nxt_thread_mutex_destroy(&app->mutex); nxt_mp_destroy(app->mem_pool); @@ -4726,178 +4364,34 @@ nxt_router_free_app(nxt_task_t *task, void *obj, void *data) static void -nxt_router_port_select(nxt_task_t *task, nxt_port_select_state_t *state) -{ - int ra_use_delta; - nxt_app_t *app; - nxt_bool_t can_start_process; - nxt_request_app_link_t *req_app_link; - - req_app_link = state->req_app_link; - app = state->app; - - 
state->failed_port_use_delta = 0; - ra_use_delta = -nxt_queue_chk_remove(&req_app_link->link_app_requests); - - if (nxt_queue_chk_remove(&req_app_link->link_port_pending)) - { - nxt_assert(req_app_link->link_app_pending.next != NULL); - - nxt_queue_remove(&req_app_link->link_app_pending); - req_app_link->link_app_pending.next = NULL; - - ra_use_delta--; - } - - state->failed_port = req_app_link->app_port; - - if (req_app_link->app_port != NULL) { - state->failed_port_use_delta--; - - state->failed_port->app_pending_responses--; - - if (nxt_queue_chk_remove(&state->failed_port->app_link)) { - state->failed_port_use_delta--; - } - - req_app_link->app_port = NULL; - } - - can_start_process = nxt_router_app_can_start(app); - - state->port = NULL; - state->start_process = 0; - - if (nxt_queue_is_empty(&app->ports) - || (can_start_process && nxt_router_app_first_port_busy(app)) ) - { - req_app_link = nxt_request_app_link_alloc(task, req_app_link, - req_app_link->req_rpc_data); - if (nxt_slow_path(req_app_link == NULL)) { - goto fail; - } - - if (nxt_slow_path(state->failed_port != NULL)) { - nxt_queue_insert_head(&app->requests, - &req_app_link->link_app_requests); - - } else { - nxt_queue_insert_tail(&app->requests, - &req_app_link->link_app_requests); - } - - nxt_request_app_link_inc_use(req_app_link); - - nxt_debug(task, "req_app_link stream #%uD enqueue to app->requests", - req_app_link->stream); - - if (can_start_process) { - app->pending_processes++; - state->start_process = 1; - } - - } else { - state->port = nxt_router_pop_first_port(app); - - if (state->port->app_pending_responses > 1) { - req_app_link = nxt_request_app_link_alloc(task, req_app_link, - req_app_link->req_rpc_data); - if (nxt_slow_path(req_app_link == NULL)) { - goto fail; - } - - req_app_link->app_port = state->port; - - nxt_request_app_link_pending(task, app, req_app_link); - } - - if (can_start_process && nxt_router_app_need_start(app)) { - app->pending_processes++; - state->start_process = 1; - } - } - - nxt_request_app_link_chk_use(req_app_link, ra_use_delta); - -fail: - - state->shared_ra = req_app_link; -} - - -static nxt_int_t -nxt_router_port_post_select(nxt_task_t *task, nxt_port_select_state_t *state) +nxt_router_app_port_get(nxt_task_t *task, nxt_app_t *app, + nxt_request_rpc_data_t *req_rpc_data) { - nxt_int_t res; - nxt_app_t *app; - nxt_request_app_link_t *req_app_link; - - req_app_link = state->shared_ra; - app = state->app; - - if (state->failed_port_use_delta != 0) { - nxt_port_use(task, state->failed_port, state->failed_port_use_delta); - } - - if (nxt_slow_path(req_app_link == NULL)) { - if (state->port != NULL) { - nxt_port_use(task, state->port, -1); - } - - nxt_request_app_link_error(task, app, state->req_app_link, - "Failed to allocate shared req<->app link"); - - return NXT_ERROR; - } - - if (state->port != NULL) { - nxt_debug(task, "already have port for app '%V' %p ", &app->name, app); + nxt_bool_t start_process; + nxt_port_t *port; - req_app_link->app_port = state->port; + start_process = 0; - if (state->start_process) { - nxt_router_start_app_process(task, app); - } + nxt_thread_mutex_lock(&app->mutex); - return NXT_OK; - } + port = app->shared_port; + nxt_port_inc_use(port); - if (!state->start_process) { - nxt_debug(task, "app '%V' %p too many running or pending processes", - &app->name, app); + app->active_requests++; - return NXT_AGAIN; + if (nxt_router_app_can_start(app) && nxt_router_app_need_start(app)) { + app->pending_processes++; + start_process = 1; } - res = 
nxt_router_start_app_process(task, app); + nxt_thread_mutex_unlock(&app->mutex); - if (nxt_slow_path(res != NXT_OK)) { - nxt_request_app_link_error(task, app, req_app_link, - "Failed to start app process"); + req_rpc_data->app_port = port; + req_rpc_data->apr_action = NXT_APR_REQUEST_FAILED; - return NXT_ERROR; + if (start_process) { + nxt_router_start_app_process(task, app); } - - return NXT_AGAIN; -} - - -static nxt_int_t -nxt_router_app_port(nxt_task_t *task, nxt_app_t *app, - nxt_request_app_link_t *req_app_link) -{ - nxt_port_select_state_t state; - - state.req_app_link = req_app_link; - state.app = app; - - nxt_thread_mutex_lock(&app->mutex); - - nxt_router_port_select(task, &state); - - nxt_thread_mutex_unlock(&app->mutex); - - return nxt_router_port_post_select(task, &state); } @@ -4905,10 +4399,7 @@ void nxt_router_process_http_request(nxt_task_t *task, nxt_http_request_t *r, nxt_app_t *app) { - nxt_int_t res; - nxt_port_t *port; nxt_event_engine_t *engine; - nxt_request_app_link_t ra_local, *req_app_link; nxt_request_rpc_data_t *req_rpc_data; engine = task->thread->engine; @@ -4927,7 +4418,7 @@ nxt_router_process_http_request(nxt_task_t *task, nxt_http_request_t *r, * in port handlers. Need to fixup request memory pool. Counterpart * release will be called via following call chain: * nxt_request_rpc_data_unlink() -> - * nxt_router_http_request_done() -> + * nxt_router_http_request_release_post() -> * nxt_router_http_request_release() */ nxt_mp_retain(r->mem_pool); @@ -4939,29 +4430,37 @@ nxt_router_process_http_request(nxt_task_t *task, nxt_http_request_t *r, req_rpc_data->stream = nxt_port_rpc_ex_stream(req_rpc_data); req_rpc_data->app = app; + req_rpc_data->msg_info.body_fd = -1; + req_rpc_data->rpc_cancel = 1; nxt_router_app_use(task, app, 1); req_rpc_data->request = r; r->req_rpc_data = req_rpc_data; - req_app_link = &ra_local; - nxt_request_app_link_init(task, req_app_link, req_rpc_data); + if (r->last != NULL) { + r->last->completion_handler = nxt_router_http_request_done; + } - res = nxt_router_app_port(task, app, req_app_link); - req_app_link = req_rpc_data->req_app_link; + nxt_router_app_port_get(task, app, req_rpc_data); + nxt_router_app_prepare_request(task, req_rpc_data); +} - if (res == NXT_OK) { - port = req_app_link->app_port; - nxt_assert(port != NULL); +static void +nxt_router_http_request_done(nxt_task_t *task, void *obj, void *data) +{ + nxt_http_request_t *r; - nxt_port_rpc_ex_set_peer(task, engine->port, req_rpc_data, port->pid); + r = data; - nxt_router_app_prepare_request(task, req_app_link); + nxt_debug(task, "router http request done (rpc_data %p)", r->req_rpc_data); + + if (r->req_rpc_data) { + nxt_request_rpc_data_unlink(task, r->req_rpc_data); } - nxt_request_app_link_use(task, req_app_link, -1); + nxt_http_request_close_handler(task, r, r->proto.any); } @@ -4973,76 +4472,80 @@ nxt_router_dummy_buf_completion(nxt_task_t *task, void *obj, void *data) static void nxt_router_app_prepare_request(nxt_task_t *task, - nxt_request_app_link_t *req_app_link) + nxt_request_rpc_data_t *req_rpc_data) { - nxt_buf_t *buf; + nxt_app_t *app; + nxt_buf_t *buf, *body; nxt_int_t res; nxt_port_t *port, *reply_port; - nxt_apr_action_t apr_action; - nxt_assert(req_app_link->app_port != NULL); + app = req_rpc_data->app; - port = req_app_link->app_port; - reply_port = req_app_link->reply_port; + nxt_assert(app != NULL); - apr_action = NXT_APR_REQUEST_FAILED; + port = req_rpc_data->app_port; - buf = nxt_router_prepare_msg(task, req_app_link->request, port, - 
nxt_app_msg_prefix[port->app->type]); + nxt_assert(port != NULL); + reply_port = task->thread->engine->port; + + buf = nxt_router_prepare_msg(task, req_rpc_data->request, app, + nxt_app_msg_prefix[app->type]); if (nxt_slow_path(buf == NULL)) { - nxt_request_app_link_error(task, port->app, req_app_link, - "Failed to prepare message for application"); - goto release_port; + nxt_alert(task, "stream #%uD, app '%V': failed to prepare app message", + req_rpc_data->stream, &app->name); + + nxt_http_request_error(task, req_rpc_data->request, + NXT_HTTP_INTERNAL_SERVER_ERROR); + + return; } nxt_debug(task, "about to send %O bytes buffer to app process port %d", nxt_buf_used_size(buf), port->socket.fd); - apr_action = NXT_APR_NEW_PORT; - - req_app_link->msg_info.buf = buf; - req_app_link->msg_info.completion_handler = buf->completion_handler; + req_rpc_data->msg_info.buf = buf; + req_rpc_data->msg_info.completion_handler = buf->completion_handler; - for (; buf; buf = buf->next) { + do { buf->completion_handler = nxt_router_dummy_buf_completion; - } + buf = buf->next; + } while (buf != NULL); - buf = req_app_link->msg_info.buf; + buf = req_rpc_data->msg_info.buf; - res = nxt_port_mmap_get_tracking(task, &port->process->outgoing, - &req_app_link->msg_info.tracking, - req_app_link->stream); - if (nxt_slow_path(res != NXT_OK)) { - nxt_request_app_link_error(task, port->app, req_app_link, - "Failed to get tracking area"); - goto release_port; - } + body = req_rpc_data->request->body; - if (req_app_link->body_fd != -1) { - nxt_debug(task, "stream #%uD: send body fd %d", req_app_link->stream, - req_app_link->body_fd); + if (body != NULL && nxt_buf_is_file(body)) { + req_rpc_data->msg_info.body_fd = body->file->fd; + + body->file->fd = -1; - lseek(req_app_link->body_fd, 0, SEEK_SET); + } else { + req_rpc_data->msg_info.body_fd = -1; } - res = nxt_port_socket_twrite(task, port, NXT_PORT_MSG_REQ_HEADERS, - req_app_link->body_fd, - req_app_link->stream, reply_port->id, buf, - &req_app_link->msg_info.tracking); + if (req_rpc_data->msg_info.body_fd != -1) { + nxt_debug(task, "stream #%uD: send body fd %d", req_rpc_data->stream, + req_rpc_data->msg_info.body_fd); - if (nxt_slow_path(res != NXT_OK)) { - nxt_request_app_link_error(task, port->app, req_app_link, - "Failed to send message to application"); - goto release_port; + lseek(req_rpc_data->msg_info.body_fd, 0, SEEK_SET); } -release_port: + res = nxt_port_socket_twrite(task, port, + NXT_PORT_MSG_REQ_HEADERS, + req_rpc_data->msg_info.body_fd, + req_rpc_data->stream, reply_port->id, buf, + NULL); - nxt_router_app_port_release(task, port, apr_action); + if (nxt_slow_path(res != NXT_OK)) { + nxt_alert(task, "stream #%uD, app '%V': failed to send app message", + req_rpc_data->stream, &app->name); - nxt_request_app_link_update_peer(task, req_app_link); + nxt_http_request_error(task, req_rpc_data->request, + NXT_HTTP_INTERNAL_SERVER_ERROR); + } } @@ -5100,7 +4603,7 @@ nxt_fields_next(nxt_fields_iter_t *i) static nxt_buf_t * nxt_router_prepare_msg(nxt_task_t *task, nxt_http_request_t *r, - nxt_port_t *port, const nxt_str_t *prefix) + nxt_app_t *app, const nxt_str_t *prefix) { void *target_pos, *query_pos; u_char *pos, *end, *p, c; @@ -5141,7 +4644,7 @@ nxt_router_prepare_msg(nxt_task_t *task, nxt_http_request_t *r, return NULL; } - out = nxt_port_mmap_get_buf(task, &port->process->outgoing, + out = nxt_port_mmap_get_buf(task, &app->outgoing, nxt_min(req_size + content_length, PORT_MMAP_DATA_SIZE)); if (nxt_slow_path(out == NULL)) { return NULL; @@ -5323,8 +4826,7 @@ 
nxt_router_prepare_msg(nxt_task_t *task, nxt_http_request_t *r, if (buf == NULL) { free_size = nxt_min(size, PORT_MMAP_DATA_SIZE); - buf = nxt_port_mmap_get_buf(task, &port->process->outgoing, - free_size); + buf = nxt_port_mmap_get_buf(task, &app->outgoing, free_size); if (nxt_slow_path(buf == NULL)) { while (out != NULL) { buf = out->next; @@ -5372,15 +4874,9 @@ nxt_router_prepare_msg(nxt_task_t *task, nxt_http_request_t *r, static void nxt_router_app_timeout(nxt_task_t *task, void *obj, void *data) { - nxt_app_t *app; - nxt_bool_t cancelled, unlinked; - nxt_port_t *port; nxt_timer_t *timer; - nxt_queue_link_t *lnk; nxt_http_request_t *r; - nxt_request_app_link_t *pending_ra; nxt_request_rpc_data_t *req_rpc_data; - nxt_port_select_state_t state; timer = obj; @@ -5388,94 +4884,6 @@ nxt_router_app_timeout(nxt_task_t *task, void *obj, void *data) r = nxt_timer_data(timer, nxt_http_request_t, timer); req_rpc_data = r->timer_data; - app = req_rpc_data->app; - - if (app == NULL) { - goto generate_error; - } - - port = NULL; - pending_ra = NULL; - - if (req_rpc_data->app_port != NULL) { - port = req_rpc_data->app_port; - req_rpc_data->app_port = NULL; - } - - if (port == NULL && req_rpc_data->req_app_link != NULL - && req_rpc_data->req_app_link->app_port != NULL) - { - port = req_rpc_data->req_app_link->app_port; - req_rpc_data->req_app_link->app_port = NULL; - } - - if (port == NULL) { - goto generate_error; - } - - nxt_thread_mutex_lock(&app->mutex); - - unlinked = nxt_queue_chk_remove(&port->app_link); - - if (!nxt_queue_is_empty(&port->pending_requests)) { - lnk = nxt_queue_first(&port->pending_requests); - - pending_ra = nxt_queue_link_data(lnk, nxt_request_app_link_t, - link_port_pending); - - nxt_assert(pending_ra->link_app_pending.next != NULL); - - nxt_debug(task, "app '%V' pending request #%uD found", - &app->name, pending_ra->stream); - - cancelled = nxt_router_msg_cancel(task, &pending_ra->msg_info, - pending_ra->stream); - - if (cancelled) { - state.req_app_link = pending_ra; - state.app = app; - - /* - * Need to increment use count "in advance" because - * nxt_router_port_select() will remove pending_ra from lists - * and decrement use count. - */ - nxt_request_app_link_inc_use(pending_ra); - - nxt_router_port_select(task, &state); - - } else { - pending_ra = NULL; - } - } - - nxt_thread_mutex_unlock(&app->mutex); - - if (pending_ra != NULL) { - if (nxt_router_port_post_select(task, &state) == NXT_OK) { - /* - * Reference counter already incremented above, this will - * keep pending_ra while nxt_router_app_process_request() - * task is in queue. Reference counter decreased in - * nxt_router_app_process_request() after processing. - */ - - nxt_work_queue_add(&task->thread->engine->fast_work_queue, - nxt_router_app_process_request, - &task->thread->engine->task, app, pending_ra); - - } else { - nxt_request_app_link_use(task, pending_ra, -1); - } - } - - nxt_debug(task, "send quit to app '%V' pid %PI", &app->name, port->pid); - - nxt_port_socket_write(task, port, NXT_PORT_MSG_QUIT, -1, 0, 0, NULL); - - nxt_port_use(task, port, unlinked ? 
-2 : -1); - -generate_error: nxt_http_request_error(task, r, NXT_HTTP_SERVICE_UNAVAILABLE); @@ -5483,13 +4891,11 @@ generate_error: } -static nxt_int_t -nxt_router_http_request_done(nxt_task_t *task, nxt_http_request_t *r) +static void +nxt_router_http_request_release_post(nxt_task_t *task, nxt_http_request_t *r) { r->timer.handler = nxt_router_http_request_release; nxt_timer_add(task->thread->engine, &r->timer, 0); - - return NXT_OK; } @@ -5498,7 +4904,7 @@ nxt_router_http_request_release(nxt_task_t *task, void *obj, void *data) { nxt_http_request_t *r; - nxt_debug(task, "http app release"); + nxt_debug(task, "http request pool release"); r = nxt_timer_data(obj, nxt_http_request_t, timer); @@ -5593,7 +4999,18 @@ nxt_router_get_mmap_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) nxt_assert(port->type == NXT_PROCESS_APP); - mmaps = &port->process->outgoing; + if (nxt_slow_path(port->app == NULL)) { + nxt_alert(task, "get_mmap_handler: app == NULL for reply port %PI:%d", + port->pid, port->id); + + // FIXME + nxt_port_socket_write(task, port, NXT_PORT_MSG_RPC_ERROR, + -1, msg->port_msg.stream, 0, NULL); + + return; + } + + mmaps = &port->app->outgoing; nxt_thread_mutex_lock(&mmaps->mutex); if (nxt_slow_path(get_mmap_msg->id >= mmaps->size)) { @@ -5602,6 +5019,9 @@ nxt_router_get_mmap_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) nxt_alert(task, "get_mmap_handler: mmap id is too big (%d)", (int) get_mmap_msg->id); + // FIXME + nxt_port_socket_write(task, port, NXT_PORT_MSG_RPC_ERROR, + -1, msg->port_msg.stream, 0, NULL); return; } diff --git a/src/nxt_router.h b/src/nxt_router.h index d8e93be6..7e0dc7d7 100644 --- a/src/nxt_router.h +++ b/src/nxt_router.h @@ -101,18 +101,20 @@ typedef struct { struct nxt_app_s { - nxt_thread_mutex_t mutex; /* Protects ports queue. */ - nxt_queue_t ports; /* of nxt_port_t.app_link */ + nxt_thread_mutex_t mutex; /* Protects ports queue. 
*/ + nxt_queue_t ports; /* of nxt_port_t.app_link */ + nxt_lvlhsh_t port_hash; /* of nxt_port_t */ nxt_queue_t spare_ports; /* of nxt_port_t.idle_link */ nxt_queue_t idle_ports; /* of nxt_port_t.idle_link */ nxt_work_t adjust_idle_work; nxt_event_engine_t *engine; - nxt_queue_t requests; /* of nxt_request_app_link_t */ - nxt_queue_t pending; /* of nxt_request_app_link_t */ nxt_str_t name; + uint32_t port_hash_count; + + uint32_t active_requests; uint32_t pending_processes; uint32_t processes; uint32_t idle_processes; @@ -120,7 +122,6 @@ struct nxt_app_s { uint32_t max_processes; uint32_t spare_processes; uint32_t max_pending_processes; - uint32_t max_pending_responses; uint32_t max_requests; nxt_msec_t timeout; @@ -139,6 +140,9 @@ struct nxt_app_s { nxt_atomic_t use_count; nxt_app_joint_t *joint; + nxt_port_t *shared_port; + + nxt_port_mmaps_t outgoing; }; diff --git a/src/nxt_router_request.h b/src/nxt_router_request.h index a38980ee..1271520d 100644 --- a/src/nxt_router_request.h +++ b/src/nxt_router_request.h @@ -9,14 +9,12 @@ typedef struct nxt_msg_info_s { nxt_buf_t *buf; + nxt_fd_t body_fd; nxt_port_mmap_tracking_t tracking; nxt_work_handler_t completion_handler; } nxt_msg_info_t; -typedef struct nxt_request_app_link_s nxt_request_app_link_t; - - typedef enum { NXT_APR_NEW_PORT, NXT_APR_REQUEST_FAILED, @@ -35,38 +33,9 @@ typedef struct { nxt_http_request_t *request; nxt_msg_info_t msg_info; - nxt_request_app_link_t *req_app_link; -} nxt_request_rpc_data_t; - - -struct nxt_request_app_link_s { - uint32_t stream; - nxt_atomic_t use_count; - - nxt_port_t *app_port; - nxt_apr_action_t apr_action; - - nxt_port_t *reply_port; - nxt_http_request_t *request; - nxt_msg_info_t msg_info; - nxt_request_rpc_data_t *req_rpc_data; - nxt_fd_t body_fd; - nxt_nsec_t res_time; - - nxt_queue_link_t link_app_requests; /* for nxt_app_t.requests */ - /* for nxt_port_t.pending_requests */ - nxt_queue_link_t link_port_pending; - nxt_queue_link_t link_app_pending; /* for nxt_app_t.pending */ - /* for nxt_port_t.active_websockets */ - nxt_queue_link_t link_port_websockets; - - nxt_mp_t *mem_pool; - nxt_work_t work; - - int err_code; - const char *err_str; -}; + nxt_bool_t rpc_cancel; +} nxt_request_rpc_data_t; #endif /* _NXT_ROUTER_REQUEST_H_INCLUDED_ */ diff --git a/src/nxt_unit.c b/src/nxt_unit.c index b321a0d4..7fb2826d 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -38,8 +38,8 @@ typedef struct nxt_unit_websocket_frame_impl_s nxt_unit_websocket_frame_impl_t; static nxt_unit_impl_t *nxt_unit_create(nxt_unit_init_t *init); static int nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, void *data); -nxt_inline void nxt_unit_ctx_use(nxt_unit_ctx_impl_t *ctx_impl); -nxt_inline void nxt_unit_ctx_release(nxt_unit_ctx_impl_t *ctx_impl); +nxt_inline void nxt_unit_ctx_use(nxt_unit_ctx_t *ctx); +nxt_inline void nxt_unit_ctx_release(nxt_unit_ctx_t *ctx); nxt_inline void nxt_unit_lib_use(nxt_unit_impl_t *lib); nxt_inline void nxt_unit_lib_release(nxt_unit_impl_t *lib); nxt_inline void nxt_unit_mmap_buf_insert(nxt_unit_mmap_buf_t **head, @@ -58,6 +58,7 @@ static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); static int nxt_unit_request_check_response_port(nxt_unit_request_info_t *req, nxt_unit_port_id_t *port_id); +static int nxt_unit_send_req_headers_ack(nxt_unit_request_info_t *req); static int nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); static int nxt_unit_process_shm_ack(nxt_unit_ctx_t *ctx); @@ -122,9 +123,12 @@ 
static nxt_unit_process_t *nxt_unit_process_get(nxt_unit_impl_t *lib, static nxt_unit_process_t *nxt_unit_process_find(nxt_unit_impl_t *lib, pid_t pid, int remove); static nxt_unit_process_t *nxt_unit_process_pop_first(nxt_unit_impl_t *lib); -static void nxt_unit_read_buf(nxt_unit_ctx_t *ctx, - nxt_unit_read_buf_t *rbuf); -static void nxt_unit_process_ready_req(nxt_unit_ctx_impl_t *ctx_impl); +static int nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx); +static int nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf); +static int nxt_unit_process_pending_rbuf(nxt_unit_ctx_t *ctx); +static void nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx); +static int nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, + nxt_unit_port_t *port); static void nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl); static nxt_unit_port_t *nxt_unit_create_port(nxt_unit_ctx_t *ctx); @@ -150,9 +154,8 @@ static ssize_t nxt_unit_port_send(nxt_unit_ctx_t *ctx, const void *oob, size_t oob_size); static ssize_t nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd, const void *buf, size_t buf_size, const void *oob, size_t oob_size); -static ssize_t nxt_unit_port_recv(nxt_unit_ctx_t *ctx, - nxt_unit_port_t *port, void *buf, size_t buf_size, - void *oob, size_t oob_size); +static int nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, + nxt_unit_read_buf_t *rbuf); static int nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port); @@ -308,6 +311,7 @@ struct nxt_unit_impl_s { nxt_lvlhsh_t ports; /* of nxt_unit_port_impl_t */ nxt_unit_port_t *router_port; + nxt_unit_port_t *shared_port; nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */ @@ -452,7 +456,7 @@ nxt_unit_init(nxt_unit_init_t *init) fail: - nxt_unit_ctx_release(&lib->main_ctx); + nxt_unit_ctx_release(&lib->main_ctx.ctx); return NULL; } @@ -496,6 +500,7 @@ nxt_unit_create(nxt_unit_init_t *init) lib->use_count = 0; lib->router_port = NULL; + lib->shared_port = NULL; rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data); if (nxt_slow_path(rc != NXT_UNIT_OK)) { @@ -570,16 +575,23 @@ nxt_unit_ctx_init(nxt_unit_impl_t *lib, nxt_unit_ctx_impl_t *ctx_impl, nxt_inline void -nxt_unit_ctx_use(nxt_unit_ctx_impl_t *ctx_impl) +nxt_unit_ctx_use(nxt_unit_ctx_t *ctx) { + nxt_unit_ctx_impl_t *ctx_impl; + + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + nxt_atomic_fetch_add(&ctx_impl->use_count, 1); } nxt_inline void -nxt_unit_ctx_release(nxt_unit_ctx_impl_t *ctx_impl) +nxt_unit_ctx_release(nxt_unit_ctx_t *ctx) { - long c; + long c; + nxt_unit_ctx_impl_t *ctx_impl; + + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); c = nxt_atomic_fetch_add(&ctx_impl->use_count, -1); @@ -624,6 +636,10 @@ nxt_unit_lib_release(nxt_unit_impl_t *lib) nxt_unit_port_release(lib->router_port); } + if (nxt_fast_path(lib->shared_port != NULL)) { + nxt_unit_port_release(lib->shared_port); + } + free(lib); } } @@ -805,6 +821,15 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) recv_msg.incoming_buf = NULL; if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) { + if (nxt_slow_path(rbuf->size == 0)) { + nxt_unit_debug(ctx, "read port closed"); + + nxt_unit_quit(ctx); + rc = NXT_UNIT_OK; + + goto fail; + } + nxt_unit_alert(ctx, "message too small (%d bytes)", (int) rbuf->size); goto fail; } @@ -946,6 +971,13 @@ fail: nxt_unit_process_release(recv_msg.process); } + if (nxt_fast_path(rc != NXT_UNIT_AGAIN)) { +#if (NXT_DEBUG) + memset(rbuf->buf, 0xAC, rbuf->size); +#endif + nxt_unit_read_buf_release(ctx, rbuf); + } + return 
rc; } @@ -954,6 +986,7 @@ static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) { int nb; + nxt_unit_impl_t *lib; nxt_unit_port_t new_port, *port; nxt_port_msg_new_port_t *new_port_msg; @@ -978,21 +1011,33 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) recv_msg->stream, (int) new_port_msg->pid, (int) new_port_msg->id, recv_msg->fd); - nb = 0; + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + + if (new_port_msg->id == (nxt_port_id_t) -1) { + nxt_unit_port_id_init(&new_port.id, lib->pid, new_port_msg->id); - if (nxt_slow_path(ioctl(recv_msg->fd, FIONBIO, &nb) == -1)) { - nxt_unit_alert(ctx, "#%"PRIu32": new_port: ioctl(%d, FIONBIO, 0) " - "failed: %s (%d)", - recv_msg->stream, recv_msg->fd, strerror(errno), errno); + new_port.in_fd = recv_msg->fd; + new_port.out_fd = -1; - return NXT_UNIT_ERROR; + } else { + nb = 0; + + if (nxt_slow_path(ioctl(recv_msg->fd, FIONBIO, &nb) == -1)) { + nxt_unit_alert(ctx, "#%"PRIu32": new_port: ioctl(%d, FIONBIO, 0) " + "failed: %s (%d)", + recv_msg->stream, recv_msg->fd, strerror(errno), errno); + + return NXT_UNIT_ERROR; + } + + nxt_unit_port_id_init(&new_port.id, new_port_msg->pid, + new_port_msg->id); + + new_port.in_fd = -1; + new_port.out_fd = recv_msg->fd; } - nxt_unit_port_id_init(&new_port.id, new_port_msg->pid, - new_port_msg->id); - new_port.in_fd = -1; - new_port.out_fd = recv_msg->fd; new_port.data = NULL; recv_msg->fd = -1; @@ -1002,7 +1047,12 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) return NXT_UNIT_ERROR; } - nxt_unit_port_release(port); + if (new_port_msg->id == (nxt_port_id_t) -1) { + lib->shared_port = port; + + } else { + nxt_unit_port_release(port); + } return NXT_UNIT_OK; } @@ -1102,6 +1152,11 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) } if (nxt_fast_path(res == NXT_UNIT_OK)) { + res = nxt_unit_send_req_headers_ack(req); + if (nxt_slow_path(res != NXT_UNIT_OK)) { + return res; + } + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); lib->callbacks.request_handler(req); @@ -1220,6 +1275,36 @@ nxt_unit_request_check_response_port(nxt_unit_request_info_t *req, } +static int +nxt_unit_send_req_headers_ack(nxt_unit_request_info_t *req) +{ + ssize_t res; + nxt_port_msg_t msg; + nxt_unit_impl_t *lib; + nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_request_info_impl_t *req_impl; + + lib = nxt_container_of(req->ctx->unit, nxt_unit_impl_t, unit); + ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx); + req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); + + memset(&msg, 0, sizeof(nxt_port_msg_t)); + + msg.stream = req_impl->stream; + msg.pid = lib->pid; + msg.reply_port = ctx_impl->read_port->id.id; + msg.type = _NXT_PORT_MSG_REQ_HEADERS_ACK; + + res = nxt_unit_port_send(req->ctx, req->response_port, + &msg, sizeof(msg), NULL, 0); + if (nxt_slow_path(res != sizeof(msg))) { + return NXT_UNIT_ERROR; + } + + return NXT_UNIT_OK; +} + + static int nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) { @@ -3267,7 +3352,9 @@ nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx) return NXT_UNIT_ERROR; } - nxt_unit_read_buf(ctx, rbuf); + memset(rbuf->oob, 0, sizeof(struct cmsghdr)); + + nxt_unit_port_recv(ctx, ctx_impl->read_port, rbuf); if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) { nxt_unit_read_buf_release(ctx, rbuf); @@ -4218,26 +4305,23 @@ nxt_unit_process_pop_first(nxt_unit_impl_t *lib) int nxt_unit_run(nxt_unit_ctx_t *ctx) { - 
int rc; - nxt_unit_impl_t *lib; - nxt_unit_ctx_impl_t *ctx_impl; - - ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + int rc; + nxt_unit_impl_t *lib; - nxt_unit_ctx_use(ctx_impl); + nxt_unit_ctx_use(ctx); lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); rc = NXT_UNIT_OK; while (nxt_fast_path(lib->online)) { - rc = nxt_unit_run_once(ctx); + rc = nxt_unit_run_once_impl(ctx); - if (nxt_slow_path(rc != NXT_UNIT_OK)) { + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { break; } } - nxt_unit_ctx_release(ctx_impl); + nxt_unit_ctx_release(ctx); return rc; } @@ -4246,109 +4330,163 @@ nxt_unit_run(nxt_unit_ctx_t *ctx) int nxt_unit_run_once(nxt_unit_ctx_t *ctx) { - int rc; - nxt_queue_link_t *link; - nxt_unit_ctx_impl_t *ctx_impl; - nxt_unit_read_buf_t *rbuf; + int rc; - ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + nxt_unit_ctx_use(ctx); - nxt_unit_ctx_use(ctx_impl); + rc = nxt_unit_run_once_impl(ctx); - pthread_mutex_lock(&ctx_impl->mutex); + nxt_unit_ctx_release(ctx); - if (!nxt_queue_is_empty(&ctx_impl->pending_rbuf)) { + return rc; +} -next_pending: - link = nxt_queue_first(&ctx_impl->pending_rbuf); - nxt_queue_remove(link); +static int +nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx) +{ + int rc; + nxt_unit_read_buf_t *rbuf; - rbuf = nxt_container_of(link, nxt_unit_read_buf_t, link); + rbuf = nxt_unit_read_buf_get(ctx); + if (nxt_slow_path(rbuf == NULL)) { + return NXT_UNIT_ERROR; + } - pthread_mutex_unlock(&ctx_impl->mutex); + rc = nxt_unit_read_buf(ctx, rbuf); + if (nxt_slow_path(rc != NXT_UNIT_OK)) { + nxt_unit_read_buf_release(ctx, rbuf); - } else { - rbuf = nxt_unit_read_buf_get_impl(ctx_impl); + return rc; + } - pthread_mutex_unlock(&ctx_impl->mutex); + rc = nxt_unit_process_msg(ctx, rbuf); + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { + return NXT_UNIT_ERROR; + } - if (nxt_slow_path(rbuf == NULL)) { + rc = nxt_unit_process_pending_rbuf(ctx); + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { + return NXT_UNIT_ERROR; + } - nxt_unit_ctx_release(ctx_impl); + nxt_unit_process_ready_req(ctx); - return NXT_UNIT_ERROR; - } + return rc; +} - nxt_unit_read_buf(ctx, rbuf); - } - if (nxt_fast_path(rbuf->size > 0)) { - rc = nxt_unit_process_msg(ctx, rbuf); +static int +nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) +{ + int res, err; + nxt_unit_impl_t *lib; + nxt_unit_ctx_impl_t *ctx_impl; + struct pollfd fds[2]; -#if (NXT_DEBUG) - if (nxt_fast_path(rc != NXT_UNIT_AGAIN)) { - memset(rbuf->buf, 0xAC, rbuf->size); - } -#endif + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); - } else { - rc = NXT_UNIT_ERROR; + memset(rbuf->oob, 0, sizeof(struct cmsghdr)); + + if (ctx_impl->wait_items > 0 || lib->shared_port == NULL) { + return nxt_unit_port_recv(ctx, ctx_impl->read_port, rbuf); } - if (nxt_slow_path(rc == NXT_UNIT_AGAIN)) { - rc = NXT_UNIT_OK; +retry: - } else { - nxt_unit_read_buf_release(ctx, rbuf); - } + fds[0].fd = ctx_impl->read_port->in_fd; + fds[0].events = POLLIN; + fds[0].revents = 0; - if (nxt_slow_path(rc == NXT_UNIT_CANCELLED)) { - rc = NXT_UNIT_OK; - } + fds[1].fd = lib->shared_port->in_fd; + fds[1].events = POLLIN; + fds[1].revents = 0; - if (nxt_fast_path(rc == NXT_UNIT_OK)) { - pthread_mutex_lock(&ctx_impl->mutex); + res = poll(fds, 2, -1); + if (nxt_slow_path(res < 0)) { + err = errno; - if (!nxt_queue_is_empty(&ctx_impl->pending_rbuf)) { - goto next_pending; + if (err == EINTR) { + goto retry; } - pthread_mutex_unlock(&ctx_impl->mutex); + nxt_unit_alert(ctx, "poll() failed: 
%s (%d)", + strerror(err), err); - nxt_unit_process_ready_req(ctx_impl); + rbuf->size = -1; + + return (err == EAGAIN) ? NXT_UNIT_AGAIN : NXT_UNIT_ERROR; } - nxt_unit_ctx_release(ctx_impl); + if ((fds[0].revents & POLLIN) != 0) { + return nxt_unit_port_recv(ctx, ctx_impl->read_port, rbuf); + } - return rc; + if ((fds[1].revents & POLLIN) != 0) { + return nxt_unit_port_recv(ctx, lib->shared_port, rbuf); + } + + rbuf->size = -1; + + return NXT_UNIT_ERROR; } -static void -nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) +static int +nxt_unit_process_pending_rbuf(nxt_unit_ctx_t *ctx) { + int rc; + nxt_queue_t pending_rbuf; nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_read_buf_t *rbuf; + + nxt_queue_init(&pending_rbuf); ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); - memset(rbuf->oob, 0, sizeof(struct cmsghdr)); + pthread_mutex_lock(&ctx_impl->mutex); + + if (nxt_queue_is_empty(&ctx_impl->pending_rbuf)) { + pthread_mutex_unlock(&ctx_impl->mutex); + + return NXT_UNIT_OK; + } + + nxt_queue_add(&pending_rbuf, &ctx_impl->pending_rbuf); + nxt_queue_init(&ctx_impl->pending_rbuf); - rbuf->size = nxt_unit_port_recv(ctx, ctx_impl->read_port, - rbuf->buf, sizeof(rbuf->buf), - rbuf->oob, sizeof(rbuf->oob)); + pthread_mutex_unlock(&ctx_impl->mutex); + + rc = NXT_UNIT_OK; + + nxt_queue_each(rbuf, &pending_rbuf, nxt_unit_read_buf_t, link) { + + if (nxt_fast_path(rc != NXT_UNIT_ERROR)) { + rc = nxt_unit_process_msg(&ctx_impl->ctx, rbuf); + + } else { + nxt_unit_read_buf_release(ctx, rbuf); + } + + } nxt_queue_loop; + + return rc; } static void -nxt_unit_process_ready_req(nxt_unit_ctx_impl_t *ctx_impl) +nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx) { nxt_queue_t ready_req; nxt_unit_impl_t *lib; + nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_request_info_impl_t *req_impl; nxt_queue_init(&ready_req); + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + pthread_mutex_lock(&ctx_impl->mutex); if (nxt_queue_is_empty(&ctx_impl->ready_req)) { @@ -4367,20 +4505,121 @@ nxt_unit_process_ready_req(nxt_unit_ctx_impl_t *ctx_impl) { lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit); + (void) nxt_unit_send_req_headers_ack(&req_impl->req); + lib->callbacks.request_handler(&req_impl->req); } nxt_queue_loop; } -void -nxt_unit_done(nxt_unit_ctx_t *ctx) +int +nxt_unit_run_ctx(nxt_unit_ctx_t *ctx) { + int rc; + nxt_unit_impl_t *lib; nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_ctx_use(ctx); + + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); - nxt_unit_ctx_release(ctx_impl); + rc = NXT_UNIT_OK; + + while (nxt_fast_path(lib->online)) { + rc = nxt_unit_process_port_msg_impl(ctx, ctx_impl->read_port); + + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { + break; + } + } + + nxt_unit_ctx_release(ctx); + + return rc; +} + + +int +nxt_unit_run_shared(nxt_unit_ctx_t *ctx) +{ + int rc; + nxt_unit_impl_t *lib; + + nxt_unit_ctx_use(ctx); + + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + rc = NXT_UNIT_OK; + + while (nxt_fast_path(lib->online)) { + rc = nxt_unit_process_port_msg_impl(ctx, lib->shared_port); + + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { + break; + } + } + + nxt_unit_ctx_release(ctx); + + return rc; +} + + +int +nxt_unit_process_port_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) +{ + int rc; + + nxt_unit_ctx_use(ctx); + + rc = nxt_unit_process_port_msg_impl(ctx, port); + + nxt_unit_ctx_release(ctx); + + return rc; +} + + +static int +nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, nxt_unit_port_t 
*port) +{ + int rc; + nxt_unit_read_buf_t *rbuf; + + rbuf = nxt_unit_read_buf_get(ctx); + if (nxt_slow_path(rbuf == NULL)) { + return NXT_UNIT_ERROR; + } + + memset(rbuf->oob, 0, sizeof(struct cmsghdr)); + + rc = nxt_unit_port_recv(ctx, port, rbuf); + if (nxt_slow_path(rc != NXT_UNIT_OK)) { + nxt_unit_read_buf_release(ctx, rbuf); + return rc; + } + + rc = nxt_unit_process_msg(ctx, rbuf); + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { + return NXT_UNIT_ERROR; + } + + rc = nxt_unit_process_pending_rbuf(ctx); + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { + return NXT_UNIT_ERROR; + } + + nxt_unit_process_ready_req(ctx); + + return rc; +} + + +void +nxt_unit_done(nxt_unit_ctx_t *ctx) +{ + nxt_unit_ctx_release(ctx); } @@ -5056,12 +5295,11 @@ retry: } -static ssize_t +static int nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, - void *buf, size_t buf_size, void *oob, size_t oob_size) + nxt_unit_read_buf_t *rbuf) { - int fd; - ssize_t res; + int fd, err; struct iovec iov[1]; struct msghdr msg; nxt_unit_impl_t *lib; @@ -5069,40 +5307,57 @@ nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); if (lib->callbacks.port_recv != NULL) { - return lib->callbacks.port_recv(ctx, port, - buf, buf_size, oob, oob_size); + rbuf->size = lib->callbacks.port_recv(ctx, port, + rbuf->buf, sizeof(rbuf->buf), + rbuf->oob, sizeof(rbuf->oob)); + + if (nxt_slow_path(rbuf->size < 0)) { + return NXT_UNIT_ERROR; + } + + return NXT_UNIT_OK; } - iov[0].iov_base = buf; - iov[0].iov_len = buf_size; + iov[0].iov_base = rbuf->buf; + iov[0].iov_len = sizeof(rbuf->buf); msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iov; msg.msg_iovlen = 1; msg.msg_flags = 0; - msg.msg_control = oob; - msg.msg_controllen = oob_size; + msg.msg_control = rbuf->oob; + msg.msg_controllen = sizeof(rbuf->oob); fd = port->in_fd; retry: - res = recvmsg(fd, &msg, 0); + rbuf->size = recvmsg(fd, &msg, 0); - if (nxt_slow_path(res == -1)) { - if (errno == EINTR) { + if (nxt_slow_path(rbuf->size == -1)) { + err = errno; + + if (err == EINTR) { goto retry; } + if (err == EAGAIN) { + nxt_unit_debug(ctx, "recvmsg(%d) failed: %s (%d)", + fd, strerror(errno), errno); + + return NXT_UNIT_AGAIN; + } + nxt_unit_alert(ctx, "recvmsg(%d) failed: %s (%d)", fd, strerror(errno), errno); - } else { - nxt_unit_debug(ctx, "recvmsg(%d): %d", fd, (int) res); + return NXT_UNIT_ERROR; } - return res; + nxt_unit_debug(ctx, "recvmsg(%d): %d", fd, (int) rbuf->size); + + return NXT_UNIT_OK; } diff --git a/src/nxt_unit.h b/src/nxt_unit.h index 79157f5f..0f16773f 100644 --- a/src/nxt_unit.h +++ b/src/nxt_unit.h @@ -202,8 +202,21 @@ nxt_unit_ctx_t *nxt_unit_init(nxt_unit_init_t *); */ int nxt_unit_run(nxt_unit_ctx_t *); +int nxt_unit_run_ctx(nxt_unit_ctx_t *ctx); + +int nxt_unit_run_shared(nxt_unit_ctx_t *ctx); + +/* + * Receive and process one message, invoke configured callbacks. + * + * If application implements it's own event loop, each datagram received + * from port socket should be initially processed by unit. This function + * may invoke other application-defined callback for message processing. + */ int nxt_unit_run_once(nxt_unit_ctx_t *ctx); +int nxt_unit_process_port_msg(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port); + /* Destroy application library object. */ void nxt_unit_done(nxt_unit_ctx_t *); -- cgit From 2f3d27fa22d2e5566dfdeddfb6a1f8c927a5c73d Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:17 +0300 Subject: Process structures refactoring in runtime and libunit. 
Generic process-to-process shared memory exchange is no more required. Here, it is transformed into a router-to-application pattern. The outgoing shared memory segments collection is now the property of the application structure. The applications connect to the router only, and the process only needs to group the ports. --- src/nxt_process.c | 1 - src/nxt_process.h | 1 - src/nxt_runtime.c | 3 - src/nxt_unit.c | 296 +++++++++++++++++------------------------------------- 4 files changed, 93 insertions(+), 208 deletions(-) diff --git a/src/nxt_process.c b/src/nxt_process.c index 0b3aa40f..9bfae395 100644 --- a/src/nxt_process.c +++ b/src/nxt_process.c @@ -146,7 +146,6 @@ nxt_process_child_fixup(nxt_task_t *task, nxt_process_t *process) } nxt_port_mmaps_destroy(&p->incoming, 0); - nxt_port_mmaps_destroy(&p->outgoing, 0); } nxt_runtime_process_loop; diff --git a/src/nxt_process.h b/src/nxt_process.h index 4076cefc..ecd813e2 100644 --- a/src/nxt_process.h +++ b/src/nxt_process.h @@ -92,7 +92,6 @@ typedef struct { nxt_int_t use_count; nxt_port_mmaps_t incoming; - nxt_port_mmaps_t outgoing; nxt_thread_mutex_t cp_mutex; diff --git a/src/nxt_runtime.c b/src/nxt_runtime.c index c25b93cc..5f4b3e58 100644 --- a/src/nxt_runtime.c +++ b/src/nxt_runtime.c @@ -1377,7 +1377,6 @@ nxt_runtime_process_new(nxt_runtime_t *rt) nxt_queue_init(&process->ports); nxt_thread_mutex_create(&process->incoming.mutex); - nxt_thread_mutex_create(&process->outgoing.mutex); nxt_thread_mutex_create(&process->cp_mutex); process->use_count = 1; @@ -1397,10 +1396,8 @@ nxt_runtime_process_release(nxt_runtime_t *rt, nxt_process_t *process) nxt_assert(process->registered == 0); nxt_port_mmaps_destroy(&process->incoming, 1); - nxt_port_mmaps_destroy(&process->outgoing, 1); nxt_thread_mutex_destroy(&process->incoming.mutex); - nxt_thread_mutex_destroy(&process->outgoing.mutex); nxt_thread_mutex_destroy(&process->cp_mutex); /* processes from nxt_runtime_process_get() have no memory pool */ diff --git a/src/nxt_unit.c b/src/nxt_unit.c index 7fb2826d..154fd480 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -70,8 +70,6 @@ static nxt_unit_websocket_frame_impl_t *nxt_unit_websocket_frame_get( nxt_unit_ctx_t *ctx); static void nxt_unit_websocket_frame_release(nxt_unit_websocket_frame_t *ws); static void nxt_unit_websocket_frame_free(nxt_unit_websocket_frame_impl_t *ws); -static nxt_unit_process_t *nxt_unit_msg_get_process(nxt_unit_ctx_t *ctx, - nxt_unit_recv_msg_t *recv_msg); static nxt_unit_mmap_buf_t *nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx); static void nxt_unit_mmap_buf_release(nxt_unit_mmap_buf_t *mmap_buf); static int nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req, @@ -114,7 +112,6 @@ static int nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, nxt_unit_read_buf_t *rbuf); static int nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id); static void nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, - nxt_unit_process_t *process, nxt_port_mmap_header_t *hdr, void *start, uint32_t size); static int nxt_unit_send_shm_ack(nxt_unit_ctx_t *ctx, pid_t pid); @@ -137,7 +134,6 @@ static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst, nxt_inline void nxt_unit_port_use(nxt_unit_port_t *port); nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port); -nxt_inline nxt_unit_process_t *nxt_unit_port_process(nxt_unit_port_t *port); static nxt_unit_port_t *nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port); static void nxt_unit_remove_port(nxt_unit_impl_t *lib, @@ -179,7 +175,6 @@ struct 
nxt_unit_mmap_buf_s { nxt_port_mmap_header_t *hdr; nxt_unit_request_info_t *req; nxt_unit_ctx_impl_t *ctx_impl; - nxt_unit_process_t *process; char *free_ptr; char *plain_ptr; }; @@ -197,7 +192,6 @@ struct nxt_unit_recv_msg_s { uint32_t size; int fd; - nxt_unit_process_t *process; nxt_unit_mmap_buf_t *incoming_buf; }; @@ -217,8 +211,6 @@ struct nxt_unit_request_info_impl_s { uint32_t stream; - nxt_unit_process_t *process; - nxt_unit_mmap_buf_t *outgoing_buf; nxt_unit_mmap_buf_t *incoming_buf; @@ -296,6 +288,23 @@ struct nxt_unit_ctx_impl_s { }; +struct nxt_unit_mmap_s { + nxt_port_mmap_header_t *hdr; + + /* of nxt_unit_read_buf_t */ + nxt_queue_t awaiting_rbuf; +}; + + +struct nxt_unit_mmaps_s { + pthread_mutex_t mutex; + uint32_t size; + uint32_t cap; + nxt_atomic_t allocated_chunks; + nxt_unit_mmap_t *elts; +}; + + struct nxt_unit_impl_s { nxt_unit_t unit; nxt_unit_callbacks_t callbacks; @@ -315,6 +324,9 @@ struct nxt_unit_impl_s { nxt_queue_t contexts; /* of nxt_unit_ctx_impl_t */ + nxt_unit_mmaps_t incoming; + nxt_unit_mmaps_t outgoing; + pid_t pid; int log_fd; int online; @@ -339,31 +351,11 @@ struct nxt_unit_port_impl_s { }; -struct nxt_unit_mmap_s { - nxt_port_mmap_header_t *hdr; - - /* of nxt_unit_read_buf_t */ - nxt_queue_t awaiting_rbuf; -}; - - -struct nxt_unit_mmaps_s { - pthread_mutex_t mutex; - uint32_t size; - uint32_t cap; - nxt_atomic_t allocated_chunks; - nxt_unit_mmap_t *elts; -}; - - struct nxt_unit_process_s { pid_t pid; nxt_queue_t ports; /* of nxt_unit_port_impl_t */ - nxt_unit_mmaps_t incoming; - nxt_unit_mmaps_t outgoing; - nxt_unit_impl_t *lib; nxt_atomic_t use_count; @@ -515,6 +507,9 @@ nxt_unit_create(nxt_unit_init_t *init) goto fail; } + nxt_unit_mmaps_init(&lib->incoming); + nxt_unit_mmaps_init(&lib->outgoing); + return lib; fail: @@ -640,6 +635,9 @@ nxt_unit_lib_release(nxt_unit_impl_t *lib) nxt_unit_port_release(lib->shared_port); } + nxt_unit_mmaps_destroy(&lib->incoming); + nxt_unit_mmaps_destroy(&lib->outgoing); + free(lib); } } @@ -807,7 +805,6 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) rc = NXT_UNIT_ERROR; recv_msg.fd = -1; - recv_msg.process = NULL; port_msg = (nxt_port_msg_t *) rbuf->buf; cm = (struct cmsghdr *) rbuf->oob; @@ -967,10 +964,6 @@ fail: nxt_unit_mmap_buf_free(recv_msg.incoming_buf); } - if (recv_msg.process != NULL) { - nxt_unit_process_release(recv_msg.process); - } - if (nxt_fast_path(rc != NXT_UNIT_AGAIN)) { #if (NXT_DEBUG) memset(rbuf->buf, 0xAC, rbuf->size); @@ -1109,14 +1102,6 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) req->content_buf = req->request_buf; req->content_buf->free = nxt_unit_sptr_get(&r->preread_content); - /* "Move" process reference to req_impl. 
*/ - req_impl->process = nxt_unit_msg_get_process(ctx, recv_msg); - if (nxt_slow_path(req_impl->process == NULL)) { - return NXT_UNIT_ERROR; - } - - recv_msg->process = NULL; - req_impl->stream = recv_msg->stream; req_impl->outgoing_buf = NULL; @@ -1174,6 +1159,7 @@ nxt_unit_request_check_response_port(nxt_unit_request_info_t *req, nxt_unit_ctx_t *ctx; nxt_unit_impl_t *lib; nxt_unit_port_t *port; + nxt_unit_process_t *process; nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_port_impl_t *port_impl; nxt_unit_request_info_impl_t *req_impl; @@ -1244,15 +1230,28 @@ nxt_unit_request_check_response_port(nxt_unit_request_info_t *req, return NXT_UNIT_ERROR; } - req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); + process = nxt_unit_process_find(lib, port_id->pid, 0); + if (nxt_slow_path(process == NULL)) { + nxt_unit_alert(ctx, "check_response_port: process %d not found", + port->id.pid); + + nxt_unit_port_hash_find(&lib->ports, port_id, 1); - nxt_queue_insert_tail(&req_impl->process->ports, &port_impl->link); + pthread_mutex_unlock(&lib->mutex); + + free(port); + + return NXT_UNIT_ERROR; + } - port_impl->process = req_impl->process; + nxt_queue_insert_tail(&process->ports, &port_impl->link); + port_impl->process = process; nxt_queue_init(&port_impl->awaiting_req); + req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); + nxt_queue_insert_tail(&port_impl->awaiting_req, &req_impl->port_wait_link); port_impl->use_count = 2; @@ -1262,8 +1261,6 @@ nxt_unit_request_check_response_port(nxt_unit_request_info_t *req, pthread_mutex_unlock(&lib->mutex); - nxt_unit_process_use(port_impl->process); - res = nxt_unit_get_port(ctx, port_id); if (nxt_slow_path(res == NXT_UNIT_ERROR)) { return NXT_UNIT_ERROR; @@ -1511,16 +1508,6 @@ nxt_unit_request_info_release(nxt_unit_request_info_t *req) req->content_fd = -1; } - /* - * Process release should go after buffers release to guarantee mmap - * existence. 
- */ - if (req_impl->process != NULL) { - nxt_unit_process_release(req_impl->process); - - req_impl->process = NULL; - } - if (req->response_port != NULL) { nxt_unit_port_release(req->response_port); @@ -2111,32 +2098,6 @@ nxt_unit_response_buf_alloc(nxt_unit_request_info_t *req, uint32_t size) } -static nxt_unit_process_t * -nxt_unit_msg_get_process(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) -{ - nxt_unit_impl_t *lib; - - if (recv_msg->process != NULL) { - return recv_msg->process; - } - - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - - pthread_mutex_lock(&lib->mutex); - - recv_msg->process = nxt_unit_process_find(lib, recv_msg->pid, 0); - - pthread_mutex_unlock(&lib->mutex); - - if (recv_msg->process == NULL) { - nxt_unit_warn(ctx, "#%"PRIu32": process %d not found", - recv_msg->stream, (int) recv_msg->pid); - } - - return recv_msg->process; -} - - static nxt_unit_mmap_buf_t * nxt_unit_mmap_buf_get(nxt_unit_ctx_t *ctx) { @@ -2398,12 +2359,11 @@ nxt_unit_mmap_buf_send(nxt_unit_request_info_t *req, mmap_buf->hdr = NULL; } - nxt_atomic_fetch_add(&mmap_buf->process->outgoing.allocated_chunks, + nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, (int) m.mmap_msg.chunk_id - (int) first_free_chunk); - nxt_unit_debug(req->ctx, "process %d allocated_chunks %d", - mmap_buf->process->pid, - (int) mmap_buf->process->outgoing.allocated_chunks); + nxt_unit_debug(req->ctx, "allocated_chunks %d", + (int) lib->outgoing.allocated_chunks); } else { if (nxt_slow_path(mmap_buf->plain_ptr == NULL @@ -2463,7 +2423,6 @@ nxt_unit_free_outgoing_buf(nxt_unit_mmap_buf_t *mmap_buf) { if (mmap_buf->hdr != NULL) { nxt_unit_mmap_release(&mmap_buf->ctx_impl->ctx, - mmap_buf->process, mmap_buf->hdr, mmap_buf->buf.start, mmap_buf->buf.end - mmap_buf->buf.start); @@ -2881,7 +2840,6 @@ nxt_unit_request_preread(nxt_unit_request_info_t *req, size_t size) mmap_buf->buf.start = mmap_buf->free_ptr; mmap_buf->buf.free = mmap_buf->buf.start; mmap_buf->buf.end = mmap_buf->buf.start + size; - mmap_buf->process = NULL; res = read(req->content_fd, mmap_buf->free_ptr, size); if (res < 0) { @@ -3184,28 +3142,19 @@ nxt_unit_mmap_get(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, uint32_t outgoing_size; nxt_unit_mmap_t *mm, *mm_end; nxt_unit_impl_t *lib; - nxt_unit_process_t *process; nxt_port_mmap_header_t *hdr; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - process = nxt_unit_port_process(port); - if (nxt_slow_path(process == NULL)) { - nxt_unit_alert(ctx, "mmap_get: port %d,%d already closed", - (int) port->id.pid, (int) port->id.id); - - return NULL; - } - - pthread_mutex_lock(&process->outgoing.mutex); + pthread_mutex_lock(&lib->outgoing.mutex); retry: - outgoing_size = process->outgoing.size; + outgoing_size = lib->outgoing.size; - mm_end = process->outgoing.elts + outgoing_size; + mm_end = lib->outgoing.elts + outgoing_size; - for (mm = process->outgoing.elts; mm < mm_end; mm++) { + for (mm = lib->outgoing.elts; mm < mm_end; mm++) { hdr = mm->hdr; if (hdr->sent_over != 0xFFFFu && hdr->sent_over != port->id.id) { @@ -3252,13 +3201,13 @@ retry: if (outgoing_size >= lib->shm_mmap_limit) { /* Cannot allocate more shared memory. */ - pthread_mutex_unlock(&process->outgoing.mutex); + pthread_mutex_unlock(&lib->outgoing.mutex); if (min_n == 0) { *n = 0; } - if (nxt_slow_path(process->outgoing.allocated_chunks + min_n + if (nxt_slow_path(lib->outgoing.allocated_chunks + min_n >= lib->shm_mmap_limit * PORT_MMAP_CHUNK_COUNT)) { /* Memory allocated by application, but not send to router. 
*/ @@ -3287,7 +3236,7 @@ retry: nxt_unit_debug(ctx, "oosm: retry"); - pthread_mutex_lock(&process->outgoing.mutex); + pthread_mutex_lock(&lib->outgoing.mutex); goto retry; } @@ -3297,13 +3246,12 @@ retry: unlock: - nxt_atomic_fetch_add(&process->outgoing.allocated_chunks, *n); + nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, *n); - nxt_unit_debug(ctx, "process %d allocated_chunks %d", - process->pid, - (int) process->outgoing.allocated_chunks); + nxt_unit_debug(ctx, "allocated_chunks %d", + (int) lib->outgoing.allocated_chunks); - pthread_mutex_unlock(&process->outgoing.mutex); + pthread_mutex_unlock(&lib->outgoing.mutex); return hdr; } @@ -3448,20 +3396,11 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) char name[64]; nxt_unit_mmap_t *mm; nxt_unit_impl_t *lib; - nxt_unit_process_t *process; nxt_port_mmap_header_t *hdr; - process = nxt_unit_port_process(port); - if (nxt_slow_path(process == NULL)) { - nxt_unit_alert(ctx, "new_mmap: port %d,%d already closed", - (int) port->id.pid, (int) port->id.id); - - return NULL; - } - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - mm = nxt_unit_mmap_at(&process->outgoing, process->outgoing.size); + mm = nxt_unit_mmap_at(&lib->outgoing, lib->outgoing.size); if (nxt_slow_path(mm == NULL)) { nxt_unit_alert(ctx, "failed to add mmap to outgoing array"); @@ -3538,9 +3477,9 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) memset(hdr->free_map, 0xFFU, sizeof(hdr->free_map)); memset(hdr->free_tracking_map, 0xFFU, sizeof(hdr->free_tracking_map)); - hdr->id = process->outgoing.size - 1; + hdr->id = lib->outgoing.size - 1; hdr->src_pid = lib->pid; - hdr->dst_pid = process->pid; + hdr->dst_pid = port->id.pid; hdr->sent_over = port->id.id; /* Mark first n chunk(s) as busy */ @@ -3552,7 +3491,7 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) nxt_port_mmap_set_chunk_busy(hdr->free_map, PORT_MMAP_CHUNK_COUNT); nxt_port_mmap_set_chunk_busy(hdr->free_tracking_map, PORT_MMAP_CHUNK_COUNT); - pthread_mutex_unlock(&process->outgoing.mutex); + pthread_mutex_unlock(&lib->outgoing.mutex); rc = nxt_unit_send_mmap(ctx, port, fd); if (nxt_slow_path(rc != NXT_UNIT_OK)) { @@ -3561,12 +3500,12 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) } else { nxt_unit_debug(ctx, "new mmap #%"PRIu32" created for %d -> %d", - hdr->id, (int) lib->pid, (int) process->pid); + hdr->id, (int) lib->pid, (int) port->id.pid); } close(fd); - pthread_mutex_lock(&process->outgoing.mutex); + pthread_mutex_lock(&lib->outgoing.mutex); if (nxt_fast_path(hdr != NULL)) { return hdr; @@ -3574,7 +3513,7 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) remove_fail: - process->outgoing.size--; + lib->outgoing.size--; return NULL; } @@ -3662,7 +3601,6 @@ nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, mmap_buf->buf.start = mmap_buf->plain_ptr + sizeof(nxt_port_msg_t); mmap_buf->buf.free = mmap_buf->buf.start; mmap_buf->buf.end = mmap_buf->buf.start + size; - mmap_buf->process = nxt_unit_port_process(port); nxt_unit_debug(ctx, "outgoing plain buffer allocation: (%p, %d)", mmap_buf->buf.start, (int) size); @@ -3692,7 +3630,6 @@ nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, mmap_buf->buf.start = (char *) nxt_port_mmap_chunk_start(hdr, c); mmap_buf->buf.free = mmap_buf->buf.start; mmap_buf->buf.end = mmap_buf->buf.start + nchunks * PORT_MMAP_CHUNK_SIZE; - mmap_buf->process = nxt_unit_port_process(port); mmap_buf->free_ptr = NULL; 
mmap_buf->ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); @@ -3713,7 +3650,6 @@ nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) struct stat mmap_stat; nxt_unit_mmap_t *mm; nxt_unit_impl_t *lib; - nxt_unit_process_t *process; nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_read_buf_t *rbuf; nxt_port_mmap_header_t *hdr; @@ -3722,60 +3658,47 @@ nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) nxt_unit_debug(ctx, "incoming_mmap: fd %d from process %d", fd, (int) pid); - pthread_mutex_lock(&lib->mutex); - - process = nxt_unit_process_find(lib, pid, 0); - - pthread_mutex_unlock(&lib->mutex); - - if (nxt_slow_path(process == NULL)) { - nxt_unit_warn(ctx, "incoming_mmap: process %d not found, fd %d", - (int) pid, fd); - - return NXT_UNIT_ERROR; - } - - rc = NXT_UNIT_ERROR; - if (fstat(fd, &mmap_stat) == -1) { - nxt_unit_warn(ctx, "incoming_mmap: fstat(%d) failed: %s (%d)", fd, - strerror(errno), errno); + nxt_unit_alert(ctx, "incoming_mmap: fstat(%d) failed: %s (%d)", fd, + strerror(errno), errno); - goto fail; + return NXT_UNIT_ERROR; } mem = mmap(NULL, mmap_stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (nxt_slow_path(mem == MAP_FAILED)) { - nxt_unit_warn(ctx, "incoming_mmap: mmap() failed: %s (%d)", - strerror(errno), errno); + nxt_unit_alert(ctx, "incoming_mmap: mmap() failed: %s (%d)", + strerror(errno), errno); - goto fail; + return NXT_UNIT_ERROR; } hdr = mem; if (nxt_slow_path(hdr->src_pid != pid)) { - nxt_unit_warn(ctx, "incoming_mmap: unexpected pid in mmap header " - "detected: %d != %d or %d != %d", (int) hdr->src_pid, - (int) pid, (int) hdr->dst_pid, (int) lib->pid); + nxt_unit_alert(ctx, "incoming_mmap: unexpected pid in mmap header " + "detected: %d != %d or %d != %d", (int) hdr->src_pid, + (int) pid, (int) hdr->dst_pid, (int) lib->pid); munmap(mem, PORT_MMAP_SIZE); - goto fail; + return NXT_UNIT_ERROR; } nxt_queue_init(&awaiting_rbuf); - pthread_mutex_lock(&process->incoming.mutex); + pthread_mutex_lock(&lib->incoming.mutex); - mm = nxt_unit_mmap_at(&process->incoming, hdr->id); + mm = nxt_unit_mmap_at(&lib->incoming, hdr->id); if (nxt_slow_path(mm == NULL)) { - nxt_unit_warn(ctx, "incoming_mmap: failed to add to incoming array"); + nxt_unit_alert(ctx, "incoming_mmap: failed to add to incoming array"); munmap(mem, PORT_MMAP_SIZE); + rc = NXT_UNIT_ERROR; + } else { mm->hdr = hdr; @@ -3787,7 +3710,7 @@ nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) rc = NXT_UNIT_OK; } - pthread_mutex_unlock(&process->incoming.mutex); + pthread_mutex_unlock(&lib->incoming.mutex); nxt_queue_each(rbuf, &awaiting_rbuf, nxt_unit_read_buf_t, link) { @@ -3803,10 +3726,6 @@ nxt_unit_incoming_mmap(nxt_unit_ctx_t *ctx, pid_t pid, int fd) } nxt_queue_loop; -fail: - - nxt_unit_process_release(process); - return rc; } @@ -3840,9 +3759,6 @@ nxt_unit_process_release(nxt_unit_process_t *process) if (c == 1) { nxt_unit_debug(NULL, "destroy process #%d", (int) process->pid); - nxt_unit_mmaps_destroy(&process->incoming); - nxt_unit_mmaps_destroy(&process->outgoing); - free(process); } } @@ -3873,7 +3789,7 @@ nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, { int res; nxt_chunk_id_t c; - nxt_unit_process_t *process; + nxt_unit_impl_t *lib; nxt_port_mmap_header_t *hdr; nxt_port_mmap_tracking_msg_t *tracking_msg; @@ -3889,14 +3805,11 @@ nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, recv_msg->start = tracking_msg + 1; recv_msg->size -= sizeof(nxt_port_mmap_tracking_msg_t); - process = 
nxt_unit_msg_get_process(ctx, recv_msg); - if (nxt_slow_path(process == NULL)) { - return NXT_UNIT_ERROR; - } + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - pthread_mutex_lock(&process->incoming.mutex); + pthread_mutex_lock(&lib->incoming.mutex); - res = nxt_unit_check_rbuf_mmap(ctx, &process->incoming, + res = nxt_unit_check_rbuf_mmap(ctx, &lib->incoming, recv_msg->pid, tracking_msg->mmap_id, &hdr, rbuf); @@ -3919,7 +3832,7 @@ nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, res = NXT_UNIT_OK; } - pthread_mutex_unlock(&process->incoming.mutex); + pthread_mutex_unlock(&lib->incoming.mutex); return res; } @@ -3979,8 +3892,8 @@ nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, int res; void *start; uint32_t size; + nxt_unit_impl_t *lib; nxt_unit_mmaps_t *mmaps; - nxt_unit_process_t *process; nxt_unit_mmap_buf_t *b, **incoming_tail; nxt_port_mmap_msg_t *mmap_msg, *end; nxt_port_mmap_header_t *hdr; @@ -3992,11 +3905,6 @@ nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, return NXT_UNIT_ERROR; } - process = nxt_unit_msg_get_process(ctx, recv_msg); - if (nxt_slow_path(process == NULL)) { - return NXT_UNIT_ERROR; - } - mmap_msg = recv_msg->start; end = nxt_pointer_to(recv_msg->start, recv_msg->size); @@ -4023,7 +3931,9 @@ nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, b = recv_msg->incoming_buf; mmap_msg = recv_msg->start; - mmaps = &process->incoming; + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + + mmaps = &lib->incoming; pthread_mutex_lock(&mmaps->mutex); @@ -4052,7 +3962,6 @@ nxt_unit_mmap_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, b->buf.free = start; b->buf.end = b->buf.start + size; b->hdr = hdr; - b->process = process; b = b->next; @@ -4105,8 +4014,7 @@ nxt_unit_get_mmap(nxt_unit_ctx_t *ctx, pid_t pid, uint32_t id) static void -nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, - nxt_unit_process_t *process, nxt_port_mmap_header_t *hdr, +nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, nxt_port_mmap_header_t *hdr, void *start, uint32_t size) { int freed_chunks; @@ -4132,12 +4040,10 @@ nxt_unit_mmap_release(nxt_unit_ctx_t *ctx, lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); if (hdr->src_pid == lib->pid && freed_chunks != 0) { - nxt_atomic_fetch_add(&process->outgoing.allocated_chunks, - -freed_chunks); + nxt_atomic_fetch_add(&lib->outgoing.allocated_chunks, -freed_chunks); - nxt_unit_debug(ctx, "process %d allocated_chunks %d", - process->pid, - (int) process->outgoing.allocated_chunks); + nxt_unit_debug(ctx, "allocated_chunks %d", + (int) lib->outgoing.allocated_chunks); } if (hdr->dst_pid == lib->pid @@ -4241,9 +4147,6 @@ nxt_unit_process_get(nxt_unit_impl_t *lib, pid_t pid) nxt_queue_init(&process->ports); - nxt_unit_mmaps_init(&process->incoming); - nxt_unit_mmaps_init(&process->outgoing); - lhq.replace = 0; lhq.value = process; @@ -4255,8 +4158,6 @@ nxt_unit_process_get(nxt_unit_impl_t *lib, pid_t pid) default: nxt_unit_alert(NULL, "process %d insert failed", (int) pid); - pthread_mutex_destroy(&process->outgoing.mutex); - pthread_mutex_destroy(&process->incoming.mutex); free(process); process = NULL; break; @@ -4907,17 +4808,6 @@ nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port) } -nxt_inline nxt_unit_process_t * -nxt_unit_port_process(nxt_unit_port_t *port) -{ - nxt_unit_port_impl_t *port_impl; - - port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); - - return port_impl->process; -} - - static nxt_unit_port_t * 
nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) { -- cgit From f4a118f84ae7c8b9e67e2c461087dd0986664574 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:20 +0300 Subject: Adding debug messages to catch process management issues. --- src/nxt_router.c | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/src/nxt_router.c b/src/nxt_router.c index 44b303e4..922f15cd 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -437,6 +437,8 @@ nxt_router_start_app_process(nxt_task_t *task, nxt_app_t *app) nxt_port_t *router_port; nxt_runtime_t *rt; + nxt_debug(task, "app '%V' start process", &app->name); + rt = task->thread->runtime; router_port = rt->port_by_type[NXT_PROCESS_ROUTER]; @@ -2267,6 +2269,10 @@ nxt_router_app_prefork_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, nxt_queue_insert_tail(&app->ports, &port->app_link); nxt_queue_insert_tail(&app->spare_ports, &port->idle_link); + + nxt_debug(task, "app '%V' move new port %PI:%d to spare_ports", + &app->name, port->pid, port->id); + nxt_port_hash_add(&app->port_hash, port); app->port_hash_count++; @@ -3690,6 +3696,10 @@ nxt_router_req_headers_ack_handler(nxt_task_t *task, if (nxt_queue_chk_remove(&main_app_port->idle_link)) { app->idle_processes--; + nxt_debug(task, "app '%V' move port %PI:%d out of %s (ack)", + &app->name, main_app_port->pid, main_app_port->id, + (main_app_port->idle_start ? "idle_ports" : "spare_ports")); + /* Check port was in 'spare_ports' using idle_start field. */ if (main_app_port->idle_start == 0 && app->idle_processes >= app->spare_processes) @@ -3707,6 +3717,10 @@ nxt_router_req_headers_ack_handler(nxt_task_t *task, nxt_queue_insert_tail(&app->spare_ports, idle_lnk); idle_port->idle_start = 0; + + nxt_debug(task, "app '%V' move port %PI:%d from idle_ports " + "to spare_ports", + &app->name, idle_port->pid, idle_port->id); } if (nxt_router_app_can_start(app) && nxt_router_app_need_start(app)) { @@ -3925,7 +3939,7 @@ nxt_router_app_use(nxt_task_t *task, nxt_app_t *app, int i) nxt_inline nxt_port_t * -nxt_router_app_get_port_for_quit(nxt_app_t *app) +nxt_router_app_get_port_for_quit(nxt_task_t *task, nxt_app_t *app) { nxt_port_t *port; @@ -3940,6 +3954,10 @@ nxt_router_app_get_port_for_quit(nxt_app_t *app) if (nxt_queue_chk_remove(&port->idle_link)) { app->idle_processes--; + + nxt_debug(task, "app '%V' move port %PI:%d out of %s for quit", + &app->name, port->pid, port->id, + (port->idle_start ? "idle_ports" : "spare_ports")); } nxt_port_hash_remove(&app->port_hash, port); @@ -4074,10 +4092,15 @@ nxt_router_app_port_release(nxt_task_t *task, nxt_port_t *port, if (app->idle_processes < app->spare_processes) { nxt_queue_insert_tail(&app->spare_ports, &main_app_port->idle_link); + nxt_debug(task, "app '%V' move port %PI:%d to spare_ports", + &app->name, main_app_port->pid, main_app_port->id); } else { nxt_queue_insert_tail(&app->idle_ports, &main_app_port->idle_link); main_app_port->idle_start = task->thread->engine->timers.now; + + nxt_debug(task, "app '%V' move port %PI:%d to idle_ports", + &app->name, main_app_port->pid, main_app_port->id); } app->idle_processes++; @@ -4151,6 +4174,10 @@ nxt_router_app_port_close(nxt_task_t *task, nxt_port_t *port) if (nxt_queue_chk_remove(&port->idle_link)) { app->idle_processes--; + nxt_debug(task, "app '%V' move port %PI:%d out of %s before close", + &app->name, port->pid, port->id, + (port->idle_start ? 
"idle_ports" : "spare_ports")); + if (port->idle_start == 0 && app->idle_processes >= app->spare_processes) { @@ -4163,6 +4190,10 @@ nxt_router_app_port_close(nxt_task_t *task, nxt_port_t *port) nxt_queue_insert_tail(&app->spare_ports, idle_lnk); idle_port->idle_start = 0; + + nxt_debug(task, "app '%V' move port %PI:%d from idle_ports " + "to spare_ports", + &app->name, idle_port->pid, idle_port->id); } } @@ -4243,6 +4274,9 @@ nxt_router_adjust_idle_timer(nxt_task_t *task, void *obj, void *data) nxt_queue_remove(lnk); lnk->next = NULL; + nxt_debug(task, "app '%V' move port %PI:%d out of idle_ports (timeout)", + &app->name, port->pid, port->id); + nxt_queue_chk_remove(&port->app_link); nxt_port_hash_remove(&app->port_hash, port); @@ -4318,7 +4352,7 @@ nxt_router_free_app(nxt_task_t *task, void *obj, void *data) app = app_joint->app; for ( ;; ) { - port = nxt_router_app_get_port_for_quit(app); + port = nxt_router_app_get_port_for_quit(task, app); if (port == NULL) { break; } -- cgit From bab4a9e9f25d29d0ef0d01ac0f873cce333f1fe7 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:23 +0300 Subject: Tests: skipping idle zero timeout. This is a temporary solution after the 'shared port' patch. The application process becomes idle immediately after creation. Even if it starts processing a request (without acknowledging it yet), it is stopped by the router because an 'out-of-idle-time' event occurs. --- test/test_python_procman.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_python_procman.py b/test/test_python_procman.py index 8613f58e..c327ab14 100644 --- a/test/test_python_procman.py +++ b/test/test_python_procman.py @@ -33,6 +33,7 @@ class TestPythonProcman(TestApplicationPython): self.assertIn('success', self.conf(conf, path), 'configure processes') + @unittest.skip('not yet') def test_python_processes_idle_timeout_zero(self): self.conf_proc({"spare": 0, "max": 2, "idle_timeout": 0}) -- cgit From 496f41c13435510a53da8dd1d383f63fd0c467ab Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:25 +0300 Subject: Tests: reducing the number of generated applications. Each application initializes a shared port with 2 file descriptors, so the test fails because the router reaches the open files limit. --- test/test_configuration.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/test_configuration.py b/test/test_configuration.py index 0329ef5e..fec1b3dc 100644 --- a/test/test_configuration.py +++ b/test/test_configuration.py @@ -400,7 +400,9 @@ class TestConfiguration(TestControl): "path": "/app", "module": "wsgi", } - for a in range(999) + # Larger number of applications can cause test fail with default + # open files limit due to the lack of file descriptors. + for a in range(100) }, "listeners": {"*:7080": {"pass": "applications/app-1"}}, } -- cgit From 72475ee11c4254086e5d5648c86498833bf8e939 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:28 +0300 Subject: Made router port message handlers into static functions. Mostly harmless. 
--- src/nxt_router.c | 17 +++++++++++++---- src/nxt_router.h | 6 ------ 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/nxt_router.c b/src/nxt_router.c index 922f15cd..b8e94bcc 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -69,6 +69,15 @@ static void nxt_router_greet_controller(nxt_task_t *task, static nxt_int_t nxt_router_start_app_process(nxt_task_t *task, nxt_app_t *app); +static void nxt_router_new_port_handler(nxt_task_t *task, + nxt_port_recv_msg_t *msg); +static void nxt_router_conf_data_handler(nxt_task_t *task, + nxt_port_recv_msg_t *msg); +static void nxt_router_remove_pid_handler(nxt_task_t *task, + nxt_port_recv_msg_t *msg); +static void nxt_router_access_log_reopen_handler(nxt_task_t *task, + nxt_port_recv_msg_t *msg); + static nxt_router_temp_conf_t *nxt_router_temp_conf(nxt_task_t *task); static void nxt_router_conf_apply(nxt_task_t *task, void *obj, void *data); static void nxt_router_conf_ready(nxt_task_t *task, @@ -561,7 +570,7 @@ nxt_request_rpc_data_unlink(nxt_task_t *task, } -void +static void nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { nxt_app_t *app; @@ -623,7 +632,7 @@ nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) } -void +static void nxt_router_conf_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { void *p; @@ -708,7 +717,7 @@ nxt_router_app_process_remove_pid(nxt_task_t *task, nxt_port_t *port, } -void +static void nxt_router_remove_pid_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { nxt_event_engine_t *engine; @@ -3323,7 +3332,7 @@ typedef struct { } nxt_router_access_log_reopen_t; -void +static void nxt_router_access_log_reopen_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { nxt_mp_t *mp; diff --git a/src/nxt_router.h b/src/nxt_router.h index 7e0dc7d7..ead8f292 100644 --- a/src/nxt_router.h +++ b/src/nxt_router.h @@ -217,12 +217,6 @@ struct nxt_router_access_log_s { }; -void nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg); -void nxt_router_conf_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg); -void nxt_router_remove_pid_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg); -void nxt_router_access_log_reopen_handler(nxt_task_t *task, - nxt_port_recv_msg_t *msg); - void nxt_router_process_http_request(nxt_task_t *task, nxt_http_request_t *r, nxt_app_t *app); void nxt_router_app_port_close(nxt_task_t *task, nxt_port_t *port); -- cgit From a1e9df2aef5a3917728c6fd37280b03020d51123 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:30 +0300 Subject: Port message extended to transfer 2 file descriptors. 
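Both descriptors travel in a single SCM_RIGHTS control message: the ancillary buffer is sized for two ints, and cmsg_len tells the receiver whether one or two descriptors actually arrived. Below is a standalone sketch of that layout over plain POSIX sockets; the helper name and the stripped-down error handling are illustrative assumptions and are not Unit's actual nxt_sendmsg() wrapper shown in the diff that follows.

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /*
     * Send iov plus one or two descriptors over a Unix socket.
     * fd0 must be valid; fd1 == -1 means "send fd0 only".
     */
    static ssize_t
    send_with_fds(int sock, struct iovec *iov, int niov, int fd0, int fd1)
    {
        size_t         csize;
        int            fds[2];
        struct msghdr  msg;
        union {
            struct cmsghdr  cm;
            char            space[CMSG_SPACE(sizeof(int) * 2)];
        } cmsg;

        memset(&cmsg, 0, sizeof(cmsg));

        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_iov = iov;
        msg.msg_iovlen = niov;
        msg.msg_flags = 0;

        /* One control message, sized for one or two descriptors. */
        csize = (fd1 == -1) ? sizeof(int) : 2 * sizeof(int);

        msg.msg_control = &cmsg;
        msg.msg_controllen = CMSG_SPACE(csize);

        cmsg.cm.cmsg_level = SOL_SOCKET;
        cmsg.cm.cmsg_type = SCM_RIGHTS;
        cmsg.cm.cmsg_len = CMSG_LEN(csize);

        fds[0] = fd0;
        fds[1] = fd1;
        memcpy(CMSG_DATA(&cmsg.cm), fds, csize);

        return sendmsg(sock, &msg, 0);
    }

On the receiving side the same idea applies in reverse: the patch below checks cmsg_len against CMSG_LEN(sizeof(int)) and CMSG_LEN(sizeof(int) * 2), copies one or two ints out of CMSG_DATA(), and leaves unused slots at -1.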
--- src/nxt_port.h | 2 ++ src/nxt_port_socket.c | 26 ++++++++++++++++++++++- src/nxt_socket.h | 2 +- src/nxt_socketpair.c | 59 ++++++++++++++++++++++++++++++++------------------- src/nxt_unit.c | 19 ++++++++++++++--- 5 files changed, 81 insertions(+), 27 deletions(-) diff --git a/src/nxt_port.h b/src/nxt_port.h index 9a933e75..ab455f92 100644 --- a/src/nxt_port.h +++ b/src/nxt_port.h @@ -165,6 +165,7 @@ typedef struct { nxt_buf_t *buf; size_t share; nxt_fd_t fd; + nxt_fd_t fd2; nxt_port_msg_t port_msg; uint32_t tracking_msg[2]; uint8_t close_fd; /* 1 bit */ @@ -174,6 +175,7 @@ typedef struct { struct nxt_port_recv_msg_s { nxt_fd_t fd; + nxt_fd_t fd2; nxt_buf_t *buf; nxt_port_t *port; nxt_port_msg_t port_msg; diff --git a/src/nxt_port_socket.c b/src/nxt_port_socket.c index 4e3eaef6..844b65ca 100644 --- a/src/nxt_port_socket.c +++ b/src/nxt_port_socket.c @@ -156,6 +156,7 @@ nxt_port_socket_twrite(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type, msg.buf = b; msg.share = 0; msg.fd = fd; + msg.fd2 = -1; msg.close_fd = (type & NXT_PORT_MSG_CLOSE_FD) != 0; msg.allocated = 0; @@ -331,7 +332,7 @@ next_fragment: msg->port_msg.last |= sb.last; msg->port_msg.mf = sb.limit_reached || sb.nmax_reached; - n = nxt_socketpair_send(&port->socket, msg->fd, iov, sb.niov + 1); + n = nxt_socketpair_send(&port->socket, &msg->fd, iov, sb.niov + 1); if (n > 0) { if (nxt_slow_path((size_t) n != sb.size + iov[0].iov_len)) { @@ -346,6 +347,12 @@ next_fragment: msg->fd = -1; } + if (msg->fd2 != -1 && msg->close_fd != 0) { + nxt_fd_close(msg->fd2); + + msg->fd2 = -1; + } + msg->buf = nxt_port_buf_completion(task, wq, msg->buf, plain_size, m == NXT_PORT_METHOD_MMAP); @@ -358,6 +365,7 @@ next_fragment: * in the first message of a stream. */ msg->fd = -1; + msg->fd2 = -1; msg->share += n; msg->port_msg.nf = 1; msg->port_msg.tracking = 0; @@ -810,6 +818,10 @@ nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port, nxt_fd_close(msg->fd); } + if (msg->fd2 != -1) { + nxt_fd_close(msg->fd2); + } + return; } @@ -854,6 +866,7 @@ nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port, msg->buf = fmsg->buf; msg->fd = fmsg->fd; + msg->fd2 = fmsg->fd2; /* * To disable instant completion or buffer re-usage, @@ -888,12 +901,17 @@ nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port, if (nxt_fast_path(msg->cancelled == 0)) { msg->buf = NULL; msg->fd = -1; + msg->fd2 = -1; b = NULL; } else { if (msg->fd != -1) { nxt_fd_close(msg->fd); } + + if (msg->fd2 != -1) { + nxt_fd_close(msg->fd2); + } } } else { if (nxt_fast_path(msg->cancelled == 0)) { @@ -999,6 +1017,12 @@ nxt_port_error_handler(nxt_task_t *task, void *obj, void *data) msg->fd = -1; } + if (msg->fd2 != -1 && msg->close_fd != 0) { + nxt_fd_close(msg->fd2); + + msg->fd2 = -1; + } + for (b = msg->buf; b != NULL; b = next) { next = b->next; b->next = NULL; diff --git a/src/nxt_socket.h b/src/nxt_socket.h index 718ad398..7403de3d 100644 --- a/src/nxt_socket.h +++ b/src/nxt_socket.h @@ -112,7 +112,7 @@ nxt_uint_t nxt_socket_error_level(nxt_err_t err); NXT_EXPORT nxt_int_t nxt_socketpair_create(nxt_task_t *task, nxt_socket_t *pair); NXT_EXPORT void nxt_socketpair_close(nxt_task_t *task, nxt_socket_t *pair); -NXT_EXPORT ssize_t nxt_socketpair_send(nxt_fd_event_t *ev, nxt_fd_t fd, +NXT_EXPORT ssize_t nxt_socketpair_send(nxt_fd_event_t *ev, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob); NXT_EXPORT ssize_t nxt_socketpair_recv(nxt_fd_event_t *ev, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob); diff --git a/src/nxt_socketpair.c b/src/nxt_socketpair.c index 
10ea562e..8b9d12bf 100644 --- a/src/nxt_socketpair.c +++ b/src/nxt_socketpair.c @@ -20,7 +20,7 @@ #endif -static ssize_t nxt_sendmsg(nxt_socket_t s, nxt_fd_t fd, nxt_iobuf_t *iob, +static ssize_t nxt_sendmsg(nxt_socket_t s, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob); static ssize_t nxt_recvmsg(nxt_socket_t s, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob); @@ -71,7 +71,7 @@ nxt_socketpair_close(nxt_task_t *task, nxt_socket_t *pair) ssize_t -nxt_socketpair_send(nxt_fd_event_t *ev, nxt_fd_t fd, nxt_iobuf_t *iob, +nxt_socketpair_send(nxt_fd_event_t *ev, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob) { ssize_t n; @@ -82,7 +82,8 @@ nxt_socketpair_send(nxt_fd_event_t *ev, nxt_fd_t fd, nxt_iobuf_t *iob, err = (n == -1) ? nxt_socket_errno : 0; - nxt_debug(ev->task, "sendmsg(%d, %FD, %ui): %z", ev->fd, fd, niob, n); + nxt_debug(ev->task, "sendmsg(%d, %FD, %FD, %ui): %z", ev->fd, fd[0], + fd[1], niob, n); if (n > 0) { return n; @@ -108,8 +109,8 @@ nxt_socketpair_send(nxt_fd_event_t *ev, nxt_fd_t fd, nxt_iobuf_t *iob, continue; default: - nxt_alert(ev->task, "sendmsg(%d, %FD, %ui) failed %E", - ev->fd, fd, niob, err); + nxt_alert(ev->task, "sendmsg(%d, %FD, %FD, %ui) failed %E", + ev->fd, fd[0], fd[1], niob, err); return NXT_ERROR; } @@ -133,7 +134,8 @@ nxt_socketpair_recv(nxt_fd_event_t *ev, nxt_fd_t *fd, nxt_iobuf_t *iob, err = (n == -1) ? nxt_socket_errno : 0; - nxt_debug(ev->task, "recvmsg(%d, %FD, %ui): %z", ev->fd, *fd, niob, n); + nxt_debug(ev->task, "recvmsg(%d, %FD, %FD, %ui): %z", ev->fd, fd[0], + fd[1], niob, n); if (n > 0) { return n; @@ -178,12 +180,13 @@ nxt_socketpair_recv(nxt_fd_event_t *ev, nxt_fd_t *fd, nxt_iobuf_t *iob, */ static ssize_t -nxt_sendmsg(nxt_socket_t s, nxt_fd_t fd, nxt_iobuf_t *iob, nxt_uint_t niob) +nxt_sendmsg(nxt_socket_t s, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob) { + size_t csize; struct msghdr msg; union { struct cmsghdr cm; - char space[CMSG_SPACE(sizeof(int))]; + char space[CMSG_SPACE(sizeof(int) * 2)]; } cmsg; msg.msg_name = NULL; @@ -193,15 +196,17 @@ nxt_sendmsg(nxt_socket_t s, nxt_fd_t fd, nxt_iobuf_t *iob, nxt_uint_t niob) /* Flags are cleared just to suppress valgrind warning. */ msg.msg_flags = 0; - if (fd != -1) { + if (fd[0] != -1) { + csize = (fd[1] == -1) ? sizeof(int) : sizeof(int) * 2; + msg.msg_control = (caddr_t) &cmsg; - msg.msg_controllen = sizeof(cmsg); + msg.msg_controllen = CMSG_SPACE(csize); #if (NXT_VALGRIND) nxt_memzero(&cmsg, sizeof(cmsg)); #endif - cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int)); + cmsg.cm.cmsg_len = CMSG_LEN(csize); cmsg.cm.cmsg_level = SOL_SOCKET; cmsg.cm.cmsg_type = SCM_RIGHTS; @@ -214,7 +219,7 @@ nxt_sendmsg(nxt_socket_t s, nxt_fd_t fd, nxt_iobuf_t *iob, nxt_uint_t niob) * Fortunately, GCC with -O1 compiles this nxt_memcpy() * in the same simple assignment as in the code above. 
*/ - nxt_memcpy(CMSG_DATA(&cmsg.cm), &fd, sizeof(int)); + nxt_memcpy(CMSG_DATA(&cmsg.cm), fd, csize); } else { msg.msg_control = NULL; @@ -232,7 +237,7 @@ nxt_recvmsg(nxt_socket_t s, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob) struct msghdr msg; union { struct cmsghdr cm; - char space[CMSG_SPACE(sizeof(int))]; + char space[CMSG_SPACE(sizeof(int) * 2)]; } cmsg; msg.msg_name = NULL; @@ -242,7 +247,8 @@ nxt_recvmsg(nxt_socket_t s, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob) msg.msg_control = (caddr_t) &cmsg; msg.msg_controllen = sizeof(cmsg); - *fd = -1; + fd[0] = -1; + fd[1] = -1; #if (NXT_VALGRIND) nxt_memzero(&cmsg, sizeof(cmsg)); @@ -251,12 +257,16 @@ nxt_recvmsg(nxt_socket_t s, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob) n = recvmsg(s, &msg, 0); if (n > 0 - && cmsg.cm.cmsg_len == CMSG_LEN(sizeof(int)) && cmsg.cm.cmsg_level == SOL_SOCKET && cmsg.cm.cmsg_type == SCM_RIGHTS) { - /* (*fd) = *(int *) CMSG_DATA(&cmsg.cm); */ - nxt_memcpy(fd, CMSG_DATA(&cmsg.cm), sizeof(int)); + if (cmsg.cm.cmsg_len == CMSG_LEN(sizeof(int))) { + nxt_memcpy(fd, CMSG_DATA(&cmsg.cm), sizeof(int)); + } + + if (cmsg.cm.cmsg_len == CMSG_LEN(sizeof(int) * 2)) { + nxt_memcpy(fd, CMSG_DATA(&cmsg.cm), sizeof(int) * 2); + } } return n; @@ -267,7 +277,7 @@ nxt_recvmsg(nxt_socket_t s, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob) /* Solaris 4.3BSD sockets. */ static ssize_t -nxt_sendmsg(nxt_socket_t s, nxt_fd_t fd, nxt_iobuf_t *iob, nxt_uint_t niob) +nxt_sendmsg(nxt_socket_t s, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob) { struct msghdr msg; @@ -276,10 +286,14 @@ nxt_sendmsg(nxt_socket_t s, nxt_fd_t fd, nxt_iobuf_t *iob, nxt_uint_t niob) msg.msg_iov = iob; msg.msg_iovlen = niob; - if (fd != -1) { - msg.msg_accrights = (caddr_t) &fd; + if (fd[0] != -1) { + msg.msg_accrights = (caddr_t) fd; msg.msg_accrightslen = sizeof(int); + if (fd[1] != -1) { + msg.msg_accrightslen += sizeof(int); + } + } else { msg.msg_accrights = NULL; msg.msg_accrightslen = 0; @@ -294,14 +308,15 @@ nxt_recvmsg(nxt_socket_t s, nxt_fd_t *fd, nxt_iobuf_t *iob, nxt_uint_t niob) { struct msghdr msg; - *fd = -1; + fd[0] = -1; + fd[1] = -1; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iob; msg.msg_iovlen = niob; msg.msg_accrights = (caddr_t) fd; - msg.msg_accrightslen = sizeof(int); + msg.msg_accrightslen = sizeof(int) * 2; return recvmsg(s, &msg, 0); } diff --git a/src/nxt_unit.c b/src/nxt_unit.c index 154fd480..66aadd98 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -192,6 +192,7 @@ struct nxt_unit_recv_msg_s { uint32_t size; int fd; + int fd2; nxt_unit_mmap_buf_t *incoming_buf; }; @@ -805,14 +806,20 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) rc = NXT_UNIT_ERROR; recv_msg.fd = -1; + recv_msg.fd2 = -1; port_msg = (nxt_port_msg_t *) rbuf->buf; cm = (struct cmsghdr *) rbuf->oob; - if (cm->cmsg_len == CMSG_LEN(sizeof(int)) - && cm->cmsg_level == SOL_SOCKET + if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) { - memcpy(&recv_msg.fd, CMSG_DATA(cm), sizeof(int)); + if (cm->cmsg_len == CMSG_LEN(sizeof(int))) { + memcpy(&recv_msg.fd, CMSG_DATA(cm), sizeof(int)); + } + + if (cm->cmsg_len == CMSG_LEN(sizeof(int) * 2)) { + memcpy(&recv_msg.fd, CMSG_DATA(cm), sizeof(int) * 2); + } } recv_msg.incoming_buf = NULL; @@ -852,6 +859,7 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) if (nxt_slow_path(rc != NXT_UNIT_OK)) { if (rc == NXT_UNIT_AGAIN) { recv_msg.fd = -1; + recv_msg.fd2 = -1; } goto fail; @@ -871,6 +879,7 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, 
nxt_unit_read_buf_t *rbuf) if (nxt_slow_path(rc != NXT_UNIT_OK)) { if (rc == NXT_UNIT_AGAIN) { recv_msg.fd = -1; + recv_msg.fd2 = -1; } goto fail; @@ -960,6 +969,10 @@ fail: close(recv_msg.fd); } + if (recv_msg.fd2 != -1) { + close(recv_msg.fd2); + } + while (recv_msg.incoming_buf != NULL) { nxt_unit_mmap_buf_free(recv_msg.incoming_buf); } -- cgit From a82cf4ffb68126f2831ab9877a7ef283dd517690 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:32 +0300 Subject: Circular queues implementations and a test. - naive circular queue, described in the article "A Scalable, Portable, and Memory-Efficient Lock-Free FIFO Queue" by Ruslan Nikolaev: https://drops.dagstuhl.de/opus/volltexte/2019/11335/pdf/LIPIcs-DISC-2019-28.pdf - circular queue, proposed by Valentin Bartenev in the "Unit router application IPC" design draft --- auto/make | 52 +++++ src/nxt_nncq.h | 162 ++++++++++++++ src/nxt_nvbcq.h | 146 +++++++++++++ src/test/nxt_cq_test.c | 578 +++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 938 insertions(+) create mode 100644 src/nxt_nncq.h create mode 100644 src/nxt_nvbcq.h create mode 100644 src/test/nxt_cq_test.c diff --git a/auto/make b/auto/make index 32ead76e..50bc6064 100644 --- a/auto/make +++ b/auto/make @@ -130,6 +130,42 @@ END done +nxt_src=src/test/nxt_cq_test.c +nxt_obj=src/test/nxt_ncq_test.o +nxt_dep=src/test/nxt_ncq_test.dep +nxt_dep_flags=`nxt_gen_dep_flags` +nxt_dep_post=`nxt_gen_dep_post` +cat << END >> $NXT_MAKEFILE + +$NXT_BUILD_DIR/$nxt_obj: $nxt_src $NXT_VERSION_H + \$(CC) -c \$(CFLAGS) -DNXT_NCQ_TEST=1 \$(NXT_LIB_INCS) $NXT_LIB_AUX_CFLAGS \\ + -o $NXT_BUILD_DIR/$nxt_obj \\ + $nxt_dep_flags \\ + $nxt_src + $nxt_dep_post + +-include $NXT_BUILD_DIR/$nxt_dep + +END + +nxt_src=src/test/nxt_cq_test.c +nxt_obj=src/test/nxt_vbcq_test.o +nxt_dep=src/test/nxt_vbcq_test.dep +nxt_dep_flags=`nxt_gen_dep_flags` +nxt_dep_post=`nxt_gen_dep_post` +cat << END >> $NXT_MAKEFILE + +$NXT_BUILD_DIR/$nxt_obj: $nxt_src $NXT_VERSION_H + \$(CC) -c \$(CFLAGS) -DNXT_NCQ_TEST=0 \$(NXT_LIB_INCS) $NXT_LIB_AUX_CFLAGS \\ + -o $NXT_BUILD_DIR/$nxt_obj \\ + $nxt_dep_flags \\ + $nxt_src + $nxt_dep_post + +-include $NXT_BUILD_DIR/$nxt_dep + +END + $echo >> $NXT_MAKEFILE @@ -151,6 +187,8 @@ if [ $NXT_TESTS = YES ]; then .PHONY: tests tests: $NXT_BUILD_DIR/tests $NXT_BUILD_DIR/utf8_file_name_test \\ + $NXT_BUILD_DIR/ncq_test \\ + $NXT_BUILD_DIR/vbcq_test \\ $NXT_BUILD_DIR/unit_app_test $NXT_BUILD_DIR/unit_websocket_chat \\ $NXT_BUILD_DIR/unit_websocket_echo @@ -169,6 +207,20 @@ $NXT_BUILD_DIR/utf8_file_name_test: $NXT_LIB_UTF8_FILE_NAME_TEST_SRCS \\ $NXT_BUILD_DIR/$NXT_LIB_STATIC \\ $NXT_LD_OPT $NXT_LIBM $NXT_LIBS $NXT_LIB_AUX_LIBS +$NXT_BUILD_DIR/ncq_test: $NXT_BUILD_DIR/src/test/nxt_ncq_test.o \\ + $NXT_BUILD_DIR/$NXT_LIB_STATIC + \$(NXT_EXEC_LINK) -o $NXT_BUILD_DIR/ncq_test \\ + \$(CFLAGS) $NXT_BUILD_DIR/src/test/nxt_ncq_test.o \\ + $NXT_BUILD_DIR/$NXT_LIB_STATIC \\ + $NXT_LD_OPT $NXT_LIBM $NXT_LIBS $NXT_LIB_AUX_LIBS + +$NXT_BUILD_DIR/vbcq_test: $NXT_BUILD_DIR/src/test/nxt_vbcq_test.o \\ + $NXT_BUILD_DIR/$NXT_LIB_STATIC + \$(NXT_EXEC_LINK) -o $NXT_BUILD_DIR/vbcq_test \\ + \$(CFLAGS) $NXT_BUILD_DIR/src/test/nxt_vbcq_test.o \\ + $NXT_BUILD_DIR/$NXT_LIB_STATIC \\ + $NXT_LD_OPT $NXT_LIBM $NXT_LIBS $NXT_LIB_AUX_LIBS + $NXT_BUILD_DIR/unit_app_test: $NXT_BUILD_DIR/src/test/nxt_unit_app_test.o \\ $NXT_BUILD_DIR/$NXT_LIB_UNIT_STATIC \$(NXT_EXEC_LINK) -o $NXT_BUILD_DIR/unit_app_test \\ diff --git a/src/nxt_nncq.h b/src/nxt_nncq.h new file mode 100644 index 00000000..20e7ecff --- 
/dev/null +++ b/src/nxt_nncq.h @@ -0,0 +1,162 @@ + +/* + * Copyright (C) NGINX, Inc. + */ + +#ifndef _NXT_NNCQ_H_INCLUDED_ +#define _NXT_NNCQ_H_INCLUDED_ + + +/* Numeric Naive Circular Queue */ + +#define NXT_NNCQ_SIZE 16384 + +typedef uint32_t nxt_nncq_atomic_t; +typedef uint16_t nxt_nncq_cycle_t; + +typedef struct { + nxt_nncq_atomic_t head; + nxt_nncq_atomic_t entries[NXT_NNCQ_SIZE]; + nxt_nncq_atomic_t tail; +} nxt_nncq_t; + + +static inline nxt_nncq_atomic_t +nxt_nncq_head(nxt_nncq_t const volatile *q) +{ + return q->head; +} + + +static inline nxt_nncq_atomic_t +nxt_nncq_tail(nxt_nncq_t const volatile *q) +{ + return q->tail; +} + + +static inline void +nxt_nncq_tail_cmp_inc(nxt_nncq_t volatile *q, nxt_nncq_atomic_t t) +{ + nxt_atomic_cmp_set(&q->tail, t, t + 1); +} + + +static inline nxt_nncq_atomic_t +nxt_nncq_index(nxt_nncq_t const volatile *q, nxt_nncq_atomic_t i) +{ + return i % NXT_NNCQ_SIZE; +} + + +static inline nxt_nncq_atomic_t +nxt_nncq_map(nxt_nncq_t const volatile *q, nxt_nncq_atomic_t i) +{ + return i % NXT_NNCQ_SIZE; +} + + +static inline nxt_nncq_cycle_t +nxt_nncq_cycle(nxt_nncq_t const volatile *q, nxt_nncq_atomic_t i) +{ + return i / NXT_NNCQ_SIZE; +} + + +static inline nxt_nncq_cycle_t +nxt_nncq_next_cycle(nxt_nncq_t const volatile *q, nxt_nncq_cycle_t i) +{ + return i + 1; +} + + +static inline nxt_nncq_atomic_t +nxt_nncq_new_entry(nxt_nncq_t const volatile *q, nxt_nncq_cycle_t cycle, + nxt_nncq_atomic_t i) +{ + return cycle * NXT_NNCQ_SIZE + (i % NXT_NNCQ_SIZE); +} + + +static inline nxt_nncq_atomic_t +nxt_nncq_empty(nxt_nncq_t const volatile *q) +{ + return NXT_NNCQ_SIZE; +} + + +static void +nxt_nncq_init(nxt_nncq_t volatile *q) +{ + q->head = NXT_NNCQ_SIZE; + nxt_memzero((void *) q->entries, NXT_NNCQ_SIZE * sizeof(nxt_nncq_atomic_t)); + q->tail = NXT_NNCQ_SIZE; +} + + +static void +nxt_nncq_enqueue(nxt_nncq_t volatile *q, nxt_nncq_atomic_t val) +{ + nxt_nncq_cycle_t e_cycle, t_cycle; + nxt_nncq_atomic_t n, t, e, j; + + for ( ;; ) { + t = nxt_nncq_tail(q); + j = nxt_nncq_map(q, t); + e = q->entries[j]; + + e_cycle = nxt_nncq_cycle(q, e); + t_cycle = nxt_nncq_cycle(q, t); + + if (e_cycle == t_cycle) { + nxt_nncq_tail_cmp_inc(q, t); + continue; + } + + if (nxt_nncq_next_cycle(q, e_cycle) != t_cycle) { + continue; + } + + n = nxt_nncq_new_entry(q, t_cycle, val); + + if (nxt_atomic_cmp_set(&q->entries[j], e, n)) { + break; + } + } + + nxt_nncq_tail_cmp_inc(q, t); +} + + +static nxt_nncq_atomic_t +nxt_nncq_dequeue(nxt_nncq_t volatile *q) +{ + nxt_nncq_cycle_t e_cycle, h_cycle; + nxt_nncq_atomic_t h, j, e; + + for ( ;; ) { + h = nxt_nncq_head(q); + j = nxt_nncq_map(q, h); + e = q->entries[j]; + + e_cycle = nxt_nncq_cycle(q, e); + h_cycle = nxt_nncq_cycle(q, h); + + if (e_cycle != h_cycle) { + if (nxt_nncq_next_cycle(q, e_cycle) == h_cycle) { + return nxt_nncq_empty(q); + } + + continue; + } + + if (nxt_atomic_cmp_set(&q->head, h, h + 1)) { + break; + } + } + + return nxt_nncq_index(q, e); +} + + +#endif /* _NXT_NNCQ_H_INCLUDED_ */ diff --git a/src/nxt_nvbcq.h b/src/nxt_nvbcq.h new file mode 100644 index 00000000..2b019dcc --- /dev/null +++ b/src/nxt_nvbcq.h @@ -0,0 +1,146 @@ + +/* + * Copyright (C) NGINX, Inc. 
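 *
 * Editor's note, not part of the patch: unlike the naive queue in
 * nxt_nncq.h, this queue stores values directly in the slots and uses the
 * out-of-range value NXT_NVBCQ_SIZE as the "free slot" marker, so only
 * values in the range [0, NXT_NVBCQ_SIZE) can be queued. Enqueue and
 * dequeue scan forward from a cached tail or head position and claim a
 * slot with a compare-and-swap on the entry itself; the head and tail
 * counters are advanced only as hints. Minimal usage, assuming q points
 * at a shared mapping; in this single-threaded example the dequeue
 * returns 42, while an empty queue would be reported as
 * nxt_nvbcq_empty(q):
 *
 *     nxt_nvbcq_init(q);
 *     nxt_nvbcq_enqueue(q, 42);
 *     v = nxt_nvbcq_dequeue(q);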
+ */ + +#ifndef _NXT_NVBCQ_H_INCLUDED_ +#define _NXT_NVBCQ_H_INCLUDED_ + + +/* Numeric VBart Circular Queue */ + +#define NXT_NVBCQ_SIZE 16384 + +typedef uint32_t nxt_nvbcq_atomic_t; + +struct nxt_nvbcq_s { + nxt_nvbcq_atomic_t head; + nxt_nvbcq_atomic_t entries[NXT_NVBCQ_SIZE]; + nxt_nvbcq_atomic_t tail; +}; + +typedef struct nxt_nvbcq_s nxt_nvbcq_t; + + +static inline nxt_nvbcq_atomic_t +nxt_nvbcq_head(nxt_nvbcq_t const volatile *q) +{ + return q->head; +} + + +static inline nxt_nvbcq_atomic_t +nxt_nvbcq_tail(nxt_nvbcq_t const volatile *q) +{ + return q->tail; +} + + +static inline void +nxt_nvbcq_tail_cmp_inc(nxt_nvbcq_t volatile *q, nxt_nvbcq_atomic_t t) +{ + nxt_atomic_cmp_set(&q->tail, t, t + 1); +} + + +static inline nxt_nvbcq_atomic_t +nxt_nvbcq_index(nxt_nvbcq_t const volatile *q, nxt_nvbcq_atomic_t i) +{ + return i % NXT_NVBCQ_SIZE; +} + + +static inline nxt_nvbcq_atomic_t +nxt_nvbcq_map(nxt_nvbcq_t const volatile *q, nxt_nvbcq_atomic_t i) +{ + return i % NXT_NVBCQ_SIZE; +} + + +static inline nxt_nvbcq_atomic_t +nxt_nvbcq_empty(nxt_nvbcq_t const volatile *q) +{ + return NXT_NVBCQ_SIZE; +} + + +static void +nxt_nvbcq_init(nxt_nvbcq_t volatile *q) +{ + nxt_nvbcq_atomic_t i; + + q->head = 0; + + for (i = 0; i < NXT_NVBCQ_SIZE; i++) { + q->entries[i] = NXT_NVBCQ_SIZE; + } + + q->tail = NXT_NVBCQ_SIZE; +} + + +static void +nxt_nvbcq_enqueue(nxt_nvbcq_t volatile *q, nxt_nvbcq_atomic_t val) +{ + nxt_nvbcq_atomic_t t, h, i; + + t = nxt_nvbcq_tail(q); + h = t - NXT_NVBCQ_SIZE; + + for ( ;; ) { + i = nxt_nvbcq_map(q, t); + + if (q->entries[i] == NXT_NVBCQ_SIZE + && nxt_atomic_cmp_set(&q->entries[i], NXT_NVBCQ_SIZE, val)) + { + nxt_nvbcq_tail_cmp_inc(q, t); + return; + } + + if ((t - h) == NXT_NVBCQ_SIZE) { + h = nxt_nvbcq_head(q); + + if ((t - h) == NXT_NVBCQ_SIZE) { + return; + } + } + + t++; + } +} + + +static nxt_nvbcq_atomic_t +nxt_nvbcq_dequeue(nxt_nvbcq_t volatile *q) +{ + nxt_nvbcq_atomic_t h, t, i, e; + + h = nxt_nvbcq_head(q); + t = h + NXT_NVBCQ_SIZE; + + for ( ;; ) { + i = nxt_nvbcq_map(q, h); + e = q->entries[i]; + + if (e < NXT_NVBCQ_SIZE + && nxt_atomic_cmp_set(&q->entries[i], e, NXT_NVBCQ_SIZE)) + { + nxt_atomic_cmp_set(&q->head, h, h + 1); + + return e; + } + + if ((t - h) == NXT_NVBCQ_SIZE) { + t = nxt_nvbcq_tail(q); + + if ((t - h) == NXT_NVBCQ_SIZE) { + return NXT_NVBCQ_SIZE; + } + } + + h++; + } +} + + +#endif /* _NXT_NVBCQ_H_INCLUDED_ */ diff --git a/src/test/nxt_cq_test.c b/src/test/nxt_cq_test.c new file mode 100644 index 00000000..ae69505a --- /dev/null +++ b/src/test/nxt_cq_test.c @@ -0,0 +1,578 @@ + +/* + * Copyright (C) NGINX, Inc. 
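 *
 * Editor's note, not part of the patch: auto/make builds this file twice,
 * as ncq_test (compiled with -DNXT_NCQ_TEST=1, the naive circular queue)
 * and vbcq_test (-DNXT_NCQ_TEST=0, the VBart circular queue). Worker
 * counts are given per role on the command line; a hypothetical run with
 * four enqueue/dequeue workers and verbose output could look like:
 *
 *     ./build/ncq_test --ed 4 -v
 *
 * The benchmark runs up to MAX_ITER iterations and stops early once the
 * coefficient of variation over the last STAT_ITER iterations drops below
 * MIN_COV, printing the most representative of those iterations.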
+ */ + +#include +#include +#include + +#ifndef NXT_NCQ_TEST +#define NXT_NCQ_TEST 1 +#endif + +#define NXT_QTEST_USE_THREAD 0 + +#if NXT_NCQ_TEST +#include +#else +#include +#endif + + +#define MAX_ITER 20 +#define STAT_ITER 5 +#define MIN_COV 0.02 + +extern char **environ; +static uintptr_t nops = 10000000; + +static uintptr_t nprocs_enq = 0; +static uintptr_t nprocs_deq = 0; +static uintptr_t nprocs_wenq = 0; +static uintptr_t nprocs_wdeq = 0; +static uintptr_t nprocs_enq_deq = 0; +static uintptr_t nprocs_cas = 0; +static uintptr_t nprocs_faa = 0; + +static uintptr_t nprocs = 1; + + +static size_t +elapsed_time(size_t us) +{ + struct timeval t; + + gettimeofday(&t, NULL); + + return t.tv_sec * 1000000 + t.tv_usec - us; +} + + +static double +mean(const double *times, int n) +{ + int i; + double sum; + + sum = 0; + + for (i = 0; i < n; i++) { + sum += times[i]; + } + + return sum / n; +} + + +static double +cov(const double *times, double mean, int n) +{ + int i; + double variance; + + variance = 0; + + for (i = 0; i < n; i++) { + variance += (times[i] - mean) * (times[i] - mean); + } + + variance /= n; + + return sqrt(variance) / mean; +} + +typedef struct { +#if NXT_NCQ_TEST + nxt_nncq_t free_queue; + nxt_nncq_t active_queue; +#else + nxt_nvbcq_t free_queue; + nxt_nvbcq_t active_queue; +#endif + uint32_t counter; +} nxt_cq_t; + + +static nxt_cq_t *pgq; + + +#if NXT_NCQ_TEST +#define nxt_cq_enqueue nxt_nncq_enqueue +#define nxt_cq_dequeue nxt_nncq_dequeue +#define nxt_cq_empty nxt_nncq_empty +#define nxt_cq_init nxt_nncq_init +#define NXT_CQ_SIZE NXT_NNCQ_SIZE +#else +#define nxt_cq_enqueue nxt_nvbcq_enqueue +#define nxt_cq_dequeue nxt_nvbcq_dequeue +#define nxt_cq_empty nxt_nvbcq_empty +#define nxt_cq_init nxt_nvbcq_init +#define NXT_CQ_SIZE NXT_NVBCQ_SIZE +#endif + +typedef struct { + int id; + uint64_t enq; + uint64_t deq; + uint64_t wait_enq; + uint64_t wait_deq; + uint64_t own_res; + uint64_t cas; + uint64_t faa; + +#if NXT_QTEST_USE_THREAD + nxt_thread_handle_t handle; +#else + nxt_pid_t pid; + int status; +#endif +} nxt_worker_info_t; + + +static void +cas_worker(void *p) +{ + nxt_cq_t *q; + uint32_t c; + uintptr_t i; + nxt_worker_info_t *wi; + + q = pgq; + wi = p; + + for (i = 0; i < nops / nprocs_cas; i++) { + c = q->counter; + + if (nxt_atomic_cmp_set(&q->counter, c, c + 1)) { + ++wi->cas; + } + } +} + + +static void +faa_worker(void *p) +{ + nxt_cq_t *q; + uintptr_t i; + nxt_worker_info_t *wi; + + q = pgq; + wi = p; + + for (i = 0; i < nops / nprocs_faa; i++) { + nxt_atomic_fetch_add(&q->counter, 1); + wi->faa++; + } +} + + +static void +enq_deq_worker(void *p) +{ + nxt_cq_t *q; + uintptr_t i, v; + nxt_worker_info_t *wi; + + q = pgq; + wi = p; + + for (i = 0; i < nops / nprocs_enq_deq; i++) { + v = nxt_cq_dequeue(&q->free_queue); + + if (v != nxt_cq_empty(&q->free_queue)) { + nxt_cq_enqueue(&q->active_queue, wi->id); + wi->enq++; + } + + v = nxt_cq_dequeue(&q->active_queue); + + if (v != nxt_cq_empty(&q->active_queue)) { + nxt_cq_enqueue(&q->free_queue, v); + wi->deq++; + + if ((int) v == wi->id) { + wi->own_res++; + } + } + } +} + + +static void +enq_worker(void *p) +{ + nxt_cq_t *q; + uintptr_t i, v; + nxt_worker_info_t *wi; + + q = pgq; + wi = p; + + for (i = 0; i < nops / nprocs_enq; i++) { + v = nxt_cq_dequeue(&q->free_queue); + + if (v != nxt_cq_empty(&q->free_queue)) { + nxt_cq_enqueue(&q->active_queue, v); + wi->enq++; + } + } +} + + +static void +deq_worker(void *p) +{ + nxt_cq_t *q; + uintptr_t i, v; + nxt_worker_info_t *wi; + + q = pgq; + wi = p; + + for (i = 0; i 
< nops / nprocs_deq; i++) { + v = nxt_cq_dequeue(&q->active_queue); + + if (v != nxt_cq_empty(&q->active_queue)) { + nxt_cq_enqueue(&q->free_queue, v); + ++wi->deq; + } + } +} + + +static void +wenq_worker(void *p) +{ + nxt_cq_t *q; + uintptr_t i, v; + nxt_worker_info_t *wi; + + q = pgq; + wi = p; + + for (i = 0; i < nops / nprocs_wenq; i++) { + + do { + wi->wait_enq++; + v = nxt_cq_dequeue(&q->free_queue); + } while (v == nxt_cq_empty(&q->free_queue)); + + nxt_cq_enqueue(&q->active_queue, v); + + wi->enq++; + wi->wait_enq--; + } +} + + +static void +wdeq_worker(void *p) +{ + nxt_cq_t *q; + uintptr_t i, v; + nxt_worker_info_t *wi; + + q = pgq; + wi = p; + + for (i = 0; i < nops / nprocs_wdeq; i++) { + + do { + wi->wait_deq++; + v = nxt_cq_dequeue(&q->active_queue); + } while (v == nxt_cq_empty(&q->active_queue)); + + nxt_cq_enqueue(&q->free_queue, v); + + wi->deq++; + wi->wait_deq--; + } +} + + +static nxt_int_t +worker_create(nxt_worker_info_t *wi, int id, nxt_thread_start_t start) +{ + wi->id = id; + +#if NXT_QTEST_USE_THREAD + nxt_thread_link_t *link; + + link = nxt_zalloc(sizeof(nxt_thread_link_t)); + + link->start = start; + link->work.data = wi; + + return nxt_thread_create(&wi->handle, link); + +#else + pid_t pid = fork(); + + if (pid == 0) { + start(wi); + exit(0); + + } else { + wi->pid = pid; + } + + return NXT_OK; +#endif +} + + +static void +worker_wait(nxt_worker_info_t *wi) +{ +#if NXT_QTEST_USE_THREAD + pthread_join(wi->handle, NULL); + +#else + waitpid(wi->pid, &wi->status, 0); +#endif +} + + +int nxt_cdecl +main(int argc, char **argv) +{ + int i, k, id, verbose, objective, rk; + char *a; + size_t start, elapsed; + double *stats, m, c; + uint64_t total_ops; + uintptr_t j; + nxt_task_t task; + nxt_thread_t *thr; + nxt_worker_info_t *wi; + double times[MAX_ITER], mopsec[MAX_ITER]; + + verbose = 0; + objective = 0; + + for (i = 1; i < argc; i++) { + a = argv[i]; + + if (strcmp(a, "-v") == 0) { + verbose++; + continue; + } + + if (strcmp(a, "-n") == 0 && (i + 1) < argc) { + nops = atoi(argv[++i]); + continue; + } + + if (strcmp(a, "--enq") == 0 && (i + 1) < argc) { + nprocs_enq = atoi(argv[++i]); + continue; + } + + if (strcmp(a, "--deq") == 0 && (i + 1) < argc) { + nprocs_deq = atoi(argv[++i]); + continue; + } + + if (strcmp(a, "--wenq") == 0 && (i + 1) < argc) { + nprocs_wenq = atoi(argv[++i]); + continue; + } + + if (strcmp(a, "--wdeq") == 0 && (i + 1) < argc) { + nprocs_wdeq = atoi(argv[++i]); + continue; + } + + if (strcmp(a, "--ed") == 0 && (i + 1) < argc) { + nprocs_enq_deq = atoi(argv[++i]); + continue; + } + + if (strcmp(a, "--cas") == 0 && (i + 1) < argc) { + nprocs_cas = atoi(argv[++i]); + continue; + } + + if (strcmp(a, "--faa") == 0 && (i + 1) < argc) { + nprocs_faa = atoi(argv[++i]); + continue; + } + + if (strcmp(a, "--obj") == 0 && (i + 1) < argc) { + objective = atoi(argv[++i]); + continue; + } + + printf("unknown option %s", a); + + return 1; + } + + if (nxt_lib_start("ncq_test", argv, &environ) != NXT_OK) { + return 1; + } + + nprocs = nprocs_enq + nprocs_deq + nprocs_wenq + nprocs_wdeq + + nprocs_enq_deq + nprocs_cas + nprocs_faa; + + if (nprocs == 0) { + return 0; + } + + nxt_main_log.level = NXT_LOG_INFO; + task.log = &nxt_main_log; + + thr = nxt_thread(); + thr->task = &task; + + pgq = mmap(NULL, sizeof(nxt_cq_t), PROT_READ | PROT_WRITE, + MAP_ANON | MAP_SHARED, -1, 0); + if (pgq == MAP_FAILED) { + return 2; + } + + nxt_cq_init(&pgq->free_queue); + nxt_cq_init(&pgq->active_queue); + + for(i = 0; i < NXT_CQ_SIZE; i++) { + nxt_cq_enqueue(&pgq->free_queue, 
i); + } + + if (verbose >= 1) { + printf("number of workers: %d\n", (int) nprocs); + printf("number of ops: %d\n", (int) nops); + } + + wi = mmap(NULL, nprocs * sizeof(nxt_worker_info_t), PROT_READ | PROT_WRITE, + MAP_ANON | MAP_SHARED, -1, 0); + if (wi == MAP_FAILED) { + return 3; + } + + for (k = 0; k < MAX_ITER; k++) { + nxt_memzero(wi, nprocs * sizeof(nxt_worker_info_t)); + + nxt_cq_init(&pgq->free_queue); + nxt_cq_init(&pgq->active_queue); + + for(i = 0; i < NXT_CQ_SIZE; i++) { + nxt_cq_enqueue(&pgq->free_queue, i); + } + + start = elapsed_time(0); + + id = 0; + + for (j = 0; j < nprocs_enq; j++, id++) { + worker_create(wi + id, id, enq_worker); + } + + for (j = 0; j < nprocs_deq; j++, id++) { + worker_create(wi + id, id, deq_worker); + } + + for (j = 0; j < nprocs_wenq; j++, id++) { + worker_create(wi + id, id, wenq_worker); + } + + for (j = 0; j < nprocs_wdeq; j++, id++) { + worker_create(wi + id, id, wdeq_worker); + } + + for (j = 0; j < nprocs_enq_deq; j++, id++) { + worker_create(wi + id, id, enq_deq_worker); + } + + for (j = 0; j < nprocs_cas; j++, id++) { + worker_create(wi + id, id, cas_worker); + } + + for (j = 0; j < nprocs_faa; j++, id++) { + worker_create(wi + id, id, faa_worker); + } + + for (j = 0; j < nprocs; j++) { + worker_wait(wi + j); + } + + elapsed = elapsed_time(start); + + for (j = 1; j < nprocs; j++) { + wi[0].enq += wi[j].enq; + wi[0].deq += wi[j].deq; + wi[0].wait_enq += wi[j].wait_enq; + wi[0].wait_deq += wi[j].wait_deq; + wi[0].own_res += wi[j].own_res; + wi[0].cas += wi[j].cas; + wi[0].faa += wi[j].faa; + } + + total_ops = wi[0].enq + wi[0].deq + wi[0].cas + wi[0].faa; + + if (total_ops == 0) { + total_ops = nops; + } + + times[k] = elapsed / 1000.0; + mopsec[k] = (double) total_ops / elapsed; + + if (verbose >= 2) { + printf("enq %10"PRIu64"\n", wi[0].enq); + printf("deq %10"PRIu64"\n", wi[0].deq); + printf("wait_enq %10"PRIu64"\n", wi[0].wait_enq); + printf("wait_deq %10"PRIu64"\n", wi[0].wait_deq); + printf("own_res %10"PRIu64"\n", wi[0].own_res); + printf("cas %10"PRIu64"\n", wi[0].cas); + printf("faa %10"PRIu64"\n", wi[0].faa); + printf("total ops %10"PRIu64"\n", total_ops); + printf("Mops/sec %13.2f\n", mopsec[k]); + + printf("elapsed %10d us\n", (int) elapsed); + printf("per op %10d ns\n", (int) ((1000 * elapsed) / total_ops)); + } + + if (k >= STAT_ITER) { + stats = (objective == 0) ? times : mopsec; + + m = mean(stats + k - STAT_ITER, STAT_ITER); + c = cov(stats + k - STAT_ITER, m, STAT_ITER); + + if (verbose >= 1) { + if (objective == 0) { + printf(" #%02d elapsed time: %.2f ms; Mops/sec %.2f; " + "mean time %.2f ms; cov %.4f\n", + (int) k + 1, times[k], mopsec[k], m, c); + + } else { + printf(" #%02d elapsed time: %.2f ms; Mops/sec %.2f; " + "mean Mop/sec %.2f; cov %.4f\n", + (int) k + 1, times[k], mopsec[k], m, c); + } + } + + if (c < MIN_COV) { + rk = k - STAT_ITER; + + for (i = rk + 1; i <= k; i++) { + if (fabs(stats[i] - m) < fabs(stats[rk] - m)) { + rk = i; + } + } + + printf("#%d %.2f ms; %.2f\n", rk, times[rk], mopsec[rk]); + + return 0; + } + + } else { + if (verbose >= 1) { + printf(" #%02d elapsed time: %.2f ms; Mops/sec %.2f\n", + (int) k + 1, times[k], mopsec[k]); + } + } + } + + return 0; +} -- cgit From e227fc9e6281c280c46139a81646ecd7b0510e2b Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:34 +0300 Subject: Introducing application and port shared memory queues. The goal is to minimize the number of syscalls needed to deliver a message. 
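Editor's note, not part of the original commit message: two shared memory
queue types are layered on top of the circular queues from the previous
commit. nxt_port_queue_t carries small inter-process messages, up to
NXT_PORT_QUEUE_MSG_SIZE (31) bytes, directly through a lock-free ring in
shared memory; nxt_app_queue_t carries request-headers messages from the
router to an application's shared port and keeps a per-item tracking
cookie so the router can cancel a request that has not been picked up yet
(see the nxt_router_msg_cancel() change below). A socket write is issued
only to wake up a peer that may be sleeping in recvmsg(); messages that
carry a file descriptor, or that do not fit into a queue item, still
travel over the socket and are announced by a one-byte
NXT_PORT_MSG_READ_SOCKET item so ordering between the two channels is
preserved. A condensed sketch of the send path added to
nxt_port_socket_write2(), with error handling omitted and
fits_in_queue_item() standing in for the size check rather than being a
real function:

    if (port->queue != NULL && fits_in_queue_item(b) && fd == -1) {
        res = nxt_port_queue_send(port->queue, qmsg, size, &notify);

        if (notify == 0) {
            return res;                 /* reader is already draining the queue */
        }

        type = NXT_PORT_MSG_READ_QUEUE; /* empty wake-up over the socket */
        b = NULL;

    } else {
        qmsg[0] = NXT_PORT_MSG_READ_SOCKET;
        (void) nxt_port_queue_send(port->queue, qmsg, 1, &notify);
        /* fall through: the full message is written to the socket */
    }
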
--- src/nxt_app_nncq.h | 165 +++++++ src/nxt_app_queue.h | 119 +++++ src/nxt_http_websocket.c | 16 +- src/nxt_port.c | 28 +- src/nxt_port.h | 25 +- src/nxt_port_queue.h | 102 ++++ src/nxt_port_socket.c | 271 ++++++++++- src/nxt_router.c | 201 +++++++- src/nxt_router_request.h | 4 +- src/nxt_unit.c | 1175 ++++++++++++++++++++++++++++++++++++---------- src/nxt_unit.h | 2 + 11 files changed, 1795 insertions(+), 313 deletions(-) create mode 100644 src/nxt_app_nncq.h create mode 100644 src/nxt_app_queue.h create mode 100644 src/nxt_port_queue.h diff --git a/src/nxt_app_nncq.h b/src/nxt_app_nncq.h new file mode 100644 index 00000000..f9b8ce0c --- /dev/null +++ b/src/nxt_app_nncq.h @@ -0,0 +1,165 @@ + +/* + * Copyright (C) NGINX, Inc. + */ + +#ifndef _NXT_APP_NNCQ_H_INCLUDED_ +#define _NXT_APP_NNCQ_H_INCLUDED_ + + +/* Appilcation Numeric Naive Circular Queue */ + +#define NXT_APP_NNCQ_SIZE 131072 + +typedef uint32_t nxt_app_nncq_atomic_t; +typedef uint16_t nxt_app_nncq_cycle_t; + +typedef struct { + nxt_app_nncq_atomic_t head; + nxt_app_nncq_atomic_t entries[NXT_APP_NNCQ_SIZE]; + nxt_app_nncq_atomic_t tail; +} nxt_app_nncq_t; + + +static inline nxt_app_nncq_atomic_t +nxt_app_nncq_head(nxt_app_nncq_t const volatile *q) +{ + return q->head; +} + + +static inline nxt_app_nncq_atomic_t +nxt_app_nncq_tail(nxt_app_nncq_t const volatile *q) +{ + return q->tail; +} + + +static inline void +nxt_app_nncq_tail_cmp_inc(nxt_app_nncq_t volatile *q, nxt_app_nncq_atomic_t t) +{ + nxt_atomic_cmp_set(&q->tail, t, t + 1); +} + + +static inline nxt_app_nncq_atomic_t +nxt_app_nncq_index(nxt_app_nncq_t const volatile *q, nxt_app_nncq_atomic_t i) +{ + return i % NXT_APP_NNCQ_SIZE; +} + + +static inline nxt_app_nncq_atomic_t +nxt_app_nncq_map(nxt_app_nncq_t const volatile *q, nxt_app_nncq_atomic_t i) +{ + return i % NXT_APP_NNCQ_SIZE; +} + + +static inline nxt_app_nncq_cycle_t +nxt_app_nncq_cycle(nxt_app_nncq_t const volatile *q, nxt_app_nncq_atomic_t i) +{ + return i / NXT_APP_NNCQ_SIZE; +} + + +static inline nxt_app_nncq_cycle_t +nxt_app_nncq_next_cycle(nxt_app_nncq_t const volatile *q, + nxt_app_nncq_cycle_t i) +{ + return i + 1; +} + + +static inline nxt_app_nncq_atomic_t +nxt_app_nncq_new_entry(nxt_app_nncq_t const volatile *q, + nxt_app_nncq_cycle_t cycle, + nxt_app_nncq_atomic_t i) +{ + return cycle * NXT_APP_NNCQ_SIZE + (i % NXT_APP_NNCQ_SIZE); +} + + +static inline nxt_app_nncq_atomic_t +nxt_app_nncq_empty(nxt_app_nncq_t const volatile *q) +{ + return NXT_APP_NNCQ_SIZE; +} + + +static void +nxt_app_nncq_init(nxt_app_nncq_t volatile *q) +{ + q->head = NXT_APP_NNCQ_SIZE; + nxt_memzero((void *) q->entries, + NXT_APP_NNCQ_SIZE * sizeof(nxt_app_nncq_atomic_t)); + q->tail = NXT_APP_NNCQ_SIZE; +} + + +static void +nxt_app_nncq_enqueue(nxt_app_nncq_t volatile *q, nxt_app_nncq_atomic_t val) +{ + nxt_app_nncq_cycle_t e_cycle, t_cycle; + nxt_app_nncq_atomic_t n, t, e, j; + + for ( ;; ) { + t = nxt_app_nncq_tail(q); + j = nxt_app_nncq_map(q, t); + e = q->entries[j]; + + e_cycle = nxt_app_nncq_cycle(q, e); + t_cycle = nxt_app_nncq_cycle(q, t); + + if (e_cycle == t_cycle) { + nxt_app_nncq_tail_cmp_inc(q, t); + continue; + } + + if (nxt_app_nncq_next_cycle(q, e_cycle) != t_cycle) { + continue; + } + + n = nxt_app_nncq_new_entry(q, t_cycle, val); + + if (nxt_atomic_cmp_set(&q->entries[j], e, n)) { + break; + } + } + + nxt_app_nncq_tail_cmp_inc(q, t); +} + + +static nxt_app_nncq_atomic_t +nxt_app_nncq_dequeue(nxt_app_nncq_t volatile *q) +{ + nxt_app_nncq_cycle_t e_cycle, h_cycle; + nxt_app_nncq_atomic_t h, j, e; + + for ( ;; ) 
{ + h = nxt_app_nncq_head(q); + j = nxt_app_nncq_map(q, h); + e = q->entries[j]; + + e_cycle = nxt_app_nncq_cycle(q, e); + h_cycle = nxt_app_nncq_cycle(q, h); + + if (e_cycle != h_cycle) { + if (nxt_app_nncq_next_cycle(q, e_cycle) == h_cycle) { + return nxt_app_nncq_empty(q); + } + + continue; + } + + if (nxt_atomic_cmp_set(&q->head, h, h + 1)) { + break; + } + } + + return nxt_app_nncq_index(q, e); +} + + +#endif /* _NXT_APP_NNCQ_H_INCLUDED_ */ diff --git a/src/nxt_app_queue.h b/src/nxt_app_queue.h new file mode 100644 index 00000000..127cb8f3 --- /dev/null +++ b/src/nxt_app_queue.h @@ -0,0 +1,119 @@ + +/* + * Copyright (C) NGINX, Inc. + */ + +#ifndef _NXT_APP_QUEUE_H_INCLUDED_ +#define _NXT_APP_QUEUE_H_INCLUDED_ + + +#include + + +/* Using Numeric Naive Circular Queue as a backend. */ + +#define NXT_APP_QUEUE_SIZE NXT_APP_NNCQ_SIZE +#define NXT_APP_QUEUE_MSG_SIZE 31 + +typedef struct { + uint8_t size; + uint8_t data[NXT_APP_QUEUE_MSG_SIZE]; + uint32_t tracking; +} nxt_app_queue_item_t; + + +typedef struct { + nxt_app_nncq_atomic_t nitems; + nxt_app_nncq_t free_items; + nxt_app_nncq_t queue; + nxt_app_queue_item_t items[NXT_APP_QUEUE_SIZE]; +} nxt_app_queue_t; + + +nxt_inline void +nxt_app_queue_init(nxt_app_queue_t volatile *q) +{ + nxt_app_nncq_atomic_t i; + + nxt_app_nncq_init(&q->free_items); + nxt_app_nncq_init(&q->queue); + + for (i = 0; i < NXT_APP_QUEUE_SIZE; i++) { + nxt_app_nncq_enqueue(&q->free_items, i); + } + + q->nitems = 0; +} + + +nxt_inline nxt_int_t +nxt_app_queue_send(nxt_app_queue_t volatile *q, const void *p, + uint8_t size, uint32_t tracking, int *notify, uint32_t *cookie) +{ + nxt_app_queue_item_t *qi; + nxt_app_nncq_atomic_t i; + + i = nxt_app_nncq_dequeue(&q->free_items); + if (i == nxt_app_nncq_empty(&q->free_items)) { + return NXT_AGAIN; + } + + qi = (nxt_app_queue_item_t *) &q->items[i]; + + qi->size = size; + nxt_memcpy(qi->data, p, size); + qi->tracking = tracking; + *cookie = i; + + nxt_app_nncq_enqueue(&q->queue, i); + + i = nxt_atomic_fetch_add(&q->nitems, 1); + + if (notify != NULL) { + *notify = (i == 0); + } + + return NXT_OK; +} + + +nxt_inline nxt_bool_t +nxt_app_queue_cancel(nxt_app_queue_t volatile *q, uint32_t cookie, + uint32_t tracking) +{ + nxt_app_queue_item_t *qi; + + qi = (nxt_app_queue_item_t *) &q->items[cookie]; + + return nxt_atomic_cmp_set(&qi->tracking, tracking, 0); +} + + +nxt_inline ssize_t +nxt_app_queue_recv(nxt_app_queue_t volatile *q, void *p, uint32_t *cookie) +{ + ssize_t res; + nxt_app_queue_item_t *qi; + nxt_app_nncq_atomic_t i; + + i = nxt_app_nncq_dequeue(&q->queue); + if (i == nxt_app_nncq_empty(&q->queue)) { + *cookie = 0; + return -1; + } + + qi = (nxt_app_queue_item_t *) &q->items[i]; + + res = qi->size; + nxt_memcpy(p, qi->data, qi->size); + *cookie = i; + + nxt_app_nncq_enqueue(&q->free_items, i); + + nxt_atomic_fetch_add(&q->nitems, -1); + + return res; +} + + +#endif /* _NXT_APP_QUEUE_H_INCLUDED_ */ diff --git a/src/nxt_http_websocket.c b/src/nxt_http_websocket.c index 393c20ac..1968633e 100644 --- a/src/nxt_http_websocket.c +++ b/src/nxt_http_websocket.c @@ -98,10 +98,10 @@ nxt_http_websocket_client(nxt_task_t *task, void *obj, void *data) b = next; } - res = nxt_port_socket_twrite(task, req_rpc_data->app_port, - NXT_PORT_MSG_WEBSOCKET, -1, - req_rpc_data->stream, - task->thread->engine->port->id, out, NULL); + res = nxt_port_socket_write(task, req_rpc_data->app_port, + NXT_PORT_MSG_WEBSOCKET, -1, + req_rpc_data->stream, + task->thread->engine->port->id, out); if (nxt_slow_path(res != NXT_OK)) { // TODO: handle } 
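/*
 * Editor's note, not part of the diff: nxt_port_socket_twrite() loses its
 * tracking argument in this patch, so callers that passed a NULL tracking
 * pointer, as here, switch to the plain nxt_port_socket_write() wrapper.
 * Cancellation of not-yet-processed requests is now handled by a cookie
 * stored with the queued item; see nxt_app_queue_cancel() and the
 * nxt_router_msg_cancel() change further below.
 */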
@@ -144,10 +144,10 @@ nxt_http_websocket_error_handler(nxt_task_t *task, void *obj, void *data) goto close_handler; } - (void) nxt_port_socket_twrite(task, req_rpc_data->app_port, - NXT_PORT_MSG_WEBSOCKET_LAST, - -1, req_rpc_data->stream, - task->thread->engine->port->id, NULL, NULL); + (void) nxt_port_socket_write(task, req_rpc_data->app_port, + NXT_PORT_MSG_WEBSOCKET_LAST, + -1, req_rpc_data->stream, + task->thread->engine->port->id, NULL); close_handler: diff --git a/src/nxt_port.c b/src/nxt_port.c index 54434d70..c9189d7c 100644 --- a/src/nxt_port.c +++ b/src/nxt_port.c @@ -8,6 +8,7 @@ #include #include #include +#include static void nxt_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg); @@ -68,6 +69,8 @@ nxt_port_new(nxt_task_t *task, nxt_port_id_t id, nxt_pid_t pid, nxt_queue_init(&port->messages); nxt_thread_mutex_create(&port->write_mutex); + port->queue_fd = -1; + } else { nxt_mp_destroy(mp); } @@ -99,6 +102,16 @@ nxt_port_close(nxt_task_t *task, nxt_port_t *port) nxt_router_app_port_close(task, port); } } + + if (port->queue_fd != -1) { + nxt_fd_close(port->queue_fd); + port->queue_fd = -1; + } + + if (port->queue != NULL) { + nxt_mem_munmap(port->queue, sizeof(nxt_port_queue_t)); + port->queue = NULL; + } } @@ -176,6 +189,7 @@ nxt_port_quit_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) } +/* TODO join with process_ready and move to nxt_main_process.c */ nxt_inline void nxt_port_send_new_port(nxt_task_t *task, nxt_runtime_t *rt, nxt_port_t *new_port, uint32_t stream) @@ -227,8 +241,9 @@ nxt_port_send_port(nxt_task_t *task, nxt_port_t *port, nxt_port_t *new_port, msg->max_share = port->max_share; msg->type = new_port->type; - return nxt_port_socket_write(task, port, NXT_PORT_MSG_NEW_PORT, - new_port->pair[1], stream, 0, b); + return nxt_port_socket_write2(task, port, NXT_PORT_MSG_NEW_PORT, + new_port->pair[1], new_port->queue_fd, + stream, 0, b); } @@ -279,7 +294,7 @@ nxt_port_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) msg->u.new_port = port; } - +/* TODO move to nxt_main_process.c */ void nxt_port_process_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { @@ -304,6 +319,13 @@ nxt_port_process_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) nxt_debug(task, "process %PI ready", msg->port_msg.pid); + if (msg->fd != -1) { + port->queue_fd = msg->fd; + port->queue = nxt_mem_mmap(NULL, sizeof(nxt_port_queue_t), + PROT_READ | PROT_WRITE, MAP_SHARED, msg->fd, + 0); + } + nxt_port_send_new_port(task, rt, port, msg->port_msg.stream); } diff --git a/src/nxt_port.h b/src/nxt_port.h index ab455f92..9fbf00b1 100644 --- a/src/nxt_port.h +++ b/src/nxt_port.h @@ -42,6 +42,7 @@ struct nxt_port_handlers_s { /* Request headers. */ nxt_port_handler_t req_headers; nxt_port_handler_t req_headers_ack; + nxt_port_handler_t req_body; /* Websocket frame. 
*/ nxt_port_handler_t websocket_frame; @@ -51,6 +52,8 @@ struct nxt_port_handlers_s { nxt_port_handler_t oosm; nxt_port_handler_t shm_ack; + nxt_port_handler_t read_queue; + nxt_port_handler_t read_socket; }; @@ -91,12 +94,15 @@ typedef enum { _NXT_PORT_MSG_REQ_HEADERS = nxt_port_handler_idx(req_headers), _NXT_PORT_MSG_REQ_HEADERS_ACK = nxt_port_handler_idx(req_headers_ack), + _NXT_PORT_MSG_REQ_BODY = nxt_port_handler_idx(req_body), _NXT_PORT_MSG_WEBSOCKET = nxt_port_handler_idx(websocket_frame), _NXT_PORT_MSG_DATA = nxt_port_handler_idx(data), _NXT_PORT_MSG_OOSM = nxt_port_handler_idx(oosm), _NXT_PORT_MSG_SHM_ACK = nxt_port_handler_idx(shm_ack), + _NXT_PORT_MSG_READ_QUEUE = nxt_port_handler_idx(read_queue), + _NXT_PORT_MSG_READ_SOCKET = nxt_port_handler_idx(read_socket), NXT_PORT_MSG_MAX = sizeof(nxt_port_handlers_t) / sizeof(nxt_port_handler_t), @@ -124,6 +130,7 @@ typedef enum { NXT_PORT_MSG_REMOVE_PID = nxt_msg_last(_NXT_PORT_MSG_REMOVE_PID), NXT_PORT_MSG_REQ_HEADERS = _NXT_PORT_MSG_REQ_HEADERS, + NXT_PORT_MSG_REQ_BODY = _NXT_PORT_MSG_REQ_BODY, NXT_PORT_MSG_WEBSOCKET = _NXT_PORT_MSG_WEBSOCKET, NXT_PORT_MSG_WEBSOCKET_LAST = nxt_msg_last(_NXT_PORT_MSG_WEBSOCKET), @@ -132,6 +139,8 @@ typedef enum { NXT_PORT_MSG_OOSM = nxt_msg_last(_NXT_PORT_MSG_OOSM), NXT_PORT_MSG_SHM_ACK = nxt_msg_last(_NXT_PORT_MSG_SHM_ACK), + NXT_PORT_MSG_READ_QUEUE = _NXT_PORT_MSG_READ_QUEUE, + NXT_PORT_MSG_READ_SOCKET = _NXT_PORT_MSG_READ_SOCKET, } nxt_port_msg_type_t; @@ -236,6 +245,12 @@ struct nxt_port_s { nxt_atomic_t use_count; nxt_process_type_t type; + + nxt_fd_t queue_fd; + void *queue; + + void *socket_msg; + int from_socket; }; @@ -286,17 +301,17 @@ void nxt_port_write_enable(nxt_task_t *task, nxt_port_t *port); void nxt_port_write_close(nxt_port_t *port); void nxt_port_read_enable(nxt_task_t *task, nxt_port_t *port); void nxt_port_read_close(nxt_port_t *port); -nxt_int_t nxt_port_socket_twrite(nxt_task_t *task, nxt_port_t *port, - nxt_uint_t type, nxt_fd_t fd, uint32_t stream, nxt_port_id_t reply_port, - nxt_buf_t *b, void *tracking); +nxt_int_t nxt_port_socket_write2(nxt_task_t *task, nxt_port_t *port, + nxt_uint_t type, nxt_fd_t fd, nxt_fd_t fd2, uint32_t stream, + nxt_port_id_t reply_port, nxt_buf_t *b); nxt_inline nxt_int_t nxt_port_socket_write(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type, nxt_fd_t fd, uint32_t stream, nxt_port_id_t reply_port, nxt_buf_t *b) { - return nxt_port_socket_twrite(task, port, type, fd, stream, reply_port, b, - NULL); + return nxt_port_socket_write2(task, port, type, fd, -1, stream, reply_port, + b); } void nxt_port_enable(nxt_task_t *task, nxt_port_t *port, diff --git a/src/nxt_port_queue.h b/src/nxt_port_queue.h new file mode 100644 index 00000000..d2b2326b --- /dev/null +++ b/src/nxt_port_queue.h @@ -0,0 +1,102 @@ + +/* + * Copyright (C) NGINX, Inc. + */ + +#ifndef _NXT_PORT_QUEUE_H_INCLUDED_ +#define _NXT_PORT_QUEUE_H_INCLUDED_ + + +#include + + +/* Using Numeric Naive Circular Queue as a backend. 
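 *
 * Editor's note, not part of the patch: each queue item holds at most
 * NXT_PORT_QUEUE_MSG_SIZE (31) bytes, enough for an nxt_port_msg_t header
 * plus a small payload. nitems counts the items currently queued;
 * nxt_port_queue_send() reports notify == 1 only when it enqueued into a
 * previously empty queue, which tells the writer whether the reader has
 * to be woken up through the socket. A reader drains the queue roughly
 * like this (process() stands for a hypothetical handler):
 *
 *     uint8_t  buf[NXT_PORT_QUEUE_MSG_SIZE];
 *     ssize_t  size;
 *
 *     while ((size = nxt_port_queue_recv(q, buf)) >= 0) {
 *         process(buf, size);
 *     }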
*/ + +#define NXT_PORT_QUEUE_SIZE NXT_NNCQ_SIZE +#define NXT_PORT_QUEUE_MSG_SIZE 31 + + +typedef struct { + uint8_t size; + uint8_t data[NXT_PORT_QUEUE_MSG_SIZE]; +} nxt_port_queue_item_t; + + +typedef struct { + nxt_nncq_atomic_t nitems; + nxt_nncq_t free_items; + nxt_nncq_t queue; + nxt_port_queue_item_t items[NXT_PORT_QUEUE_SIZE]; +} nxt_port_queue_t; + + +nxt_inline void +nxt_port_queue_init(nxt_port_queue_t volatile *q) +{ + nxt_nncq_atomic_t i; + + nxt_nncq_init(&q->free_items); + nxt_nncq_init(&q->queue); + + for (i = 0; i < NXT_PORT_QUEUE_SIZE; i++) { + nxt_nncq_enqueue(&q->free_items, i); + } + + q->nitems = 0; +} + + +nxt_inline nxt_int_t +nxt_port_queue_send(nxt_port_queue_t volatile *q, const void *p, uint8_t size, + int *notify) +{ + nxt_nncq_atomic_t i; + nxt_port_queue_item_t *qi; + + i = nxt_nncq_dequeue(&q->free_items); + if (i == nxt_nncq_empty(&q->free_items)) { + *notify = 0; + return NXT_AGAIN; + } + + qi = (nxt_port_queue_item_t *) &q->items[i]; + + qi->size = size; + nxt_memcpy(qi->data, p, size); + + nxt_nncq_enqueue(&q->queue, i); + + i = nxt_atomic_fetch_add(&q->nitems, 1); + + *notify = (i == 0); + + return NXT_OK; +} + + +nxt_inline ssize_t +nxt_port_queue_recv(nxt_port_queue_t volatile *q, void *p) +{ + ssize_t res; + nxt_nncq_atomic_t i; + nxt_port_queue_item_t *qi; + + i = nxt_nncq_dequeue(&q->queue); + if (i == nxt_nncq_empty(&q->queue)) { + return -1; + } + + qi = (nxt_port_queue_item_t *) &q->items[i]; + + res = qi->size; + nxt_memcpy(p, qi->data, qi->size); + + nxt_nncq_enqueue(&q->free_items, i); + + nxt_atomic_fetch_add(&q->nitems, -1); + + return res; +} + + +#endif /* _NXT_PORT_QUEUE_H_INCLUDED_ */ diff --git a/src/nxt_port_socket.c b/src/nxt_port_socket.c index 844b65ca..14e2e605 100644 --- a/src/nxt_port_socket.c +++ b/src/nxt_port_socket.c @@ -5,6 +5,7 @@ */ #include +#include static nxt_int_t nxt_port_msg_chk_insert(nxt_task_t *task, nxt_port_t *port, @@ -17,6 +18,8 @@ static nxt_buf_t *nxt_port_buf_completion(nxt_task_t *task, static nxt_port_send_msg_t *nxt_port_msg_insert_tail(nxt_port_t *port, nxt_port_send_msg_t *msg); static void nxt_port_read_handler(nxt_task_t *task, void *obj, void *data); +static void nxt_port_queue_read_handler(nxt_task_t *task, void *obj, + void *data); static void nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port, nxt_port_recv_msg_t *msg); static nxt_buf_t *nxt_port_buf_alloc(nxt_port_t *port); @@ -143,12 +146,15 @@ nxt_port_release_send_msg(nxt_port_send_msg_t *msg) nxt_int_t -nxt_port_socket_twrite(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type, - nxt_fd_t fd, uint32_t stream, nxt_port_id_t reply_port, nxt_buf_t *b, - void *tracking) +nxt_port_socket_write2(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type, + nxt_fd_t fd, nxt_fd_t fd2, uint32_t stream, nxt_port_id_t reply_port, + nxt_buf_t *b) { + int notify; + uint8_t *p; nxt_int_t res; nxt_port_send_msg_t msg; + uint8_t qmsg[NXT_PORT_QUEUE_MSG_SIZE]; msg.link.next = NULL; msg.link.prev = NULL; @@ -156,14 +162,10 @@ nxt_port_socket_twrite(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type, msg.buf = b; msg.share = 0; msg.fd = fd; - msg.fd2 = -1; + msg.fd2 = fd2; msg.close_fd = (type & NXT_PORT_MSG_CLOSE_FD) != 0; msg.allocated = 0; - if (tracking != NULL) { - nxt_port_mmap_tracking_write(msg.tracking_msg, tracking); - } - msg.port_msg.stream = stream; msg.port_msg.pid = nxt_pid; msg.port_msg.reply_port = reply_port; @@ -172,7 +174,42 @@ nxt_port_socket_twrite(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type, msg.port_msg.mmap = 0; 
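/*
 * Editor's note, not part of the diff: the block added below tries the
 * shared memory queue first. If the whole message fits into a queue item
 * and carries no file descriptor, it is enqueued directly and the socket
 * is used only for an empty NXT_PORT_MSG_READ_QUEUE wake-up when the
 * queue was previously empty; otherwise a one-byte
 * NXT_PORT_MSG_READ_SOCKET item is enqueued and the message is written to
 * the socket as before.
 */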
msg.port_msg.nf = 0; msg.port_msg.mf = 0; - msg.port_msg.tracking = tracking != NULL; + + if (port->queue != NULL && type != _NXT_PORT_MSG_READ_QUEUE) { + + if (fd == -1 + && (b == NULL + || nxt_buf_mem_used_size(&b->mem) + <= (int) (NXT_PORT_QUEUE_MSG_SIZE - sizeof(nxt_port_msg_t)))) + { + p = nxt_cpymem(qmsg, &msg.port_msg, sizeof(nxt_port_msg_t)); + if (b != NULL) { + p = nxt_cpymem(p, b->mem.pos, nxt_buf_mem_used_size(&b->mem)); + } + + res = nxt_port_queue_send(port->queue, qmsg, p - qmsg, ¬ify); + + nxt_debug(task, "port{%d,%d} %d: enqueue %d notify %d, %d", + (int) port->pid, (int) port->id, port->socket.fd, + (int) (p - qmsg), notify, res); + + if (notify == 0) { + return res; + } + + msg.port_msg.type = _NXT_PORT_MSG_READ_QUEUE; + msg.buf = NULL; + + } else { + qmsg[0] = _NXT_PORT_MSG_READ_SOCKET; + + res = nxt_port_queue_send(port->queue, qmsg, 1, ¬ify); + + nxt_debug(task, "port{%d,%d} %d: enqueue 1 notify %d, %d", + (int) port->pid, (int) port->id, port->socket.fd, + notify, res); + } + } res = nxt_port_msg_chk_insert(task, port, &msg); if (nxt_fast_path(res == NXT_DECLINED)) { @@ -308,10 +345,6 @@ next_fragment: port->max_size / PORT_MMAP_MIN_SIZE); } - if (msg->port_msg.tracking) { - iov[0].iov_len += sizeof(msg->tracking_msg); - } - sb.limit -= iov[0].iov_len; nxt_sendbuf_mem_coalesce(task, &sb); @@ -368,7 +401,6 @@ next_fragment: msg->fd2 = -1; msg->share += n; msg->port_msg.nf = 1; - msg->port_msg.tracking = 0; if (msg->share >= port->max_share) { msg->share = 0; @@ -576,7 +608,9 @@ nxt_port_read_enable(nxt_task_t *task, nxt_port_t *port) port->engine = task->thread->engine; port->socket.read_work_queue = &port->engine->fast_work_queue; - port->socket.read_handler = nxt_port_read_handler; + port->socket.read_handler = port->queue != NULL + ? 
nxt_port_queue_read_handler + : nxt_port_read_handler; port->socket.error_handler = nxt_port_error_handler; nxt_fd_event_enable_read(port->engine, &port->socket); @@ -660,6 +694,206 @@ nxt_port_read_handler(nxt_task_t *task, void *obj, void *data) } +static void +nxt_port_queue_read_handler(nxt_task_t *task, void *obj, void *data) +{ + ssize_t n; + nxt_buf_t *b; + nxt_port_t *port; + struct iovec iov[2]; + nxt_port_queue_t *queue; + nxt_port_recv_msg_t msg, *smsg; + uint8_t qmsg[NXT_PORT_QUEUE_MSG_SIZE]; + + port = nxt_container_of(obj, nxt_port_t, socket); + msg.port = port; + + nxt_assert(port->engine == task->thread->engine); + + queue = port->queue; + nxt_atomic_fetch_add(&queue->nitems, 1); + + for ( ;; ) { + + if (port->from_socket == 0) { + n = nxt_port_queue_recv(queue, qmsg); + + if (n < 0 && !port->socket.read_ready) { + nxt_atomic_fetch_add(&queue->nitems, -1); + + n = nxt_port_queue_recv(queue, qmsg); + if (n < 0) { + return; + } + + nxt_atomic_fetch_add(&queue->nitems, 1); + } + + if (n == 1 && qmsg[0] == _NXT_PORT_MSG_READ_SOCKET) { + port->from_socket++; + + nxt_debug(task, "port{%d,%d} %d: dequeue 1 read_socket %d", + (int) port->pid, (int) port->id, port->socket.fd, + port->from_socket); + + n = -1; + + continue; + } + + nxt_debug(task, "port{%d,%d} %d: dequeue %d", + (int) port->pid, (int) port->id, port->socket.fd, + (int) n); + + } else { + if ((smsg = port->socket_msg) != NULL && smsg->size != 0) { + msg.port_msg = smsg->port_msg; + b = smsg->buf; + n = smsg->size; + msg.fd = smsg->fd; + msg.fd2 = smsg->fd2; + + smsg->size = 0; + + port->from_socket--; + + nxt_debug(task, "port{%d,%d} %d: use suspended message %d", + (int) port->pid, (int) port->id, port->socket.fd, + (int) n); + + goto process; + } + + n = -1; + } + + if (n < 0 && !port->socket.read_ready) { + nxt_atomic_fetch_add(&queue->nitems, -1); + return; + } + + b = nxt_port_buf_alloc(port); + + if (nxt_slow_path(b == NULL)) { + /* TODO: disable event for some time */ + } + + if (n >= (ssize_t) sizeof(nxt_port_msg_t)) { + nxt_memcpy(&msg.port_msg, qmsg, sizeof(nxt_port_msg_t)); + + if (n > (ssize_t) sizeof(nxt_port_msg_t)) { + nxt_memcpy(b->mem.pos, qmsg + sizeof(nxt_port_msg_t), + n - sizeof(nxt_port_msg_t)); + } + + } else { + iov[0].iov_base = &msg.port_msg; + iov[0].iov_len = sizeof(nxt_port_msg_t); + + iov[1].iov_base = b->mem.pos; + iov[1].iov_len = port->max_size; + + n = nxt_socketpair_recv(&port->socket, &msg.fd, iov, 2); + + if (n == (ssize_t) sizeof(nxt_port_msg_t) + && msg.port_msg.type == _NXT_PORT_MSG_READ_QUEUE) + { + nxt_port_buf_free(port, b); + + nxt_debug(task, "port{%d,%d} %d: recv %d read_queue", + (int) port->pid, (int) port->id, port->socket.fd, + (int) n); + + continue; + } + + nxt_debug(task, "port{%d,%d} %d: recvmsg %d", + (int) port->pid, (int) port->id, port->socket.fd, + (int) n); + + if (n > 0) { + if (port->from_socket == 0) { + nxt_debug(task, "port{%d,%d} %d: suspend message %d", + (int) port->pid, (int) port->id, port->socket.fd, + (int) n); + + smsg = port->socket_msg; + + if (nxt_slow_path(smsg == NULL)) { + smsg = nxt_mp_alloc(port->mem_pool, + sizeof(nxt_port_recv_msg_t)); + + if (nxt_slow_path(smsg == NULL)) { + nxt_alert(task, "port{%d,%d} %d: suspend message " + "failed", + (int) port->pid, (int) port->id, + port->socket.fd); + + return; + } + + port->socket_msg = smsg; + + } else { + if (nxt_slow_path(smsg->size != 0)) { + nxt_alert(task, "port{%d,%d} %d: too many suspend " + "messages", + (int) port->pid, (int) port->id, + port->socket.fd); + + return; + } + } + + 
smsg->port_msg = msg.port_msg; + smsg->buf = b; + smsg->size = n; + smsg->fd = msg.fd; + smsg->fd2 = msg.fd2; + + continue; + } + + port->from_socket--; + } + } + + process: + + if (n > 0) { + msg.buf = b; + msg.size = n; + + nxt_port_read_msg_process(task, port, &msg); + + /* + * To disable instant completion or buffer re-usage, + * handler should reset 'msg.buf'. + */ + if (msg.buf == b) { + nxt_port_buf_free(port, b); + } + + continue; + } + + if (n == NXT_AGAIN) { + nxt_port_buf_free(port, b); + + nxt_fd_event_enable_read(task->thread->engine, &port->socket); + + continue; + } + + /* n == 0 || n == NXT_ERROR */ + + nxt_work_queue_add(&task->thread->engine->fast_work_queue, + nxt_port_error_handler, task, &port->socket, NULL); + return; + } +} + + typedef struct { uint32_t stream; uint32_t pid; @@ -831,12 +1065,7 @@ nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port, b = orig_b = msg->buf; b->mem.free += msg->size; - if (msg->port_msg.tracking) { - msg->cancelled = nxt_port_mmap_tracking_read(task, msg) == 0; - - } else { - msg->cancelled = 0; - } + msg->cancelled = 0; if (nxt_slow_path(msg->port_msg.nf != 0)) { diff --git a/src/nxt_router.c b/src/nxt_router.c index b8e94bcc..3dd0878b 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include typedef struct { nxt_str_t type; @@ -92,6 +94,12 @@ static nxt_int_t nxt_router_conf_create(nxt_task_t *task, static nxt_int_t nxt_router_conf_process_static(nxt_task_t *task, nxt_router_conf_t *rtcf, nxt_conf_value_t *conf); static nxt_app_t *nxt_router_app_find(nxt_queue_t *queue, nxt_str_t *name); +static nxt_int_t nxt_router_app_queue_init(nxt_task_t *task, + nxt_port_t *port); +static nxt_int_t nxt_router_port_queue_init(nxt_task_t *task, + nxt_port_t *port); +static nxt_int_t nxt_router_port_queue_map(nxt_task_t *task, + nxt_port_t *port, nxt_fd_t fd); static void nxt_router_listen_socket_rpc_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_socket_conf_t *skcf); static void nxt_router_listen_socket_ready(nxt_task_t *task, @@ -473,21 +481,25 @@ nxt_router_start_app_process(nxt_task_t *task, nxt_app_t *app) nxt_inline nxt_bool_t -nxt_router_msg_cancel(nxt_task_t *task, nxt_msg_info_t *msg_info, - uint32_t stream) +nxt_router_msg_cancel(nxt_task_t *task, nxt_request_rpc_data_t *req_rpc_data) { - nxt_buf_t *b, *next; - nxt_bool_t cancelled; + nxt_buf_t *b, *next; + nxt_bool_t cancelled; + nxt_msg_info_t *msg_info; + + msg_info = &req_rpc_data->msg_info; if (msg_info->buf == NULL) { return 0; } - cancelled = nxt_port_mmap_tracking_cancel(task, &msg_info->tracking, - stream); + cancelled = nxt_app_queue_cancel(req_rpc_data->app->shared_port->queue, + msg_info->tracking_cookie, + req_rpc_data->stream); if (cancelled) { - nxt_debug(task, "stream #%uD: cancelled by router", stream); + nxt_debug(task, "stream #%uD: cancelled by router", + req_rpc_data->stream); } for (b = msg_info->buf; b != NULL; b = next) { @@ -529,7 +541,7 @@ nxt_request_rpc_data_unlink(nxt_task_t *task, { nxt_http_request_t *r; - nxt_router_msg_cancel(task, &req_rpc_data->msg_info, req_rpc_data->stream); + nxt_router_msg_cancel(task, req_rpc_data); if (req_rpc_data->app_port != NULL) { nxt_router_app_port_release(task, req_rpc_data->app_port, @@ -573,6 +585,7 @@ nxt_request_rpc_data_unlink(nxt_task_t *task, static void nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { + nxt_int_t res; nxt_app_t *app; nxt_port_t *port, *main_app_port; nxt_runtime_t *rt; @@ -592,6 +605,17 @@ 
nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) } msg->port_msg.type = _NXT_PORT_MSG_RPC_ERROR; + + } else { + if (msg->fd2 != -1) { + res = nxt_router_port_queue_map(task, port, msg->fd2); + if (nxt_slow_path(res != NXT_OK)) { + return; + } + + nxt_fd_close(msg->fd2); + msg->fd2 = -1; + } } if (msg->port_msg.stream != 0) { @@ -1523,6 +1547,12 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, return NXT_ERROR; } + ret = nxt_router_app_queue_init(task, port); + if (nxt_slow_path(ret != NXT_OK)) { + nxt_port_use(task, port, -1); + return NXT_ERROR; + } + nxt_port_write_enable(task, port); port->app = app; @@ -1828,6 +1858,82 @@ nxt_router_app_find(nxt_queue_t *queue, nxt_str_t *name) } +static nxt_int_t +nxt_router_app_queue_init(nxt_task_t *task, nxt_port_t *port) +{ + void *mem; + nxt_int_t fd; + + fd = nxt_shm_open(task, sizeof(nxt_app_queue_t)); + if (nxt_slow_path(fd == -1)) { + return NXT_ERROR; + } + + mem = nxt_mem_mmap(NULL, sizeof(nxt_app_queue_t), + PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (nxt_slow_path(mem == MAP_FAILED)) { + nxt_fd_close(fd); + + return NXT_ERROR; + } + + nxt_app_queue_init(mem); + + port->queue_fd = fd; + port->queue = mem; + + return NXT_OK; +} + + +static nxt_int_t +nxt_router_port_queue_init(nxt_task_t *task, nxt_port_t *port) +{ + void *mem; + nxt_int_t fd; + + fd = nxt_shm_open(task, sizeof(nxt_port_queue_t)); + if (nxt_slow_path(fd == -1)) { + return NXT_ERROR; + } + + mem = nxt_mem_mmap(NULL, sizeof(nxt_port_queue_t), + PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (nxt_slow_path(mem == MAP_FAILED)) { + nxt_fd_close(fd); + + return NXT_ERROR; + } + + nxt_port_queue_init(mem); + + port->queue_fd = fd; + port->queue = mem; + + return NXT_OK; +} + + +static nxt_int_t +nxt_router_port_queue_map(nxt_task_t *task, nxt_port_t *port, nxt_fd_t fd) +{ + void *mem; + + nxt_assert(fd != -1); + + mem = nxt_mem_mmap(NULL, sizeof(nxt_port_queue_t), + PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (nxt_slow_path(mem == MAP_FAILED)) { + + return NXT_ERROR; + } + + port->queue = mem; + + return NXT_OK; +} + + void nxt_router_listener_application(nxt_router_temp_conf_t *tmcf, nxt_str_t *name, nxt_http_action_t *action) @@ -2748,6 +2854,12 @@ nxt_router_thread_start(void *data) return; } + ret = nxt_router_port_queue_init(task, port); + if (nxt_slow_path(ret != NXT_OK)) { + nxt_port_use(task, port, -1); + return; + } + engine->port = port; nxt_port_enable(task, port, &nxt_router_app_port_handlers); @@ -3670,6 +3782,7 @@ static void nxt_router_req_headers_ack_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg, nxt_request_rpc_data_t *req_rpc_data) { + int res; nxt_app_t *app; nxt_bool_t start_process; nxt_port_t *app_port, *main_app_port, *idle_port; @@ -3752,6 +3865,24 @@ nxt_router_req_headers_ack_handler(nxt_task_t *task, req_rpc_data->app_port = app_port; + if (req_rpc_data->msg_info.body_fd != -1) { + nxt_debug(task, "stream #%uD: send body fd %d", req_rpc_data->stream, + req_rpc_data->msg_info.body_fd); + + lseek(req_rpc_data->msg_info.body_fd, 0, SEEK_SET); + + res = nxt_port_socket_write(task, app_port, NXT_PORT_MSG_REQ_BODY, + req_rpc_data->msg_info.body_fd, + req_rpc_data->stream, + task->thread->engine->port->id, NULL); + + if (nxt_slow_path(res != NXT_OK)) { + r = req_rpc_data->request; + + nxt_http_request_error(task, r, NXT_HTTP_INTERNAL_SERVER_ERROR); + } + } + if (app->timeout != 0) { r = req_rpc_data->request; @@ -3886,10 +4017,10 @@ nxt_router_app_shared_port_send(nxt_task_t *task, nxt_port_t 
*app_port) msg->max_share = port->max_share; msg->type = port->type; - return nxt_port_socket_twrite(task, app_port, + return nxt_port_socket_write2(task, app_port, NXT_PORT_MSG_NEW_PORT, - port->pair[0], - 0, 0, b, NULL); + port->pair[0], port->queue_fd, + 0, 0, b); } @@ -4522,6 +4653,13 @@ nxt_router_app_prepare_request(nxt_task_t *task, nxt_int_t res; nxt_port_t *port, *reply_port; + int notify; + struct { + nxt_port_msg_t pm; + nxt_port_mmap_msg_t mm; + } msg; + + app = req_rpc_data->app; nxt_assert(app != NULL); @@ -4529,6 +4667,7 @@ nxt_router_app_prepare_request(nxt_task_t *task, port = req_rpc_data->app_port; nxt_assert(port != NULL); + nxt_assert(port->queue != NULL); reply_port = task->thread->engine->port; @@ -4569,20 +4708,38 @@ nxt_router_app_prepare_request(nxt_task_t *task, req_rpc_data->msg_info.body_fd = -1; } - if (req_rpc_data->msg_info.body_fd != -1) { - nxt_debug(task, "stream #%uD: send body fd %d", req_rpc_data->stream, - req_rpc_data->msg_info.body_fd); + msg.pm.stream = req_rpc_data->stream; + msg.pm.pid = reply_port->pid; + msg.pm.reply_port = reply_port->id; + msg.pm.type = NXT_PORT_MSG_REQ_HEADERS; + msg.pm.last = 0; + msg.pm.mmap = 1; + msg.pm.nf = 0; + msg.pm.mf = 0; + msg.pm.tracking = 0; - lseek(req_rpc_data->msg_info.body_fd, 0, SEEK_SET); - } + nxt_port_mmap_handler_t *mmap_handler = buf->parent; + nxt_port_mmap_header_t *hdr = mmap_handler->hdr; + + msg.mm.mmap_id = hdr->id; + msg.mm.chunk_id = nxt_port_mmap_chunk_id(hdr, buf->mem.pos); + msg.mm.size = nxt_buf_used_size(buf); - res = nxt_port_socket_twrite(task, port, - NXT_PORT_MSG_REQ_HEADERS, - req_rpc_data->msg_info.body_fd, - req_rpc_data->stream, reply_port->id, buf, - NULL); + res = nxt_app_queue_send(port->queue, &msg, sizeof(msg), + req_rpc_data->stream, ¬ify, + &req_rpc_data->msg_info.tracking_cookie); + if (nxt_fast_path(res == NXT_OK)) { + if (notify != 0) { + (void) nxt_port_socket_write(task, port, + NXT_PORT_MSG_READ_QUEUE, + -1, req_rpc_data->stream, + reply_port->id, NULL); - if (nxt_slow_path(res != NXT_OK)) { + } else { + nxt_debug(task, "queue is not empty"); + } + + } else { nxt_alert(task, "stream #%uD, app '%V': failed to send app message", req_rpc_data->stream, &app->name); diff --git a/src/nxt_router_request.h b/src/nxt_router_request.h index 1271520d..95044dbb 100644 --- a/src/nxt_router_request.h +++ b/src/nxt_router_request.h @@ -7,10 +7,10 @@ #define _NXT_ROUTER_REQUEST_H_INCLUDED_ -typedef struct nxt_msg_info_s { +typedef struct { nxt_buf_t *buf; nxt_fd_t body_fd; - nxt_port_mmap_tracking_t tracking; + uint32_t tracking_cookie; nxt_work_handler_t completion_handler; } nxt_msg_info_t; diff --git a/src/nxt_unit.c b/src/nxt_unit.c index 66aadd98..1008a9d6 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -7,6 +7,8 @@ #include "nxt_main.h" #include "nxt_port_memory_int.h" +#include "nxt_port_queue.h" +#include "nxt_app_queue.h" #include "nxt_unit.h" #include "nxt_unit_request.h" @@ -50,12 +52,15 @@ nxt_inline void nxt_unit_mmap_buf_unlink(nxt_unit_mmap_buf_t *mmap_buf); static int nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *router_port, nxt_unit_port_t *read_port, int *log_fd, uint32_t *stream, uint32_t *shm_limit); -static int nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream); +static int nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream, + int queue_fd); static int nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf); static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); 
static int nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg); +static int nxt_unit_process_req_body(nxt_unit_ctx_t *ctx, + nxt_unit_recv_msg_t *recv_msg); static int nxt_unit_request_check_response_port(nxt_unit_request_info_t *req, nxt_unit_port_id_t *port_id); static int nxt_unit_send_req_headers_ack(nxt_unit_request_info_t *req); @@ -92,6 +97,7 @@ static int nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx); static nxt_unit_mmap_t *nxt_unit_mmap_at(nxt_unit_mmaps_t *mmaps, uint32_t i); static nxt_port_mmap_header_t *nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n); +static int nxt_unit_shm_open(nxt_unit_ctx_t *ctx, size_t size); static int nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int fd); static int nxt_unit_get_outgoing_buf(nxt_unit_ctx_t *ctx, @@ -103,8 +109,6 @@ static void nxt_unit_mmaps_init(nxt_unit_mmaps_t *mmaps); nxt_inline void nxt_unit_process_use(nxt_unit_process_t *process); nxt_inline void nxt_unit_process_release(nxt_unit_process_t *process); static void nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps); -static int nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, - nxt_unit_recv_msg_t *recv_msg, nxt_unit_read_buf_t *rbuf); static int nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx, nxt_unit_mmaps_t *mmaps, pid_t pid, uint32_t id, nxt_port_mmap_header_t **hdr, nxt_unit_read_buf_t *rbuf); @@ -124,18 +128,22 @@ static int nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx); static int nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf); static int nxt_unit_process_pending_rbuf(nxt_unit_ctx_t *ctx); static void nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx); +nxt_inline int nxt_unit_is_read_queue(nxt_unit_read_buf_t *rbuf); +nxt_inline int nxt_unit_is_read_socket(nxt_unit_read_buf_t *rbuf); +nxt_inline int nxt_unit_is_shm_ack(nxt_unit_read_buf_t *rbuf); +nxt_inline int nxt_unit_is_quit(nxt_unit_read_buf_t *rbuf); static int nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port); static void nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl); static nxt_unit_port_t *nxt_unit_create_port(nxt_unit_ctx_t *ctx); static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst, - nxt_unit_port_t *port); + nxt_unit_port_t *port, int queue_fd); nxt_inline void nxt_unit_port_use(nxt_unit_port_t *port); nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port); static nxt_unit_port_t *nxt_unit_add_port(nxt_unit_ctx_t *ctx, - nxt_unit_port_t *port); + nxt_unit_port_t *port, void *queue); static void nxt_unit_remove_port(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id); static nxt_unit_port_t *nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, @@ -150,18 +158,28 @@ static ssize_t nxt_unit_port_send(nxt_unit_ctx_t *ctx, const void *oob, size_t oob_size); static ssize_t nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd, const void *buf, size_t buf_size, const void *oob, size_t oob_size); +static int nxt_unit_ctx_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, + nxt_unit_read_buf_t *rbuf); +nxt_inline void nxt_unit_rbuf_cpy(nxt_unit_read_buf_t *dst, + nxt_unit_read_buf_t *src); +static int nxt_unit_shared_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, + nxt_unit_read_buf_t *rbuf); static int nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf); +static int nxt_unit_port_queue_recv(nxt_unit_port_t *port, + nxt_unit_read_buf_t *rbuf); +static int nxt_unit_app_queue_recv(nxt_unit_port_t *port, + nxt_unit_read_buf_t *rbuf); static int 
nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port); static nxt_unit_port_t *nxt_unit_port_hash_find(nxt_lvlhsh_t *port_hash, nxt_unit_port_id_t *port_id, int remove); -static int nxt_unit_request_hash_add(nxt_lvlhsh_t *request_hash, - nxt_unit_request_info_impl_t *req_impl); -static nxt_unit_request_info_impl_t *nxt_unit_request_hash_find( - nxt_lvlhsh_t *request_hash, uint32_t stream, int remove); +static int nxt_unit_request_hash_add(nxt_unit_ctx_t *ctx, + nxt_unit_request_info_t *req); +static nxt_unit_request_info_t *nxt_unit_request_hash_find( + nxt_unit_ctx_t *ctx, uint32_t stream, int remove); static char * nxt_unit_snprint_prefix(char *p, char *end, pid_t pid, int level); @@ -217,6 +235,7 @@ struct nxt_unit_request_info_impl_s { nxt_unit_req_state_t state; uint8_t websocket; + uint8_t in_hash; /* for nxt_unit_ctx_impl_t.free_req or active_req */ nxt_queue_link_t link; @@ -349,6 +368,11 @@ struct nxt_unit_port_impl_s { nxt_queue_t awaiting_req; int ready; + + void *queue; + + int from_socket; + nxt_unit_read_buf_t *socket_rbuf; }; @@ -375,7 +399,8 @@ typedef struct { nxt_unit_ctx_t * nxt_unit_init(nxt_unit_init_t *init) { - int rc; + int rc, queue_fd; + void *mem; uint32_t ready_stream, shm_limit; nxt_unit_ctx_t *ctx; nxt_unit_impl_t *lib; @@ -386,6 +411,8 @@ nxt_unit_init(nxt_unit_init_t *init) return NULL; } + queue_fd = -1; + if (init->ready_port.id.pid != 0 && init->ready_stream != 0 && init->read_port.id.pid != 0) @@ -422,33 +449,58 @@ nxt_unit_init(nxt_unit_init_t *init) ctx = &lib->main_ctx.ctx; - lib->router_port = nxt_unit_add_port(ctx, &router_port); + lib->router_port = nxt_unit_add_port(ctx, &router_port, NULL); if (nxt_slow_path(lib->router_port == NULL)) { nxt_unit_alert(NULL, "failed to add router_port"); goto fail; } - lib->main_ctx.read_port = nxt_unit_add_port(ctx, &read_port); + queue_fd = nxt_unit_shm_open(ctx, sizeof(nxt_port_queue_t)); + if (nxt_slow_path(queue_fd == -1)) { + goto fail; + } + + mem = mmap(NULL, sizeof(nxt_port_queue_t), + PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0); + if (nxt_slow_path(mem == MAP_FAILED)) { + nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd, + strerror(errno), errno); + + goto fail; + } + + nxt_port_queue_init(mem); + + lib->main_ctx.read_port = nxt_unit_add_port(ctx, &read_port, mem); if (nxt_slow_path(lib->main_ctx.read_port == NULL)) { nxt_unit_alert(NULL, "failed to add read_port"); + munmap(mem, sizeof(nxt_port_queue_t)); + goto fail; } - rc = nxt_unit_ready(ctx, ready_port.out_fd, ready_stream); + rc = nxt_unit_ready(ctx, ready_port.out_fd, ready_stream, queue_fd); if (nxt_slow_path(rc != NXT_UNIT_OK)) { nxt_unit_alert(NULL, "failed to send READY message"); + munmap(mem, sizeof(nxt_port_queue_t)); + goto fail; } close(ready_port.out_fd); + close(queue_fd); return ctx; fail: + if (queue_fd != -1) { + close(queue_fd); + } + nxt_unit_ctx_release(&lib->main_ctx.ctx); return NULL; @@ -497,6 +549,7 @@ nxt_unit_create(nxt_unit_init_t *init) rc = nxt_unit_ctx_init(lib, &lib->main_ctx, init->ctx_data); if (nxt_slow_path(rc != NXT_UNIT_OK)) { + pthread_mutex_destroy(&lib->mutex); goto fail; } @@ -505,6 +558,7 @@ nxt_unit_create(nxt_unit_init_t *init) if (cb->request_handler == NULL) { nxt_unit_alert(NULL, "request_handler is NULL"); + pthread_mutex_destroy(&lib->mutex); goto fail; } @@ -765,12 +819,17 @@ nxt_unit_read_env(nxt_unit_port_t *ready_port, nxt_unit_port_t *router_port, static int -nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream) +nxt_unit_ready(nxt_unit_ctx_t *ctx, int 
ready_fd, uint32_t stream, int queue_fd) { ssize_t res; nxt_port_msg_t msg; nxt_unit_impl_t *lib; + union { + struct cmsghdr cm; + char space[CMSG_SPACE(sizeof(int))]; + } cmsg; + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); msg.stream = stream; @@ -783,7 +842,25 @@ nxt_unit_ready(nxt_unit_ctx_t *ctx, int ready_fd, uint32_t stream) msg.mf = 0; msg.tracking = 0; - res = nxt_unit_sendmsg(ctx, ready_fd, &msg, sizeof(msg), NULL, 0); + memset(&cmsg, 0, sizeof(cmsg)); + + cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int)); + cmsg.cm.cmsg_level = SOL_SOCKET; + cmsg.cm.cmsg_type = SCM_RIGHTS; + + /* + * memcpy() is used instead of simple + * *(int *) CMSG_DATA(&cmsg.cm) = fd; + * because GCC 4.4 with -O2/3/s optimization may issue a warning: + * dereferencing type-punned pointer will break strict-aliasing rules + * + * Fortunately, GCC with -O1 compiles this nxt_memcpy() + * in the same simple assignment as in the code above. + */ + memcpy(CMSG_DATA(&cmsg.cm), &queue_fd, sizeof(int)); + + res = nxt_unit_sendmsg(ctx, ready_fd, &msg, sizeof(msg), + &cmsg, sizeof(cmsg)); if (res != sizeof(msg)) { return NXT_UNIT_ERROR; } @@ -838,6 +915,10 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) goto fail; } + nxt_unit_debug(ctx, "#%"PRIu32": process message %d fd %d fd2 %d", + port_msg->stream, (int) port_msg->type, + recv_msg.fd, recv_msg.fd2); + recv_msg.stream = port_msg->stream; recv_msg.pid = port_msg->pid; recv_msg.reply_port = port_msg->reply_port; @@ -853,19 +934,6 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) goto fail; } - if (port_msg->tracking) { - rc = nxt_unit_tracking_read(ctx, &recv_msg, rbuf); - - if (nxt_slow_path(rc != NXT_UNIT_OK)) { - if (rc == NXT_UNIT_AGAIN) { - recv_msg.fd = -1; - recv_msg.fd2 = -1; - } - - goto fail; - } - } - /* Fragmentation is unsupported. 
*/ if (nxt_slow_path(port_msg->nf != 0 || port_msg->mf != 0)) { nxt_unit_warn(ctx, "#%"PRIu32": fragmented message type (%d)", @@ -929,6 +997,10 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) rc = nxt_unit_process_req_headers(ctx, &recv_msg); break; + case _NXT_PORT_MSG_REQ_BODY: + rc = nxt_unit_process_req_body(ctx, &recv_msg); + break; + case _NXT_PORT_MSG_WEBSOCKET: rc = nxt_unit_process_websocket(ctx, &recv_msg); break; @@ -992,6 +1064,7 @@ static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) { int nb; + void *mem; nxt_unit_impl_t *lib; nxt_unit_port_t new_port, *port; nxt_port_msg_new_port_t *new_port_msg; @@ -1013,9 +1086,9 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) new_port_msg = recv_msg->start; - nxt_unit_debug(ctx, "#%"PRIu32": new_port: %d,%d fd %d", + nxt_unit_debug(ctx, "#%"PRIu32": new_port: port{%d,%d} fd %d fd2 %d", recv_msg->stream, (int) new_port_msg->pid, - (int) new_port_msg->id, recv_msg->fd); + (int) new_port_msg->id, recv_msg->fd, recv_msg->fd2); lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); @@ -1025,6 +1098,9 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) new_port.in_fd = recv_msg->fd; new_port.out_fd = -1; + mem = mmap(NULL, sizeof(nxt_app_queue_t), PROT_READ | PROT_WRITE, + MAP_SHARED, recv_msg->fd2, 0); + } else { nb = 0; @@ -1041,14 +1117,23 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) new_port.in_fd = -1; new_port.out_fd = recv_msg->fd; + + mem = mmap(NULL, sizeof(nxt_port_queue_t), PROT_READ | PROT_WRITE, + MAP_SHARED, recv_msg->fd2, 0); } + if (nxt_slow_path(mem == MAP_FAILED)) { + nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", recv_msg->fd2, + strerror(errno), errno); + + return NXT_UNIT_ERROR; + } new_port.data = NULL; recv_msg->fd = -1; - port = nxt_unit_add_port(ctx, &new_port); + port = nxt_unit_add_port(ctx, &new_port, mem); if (nxt_slow_path(port == NULL)) { return NXT_UNIT_ERROR; } @@ -1134,6 +1219,7 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) req->response_max_fields = 0; req_impl->state = NXT_UNIT_RS_START; req_impl->websocket = 0; + req_impl->in_hash = 0; nxt_unit_debug(ctx, "#%"PRIu32": %.*s %.*s (%d)", recv_msg->stream, (int) r->method_length, @@ -1151,12 +1237,82 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) if (nxt_fast_path(res == NXT_UNIT_OK)) { res = nxt_unit_send_req_headers_ack(req); - if (nxt_slow_path(res != NXT_UNIT_OK)) { - return res; + if (nxt_slow_path(res == NXT_UNIT_ERROR)) { + nxt_unit_request_done(req, NXT_UNIT_ERROR); + + return NXT_UNIT_ERROR; } lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + if (req->content_length + > (uint64_t) (req->content_buf->end - req->content_buf->free)) + { + res = nxt_unit_request_hash_add(ctx, req); + if (nxt_slow_path(res != NXT_UNIT_OK)) { + nxt_unit_req_warn(req, "failed to add request to hash"); + + nxt_unit_request_done(req, NXT_UNIT_ERROR); + + return NXT_UNIT_ERROR; + } + + /* + * If application have separate data handler, we may start + * request processing and process data when it is arrived. 
+ */ + if (lib->callbacks.data_handler == NULL) { + return NXT_UNIT_OK; + } + } + + lib->callbacks.request_handler(req); + } + + return NXT_UNIT_OK; +} + + +static int +nxt_unit_process_req_body(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) +{ + uint64_t l; + nxt_unit_impl_t *lib; + nxt_unit_mmap_buf_t *b; + nxt_unit_request_info_t *req; + + req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last); + if (req == NULL) { + return NXT_UNIT_OK; + } + + l = req->content_buf->end - req->content_buf->free; + + for (b = recv_msg->incoming_buf; b != NULL; b = b->next) { + b->req = req; + l += b->buf.end - b->buf.free; + } + + if (recv_msg->incoming_buf != NULL) { + b = nxt_container_of(req->content_buf, nxt_unit_mmap_buf_t, buf); + + /* "Move" incoming buffer list to req_impl. */ + nxt_unit_mmap_buf_insert_tail(&b->next, recv_msg->incoming_buf); + recv_msg->incoming_buf = NULL; + } + + req->content_fd = recv_msg->fd; + recv_msg->fd = -1; + + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + + if (lib->callbacks.data_handler != NULL) { + lib->callbacks.data_handler(req); + + return NXT_UNIT_OK; + } + + if (req->content_fd != -1 || l == req->content_length) { lib->callbacks.request_handler(req); } @@ -1260,6 +1416,9 @@ nxt_unit_request_check_response_port(nxt_unit_request_info_t *req, nxt_queue_insert_tail(&process->ports, &port_impl->link); port_impl->process = process; + port_impl->queue = NULL; + port_impl->from_socket = 0; + port_impl->socket_rbuf = NULL; nxt_queue_init(&port_impl->awaiting_req); @@ -1321,21 +1480,17 @@ nxt_unit_process_websocket(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) size_t hsize; nxt_unit_impl_t *lib; nxt_unit_mmap_buf_t *b; - nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_callbacks_t *cb; nxt_unit_request_info_t *req; nxt_unit_request_info_impl_t *req_impl; nxt_unit_websocket_frame_impl_t *ws_impl; - ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); - - req_impl = nxt_unit_request_hash_find(&ctx_impl->requests, recv_msg->stream, - recv_msg->last); - if (req_impl == NULL) { + req = nxt_unit_request_hash_find(ctx, recv_msg->stream, recv_msg->last); + if (nxt_slow_path(req == NULL)) { return NXT_UNIT_OK; } - req = &req_impl->req; + req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); cb = &lib->callbacks; @@ -1501,12 +1656,12 @@ nxt_unit_request_info_release(nxt_unit_request_info_t *req) req->response = NULL; req->response_buf = NULL; - if (req_impl->websocket) { - nxt_unit_request_hash_find(&ctx_impl->requests, req_impl->stream, 1); - - req_impl->websocket = 0; + if (req_impl->in_hash) { + nxt_unit_request_hash_find(req->ctx, req_impl->stream, 1); } + req_impl->websocket = 0; + while (req_impl->outgoing_buf != NULL) { nxt_unit_mmap_buf_free(req_impl->outgoing_buf); } @@ -2170,7 +2325,6 @@ int nxt_unit_response_upgrade(nxt_unit_request_info_t *req) { int rc; - nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_request_info_impl_t *req_impl; req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); @@ -2193,9 +2347,7 @@ nxt_unit_response_upgrade(nxt_unit_request_info_t *req) return NXT_UNIT_ERROR; } - ctx_impl = nxt_container_of(req->ctx, nxt_unit_ctx_impl_t, ctx); - - rc = nxt_unit_request_hash_add(&ctx_impl->requests, req_impl); + rc = nxt_unit_request_hash_add(req->ctx, req); if (nxt_slow_path(rc != NXT_UNIT_OK)) { nxt_unit_req_warn(req, "upgrade: failed to add request to hash"); @@ -2466,6 +2618,8 @@ nxt_unit_read_buf_get(nxt_unit_ctx_t *ctx) 
pthread_mutex_unlock(&ctx_impl->mutex); + memset(rbuf->oob, 0, sizeof(struct cmsghdr)); + return rbuf; } @@ -2564,6 +2718,8 @@ nxt_unit_response_write_nb(nxt_unit_request_info_t *req, const void *start, nxt_unit_request_info_impl_t *req_impl; char local_buf[NXT_UNIT_LOCAL_BUF_SIZE]; + nxt_unit_req_debug(req, "write: %d", (int) size); + req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); part_start = start; @@ -2743,9 +2899,11 @@ nxt_unit_request_read(nxt_unit_request_info_t *req, void *dst, size_t size) buf_res = nxt_unit_buf_read(&req->content_buf, &req->content_length, dst, size); + nxt_unit_req_debug(req, "read: %d", (int) buf_res); + if (buf_res < (ssize_t) size && req->content_fd != -1) { res = read(req->content_fd, dst, size); - if (res < 0) { + if (nxt_slow_path(res < 0)) { nxt_unit_req_alert(req, "failed to read content: %s (%d)", strerror(errno), errno); @@ -3301,7 +3459,7 @@ nxt_unit_send_oosm(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) static int nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx) { - nxt_port_msg_t *port_msg; + int res; nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_read_buf_t *rbuf; @@ -3313,21 +3471,15 @@ nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx) return NXT_UNIT_ERROR; } - memset(rbuf->oob, 0, sizeof(struct cmsghdr)); - - nxt_unit_port_recv(ctx, ctx_impl->read_port, rbuf); - - if (nxt_slow_path(rbuf->size < (ssize_t) sizeof(nxt_port_msg_t))) { + res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf); + if (res == NXT_UNIT_ERROR) { nxt_unit_read_buf_release(ctx, rbuf); return NXT_UNIT_ERROR; } - port_msg = (nxt_port_msg_t *) rbuf->buf; - - if (port_msg->type == _NXT_PORT_MSG_SHM_ACK) { + if (nxt_unit_is_shm_ack(rbuf)) { nxt_unit_read_buf_release(ctx, rbuf); - break; } @@ -3337,7 +3489,7 @@ nxt_unit_wait_shm_ack(nxt_unit_ctx_t *ctx) pthread_mutex_unlock(&ctx_impl->mutex); - if (port_msg->type == _NXT_PORT_MSG_QUIT) { + if (nxt_unit_is_quit(rbuf)) { nxt_unit_debug(ctx, "oosm: quit received"); return NXT_UNIT_ERROR; @@ -3406,7 +3558,6 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) { int i, fd, rc; void *mem; - char name[64]; nxt_unit_mmap_t *mm; nxt_unit_impl_t *lib; nxt_port_mmap_header_t *hdr; @@ -3420,59 +3571,8 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) return NULL; } - snprintf(name, sizeof(name), NXT_SHM_PREFIX "unit.%d.%p", - lib->pid, (void *) pthread_self()); - -#if (NXT_HAVE_MEMFD_CREATE) - - fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC); - if (nxt_slow_path(fd == -1)) { - nxt_unit_alert(ctx, "memfd_create(%s) failed: %s (%d)", name, - strerror(errno), errno); - - goto remove_fail; - } - - nxt_unit_debug(ctx, "memfd_create(%s): %d", name, fd); - -#elif (NXT_HAVE_SHM_OPEN_ANON) - - fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR); - if (nxt_slow_path(fd == -1)) { - nxt_unit_alert(ctx, "shm_open(SHM_ANON) failed: %s (%d)", - strerror(errno), errno); - - goto remove_fail; - } - -#elif (NXT_HAVE_SHM_OPEN) - - /* Just in case. */ - shm_unlink(name); - - fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR); + fd = nxt_unit_shm_open(ctx, PORT_MMAP_SIZE); if (nxt_slow_path(fd == -1)) { - nxt_unit_alert(ctx, "shm_open(%s) failed: %s (%d)", name, - strerror(errno), errno); - - goto remove_fail; - } - - if (nxt_slow_path(shm_unlink(name) == -1)) { - nxt_unit_warn(ctx, "shm_unlink(%s) failed: %s (%d)", name, - strerror(errno), errno); - } - -#else - -#error No working shared memory implementation. 
- -#endif - - if (nxt_slow_path(ftruncate(fd, PORT_MMAP_SIZE) == -1)) { - nxt_unit_alert(ctx, "ftruncate(%d) failed: %s (%d)", fd, - strerror(errno), errno); - goto remove_fail; } @@ -3481,6 +3581,8 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", fd, strerror(errno), errno); + close(fd); + goto remove_fail; } @@ -3532,6 +3634,80 @@ remove_fail: } +static int +nxt_unit_shm_open(nxt_unit_ctx_t *ctx, size_t size) +{ + int fd; + nxt_unit_impl_t *lib; + + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + +#if (NXT_HAVE_MEMFD_CREATE || NXT_HAVE_SHM_OPEN) + char name[64]; + + snprintf(name, sizeof(name), NXT_SHM_PREFIX "unit.%d.%p", + lib->pid, (void *) pthread_self()); +#endif + +#if (NXT_HAVE_MEMFD_CREATE) + + fd = syscall(SYS_memfd_create, name, MFD_CLOEXEC); + if (nxt_slow_path(fd == -1)) { + nxt_unit_alert(ctx, "memfd_create(%s) failed: %s (%d)", name, + strerror(errno), errno); + + return -1; + } + + nxt_unit_debug(ctx, "memfd_create(%s): %d", name, fd); + +#elif (NXT_HAVE_SHM_OPEN_ANON) + + fd = shm_open(SHM_ANON, O_RDWR, S_IRUSR | S_IWUSR); + if (nxt_slow_path(fd == -1)) { + nxt_unit_alert(ctx, "shm_open(SHM_ANON) failed: %s (%d)", + strerror(errno), errno); + + return -1; + } + +#elif (NXT_HAVE_SHM_OPEN) + + /* Just in case. */ + shm_unlink(name); + + fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR); + if (nxt_slow_path(fd == -1)) { + nxt_unit_alert(ctx, "shm_open(%s) failed: %s (%d)", name, + strerror(errno), errno); + + return -1; + } + + if (nxt_slow_path(shm_unlink(name) == -1)) { + nxt_unit_alert(ctx, "shm_unlink(%s) failed: %s (%d)", name, + strerror(errno), errno); + } + +#else + +#error No working shared memory implementation. + +#endif + + if (nxt_slow_path(ftruncate(fd, size) == -1)) { + nxt_unit_alert(ctx, "ftruncate(%d) failed: %s (%d)", fd, + strerror(errno), errno); + + close(fd); + + return -1; + } + + return fd; +} + + static int nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int fd) { @@ -3797,63 +3973,8 @@ nxt_unit_mmaps_destroy(nxt_unit_mmaps_t *mmaps) static int -nxt_unit_tracking_read(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg, - nxt_unit_read_buf_t *rbuf) -{ - int res; - nxt_chunk_id_t c; - nxt_unit_impl_t *lib; - nxt_port_mmap_header_t *hdr; - nxt_port_mmap_tracking_msg_t *tracking_msg; - - if (recv_msg->size < (int) sizeof(nxt_port_mmap_tracking_msg_t)) { - nxt_unit_warn(ctx, "#%"PRIu32": tracking_read: too small message (%d)", - recv_msg->stream, (int) recv_msg->size); - - return NXT_UNIT_ERROR; - } - - tracking_msg = recv_msg->start; - - recv_msg->start = tracking_msg + 1; - recv_msg->size -= sizeof(nxt_port_mmap_tracking_msg_t); - - lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - - pthread_mutex_lock(&lib->incoming.mutex); - - res = nxt_unit_check_rbuf_mmap(ctx, &lib->incoming, - recv_msg->pid, tracking_msg->mmap_id, - &hdr, rbuf); - - if (nxt_slow_path(res != NXT_UNIT_OK)) { - return res; - } - - c = tracking_msg->tracking_id; - res = nxt_atomic_cmp_set(hdr->tracking + c, recv_msg->stream, 0); - - if (res == 0) { - nxt_unit_debug(ctx, "#%"PRIu32": tracking cancelled", - recv_msg->stream); - - nxt_port_mmap_set_chunk_free(hdr->free_tracking_map, c); - - res = NXT_UNIT_CANCELLED; - - } else { - res = NXT_UNIT_OK; - } - - pthread_mutex_unlock(&lib->incoming.mutex); - - return res; -} - - -static int -nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx, nxt_unit_mmaps_t *mmaps, - pid_t pid, uint32_t id, nxt_port_mmap_header_t **hdr, 
+nxt_unit_check_rbuf_mmap(nxt_unit_ctx_t *ctx, nxt_unit_mmaps_t *mmaps, + pid_t pid, uint32_t id, nxt_port_mmap_header_t **hdr, nxt_unit_read_buf_t *rbuf) { int res, need_rbuf; @@ -4154,7 +4275,7 @@ nxt_unit_process_get(nxt_unit_impl_t *lib, pid_t pid) } process->pid = pid; - process->use_count = 1; + process->use_count = 2; process->next_port_id = 0; process->lib = lib; @@ -4176,8 +4297,6 @@ nxt_unit_process_get(nxt_unit_impl_t *lib, pid_t pid) break; } - nxt_unit_process_use(process); - return process; } @@ -4293,22 +4412,52 @@ nxt_unit_run_once_impl(nxt_unit_ctx_t *ctx) static int nxt_unit_read_buf(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) { - int res, err; - nxt_unit_impl_t *lib; - nxt_unit_ctx_impl_t *ctx_impl; - struct pollfd fds[2]; + int nevents, res, err; + nxt_unit_impl_t *lib; + nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_port_impl_t *port_impl; + struct pollfd fds[2]; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); - memset(rbuf->oob, 0, sizeof(struct cmsghdr)); - if (ctx_impl->wait_items > 0 || lib->shared_port == NULL) { - return nxt_unit_port_recv(ctx, ctx_impl->read_port, rbuf); + + return nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf); } + port_impl = nxt_container_of(ctx_impl->read_port, nxt_unit_port_impl_t, + port); + retry: + if (port_impl->from_socket == 0) { + res = nxt_unit_port_queue_recv(ctx_impl->read_port, rbuf); + if (res == NXT_UNIT_OK) { + if (nxt_unit_is_read_socket(rbuf)) { + port_impl->from_socket++; + + nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d", + (int) ctx_impl->read_port->id.pid, + (int) ctx_impl->read_port->id.id, + port_impl->from_socket); + + } else { + nxt_unit_debug(ctx, "port{%d,%d} dequeue %d", + (int) ctx_impl->read_port->id.pid, + (int) ctx_impl->read_port->id.id, + (int) rbuf->size); + + return NXT_UNIT_OK; + } + } + } + + res = nxt_unit_app_queue_recv(lib->shared_port, rbuf); + if (res == NXT_UNIT_OK) { + return NXT_UNIT_OK; + } + fds[0].fd = ctx_impl->read_port->in_fd; fds[0].events = POLLIN; fds[0].revents = 0; @@ -4317,31 +4466,47 @@ retry: fds[1].events = POLLIN; fds[1].revents = 0; - res = poll(fds, 2, -1); - if (nxt_slow_path(res < 0)) { + nevents = poll(fds, 2, -1); + if (nxt_slow_path(nevents == -1)) { err = errno; if (err == EINTR) { goto retry; } - nxt_unit_alert(ctx, "poll() failed: %s (%d)", - strerror(err), err); + nxt_unit_alert(ctx, "poll(%d,%d) failed: %s (%d)", + fds[0].fd, fds[1].fd, strerror(err), err); rbuf->size = -1; return (err == EAGAIN) ? 
NXT_UNIT_AGAIN : NXT_UNIT_ERROR; } + nxt_unit_debug(ctx, "poll(%d,%d): %d, revents [%04uXi, %04uXi]", + fds[0].fd, fds[1].fd, nevents, fds[0].revents, + fds[1].revents); + if ((fds[0].revents & POLLIN) != 0) { - return nxt_unit_port_recv(ctx, ctx_impl->read_port, rbuf); + res = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf); + if (res == NXT_UNIT_AGAIN) { + goto retry; + } + + return res; } if ((fds[1].revents & POLLIN) != 0) { - return nxt_unit_port_recv(ctx, lib->shared_port, rbuf); + res = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf); + if (res == NXT_UNIT_AGAIN) { + goto retry; + } + + return res; } - rbuf->size = -1; + nxt_unit_alert(ctx, "poll(%d,%d): %d unexpected revents [%04uXi, %04uXi]", + fds[0].fd, fds[1].fd, nevents, fds[0].revents, + fds[1].revents); return NXT_UNIT_ERROR; } @@ -4392,9 +4557,11 @@ nxt_unit_process_pending_rbuf(nxt_unit_ctx_t *ctx) static void nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx) { + int res; nxt_queue_t ready_req; nxt_unit_impl_t *lib; nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_request_info_t *req; nxt_unit_request_info_impl_t *req_impl; nxt_queue_init(&ready_req); @@ -4419,7 +4586,35 @@ nxt_unit_process_ready_req(nxt_unit_ctx_t *ctx) { lib = nxt_container_of(ctx_impl->ctx.unit, nxt_unit_impl_t, unit); - (void) nxt_unit_send_req_headers_ack(&req_impl->req); + req = &req_impl->req; + + res = nxt_unit_send_req_headers_ack(req); + if (nxt_slow_path(res != NXT_UNIT_OK)) { + nxt_unit_request_done(req, NXT_UNIT_ERROR); + + continue; + } + + if (req->content_length + > (uint64_t) (req->content_buf->end - req->content_buf->free)) + { + res = nxt_unit_request_hash_add(ctx, req); + if (nxt_slow_path(res != NXT_UNIT_OK)) { + nxt_unit_req_warn(req, "failed to add request to hash"); + + nxt_unit_request_done(req, NXT_UNIT_ERROR); + + continue; + } + + /* + * If application have separate data handler, we may start + * request processing and process data when it is arrived. 
+ */ + if (lib->callbacks.data_handler == NULL) { + continue; + } + } lib->callbacks.request_handler(&req_impl->req); @@ -4432,6 +4627,7 @@ nxt_unit_run_ctx(nxt_unit_ctx_t *ctx) { int rc; nxt_unit_impl_t *lib; + nxt_unit_read_buf_t *rbuf; nxt_unit_ctx_impl_t *ctx_impl; nxt_unit_ctx_use(ctx); @@ -4442,11 +4638,30 @@ nxt_unit_run_ctx(nxt_unit_ctx_t *ctx) rc = NXT_UNIT_OK; while (nxt_fast_path(lib->online)) { - rc = nxt_unit_process_port_msg_impl(ctx, ctx_impl->read_port); + rbuf = nxt_unit_read_buf_get(ctx); + if (nxt_slow_path(rbuf == NULL)) { + rc = NXT_UNIT_ERROR; + break; + } + + retry: + rc = nxt_unit_ctx_port_recv(ctx, ctx_impl->read_port, rbuf); + if (rc == NXT_UNIT_AGAIN) { + goto retry; + } + + rc = nxt_unit_process_msg(ctx, rbuf); + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { + break; + } + + rc = nxt_unit_process_pending_rbuf(ctx); if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { break; } + + nxt_unit_process_ready_req(ctx); } nxt_unit_ctx_release(ctx); @@ -4455,11 +4670,68 @@ nxt_unit_run_ctx(nxt_unit_ctx_t *ctx) } +nxt_inline int +nxt_unit_is_read_queue(nxt_unit_read_buf_t *rbuf) +{ + nxt_port_msg_t *port_msg; + + if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) { + port_msg = (nxt_port_msg_t *) rbuf->buf; + + return port_msg->type == _NXT_PORT_MSG_READ_QUEUE; + } + + return 0; +} + + +nxt_inline int +nxt_unit_is_read_socket(nxt_unit_read_buf_t *rbuf) +{ + if (nxt_fast_path(rbuf->size == 1)) { + return rbuf->buf[0] == _NXT_PORT_MSG_READ_SOCKET; + } + + return 0; +} + + +nxt_inline int +nxt_unit_is_shm_ack(nxt_unit_read_buf_t *rbuf) +{ + nxt_port_msg_t *port_msg; + + if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) { + port_msg = (nxt_port_msg_t *) rbuf->buf; + + return port_msg->type == _NXT_PORT_MSG_SHM_ACK; + } + + return 0; +} + + +nxt_inline int +nxt_unit_is_quit(nxt_unit_read_buf_t *rbuf) +{ + nxt_port_msg_t *port_msg; + + if (nxt_fast_path(rbuf->size == (ssize_t) sizeof(nxt_port_msg_t))) { + port_msg = (nxt_port_msg_t *) rbuf->buf; + + return port_msg->type == _NXT_PORT_MSG_QUIT; + } + + return 0; +} + + int nxt_unit_run_shared(nxt_unit_ctx_t *ctx) { - int rc; - nxt_unit_impl_t *lib; + int rc; + nxt_unit_impl_t *lib; + nxt_unit_read_buf_t *rbuf; nxt_unit_ctx_use(ctx); @@ -4467,11 +4739,35 @@ nxt_unit_run_shared(nxt_unit_ctx_t *ctx) rc = NXT_UNIT_OK; while (nxt_fast_path(lib->online)) { - rc = nxt_unit_process_port_msg_impl(ctx, lib->shared_port); + rbuf = nxt_unit_read_buf_get(ctx); + if (nxt_slow_path(rbuf == NULL)) { + rc = NXT_UNIT_ERROR; + break; + } + + retry: + rc = nxt_unit_shared_port_recv(ctx, lib->shared_port, rbuf); + if (rc == NXT_UNIT_AGAIN) { + goto retry; + } + + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { + nxt_unit_read_buf_release(ctx, rbuf); + break; + } + + rc = nxt_unit_process_msg(ctx, rbuf); + if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { + break; + } + + rc = nxt_unit_process_pending_rbuf(ctx); if (nxt_slow_path(rc == NXT_UNIT_ERROR)) { break; } + + nxt_unit_process_ready_req(ctx); } nxt_unit_ctx_release(ctx); @@ -4499,6 +4795,7 @@ static int nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) { int rc; + nxt_unit_impl_t *lib; nxt_unit_read_buf_t *rbuf; rbuf = nxt_unit_read_buf_get(ctx); @@ -4506,10 +4803,18 @@ nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) return NXT_UNIT_ERROR; } - memset(rbuf->oob, 0, sizeof(struct cmsghdr)); + lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - rc = nxt_unit_port_recv(ctx, port, rbuf); - if (nxt_slow_path(rc != NXT_UNIT_OK)) { 
+retry: + + if (port == lib->shared_port) { + rc = nxt_unit_shared_port_recv(ctx, port, rbuf); + + } else { + rc = nxt_unit_ctx_port_recv(ctx, port, rbuf); + } + + if (rc != NXT_UNIT_OK) { nxt_unit_read_buf_release(ctx, rbuf); return rc; } @@ -4526,6 +4831,15 @@ nxt_unit_process_port_msg_impl(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) nxt_unit_process_ready_req(ctx); + rbuf = nxt_unit_read_buf_get(ctx); + if (nxt_slow_path(rbuf == NULL)) { + return NXT_UNIT_ERROR; + } + + if (lib->online) { + goto retry; + } + return rc; } @@ -4540,10 +4854,12 @@ nxt_unit_done(nxt_unit_ctx_t *ctx) nxt_unit_ctx_t * nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data) { - int rc; - nxt_unit_impl_t *lib; - nxt_unit_port_t *port; - nxt_unit_ctx_impl_t *new_ctx; + int rc, queue_fd; + void *mem; + nxt_unit_impl_t *lib; + nxt_unit_port_t *port; + nxt_unit_ctx_impl_t *new_ctx; + nxt_unit_port_impl_t *port_impl; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); @@ -4554,33 +4870,57 @@ nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data) return NULL; } + rc = nxt_unit_ctx_init(lib, new_ctx, data); + if (nxt_slow_path(rc != NXT_UNIT_OK)) { + free(new_ctx); + + return NULL; + } + + queue_fd = -1; + port = nxt_unit_create_port(ctx); if (nxt_slow_path(port == NULL)) { - free(new_ctx); + goto fail; + } - return NULL; + new_ctx->read_port = port; + + queue_fd = nxt_unit_shm_open(ctx, sizeof(nxt_port_queue_t)); + if (nxt_slow_path(queue_fd == -1)) { + goto fail; } - rc = nxt_unit_send_port(ctx, lib->router_port, port); - if (nxt_slow_path(rc != NXT_UNIT_OK)) { + mem = mmap(NULL, sizeof(nxt_port_queue_t), + PROT_READ | PROT_WRITE, MAP_SHARED, queue_fd, 0); + if (nxt_slow_path(mem == MAP_FAILED)) { + nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", queue_fd, + strerror(errno), errno); + goto fail; } - rc = nxt_unit_ctx_init(lib, new_ctx, data); + nxt_port_queue_init(mem); + + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); + port_impl->queue = mem; + + rc = nxt_unit_send_port(ctx, lib->router_port, port, queue_fd); if (nxt_slow_path(rc != NXT_UNIT_OK)) { goto fail; } - new_ctx->read_port = port; + close(queue_fd); return &new_ctx->ctx; fail: - nxt_unit_remove_port(lib, &port->id); - nxt_unit_port_release(port); + if (queue_fd != -1) { + close(queue_fd); + } - free(new_ctx); + nxt_unit_ctx_release(&new_ctx->ctx); return NULL; } @@ -4633,6 +4973,7 @@ nxt_unit_ctx_free(nxt_unit_ctx_impl_t *ctx_impl) nxt_queue_remove(&ctx_impl->link); if (nxt_fast_path(ctx_impl->read_port != NULL)) { + nxt_unit_remove_port(lib, &ctx_impl->read_port->id); nxt_unit_port_release(ctx_impl->read_port); } @@ -4709,10 +5050,8 @@ nxt_unit_create_port(nxt_unit_ctx_t *ctx) nxt_unit_process_release(process); - port = nxt_unit_add_port(ctx, &new_port); + port = nxt_unit_add_port(ctx, &new_port, NULL); if (nxt_slow_path(port == NULL)) { - nxt_unit_alert(ctx, "create_port: add_port() failed"); - close(port_sockets[0]); close(port_sockets[1]); } @@ -4723,10 +5062,11 @@ nxt_unit_create_port(nxt_unit_ctx_t *ctx) static int nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst, - nxt_unit_port_t *port) + nxt_unit_port_t *port, int queue_fd) { ssize_t res; nxt_unit_impl_t *lib; + int fds[2] = { port->out_fd, queue_fd }; struct { nxt_port_msg_t msg; @@ -4735,7 +5075,7 @@ nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst, union { struct cmsghdr cm; - char space[CMSG_SPACE(sizeof(int))]; + char space[CMSG_SPACE(sizeof(int) * 2)]; } cmsg; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); @@ -4758,7 +5098,7 @@ 
nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst, memset(&cmsg, 0, sizeof(cmsg)); - cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int)); + cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int) * 2); cmsg.cm.cmsg_level = SOL_SOCKET; cmsg.cm.cmsg_type = SCM_RIGHTS; @@ -4771,7 +5111,7 @@ nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *dst, * Fortunately, GCC with -O1 compiles this nxt_memcpy() * in the same simple assignment as in the code above. */ - memcpy(CMSG_DATA(&cmsg.cm), &port->out_fd, sizeof(int)); + memcpy(CMSG_DATA(&cmsg.cm), fds, sizeof(int) * 2); res = nxt_unit_port_send(ctx, dst, &m, sizeof(m), &cmsg, sizeof(cmsg)); @@ -4799,7 +5139,7 @@ nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port) c = nxt_atomic_fetch_add(&port_impl->use_count, -1); if (c == 1) { - nxt_unit_debug(NULL, "destroy port %d,%d", + nxt_unit_debug(NULL, "destroy port{%d,%d}", (int) port->id.pid, (int) port->id.id); nxt_unit_process_release(port_impl->process); @@ -4816,13 +5156,31 @@ nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port) port->out_fd = -1; } + if (port->in_fd != -1) { + close(port->in_fd); + + port->in_fd = -1; + } + + if (port->out_fd != -1) { + close(port->out_fd); + + port->out_fd = -1; + } + + if (port_impl->queue != NULL) { + munmap(port_impl->queue, (port->id.id == (nxt_port_id_t) -1) + ? sizeof(nxt_app_queue_t) + : sizeof(nxt_port_queue_t)); + } + free(port_impl); } } static nxt_unit_port_t * -nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) +nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, void *queue) { int rc; nxt_queue_t awaiting_req; @@ -4840,9 +5198,10 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) old_port = nxt_unit_port_hash_find(&lib->ports, &port->id, 0); if (nxt_slow_path(old_port != NULL)) { - nxt_unit_debug(ctx, "add_port: duplicate %d,%d in_fd %d out_fd %d", - port->id.pid, port->id.id, - port->in_fd, port->out_fd); + nxt_unit_debug(ctx, "add_port: duplicate port{%d,%d} " + "in_fd %d out_fd %d queue %p", + port->id.pid, port->id.id, + port->in_fd, port->out_fd, queue); if (old_port->data == NULL) { old_port->data = port->data; @@ -4875,6 +5234,10 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) old_port_impl = nxt_container_of(old_port, nxt_unit_port_impl_t, port); + if (old_port_impl->queue == NULL) { + old_port_impl->queue = queue; + } + if (!nxt_queue_is_empty(&old_port_impl->awaiting_req)) { nxt_queue_add(&awaiting_req, &old_port_impl->awaiting_req); nxt_queue_init(&old_port_impl->awaiting_req); @@ -4914,9 +5277,9 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) new_port = NULL; - nxt_unit_debug(ctx, "add_port: %d,%d in_fd %d out_fd %d", + nxt_unit_debug(ctx, "add_port: port{%d,%d} in_fd %d out_fd %d queue %p", port->id.pid, port->id.id, - port->in_fd, port->out_fd); + port->in_fd, port->out_fd, queue); process = nxt_unit_process_get(lib, port->id.pid); if (nxt_slow_path(process == NULL)) { @@ -4929,6 +5292,9 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) new_port = malloc(sizeof(nxt_unit_port_impl_t)); if (nxt_slow_path(new_port == NULL)) { + nxt_unit_alert(ctx, "add_port: %d,%d malloc() failed", + port->id.pid, port->id.id); + goto unlock; } @@ -4951,6 +5317,9 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port) new_port->use_count = 2; new_port->process = process; new_port->ready = (port->in_fd != -1 || port->out_fd != -1); + new_port->queue = queue; + new_port->from_socket = 0; + new_port->socket_rbuf = NULL; nxt_queue_init(&new_port->awaiting_req); 
@@ -5010,13 +5379,13 @@ nxt_unit_remove_port_unsafe(nxt_unit_impl_t *lib, nxt_unit_port_id_t *port_id) port = nxt_unit_port_hash_find(&lib->ports, port_id, 1); if (nxt_slow_path(port == NULL)) { - nxt_unit_debug(NULL, "remove_port: port %d,%d not found", + nxt_unit_debug(NULL, "remove_port: port{%d,%d} not found", (int) port_id->pid, (int) port_id->id); return NULL; } - nxt_unit_debug(NULL, "remove_port: port %d,%d, fds %d,%d, data %p", + nxt_unit_debug(NULL, "remove_port: port{%d,%d}, fds %d,%d, data %p", (int) port_id->pid, (int) port_id->id, port->in_fd, port->out_fd, port->data); @@ -5089,10 +5458,12 @@ nxt_unit_quit(nxt_unit_ctx_t *ctx) lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - lib->online = 0; + if (lib->online) { + lib->online = 0; - if (lib->callbacks.quit != NULL) { - lib->callbacks.quit(ctx); + if (lib->callbacks.quit != NULL) { + lib->callbacks.quit(ctx); + } } } @@ -5137,20 +5508,91 @@ static ssize_t nxt_unit_port_send(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, const void *buf, size_t buf_size, const void *oob, size_t oob_size) { - nxt_unit_impl_t *lib; - - nxt_unit_debug(ctx, "port_send: port %d,%d fd %d", - (int) port->id.pid, (int) port->id.id, port->out_fd); + int notify; + ssize_t ret; + nxt_int_t rc; + nxt_port_msg_t msg; + nxt_unit_impl_t *lib; + nxt_unit_port_impl_t *port_impl; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); + if (port_impl->queue != NULL && oob_size == 0 + && buf_size <= NXT_PORT_QUEUE_MSG_SIZE) + { + rc = nxt_port_queue_send(port_impl->queue, buf, buf_size, ¬ify); + if (nxt_slow_path(rc != NXT_OK)) { + nxt_unit_alert(ctx, "port_send: port %d,%d queue overflow", + (int) port->id.pid, (int) port->id.id); + + return -1; + } + + nxt_unit_debug(ctx, "port{%d,%d} enqueue %d notify %d", + (int) port->id.pid, (int) port->id.id, + (int) buf_size, notify); + + if (notify) { + memcpy(&msg, buf, sizeof(nxt_port_msg_t)); + + msg.type = _NXT_PORT_MSG_READ_QUEUE; + + if (lib->callbacks.port_send == NULL) { + ret = nxt_unit_sendmsg(ctx, port->out_fd, &msg, + sizeof(nxt_port_msg_t), NULL, 0); + + nxt_unit_debug(ctx, "port{%d,%d} send %d read_queue", + (int) port->id.pid, (int) port->id.id, + (int) ret); + + } else { + ret = lib->callbacks.port_send(ctx, port, &msg, + sizeof(nxt_port_msg_t), NULL, 0); + + nxt_unit_debug(ctx, "port{%d,%d} sendcb %d read_queue", + (int) port->id.pid, (int) port->id.id, + (int) ret); + } + + } + + return buf_size; + } + + if (port_impl->queue != NULL) { + msg.type = _NXT_PORT_MSG_READ_SOCKET; + + rc = nxt_port_queue_send(port_impl->queue, &msg.type, 1, ¬ify); + if (nxt_slow_path(rc != NXT_OK)) { + nxt_unit_alert(ctx, "port_send: port %d,%d queue overflow", + (int) port->id.pid, (int) port->id.id); + + return -1; + } + + nxt_unit_debug(ctx, "port{%d,%d} enqueue 1 read_socket notify %d", + (int) port->id.pid, (int) port->id.id, notify); + } + if (lib->callbacks.port_send != NULL) { - return lib->callbacks.port_send(ctx, port, buf, buf_size, - oob, oob_size); + ret = lib->callbacks.port_send(ctx, port, buf, buf_size, + oob, oob_size); + + nxt_unit_debug(ctx, "port{%d,%d} sendcb %d", + (int) port->id.pid, (int) port->id.id, + (int) ret); + + } else { + ret = nxt_unit_sendmsg(ctx, port->out_fd, buf, buf_size, + oob, oob_size); + + nxt_unit_debug(ctx, "port{%d,%d} sendmsg %d", + (int) port->id.pid, (int) port->id.id, + (int) ret); } - return nxt_unit_sendmsg(ctx, port->out_fd, buf, buf_size, - oob, oob_size); + return ret; } @@ -5158,6 +5600,7 @@ 
static ssize_t nxt_unit_sendmsg(nxt_unit_ctx_t *ctx, int fd, const void *buf, size_t buf_size, const void *oob, size_t oob_size) { + int err; ssize_t res; struct iovec iov[1]; struct msghdr msg; @@ -5178,7 +5621,9 @@ retry: res = sendmsg(fd, &msg, 0); if (nxt_slow_path(res == -1)) { - if (errno == EINTR) { + err = errno; + + if (err == EINTR) { goto retry; } @@ -5187,7 +5632,7 @@ retry: * implementation. */ nxt_unit_warn(ctx, "sendmsg(%d, %d) failed: %s (%d)", - fd, (int) buf_size, strerror(errno), errno); + fd, (int) buf_size, strerror(err), err); } else { nxt_unit_debug(ctx, "sendmsg(%d, %d): %d", fd, (int) buf_size, @@ -5198,6 +5643,158 @@ retry: } +static int +nxt_unit_ctx_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, + nxt_unit_read_buf_t *rbuf) +{ + int res, read; + nxt_unit_port_impl_t *port_impl; + + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); + + read = 0; + +retry: + + if (port_impl->from_socket > 0) { + if (port_impl->socket_rbuf != NULL + && port_impl->socket_rbuf->size > 0) + { + port_impl->from_socket--; + + nxt_unit_rbuf_cpy(rbuf, port_impl->socket_rbuf); + port_impl->socket_rbuf->size = 0; + + nxt_unit_debug(ctx, "port{%d,%d} use suspended message %d", + (int) port->id.pid, (int) port->id.id, + (int) rbuf->size); + + return NXT_UNIT_OK; + } + + } else { + res = nxt_unit_port_queue_recv(port, rbuf); + + if (res == NXT_UNIT_OK) { + if (nxt_unit_is_read_socket(rbuf)) { + port_impl->from_socket++; + + nxt_unit_debug(ctx, "port{%d,%d} dequeue 1 read_socket %d", + (int) port->id.pid, (int) port->id.id, + port_impl->from_socket); + + goto retry; + } + + nxt_unit_debug(ctx, "port{%d,%d} dequeue %d", + (int) port->id.pid, (int) port->id.id, + (int) rbuf->size); + + return NXT_UNIT_OK; + } + } + + if (read) { + return NXT_UNIT_AGAIN; + } + + res = nxt_unit_port_recv(ctx, port, rbuf); + if (nxt_slow_path(res == NXT_UNIT_ERROR)) { + return NXT_UNIT_ERROR; + } + + read = 1; + + if (nxt_unit_is_read_queue(rbuf)) { + nxt_unit_debug(ctx, "port{%d,%d} recv %d read_queue", + (int) port->id.pid, (int) port->id.id, (int) rbuf->size); + + if (port_impl->from_socket) { + nxt_unit_warn(ctx, "port protocol warning: READ_QUEUE after READ_SOCKET"); + } + + goto retry; + } + + nxt_unit_debug(ctx, "port{%d,%d} recvmsg %d", + (int) port->id.pid, (int) port->id.id, + (int) rbuf->size); + + if (res == NXT_UNIT_AGAIN) { + return NXT_UNIT_AGAIN; + } + + if (port_impl->from_socket > 0) { + port_impl->from_socket--; + + return NXT_UNIT_OK; + } + + nxt_unit_debug(ctx, "port{%d,%d} suspend message %d", + (int) port->id.pid, (int) port->id.id, + (int) rbuf->size); + + if (port_impl->socket_rbuf == NULL) { + port_impl->socket_rbuf = nxt_unit_read_buf_get(ctx); + + if (nxt_slow_path(port_impl->socket_rbuf == NULL)) { + return NXT_UNIT_ERROR; + } + + port_impl->socket_rbuf->size = 0; + } + + if (port_impl->socket_rbuf->size > 0) { + nxt_unit_alert(ctx, "too many port socket messages"); + + return NXT_UNIT_ERROR; + } + + nxt_unit_rbuf_cpy(port_impl->socket_rbuf, rbuf); + + memset(rbuf->oob, 0, sizeof(struct cmsghdr)); + + goto retry; +} + + +nxt_inline void +nxt_unit_rbuf_cpy(nxt_unit_read_buf_t *dst, nxt_unit_read_buf_t *src) +{ + memcpy(dst->buf, src->buf, src->size); + dst->size = src->size; + memcpy(dst->oob, src->oob, sizeof(src->oob)); +} + + +static int +nxt_unit_shared_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, + nxt_unit_read_buf_t *rbuf) +{ + int res; + +retry: + + res = nxt_unit_app_queue_recv(port, rbuf); + + if (res == NXT_UNIT_AGAIN) { + res = 
nxt_unit_port_recv(ctx, port, rbuf); + if (nxt_slow_path(res == NXT_UNIT_ERROR)) { + return NXT_UNIT_ERROR; + } + + if (nxt_unit_is_read_queue(rbuf)) { + nxt_unit_debug(ctx, "port{%d,%d} recv %d read_queue", + (int) port->id.pid, (int) port->id.id, (int) rbuf->size); + + goto retry; + } + } + + return res; +} + + static int nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf) @@ -5214,6 +5811,9 @@ nxt_unit_port_recv(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, rbuf->buf, sizeof(rbuf->buf), rbuf->oob, sizeof(rbuf->oob)); + nxt_unit_debug(ctx, "port{%d,%d} recvcb %d", + (int) port->id.pid, (int) port->id.id, (int) rbuf->size); + if (nxt_slow_path(rbuf->size < 0)) { return NXT_UNIT_ERROR; } @@ -5247,13 +5847,13 @@ retry: if (err == EAGAIN) { nxt_unit_debug(ctx, "recvmsg(%d) failed: %s (%d)", - fd, strerror(errno), errno); + fd, strerror(err), err); return NXT_UNIT_AGAIN; } nxt_unit_alert(ctx, "recvmsg(%d) failed: %s (%d)", - fd, strerror(errno), errno); + fd, strerror(err), err); return NXT_UNIT_ERROR; } @@ -5264,6 +5864,52 @@ retry: } +static int +nxt_unit_port_queue_recv(nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf) +{ + nxt_unit_port_impl_t *port_impl; + + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); + + rbuf->size = nxt_port_queue_recv(port_impl->queue, rbuf->buf); + + return (rbuf->size == -1) ? NXT_UNIT_AGAIN : NXT_UNIT_OK; +} + + +static int +nxt_unit_app_queue_recv(nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf) +{ + uint32_t cookie; + nxt_port_msg_t *port_msg; + nxt_app_queue_t *queue; + nxt_unit_port_impl_t *port_impl; + + port_impl = nxt_container_of(port, nxt_unit_port_impl_t, port); + queue = port_impl->queue; + +retry: + + rbuf->size = nxt_app_queue_recv(queue, rbuf->buf, &cookie); + + nxt_unit_debug(NULL, "app_queue_recv: %d", (int) rbuf->size); + + if (rbuf->size >= (ssize_t) sizeof(nxt_port_msg_t)) { + port_msg = (nxt_port_msg_t *) rbuf->buf; + + if (nxt_app_queue_cancel(queue, cookie, port_msg->stream)) { + return NXT_UNIT_OK; + } + + nxt_unit_debug(NULL, "app_queue_recv: message cancelled"); + + goto retry; + } + + return (rbuf->size == -1) ? 
NXT_UNIT_AGAIN : NXT_UNIT_OK; +} + + static nxt_int_t nxt_unit_port_hash_test(nxt_lvlhsh_query_t *lhq, void *data) { @@ -5392,12 +6038,19 @@ static const nxt_lvlhsh_proto_t lvlhsh_requests_proto nxt_aligned(64) = { static int -nxt_unit_request_hash_add(nxt_lvlhsh_t *request_hash, - nxt_unit_request_info_impl_t *req_impl) +nxt_unit_request_hash_add(nxt_unit_ctx_t *ctx, + nxt_unit_request_info_t *req) { - uint32_t *stream; - nxt_int_t res; - nxt_lvlhsh_query_t lhq; + uint32_t *stream; + nxt_int_t res; + nxt_lvlhsh_query_t lhq; + nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_request_info_impl_t *req_impl; + + req_impl = nxt_container_of(req, nxt_unit_request_info_impl_t, req); + if (req_impl->in_hash) { + return NXT_UNIT_OK; + } stream = &req_impl->stream; @@ -5409,11 +6062,18 @@ nxt_unit_request_hash_add(nxt_lvlhsh_t *request_hash, lhq.replace = 0; lhq.value = req_impl; - res = nxt_lvlhsh_insert(request_hash, &lhq); + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + + pthread_mutex_lock(&ctx_impl->mutex); + + res = nxt_lvlhsh_insert(&ctx_impl->requests, &lhq); + + pthread_mutex_unlock(&ctx_impl->mutex); switch (res) { case NXT_OK: + req_impl->in_hash = 1; return NXT_UNIT_OK; default: @@ -5422,12 +6082,13 @@ nxt_unit_request_hash_add(nxt_lvlhsh_t *request_hash, } -static nxt_unit_request_info_impl_t * -nxt_unit_request_hash_find(nxt_lvlhsh_t *request_hash, uint32_t stream, - int remove) +static nxt_unit_request_info_t * +nxt_unit_request_hash_find(nxt_unit_ctx_t *ctx, uint32_t stream, int remove) { - nxt_int_t res; - nxt_lvlhsh_query_t lhq; + nxt_int_t res; + nxt_lvlhsh_query_t lhq; + nxt_unit_ctx_impl_t *ctx_impl; + nxt_unit_request_info_impl_t *req_impl; lhq.key_hash = nxt_murmur_hash2(&stream, sizeof(stream)); lhq.key.length = sizeof(stream); @@ -5435,16 +6096,26 @@ nxt_unit_request_hash_find(nxt_lvlhsh_t *request_hash, uint32_t stream, lhq.proto = &lvlhsh_requests_proto; lhq.pool = NULL; + ctx_impl = nxt_container_of(ctx, nxt_unit_ctx_impl_t, ctx); + + pthread_mutex_lock(&ctx_impl->mutex); + if (remove) { - res = nxt_lvlhsh_delete(request_hash, &lhq); + res = nxt_lvlhsh_delete(&ctx_impl->requests, &lhq); } else { - res = nxt_lvlhsh_find(request_hash, &lhq); + res = nxt_lvlhsh_find(&ctx_impl->requests, &lhq); } + pthread_mutex_unlock(&ctx_impl->mutex); + switch (res) { case NXT_OK: + req_impl = nxt_container_of(lhq.value, nxt_unit_request_info_impl_t, + req); + req_impl->in_hash = 0; + return lhq.value; default: diff --git a/src/nxt_unit.h b/src/nxt_unit.h index 0f16773f..67244cf4 100644 --- a/src/nxt_unit.h +++ b/src/nxt_unit.h @@ -121,6 +121,8 @@ struct nxt_unit_callbacks_s { */ void (*request_handler)(nxt_unit_request_info_t *req); + void (*data_handler)(nxt_unit_request_info_t *req); + /* Process websocket frame. */ void (*websocket_handler)(nxt_unit_websocket_frame_t *ws); -- cgit From 8cf522bf2d8c2d3bcef88ab86c93e06c6afcb6ae Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 19:20:36 +0300 Subject: Wrapping close() call in libunit for logging. 
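The wrapper introduced here is small: it forwards to close() and reports a failing descriptor through the library's alert log, with a debug trace on success. Below is a minimal standalone sketch of the same pattern, using fprintf() to stderr in place of the nxt_unit_alert()/nxt_unit_debug() macros; the helper name log_close() is illustrative only, the actual nxt_unit_close() is in the diff that follows.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Close a descriptor and log the outcome; errno is preserved in the
 * message so a failed close() is visible in the application log. */
static int
log_close(int fd)
{
    int  res;

    res = close(fd);

    if (res == -1) {
        fprintf(stderr, "close(%d) failed: %s (%d)\n",
                fd, strerror(errno), errno);

    } else {
        fprintf(stderr, "close(%d): ok\n", fd);
    }

    return res;
}
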
--- src/nxt_unit.c | 79 ++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 44 insertions(+), 35 deletions(-) diff --git a/src/nxt_unit.c b/src/nxt_unit.c index 1008a9d6..990c789c 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -170,6 +170,7 @@ static int nxt_unit_port_queue_recv(nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf); static int nxt_unit_app_queue_recv(nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf); +nxt_inline int nxt_unit_close(int fd); static int nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port); @@ -490,15 +491,15 @@ nxt_unit_init(nxt_unit_init_t *init) goto fail; } - close(ready_port.out_fd); - close(queue_fd); + nxt_unit_close(ready_port.out_fd); + nxt_unit_close(queue_fd); return ctx; fail: if (queue_fd != -1) { - close(queue_fd); + nxt_unit_close(queue_fd); } nxt_unit_ctx_release(&lib->main_ctx.ctx); @@ -1038,11 +1039,11 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) fail: if (recv_msg.fd != -1) { - close(recv_msg.fd); + nxt_unit_close(recv_msg.fd); } if (recv_msg.fd2 != -1) { - close(recv_msg.fd2); + nxt_unit_close(recv_msg.fd2); } while (recv_msg.incoming_buf != NULL) { @@ -1671,7 +1672,7 @@ nxt_unit_request_info_release(nxt_unit_request_info_t *req) } if (req->content_fd != -1) { - close(req->content_fd); + nxt_unit_close(req->content_fd); req->content_fd = -1; } @@ -2911,7 +2912,7 @@ nxt_unit_request_read(nxt_unit_request_info_t *req, void *dst, size_t size) } if (res < (ssize_t) size) { - close(req->content_fd); + nxt_unit_close(req->content_fd); req->content_fd = -1; } @@ -3023,7 +3024,7 @@ nxt_unit_request_preread(nxt_unit_request_info_t *req, size_t size) } if (res < (ssize_t) size) { - close(req->content_fd); + nxt_unit_close(req->content_fd); req->content_fd = -1; } @@ -3581,7 +3582,7 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", fd, strerror(errno), errno); - close(fd); + nxt_unit_close(fd); goto remove_fail; } @@ -3618,7 +3619,7 @@ nxt_unit_new_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, int n) hdr->id, (int) lib->pid, (int) port->id.pid); } - close(fd); + nxt_unit_close(fd); pthread_mutex_lock(&lib->outgoing.mutex); @@ -3699,7 +3700,7 @@ nxt_unit_shm_open(nxt_unit_ctx_t *ctx, size_t size) nxt_unit_alert(ctx, "ftruncate(%d) failed: %s (%d)", fd, strerror(errno), errno); - close(fd); + nxt_unit_close(fd); return -1; } @@ -4910,14 +4911,14 @@ nxt_unit_ctx_alloc(nxt_unit_ctx_t *ctx, void *data) goto fail; } - close(queue_fd); + nxt_unit_close(queue_fd); return &new_ctx->ctx; fail: if (queue_fd != -1) { - close(queue_fd); + nxt_unit_close(queue_fd); } nxt_unit_ctx_release(&new_ctx->ctx); @@ -5034,8 +5035,8 @@ nxt_unit_create_port(nxt_unit_ctx_t *ctx) if (nxt_slow_path(process == NULL)) { pthread_mutex_unlock(&lib->mutex); - close(port_sockets[0]); - close(port_sockets[1]); + nxt_unit_close(port_sockets[0]); + nxt_unit_close(port_sockets[1]); return NULL; } @@ -5052,8 +5053,8 @@ nxt_unit_create_port(nxt_unit_ctx_t *ctx) port = nxt_unit_add_port(ctx, &new_port, NULL); if (nxt_slow_path(port == NULL)) { - close(port_sockets[0]); - close(port_sockets[1]); + nxt_unit_close(port_sockets[0]); + nxt_unit_close(port_sockets[1]); } return port; @@ -5139,31 +5140,20 @@ nxt_inline void nxt_unit_port_release(nxt_unit_port_t *port) c = nxt_atomic_fetch_add(&port_impl->use_count, -1); if (c == 1) { - nxt_unit_debug(NULL, "destroy port{%d,%d}", - (int) port->id.pid, (int) port->id.id); + nxt_unit_debug(NULL, "destroy 
port{%d,%d} in_fd %d out_fd %d", + (int) port->id.pid, (int) port->id.id, + port->in_fd, port->out_fd); nxt_unit_process_release(port_impl->process); if (port->in_fd != -1) { - close(port->in_fd); + nxt_unit_close(port->in_fd); port->in_fd = -1; } if (port->out_fd != -1) { - close(port->out_fd); - - port->out_fd = -1; - } - - if (port->in_fd != -1) { - close(port->in_fd); - - port->in_fd = -1; - } - - if (port->out_fd != -1) { - close(port->out_fd); + nxt_unit_close(port->out_fd); port->out_fd = -1; } @@ -5214,7 +5204,7 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, void *queue) } if (port->in_fd != -1) { - close(port->in_fd); + nxt_unit_close(port->in_fd); port->in_fd = -1; } @@ -5224,7 +5214,7 @@ nxt_unit_add_port(nxt_unit_ctx_t *ctx, nxt_unit_port_t *port, void *queue) } if (port->out_fd != -1) { - close(port->out_fd); + nxt_unit_close(port->out_fd); port->out_fd = -1; } @@ -5910,6 +5900,25 @@ retry: } +nxt_inline int +nxt_unit_close(int fd) +{ + int res; + + res = close(fd); + + if (nxt_slow_path(res == -1)) { + nxt_unit_alert(NULL, "close(%d) failed: %s (%d)", + fd, strerror(errno), errno); + + } else { + nxt_unit_debug(NULL, "close(%d): %d", fd, res); + } + + return res; +} + + static nxt_int_t nxt_unit_port_hash_test(nxt_lvlhsh_query_t *lhq, void *data) { -- cgit From acb0cca49def92563d9b221d818b541b60e30eaa Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 21:48:16 +0300 Subject: Moving file descriptor blocking to libunit. The default libunit behavior relies on blocking the recv() call for port file descriptors, which an application may override if needed. For external applications, port file descriptors were toggled to blocking mode before the exec() call. If the exec() call failed, descriptor remained blocked, so the process hanged while trying to read from it. This patch moves file descriptor mode switch inside libunit. 
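For context, the mode switch itself is the FIONBIO ioctl: a non-zero integer argument enables non-blocking I/O on the descriptor, zero restores blocking reads. A minimal sketch of that switch with plain stderr logging follows; the helper name fd_blocking() is illustrative only, the patch adds the equivalent nxt_unit_fd_blocking() shown in the diff below.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Put a descriptor back into blocking mode so recv()/read() on it
 * will wait for data instead of returning EAGAIN. */
static int
fd_blocking(int fd)
{
    int  nb;

    nb = 0;    /* 0 disables non-blocking mode */

    if (ioctl(fd, FIONBIO, &nb) == -1) {
        fprintf(stderr, "ioctl(%d, FIONBIO, 0) failed: %s (%d)\n",
                fd, strerror(errno), errno);

        return -1;
    }

    return 0;
}
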
--- src/nxt_application.c | 6 ------ src/nxt_external.c | 2 -- src/nxt_unit.c | 50 +++++++++++++++++++++++++++++++++++++++----------- 3 files changed, 39 insertions(+), 19 deletions(-) diff --git a/src/nxt_application.c b/src/nxt_application.c index 372a88b4..57e4615e 100644 --- a/src/nxt_application.c +++ b/src/nxt_application.c @@ -1290,8 +1290,6 @@ nxt_unit_default_init(nxt_task_t *task, nxt_unit_init_t *init) init->ready_port.in_fd = -1; init->ready_port.out_fd = main_port->pair[1]; - nxt_fd_blocking(task, main_port->pair[1]); - init->ready_stream = my_port->process->stream; init->router_port.id.pid = router_port->pid; @@ -1299,15 +1297,11 @@ nxt_unit_default_init(nxt_task_t *task, nxt_unit_init_t *init) init->router_port.in_fd = -1; init->router_port.out_fd = router_port->pair[1]; - nxt_fd_blocking(task, router_port->pair[1]); - init->read_port.id.pid = my_port->pid; init->read_port.id.id = my_port->id; init->read_port.in_fd = my_port->pair[0]; init->read_port.out_fd = -1; - nxt_fd_blocking(task, my_port->pair[0]); - init->log_fd = 2; return NXT_OK; diff --git a/src/nxt_external.c b/src/nxt_external.c index 2471c812..1adb839c 100644 --- a/src/nxt_external.c +++ b/src/nxt_external.c @@ -52,8 +52,6 @@ nxt_external_fd_no_cloexec(nxt_task_t *task, nxt_socket_t fd) return NXT_ERROR; } - nxt_fd_blocking(task, fd); - return NXT_OK; } diff --git a/src/nxt_unit.c b/src/nxt_unit.c index 990c789c..b063058f 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -171,6 +171,7 @@ static int nxt_unit_port_queue_recv(nxt_unit_port_t *port, static int nxt_unit_app_queue_recv(nxt_unit_port_t *port, nxt_unit_read_buf_t *rbuf); nxt_inline int nxt_unit_close(int fd); +static int nxt_unit_fd_blocking(int fd); static int nxt_unit_port_hash_add(nxt_lvlhsh_t *port_hash, nxt_unit_port_t *port); @@ -413,6 +414,7 @@ nxt_unit_init(nxt_unit_init_t *init) } queue_fd = -1; + mem = MAP_FAILED; if (init->ready_port.id.pid != 0 && init->ready_stream != 0 @@ -450,6 +452,11 @@ nxt_unit_init(nxt_unit_init_t *init) ctx = &lib->main_ctx.ctx; + rc = nxt_unit_fd_blocking(router_port.out_fd); + if (nxt_slow_path(rc != NXT_UNIT_OK)) { + goto fail; + } + lib->router_port = nxt_unit_add_port(ctx, &router_port, NULL); if (nxt_slow_path(lib->router_port == NULL)) { nxt_unit_alert(NULL, "failed to add router_port"); @@ -473,12 +480,20 @@ nxt_unit_init(nxt_unit_init_t *init) nxt_port_queue_init(mem); + rc = nxt_unit_fd_blocking(read_port.in_fd); + if (nxt_slow_path(rc != NXT_UNIT_OK)) { + goto fail; + } + lib->main_ctx.read_port = nxt_unit_add_port(ctx, &read_port, mem); if (nxt_slow_path(lib->main_ctx.read_port == NULL)) { nxt_unit_alert(NULL, "failed to add read_port"); - munmap(mem, sizeof(nxt_port_queue_t)); + goto fail; + } + rc = nxt_unit_fd_blocking(ready_port.out_fd); + if (nxt_slow_path(rc != NXT_UNIT_OK)) { goto fail; } @@ -486,8 +501,6 @@ nxt_unit_init(nxt_unit_init_t *init) if (nxt_slow_path(rc != NXT_UNIT_OK)) { nxt_unit_alert(NULL, "failed to send READY message"); - munmap(mem, sizeof(nxt_port_queue_t)); - goto fail; } @@ -498,6 +511,10 @@ nxt_unit_init(nxt_unit_init_t *init) fail: + if (mem != MAP_FAILED) { + munmap(mem, sizeof(nxt_port_queue_t)); + } + if (queue_fd != -1) { nxt_unit_close(queue_fd); } @@ -1064,7 +1081,6 @@ fail: static int nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) { - int nb; void *mem; nxt_unit_impl_t *lib; nxt_unit_port_t new_port, *port; @@ -1103,13 +1119,7 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) MAP_SHARED, recv_msg->fd2, 
0); } else { - nb = 0; - - if (nxt_slow_path(ioctl(recv_msg->fd, FIONBIO, &nb) == -1)) { - nxt_unit_alert(ctx, "#%"PRIu32": new_port: ioctl(%d, FIONBIO, 0) " - "failed: %s (%d)", - recv_msg->stream, recv_msg->fd, strerror(errno), errno); - + if (nxt_slow_path(nxt_unit_fd_blocking(recv_msg->fd) != NXT_UNIT_OK)) { return NXT_UNIT_ERROR; } @@ -5919,6 +5929,24 @@ nxt_unit_close(int fd) } +static int +nxt_unit_fd_blocking(int fd) +{ + int nb; + + nb = 0; + + if (nxt_slow_path(ioctl(fd, FIONBIO, &nb) == -1)) { + nxt_unit_alert(NULL, "ioctl(%d, FIONBIO, 0) failed: %s (%d)", + fd, strerror(errno), errno); + + return NXT_UNIT_ERROR; + } + + return NXT_UNIT_OK; +} + + static nxt_int_t nxt_unit_port_hash_test(nxt_lvlhsh_query_t *lhq, void *data) { -- cgit From f147943f6382c0e90a216615ff9bcf57a3db8c75 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 21:48:27 +0300 Subject: Style fixes for 2 file descriptors transfer over port. Two consecutive fd and fd2 fields replaced with array. --- src/nxt_controller.c | 4 +-- src/nxt_port.c | 28 +++++++++--------- src/nxt_port.h | 6 ++-- src/nxt_port_rpc.c | 6 ++-- src/nxt_port_socket.c | 78 +++++++++++++++++++++++++++------------------------ src/nxt_router.c | 32 ++++++++++----------- src/nxt_unit.c | 71 +++++++++++++++++++++++----------------------- 7 files changed, 115 insertions(+), 110 deletions(-) diff --git a/src/nxt_controller.c b/src/nxt_controller.c index 8c9d4c53..9a34a877 100644 --- a/src/nxt_controller.c +++ b/src/nxt_controller.c @@ -1590,9 +1590,9 @@ nxt_controller_process_cert_save(nxt_task_t *task, nxt_port_recv_msg_t *msg, mbuf = &c->read->mem; - nxt_fd_write(msg->fd, mbuf->pos, nxt_buf_mem_used_size(mbuf)); + nxt_fd_write(msg->fd[0], mbuf->pos, nxt_buf_mem_used_size(mbuf)); - nxt_fd_close(msg->fd); + nxt_fd_close(msg->fd[0]); nxt_memzero(&resp, sizeof(nxt_controller_response_t)); diff --git a/src/nxt_port.c b/src/nxt_port.c index c9189d7c..dbcdec11 100644 --- a/src/nxt_port.c +++ b/src/nxt_port.c @@ -261,15 +261,15 @@ nxt_port_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) /* TODO check b size and make plain */ nxt_debug(task, "new port %d received for process %PI:%d", - msg->fd, new_port_msg->pid, new_port_msg->id); + msg->fd[0], new_port_msg->pid, new_port_msg->id); port = nxt_runtime_port_find(rt, new_port_msg->pid, new_port_msg->id); if (port != NULL) { nxt_debug(task, "port %PI:%d already exists", new_port_msg->pid, new_port_msg->id); - nxt_fd_close(msg->fd); - msg->fd = -1; + nxt_fd_close(msg->fd[0]); + msg->fd[0] = -1; return; } @@ -280,10 +280,10 @@ nxt_port_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) return; } - nxt_fd_nonblocking(task, msg->fd); + nxt_fd_nonblocking(task, msg->fd[0]); port->pair[0] = -1; - port->pair[1] = msg->fd; + port->pair[1] = msg->fd[0]; port->max_size = new_port_msg->max_size; port->max_share = new_port_msg->max_share; @@ -319,11 +319,11 @@ nxt_port_process_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) nxt_debug(task, "process %PI ready", msg->port_msg.pid); - if (msg->fd != -1) { - port->queue_fd = msg->fd; + if (msg->fd[0] != -1) { + port->queue_fd = msg->fd[0]; port->queue = nxt_mem_mmap(NULL, sizeof(nxt_port_queue_t), - PROT_READ | PROT_WRITE, MAP_SHARED, msg->fd, - 0); + PROT_READ | PROT_WRITE, MAP_SHARED, + msg->fd[0], 0); } nxt_port_send_new_port(task, rt, port, msg->port_msg.stream); @@ -338,7 +338,7 @@ nxt_port_mmap_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) rt = task->thread->runtime; - if (nxt_slow_path(msg->fd == -1)) { + if 
(nxt_slow_path(msg->fd[0] == -1)) { nxt_log(task, NXT_LOG_WARN, "invalid fd passed with mmap message"); return; @@ -352,11 +352,11 @@ nxt_port_mmap_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) goto fail_close; } - nxt_port_incoming_port_mmap(task, process, msg->fd); + nxt_port_incoming_port_mmap(task, process, msg->fd[0]); fail_close: - nxt_fd_close(msg->fd); + nxt_fd_close(msg->fd[0]); } @@ -409,14 +409,14 @@ nxt_port_change_log_file_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) log_file = nxt_list_elt(rt->log_files, slot); - nxt_debug(task, "change log file %FD:%FD", msg->fd, log_file->fd); + nxt_debug(task, "change log file %FD:%FD", msg->fd[0], log_file->fd); /* * The old log file descriptor must be closed at the moment when no * other threads use it. dup2() allows to use the old file descriptor * for new log file. This change is performed atomically in the kernel. */ - if (nxt_file_redirect(log_file, msg->fd) == NXT_OK) { + if (nxt_file_redirect(log_file, msg->fd[0]) == NXT_OK) { if (slot == 0) { (void) nxt_file_stderr(log_file); } diff --git a/src/nxt_port.h b/src/nxt_port.h index 9fbf00b1..3ac8c735 100644 --- a/src/nxt_port.h +++ b/src/nxt_port.h @@ -173,8 +173,7 @@ typedef struct { nxt_queue_link_t link; nxt_buf_t *buf; size_t share; - nxt_fd_t fd; - nxt_fd_t fd2; + nxt_fd_t fd[2]; nxt_port_msg_t port_msg; uint32_t tracking_msg[2]; uint8_t close_fd; /* 1 bit */ @@ -183,8 +182,7 @@ typedef struct { struct nxt_port_recv_msg_s { - nxt_fd_t fd; - nxt_fd_t fd2; + nxt_fd_t fd[2]; nxt_buf_t *buf; nxt_port_t *port; nxt_port_msg_t port_msg; diff --git a/src/nxt_port_rpc.c b/src/nxt_port_rpc.c index 37f2d902..f4008a18 100644 --- a/src/nxt_port_rpc.c +++ b/src/nxt_port_rpc.c @@ -389,7 +389,8 @@ nxt_port_rpc_remove_peer(nxt_task_t *task, nxt_port_t *port, nxt_pid_t peer) nxt_memzero(&msg, sizeof(msg)); nxt_memzero(&buf, sizeof(buf)); - msg.fd = -1; + msg.fd[0] = -1; + msg.fd[1] = -1; msg.buf = &buf; msg.port = port; @@ -500,7 +501,8 @@ nxt_port_rpc_close(nxt_task_t *task, nxt_port_t *port) return; } - msg.fd = -1; + msg.fd[0] = -1; + msg.fd[1] = -1; msg.buf = &nxt_port_close_dummy_buf; msg.port = port; msg.port_msg.stream = reg->stream; diff --git a/src/nxt_port_socket.c b/src/nxt_port_socket.c index 14e2e605..5ca2eb38 100644 --- a/src/nxt_port_socket.c +++ b/src/nxt_port_socket.c @@ -161,8 +161,8 @@ nxt_port_socket_write2(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type, msg.buf = b; msg.share = 0; - msg.fd = fd; - msg.fd2 = fd2; + msg.fd[0] = fd; + msg.fd[1] = fd2; msg.close_fd = (type & NXT_PORT_MSG_CLOSE_FD) != 0; msg.allocated = 0; @@ -365,7 +365,7 @@ next_fragment: msg->port_msg.last |= sb.last; msg->port_msg.mf = sb.limit_reached || sb.nmax_reached; - n = nxt_socketpair_send(&port->socket, &msg->fd, iov, sb.niov + 1); + n = nxt_socketpair_send(&port->socket, msg->fd, iov, sb.niov + 1); if (n > 0) { if (nxt_slow_path((size_t) n != sb.size + iov[0].iov_len)) { @@ -374,16 +374,18 @@ next_fragment: goto fail; } - if (msg->fd != -1 && msg->close_fd != 0) { - nxt_fd_close(msg->fd); + if (msg->close_fd) { + if (msg->fd[0] != -1) { + nxt_fd_close(msg->fd[0]); - msg->fd = -1; - } + msg->fd[0] = -1; + } - if (msg->fd2 != -1 && msg->close_fd != 0) { - nxt_fd_close(msg->fd2); + if (msg->fd[1] != -1) { + nxt_fd_close(msg->fd[1]); - msg->fd2 = -1; + msg->fd[1] = -1; + } } msg->buf = nxt_port_buf_completion(task, wq, msg->buf, plain_size, @@ -397,8 +399,8 @@ next_fragment: * A file descriptor is sent only * in the first message of a stream. 
*/ - msg->fd = -1; - msg->fd2 = -1; + msg->fd[0] = -1; + msg->fd[1] = -1; msg->share += n; msg->port_msg.nf = 1; @@ -654,7 +656,7 @@ nxt_port_read_handler(nxt_task_t *task, void *obj, void *data) iov[1].iov_base = b->mem.pos; iov[1].iov_len = port->max_size; - n = nxt_socketpair_recv(&port->socket, &msg.fd, iov, 2); + n = nxt_socketpair_recv(&port->socket, msg.fd, iov, 2); if (n > 0) { @@ -750,8 +752,8 @@ nxt_port_queue_read_handler(nxt_task_t *task, void *obj, void *data) msg.port_msg = smsg->port_msg; b = smsg->buf; n = smsg->size; - msg.fd = smsg->fd; - msg.fd2 = smsg->fd2; + msg.fd[0] = smsg->fd[0]; + msg.fd[1] = smsg->fd[1]; smsg->size = 0; @@ -793,7 +795,7 @@ nxt_port_queue_read_handler(nxt_task_t *task, void *obj, void *data) iov[1].iov_base = b->mem.pos; iov[1].iov_len = port->max_size; - n = nxt_socketpair_recv(&port->socket, &msg.fd, iov, 2); + n = nxt_socketpair_recv(&port->socket, msg.fd, iov, 2); if (n == (ssize_t) sizeof(nxt_port_msg_t) && msg.port_msg.type == _NXT_PORT_MSG_READ_QUEUE) @@ -848,8 +850,8 @@ nxt_port_queue_read_handler(nxt_task_t *task, void *obj, void *data) smsg->port_msg = msg.port_msg; smsg->buf = b; smsg->size = n; - smsg->fd = msg.fd; - smsg->fd2 = msg.fd2; + smsg->fd[0] = msg.fd[0]; + smsg->fd[1] = msg.fd[1]; continue; } @@ -1048,12 +1050,12 @@ nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port, nxt_alert(task, "port %d: too small message:%uz", port->socket.fd, msg->size); - if (msg->fd != -1) { - nxt_fd_close(msg->fd); + if (msg->fd[0] != -1) { + nxt_fd_close(msg->fd[0]); } - if (msg->fd2 != -1) { - nxt_fd_close(msg->fd2); + if (msg->fd[1] != -1) { + nxt_fd_close(msg->fd[1]); } return; @@ -1094,8 +1096,8 @@ nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port, port->handler(task, fmsg); msg->buf = fmsg->buf; - msg->fd = fmsg->fd; - msg->fd2 = fmsg->fd2; + msg->fd[0] = fmsg->fd[0]; + msg->fd[1] = fmsg->fd[1]; /* * To disable instant completion or buffer re-usage, @@ -1129,17 +1131,17 @@ nxt_port_read_msg_process(nxt_task_t *task, nxt_port_t *port, if (nxt_fast_path(msg->cancelled == 0)) { msg->buf = NULL; - msg->fd = -1; - msg->fd2 = -1; + msg->fd[0] = -1; + msg->fd[1] = -1; b = NULL; } else { - if (msg->fd != -1) { - nxt_fd_close(msg->fd); + if (msg->fd[0] != -1) { + nxt_fd_close(msg->fd[0]); } - if (msg->fd2 != -1) { - nxt_fd_close(msg->fd2); + if (msg->fd[1] != -1) { + nxt_fd_close(msg->fd[1]); } } } else { @@ -1240,16 +1242,18 @@ nxt_port_error_handler(nxt_task_t *task, void *obj, void *data) nxt_queue_each(msg, &port->messages, nxt_port_send_msg_t, link) { - if (msg->fd != -1 && msg->close_fd != 0) { - nxt_fd_close(msg->fd); + if (msg->close_fd) { + if (msg->fd[0] != -1) { + nxt_fd_close(msg->fd[0]); - msg->fd = -1; - } + msg->fd[0] = -1; + } - if (msg->fd2 != -1 && msg->close_fd != 0) { - nxt_fd_close(msg->fd2); + if (msg->fd[1] != -1) { + nxt_fd_close(msg->fd[1]); - msg->fd2 = -1; + msg->fd[1] = -1; + } } for (b = msg->buf; b != NULL; b = next) { diff --git a/src/nxt_router.c b/src/nxt_router.c index 3dd0878b..df0d96ad 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -607,14 +607,14 @@ nxt_router_new_port_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) msg->port_msg.type = _NXT_PORT_MSG_RPC_ERROR; } else { - if (msg->fd2 != -1) { - res = nxt_router_port_queue_map(task, port, msg->fd2); + if (msg->fd[1] != -1) { + res = nxt_router_port_queue_map(task, port, msg->fd[1]); if (nxt_slow_path(res != NXT_OK)) { return; } - nxt_fd_close(msg->fd2); - msg->fd2 = -1; + nxt_fd_close(msg->fd[1]); + msg->fd[1] = -1; } } @@ -669,7 
+669,7 @@ nxt_router_conf_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) return; } - if (nxt_slow_path(msg->fd == -1)) { + if (nxt_slow_path(msg->fd[0] == -1)) { nxt_alert(task, "conf_data_handler: invalid file shm fd"); return; } @@ -678,18 +678,18 @@ nxt_router_conf_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) nxt_alert(task, "conf_data_handler: unexpected buffer size (%d)", (int) nxt_buf_mem_used_size(&msg->buf->mem)); - nxt_fd_close(msg->fd); - msg->fd = -1; + nxt_fd_close(msg->fd[0]); + msg->fd[0] = -1; return; } nxt_memcpy(&size, msg->buf->mem.pos, sizeof(size_t)); - p = nxt_mem_mmap(NULL, size, PROT_READ, MAP_SHARED, msg->fd, 0); + p = nxt_mem_mmap(NULL, size, PROT_READ, MAP_SHARED, msg->fd[0], 0); - nxt_fd_close(msg->fd); - msg->fd = -1; + nxt_fd_close(msg->fd[0]); + msg->fd[0] = -1; if (nxt_slow_path(p == MAP_FAILED)) { return; @@ -2133,7 +2133,7 @@ nxt_router_listen_socket_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, rpc = data; - s = msg->fd; + s = msg->fd[0]; ret = nxt_socket_nonblocking(task, s); if (nxt_slow_path(ret != NXT_OK)) { @@ -2271,7 +2271,7 @@ nxt_router_tls_rpc_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg, goto fail; } - tlscf->chain_file = msg->fd; + tlscf->chain_file = msg->fd[0]; ret = task->thread->runtime->tls->server_init(task, tlscf); if (nxt_slow_path(ret != NXT_OK)) { @@ -3392,7 +3392,7 @@ nxt_router_access_log_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, access_log = tmcf->router_conf->access_log; - access_log->fd = msg->fd; + access_log->fd = msg->fd[0]; nxt_work_queue_add(&task->thread->engine->fast_work_queue, nxt_router_conf_apply, task, tmcf, NULL); @@ -3541,13 +3541,13 @@ nxt_router_access_log_reopen_ready(nxt_task_t *task, nxt_port_recv_msg_t *msg, if (access_log == nxt_router->access_log) { - if (nxt_slow_path(dup2(msg->fd, access_log->fd) == -1)) { + if (nxt_slow_path(dup2(msg->fd[0], access_log->fd) == -1)) { nxt_alert(task, "dup2(%FD, %FD) failed %E", - msg->fd, access_log->fd, nxt_errno); + msg->fd[0], access_log->fd, nxt_errno); } } - nxt_fd_close(msg->fd); + nxt_fd_close(msg->fd[0]); nxt_mp_release(reopen->mem_pool); } diff --git a/src/nxt_unit.c b/src/nxt_unit.c index b063058f..8dd03b82 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -211,8 +211,7 @@ struct nxt_unit_recv_msg_s { void *start; uint32_t size; - int fd; - int fd2; + int fd[2]; nxt_unit_mmap_buf_t *incoming_buf; }; @@ -900,8 +899,8 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); rc = NXT_UNIT_ERROR; - recv_msg.fd = -1; - recv_msg.fd2 = -1; + recv_msg.fd[0] = -1; + recv_msg.fd[1] = -1; port_msg = (nxt_port_msg_t *) rbuf->buf; cm = (struct cmsghdr *) rbuf->oob; @@ -909,11 +908,11 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) && cm->cmsg_type == SCM_RIGHTS) { if (cm->cmsg_len == CMSG_LEN(sizeof(int))) { - memcpy(&recv_msg.fd, CMSG_DATA(cm), sizeof(int)); + memcpy(recv_msg.fd, CMSG_DATA(cm), sizeof(int)); } if (cm->cmsg_len == CMSG_LEN(sizeof(int) * 2)) { - memcpy(&recv_msg.fd, CMSG_DATA(cm), sizeof(int) * 2); + memcpy(recv_msg.fd, CMSG_DATA(cm), sizeof(int) * 2); } } @@ -933,9 +932,9 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) goto fail; } - nxt_unit_debug(ctx, "#%"PRIu32": process message %d fd %d fd2 %d", + nxt_unit_debug(ctx, "#%"PRIu32": process message %d fd[0] %d fd[1] %d", port_msg->stream, (int) port_msg->type, - recv_msg.fd, recv_msg.fd2); + recv_msg.fd[0], recv_msg.fd[1]); recv_msg.stream = 
port_msg->stream; recv_msg.pid = port_msg->pid; @@ -964,8 +963,8 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) if (nxt_slow_path(rc != NXT_UNIT_OK)) { if (rc == NXT_UNIT_AGAIN) { - recv_msg.fd = -1; - recv_msg.fd2 = -1; + recv_msg.fd[0] = -1; + recv_msg.fd[1] = -1; } goto fail; @@ -987,11 +986,11 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) case _NXT_PORT_MSG_CHANGE_FILE: nxt_unit_debug(ctx, "#%"PRIu32": change_file: fd %d", - port_msg->stream, recv_msg.fd); + port_msg->stream, recv_msg.fd[0]); - if (dup2(recv_msg.fd, lib->log_fd) == -1) { + if (dup2(recv_msg.fd[0], lib->log_fd) == -1) { nxt_unit_alert(ctx, "#%"PRIu32": dup2(%d, %d) failed: %s (%d)", - port_msg->stream, recv_msg.fd, lib->log_fd, + port_msg->stream, recv_msg.fd[0], lib->log_fd, strerror(errno), errno); goto fail; @@ -1001,14 +1000,14 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) break; case _NXT_PORT_MSG_MMAP: - if (nxt_slow_path(recv_msg.fd < 0)) { + if (nxt_slow_path(recv_msg.fd[0] < 0)) { nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for mmap", - port_msg->stream, recv_msg.fd); + port_msg->stream, recv_msg.fd[0]); goto fail; } - rc = nxt_unit_incoming_mmap(ctx, port_msg->pid, recv_msg.fd); + rc = nxt_unit_incoming_mmap(ctx, port_msg->pid, recv_msg.fd[0]); break; case _NXT_PORT_MSG_REQ_HEADERS: @@ -1055,12 +1054,12 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) fail: - if (recv_msg.fd != -1) { - nxt_unit_close(recv_msg.fd); + if (recv_msg.fd[0] != -1) { + nxt_unit_close(recv_msg.fd[0]); } - if (recv_msg.fd2 != -1) { - nxt_unit_close(recv_msg.fd2); + if (recv_msg.fd[1] != -1) { + nxt_unit_close(recv_msg.fd[1]); } while (recv_msg.incoming_buf != NULL) { @@ -1094,32 +1093,34 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) return NXT_UNIT_ERROR; } - if (nxt_slow_path(recv_msg->fd < 0)) { + if (nxt_slow_path(recv_msg->fd[0] < 0)) { nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for new port", - recv_msg->stream, recv_msg->fd); + recv_msg->stream, recv_msg->fd[0]); return NXT_UNIT_ERROR; } new_port_msg = recv_msg->start; - nxt_unit_debug(ctx, "#%"PRIu32": new_port: port{%d,%d} fd %d fd2 %d", + nxt_unit_debug(ctx, "#%"PRIu32": new_port: port{%d,%d} fd[0] %d fd[1] %d", recv_msg->stream, (int) new_port_msg->pid, - (int) new_port_msg->id, recv_msg->fd, recv_msg->fd2); + (int) new_port_msg->id, recv_msg->fd[0], recv_msg->fd[1]); lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); if (new_port_msg->id == (nxt_port_id_t) -1) { nxt_unit_port_id_init(&new_port.id, lib->pid, new_port_msg->id); - new_port.in_fd = recv_msg->fd; + new_port.in_fd = recv_msg->fd[0]; new_port.out_fd = -1; mem = mmap(NULL, sizeof(nxt_app_queue_t), PROT_READ | PROT_WRITE, - MAP_SHARED, recv_msg->fd2, 0); + MAP_SHARED, recv_msg->fd[1], 0); } else { - if (nxt_slow_path(nxt_unit_fd_blocking(recv_msg->fd) != NXT_UNIT_OK)) { + if (nxt_slow_path(nxt_unit_fd_blocking(recv_msg->fd[0]) + != NXT_UNIT_OK)) + { return NXT_UNIT_ERROR; } @@ -1127,14 +1128,14 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) new_port_msg->id); new_port.in_fd = -1; - new_port.out_fd = recv_msg->fd; + new_port.out_fd = recv_msg->fd[0]; mem = mmap(NULL, sizeof(nxt_port_queue_t), PROT_READ | PROT_WRITE, - MAP_SHARED, recv_msg->fd2, 0); + MAP_SHARED, recv_msg->fd[1], 0); } if (nxt_slow_path(mem == MAP_FAILED)) { - nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", recv_msg->fd2, + nxt_unit_alert(ctx, "mmap(%d) failed: %s (%d)", 
recv_msg->fd[1], strerror(errno), errno); return NXT_UNIT_ERROR; @@ -1142,7 +1143,7 @@ nxt_unit_process_new_port(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) new_port.data = NULL; - recv_msg->fd = -1; + recv_msg->fd[0] = -1; port = nxt_unit_add_port(ctx, &new_port, mem); if (nxt_slow_path(port == NULL)) { @@ -1224,8 +1225,8 @@ nxt_unit_process_req_headers(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) req_impl->incoming_buf->prev = &req_impl->incoming_buf; recv_msg->incoming_buf = NULL; - req->content_fd = recv_msg->fd; - recv_msg->fd = -1; + req->content_fd = recv_msg->fd[0]; + recv_msg->fd[0] = -1; req->response_max_fields = 0; req_impl->state = NXT_UNIT_RS_START; @@ -1312,8 +1313,8 @@ nxt_unit_process_req_body(nxt_unit_ctx_t *ctx, nxt_unit_recv_msg_t *recv_msg) recv_msg->incoming_buf = NULL; } - req->content_fd = recv_msg->fd; - recv_msg->fd = -1; + req->content_fd = recv_msg->fd[0]; + recv_msg->fd[0] = -1; lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); -- cgit From fd2c01c58f5f3bfd357e9931a9abb64083afc3ac Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Tue, 11 Aug 2020 21:48:46 +0300 Subject: Fixing return value initialization. --- src/nxt_unit.c | 44 +++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/src/nxt_unit.c b/src/nxt_unit.c index 8dd03b82..6b7d631d 100644 --- a/src/nxt_unit.c +++ b/src/nxt_unit.c @@ -898,7 +898,6 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) lib = nxt_container_of(ctx->unit, nxt_unit_impl_t, unit); - rc = NXT_UNIT_ERROR; recv_msg.fd[0] = -1; recv_msg.fd[1] = -1; port_msg = (nxt_port_msg_t *) rbuf->buf; @@ -924,12 +923,13 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) nxt_unit_quit(ctx); rc = NXT_UNIT_OK; - - goto fail; + goto done; } nxt_unit_alert(ctx, "message too small (%d bytes)", (int) rbuf->size); - goto fail; + + rc = NXT_UNIT_ERROR; + goto done; } nxt_unit_debug(ctx, "#%"PRIu32": process message %d fd[0] %d fd[1] %d", @@ -946,16 +946,18 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) recv_msg.size = rbuf->size - sizeof(nxt_port_msg_t); if (nxt_slow_path(port_msg->type >= NXT_PORT_MSG_MAX)) { - nxt_unit_warn(ctx, "#%"PRIu32": unknown message type (%d)", - port_msg->stream, (int) port_msg->type); - goto fail; + nxt_unit_alert(ctx, "#%"PRIu32": unknown message type (%d)", + port_msg->stream, (int) port_msg->type); + rc = NXT_UNIT_ERROR; + goto done; } /* Fragmentation is unsupported. 
*/ if (nxt_slow_path(port_msg->nf != 0 || port_msg->mf != 0)) { - nxt_unit_warn(ctx, "#%"PRIu32": fragmented message type (%d)", - port_msg->stream, (int) port_msg->type); - goto fail; + nxt_unit_alert(ctx, "#%"PRIu32": fragmented message type (%d)", + port_msg->stream, (int) port_msg->type); + rc = NXT_UNIT_ERROR; + goto done; } if (port_msg->mmap) { @@ -967,7 +969,7 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) recv_msg.fd[1] = -1; } - goto fail; + goto done; } } @@ -993,7 +995,8 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) port_msg->stream, recv_msg.fd[0], lib->log_fd, strerror(errno), errno); - goto fail; + rc = NXT_UNIT_ERROR; + goto done; } rc = NXT_UNIT_OK; @@ -1004,7 +1007,8 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) nxt_unit_alert(ctx, "#%"PRIu32": invalid fd %d for mmap", port_msg->stream, recv_msg.fd[0]); - goto fail; + rc = NXT_UNIT_ERROR; + goto done; } rc = nxt_unit_incoming_mmap(ctx, port_msg->pid, recv_msg.fd[0]); @@ -1024,11 +1028,12 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) case _NXT_PORT_MSG_REMOVE_PID: if (nxt_slow_path(recv_msg.size != sizeof(pid))) { - nxt_unit_warn(ctx, "#%"PRIu32": remove_pid: invalid message size " - "(%d != %d)", port_msg->stream, (int) recv_msg.size, - (int) sizeof(pid)); + nxt_unit_alert(ctx, "#%"PRIu32": remove_pid: invalid message size " + "(%d != %d)", port_msg->stream, (int) recv_msg.size, + (int) sizeof(pid)); - goto fail; + rc = NXT_UNIT_ERROR; + goto done; } memcpy(&pid, recv_msg.start, sizeof(pid)); @@ -1049,10 +1054,11 @@ nxt_unit_process_msg(nxt_unit_ctx_t *ctx, nxt_unit_read_buf_t *rbuf) nxt_unit_debug(ctx, "#%"PRIu32": ignore message type: %d", port_msg->stream, (int) port_msg->type); - goto fail; + rc = NXT_UNIT_ERROR; + goto done; } -fail: +done: if (recv_msg.fd[0] != -1) { nxt_unit_close(recv_msg.fd[0]); -- cgit From 2136eb411c9b99ffd65751bd13e10ce426be2492 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Wed, 12 Aug 2020 13:37:49 +0300 Subject: Fixing issues found by static analyzer. --- src/nxt_port_socket.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/nxt_port_socket.c b/src/nxt_port_socket.c index 5ca2eb38..9d8096b2 100644 --- a/src/nxt_port_socket.c +++ b/src/nxt_port_socket.c @@ -208,6 +208,10 @@ nxt_port_socket_write2(nxt_task_t *task, nxt_port_t *port, nxt_uint_t type, nxt_debug(task, "port{%d,%d} %d: enqueue 1 notify %d, %d", (int) port->pid, (int) port->id, port->socket.fd, notify, res); + + if (nxt_slow_path(res == NXT_AGAIN)) { + return NXT_AGAIN; + } } } @@ -738,8 +742,6 @@ nxt_port_queue_read_handler(nxt_task_t *task, void *obj, void *data) (int) port->pid, (int) port->id, port->socket.fd, port->from_socket); - n = -1; - continue; } -- cgit From 09685e2b4143ec19afef7673a455cf7e4d1414b7 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Wed, 12 Aug 2020 15:25:29 +0300 Subject: Responding with error in case of first process start failure. After shared application port introducing, request queue in router was removed and requests may stuck forever waiting for another process start. 
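In other words, each request is now linked into an application-wide waiting list until a worker acknowledges its headers; if the first process fails to start, that list is drained and every waiting request is answered with 503 Service Unavailable instead of hanging. The self-contained C sketch below illustrates only that drain-and-notify pattern; the types and names (app_t, request_t, app_start_failed) are illustrative stand-ins, not Unit's actual API, which keeps nxt_http_request_t objects on app->ack_waiting_req under app->mutex and posts r->err_work to the request's event engine.

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    /* Illustrative stand-ins for the router's queue of requests that are
     * still waiting for a worker acknowledgement. */

    typedef struct request_s  request_t;

    struct request_s {
        int        id;
        request_t  *next;
        void       (*on_error)(request_t *r);   /* error work handler */
    };

    typedef struct {
        pthread_mutex_t  mutex;
        request_t        *waiting;      /* requests waiting for a worker ack */
        int              processes;     /* running worker processes */
        int              pending;       /* worker processes still starting */
    } app_t;

    static void
    respond_503(request_t *r)
    {
        printf("request #%d: 503 Service Unavailable\n", r->id);
    }

    /* Called when a worker process failed to start.  Requests are unlinked
     * one at a time, and the lock is dropped before the callback runs, just
     * as the router releases app->mutex before posting err_work. */
    static void
    app_start_failed(app_t *app)
    {
        request_t  *r;

        for ( ;; ) {
            pthread_mutex_lock(&app->mutex);

            if (app->processes != 0 || app->pending != 0
                || app->waiting == NULL)
            {
                pthread_mutex_unlock(&app->mutex);
                return;
            }

            r = app->waiting;
            app->waiting = r->next;
            r->next = NULL;

            pthread_mutex_unlock(&app->mutex);

            r->on_error(r);
            free(r);
        }
    }

    int
    main(void)
    {
        int        i;
        app_t      app;
        request_t  *r;

        pthread_mutex_init(&app.mutex, NULL);
        app.waiting = NULL;
        app.processes = 0;
        app.pending = 0;

        for (i = 1; i <= 3; i++) {
            r = malloc(sizeof(request_t));
            if (r == NULL) {
                return 1;
            }

            r->id = i;
            r->on_error = respond_503;
            r->next = app.waiting;
            app.waiting = r;
        }

        app_start_failed(&app);
        pthread_mutex_destroy(&app.mutex);

        return 0;
    }
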
--- src/nxt_http.h | 4 ++ src/nxt_router.c | 142 ++++++++++++++++++++++++++++++++++++++++++++++++------- src/nxt_router.h | 1 + 3 files changed, 129 insertions(+), 18 deletions(-) diff --git a/src/nxt_http.h b/src/nxt_http.h index 67ac00d8..6c84843f 100644 --- a/src/nxt_http.h +++ b/src/nxt_http.h @@ -169,6 +169,10 @@ struct nxt_http_request_s { nxt_http_peer_t *peer; nxt_buf_t *last; + nxt_queue_link_t app_link; /* nxt_app_t.ack_waiting_req */ + nxt_event_engine_t *engine; + nxt_work_t err_work; + nxt_http_response_t resp; nxt_http_status_t status:16; diff --git a/src/nxt_router.c b/src/nxt_router.c index df0d96ad..0ccf6593 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -204,6 +204,8 @@ static void nxt_router_app_port_release(nxt_task_t *task, nxt_port_t *port, nxt_apr_action_t action); static void nxt_router_app_port_get(nxt_task_t *task, nxt_app_t *app, nxt_request_rpc_data_t *req_rpc_data); +static void nxt_router_http_request_error(nxt_task_t *task, void *obj, + void *data); static void nxt_router_http_request_done(nxt_task_t *task, void *obj, void *data); @@ -539,6 +541,8 @@ nxt_inline void nxt_request_rpc_data_unlink(nxt_task_t *task, nxt_request_rpc_data_t *req_rpc_data) { + nxt_app_t *app; + nxt_bool_t unlinked; nxt_http_request_t *r; nxt_router_msg_cancel(task, req_rpc_data); @@ -550,12 +554,7 @@ nxt_request_rpc_data_unlink(nxt_task_t *task, req_rpc_data->app_port = NULL; } - if (req_rpc_data->app != NULL) { - nxt_router_app_use(task, req_rpc_data->app, -1); - - req_rpc_data->app = NULL; - } - + app = req_rpc_data->app; r = req_rpc_data->request; if (r != NULL) { @@ -565,6 +564,31 @@ nxt_request_rpc_data_unlink(nxt_task_t *task, r->req_rpc_data = NULL; req_rpc_data->request = NULL; + + if (app != NULL) { + unlinked = 0; + + nxt_thread_mutex_lock(&app->mutex); + + if (r->app_link.next != NULL) { + nxt_queue_remove(&r->app_link); + r->app_link.next = NULL; + + unlinked = 1; + } + + nxt_thread_mutex_unlock(&app->mutex); + + if (unlinked) { + nxt_mp_release(r->mem_pool); + } + } + } + + if (app != NULL) { + nxt_router_app_use(task, app, -1); + + req_rpc_data->app = NULL; } if (req_rpc_data->msg_info.body_fd != -1) { @@ -1492,6 +1516,7 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_queue_init(&app->ports); nxt_queue_init(&app->spare_ports); nxt_queue_init(&app->idle_ports); + nxt_queue_init(&app->ack_waiting_req); app->name.length = name.length; nxt_memcpy(app->name.start, name.start, name.length); @@ -3784,7 +3809,7 @@ nxt_router_req_headers_ack_handler(nxt_task_t *task, { int res; nxt_app_t *app; - nxt_bool_t start_process; + nxt_bool_t start_process, unlinked; nxt_port_t *app_port, *main_app_port, *idle_port; nxt_queue_link_t *idle_lnk; nxt_http_request_t *r; @@ -3797,19 +3822,31 @@ nxt_router_req_headers_ack_handler(nxt_task_t *task, msg->port_msg.pid); app = req_rpc_data->app; + r = req_rpc_data->request; start_process = 0; + unlinked = 0; nxt_thread_mutex_lock(&app->mutex); + if (r->app_link.next != NULL) { + nxt_queue_remove(&r->app_link); + r->app_link.next = NULL; + + unlinked = 1; + } + app_port = nxt_port_hash_find(&app->port_hash, msg->port_msg.pid, msg->port_msg.reply_port); if (nxt_slow_path(app_port == NULL)) { nxt_thread_mutex_unlock(&app->mutex); - r = req_rpc_data->request; nxt_http_request_error(task, r, NXT_HTTP_INTERNAL_SERVER_ERROR); + if (unlinked) { + nxt_mp_release(r->mem_pool); + } + return; } @@ -3857,6 +3894,10 @@ nxt_router_req_headers_ack_handler(nxt_task_t *task, nxt_thread_mutex_unlock(&app->mutex); + if 
(unlinked) { + nxt_mp_release(r->mem_pool); + } + if (start_process) { nxt_router_start_app_process(task, app); } @@ -3877,15 +3918,11 @@ nxt_router_req_headers_ack_handler(nxt_task_t *task, task->thread->engine->port->id, NULL); if (nxt_slow_path(res != NXT_OK)) { - r = req_rpc_data->request; - nxt_http_request_error(task, r, NXT_HTTP_INTERNAL_SERVER_ERROR); } } if (app->timeout != 0) { - r = req_rpc_data->request; - r->timer.handler = nxt_router_app_timeout; r->timer_data = req_rpc_data; nxt_timer_add(task->thread->engine, &r->timer, app->timeout); @@ -4028,8 +4065,10 @@ static void nxt_router_app_port_error(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data) { - nxt_app_t *app; - nxt_app_joint_t *app_joint; + nxt_app_t *app; + nxt_app_joint_t *app_joint; + nxt_queue_link_t *link; + nxt_http_request_t *r; app_joint = data; @@ -4047,15 +4086,43 @@ nxt_router_app_port_error(nxt_task_t *task, nxt_port_recv_msg_t *msg, nxt_debug(task, "app '%V' %p start error", &app->name, app); + link = NULL; + nxt_thread_mutex_lock(&app->mutex); nxt_assert(app->pending_processes != 0); app->pending_processes--; + if (app->processes == 0 && !nxt_queue_is_empty(&app->ack_waiting_req)) { + link = nxt_queue_first(&app->ack_waiting_req); + + nxt_queue_remove(link); + link->next = NULL; + } + nxt_thread_mutex_unlock(&app->mutex); - /* TODO req_app_link to cancel first pending message */ + while (link != NULL) { + r = nxt_container_of(link, nxt_http_request_t, app_link); + + nxt_event_engine_post(r->engine, &r->err_work); + + link = NULL; + + nxt_thread_mutex_lock(&app->mutex); + + if (app->processes == 0 && app->pending_processes == 0 + && !nxt_queue_is_empty(&app->ack_waiting_req)) + { + link = nxt_queue_first(&app->ack_waiting_req); + + nxt_queue_remove(link); + link->next = NULL; + } + + nxt_thread_mutex_unlock(&app->mutex); + } } @@ -4541,8 +4608,9 @@ static void nxt_router_app_port_get(nxt_task_t *task, nxt_app_t *app, nxt_request_rpc_data_t *req_rpc_data) { - nxt_bool_t start_process; - nxt_port_t *port; + nxt_bool_t start_process; + nxt_port_t *port; + nxt_http_request_t *r; start_process = 0; @@ -4558,8 +4626,22 @@ nxt_router_app_port_get(nxt_task_t *task, nxt_app_t *app, start_process = 1; } + r = req_rpc_data->request; + + /* + * Put request into application-wide list to be able to cancel request + * if something goes wrong with application processes. + */ + nxt_queue_insert_tail(&app->ack_waiting_req, &r->app_link); + nxt_thread_mutex_unlock(&app->mutex); + /* + * Retain request memory pool while request is linked in ack_waiting_req + * to guarantee request structure memory is accessble. 
+ */ + nxt_mp_retain(r->mem_pool); + req_rpc_data->app_port = port; req_rpc_data->apr_action = NXT_APR_REQUEST_FAILED; @@ -4602,6 +4684,11 @@ nxt_router_process_http_request(nxt_task_t *task, nxt_http_request_t *r, r->timer.log = engine->task.log; r->timer.bias = NXT_TIMER_DEFAULT_BIAS; + r->engine = engine; + r->err_work.handler = nxt_router_http_request_error; + r->err_work.task = task; + r->err_work.obj = r; + req_rpc_data->stream = nxt_port_rpc_ex_stream(req_rpc_data); req_rpc_data->app = app; req_rpc_data->msg_info.body_fd = -1; @@ -4621,6 +4708,25 @@ nxt_router_process_http_request(nxt_task_t *task, nxt_http_request_t *r, } +static void +nxt_router_http_request_error(nxt_task_t *task, void *obj, void *data) +{ + nxt_http_request_t *r; + + r = obj; + + nxt_debug(task, "router http request error (rpc_data %p)", r->req_rpc_data); + + nxt_http_request_error(task, r, NXT_HTTP_SERVICE_UNAVAILABLE); + + if (r->req_rpc_data != NULL) { + nxt_request_rpc_data_unlink(task, r->req_rpc_data); + } + + nxt_mp_release(r->mem_pool); +} + + static void nxt_router_http_request_done(nxt_task_t *task, void *obj, void *data) { @@ -4630,7 +4736,7 @@ nxt_router_http_request_done(nxt_task_t *task, void *obj, void *data) nxt_debug(task, "router http request done (rpc_data %p)", r->req_rpc_data); - if (r->req_rpc_data) { + if (r->req_rpc_data != NULL) { nxt_request_rpc_data_unlink(task, r->req_rpc_data); } diff --git a/src/nxt_router.h b/src/nxt_router.h index ead8f292..0b1147f8 100644 --- a/src/nxt_router.h +++ b/src/nxt_router.h @@ -138,6 +138,7 @@ struct nxt_app_s { nxt_str_t conf; nxt_atomic_t use_count; + nxt_queue_t ack_waiting_req; /* of nxt_http_request_t.app_link */ nxt_app_joint_t *joint; nxt_port_t *shared_port; -- cgit From 21ac95f17e70f2f20fe8e2a99bbe9cc7328a6e62 Mon Sep 17 00:00:00 2001 From: Remi Collet Date: Wed, 12 Aug 2020 22:55:01 +0300 Subject: PHP: compatibility with 8.0.0 Beta 1. This closes #441 PR on GitHub. 
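PHP 8.0 changed several SAPI-facing prototypes to take const strings (for example, the disable_functions parser and the log-message callback), so the module now gates its declarations on PHP_VERSION_ID. A minimal sketch of that gating pattern follows; the PHP_VERSION_ID fallback define and the demo_log_message() name are stand-ins so the example builds outside a PHP source tree, and the real module additionally keeps the NXT_HAVE_PHP_LOG_MESSAGE_WITH_SYSLOG_TYPE case for older PHP builds.

    #include <stdio.h>

    /* Stand-in so the sketch compiles on its own; a real SAPI module gets
     * PHP_VERSION_ID from php_version.h (80000 means PHP 8.0.0). */
    #ifndef PHP_VERSION_ID
    #define PHP_VERSION_ID  80000
    #endif

    #if PHP_VERSION_ID >= 80000
    #define NXT_PHP8  1
    #endif

    /* PHP 8 passes the log message as "const char *"; PHP 7 and earlier
     * use a plain "char *". */
    #if NXT_PHP8
    static void
    demo_log_message(const char *message, int syslog_type_int)
    #else
    static void
    demo_log_message(char *message, int syslog_type_int)
    #endif
    {
        printf("%d: %s\n", syslog_type_int, message);
    }

    int
    main(void)
    {
        demo_log_message("module loaded", 0);
        return 0;
    }
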
--- src/nxt_php_sapi.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/nxt_php_sapi.c b/src/nxt_php_sapi.c index 00671b4a..bc8341f4 100644 --- a/src/nxt_php_sapi.c +++ b/src/nxt_php_sapi.c @@ -28,6 +28,9 @@ #if PHP_VERSION_ID >= 70000 #define NXT_PHP7 1 #endif +#if PHP_VERSION_ID >= 80000 +#define NXT_PHP8 1 +#endif /* PHP 8 */ #ifndef TSRMLS_CC @@ -61,7 +64,9 @@ typedef struct { } nxt_php_run_ctx_t; -#ifdef NXT_PHP7 +#if NXT_PHP8 +typedef int (*nxt_php_disable_t)(const char *p, size_t size); +#elif NXT_PHP7 typedef int (*nxt_php_disable_t)(char *p, size_t size); #else typedef int (*nxt_php_disable_t)(char *p, uint TSRMLS_DC); @@ -105,11 +110,15 @@ nxt_inline void nxt_php_set_str(nxt_unit_request_info_t *req, const char *name, static void nxt_php_set_cstr(nxt_unit_request_info_t *req, const char *name, const char *str, uint32_t len, zval *track_vars_array TSRMLS_DC); static void nxt_php_register_variables(zval *track_vars_array TSRMLS_DC); +#if NXT_PHP8 +static void nxt_php_log_message(const char *message, int syslog_type_int); +#else #ifdef NXT_HAVE_PHP_LOG_MESSAGE_WITH_SYSLOG_TYPE static void nxt_php_log_message(char *message, int syslog_type_int); #else static void nxt_php_log_message(char *message TSRMLS_DC); #endif +#endif #ifdef NXT_PHP7 static size_t nxt_php_unbuffered_write(const char *str, @@ -1240,6 +1249,10 @@ nxt_php_set_cstr(nxt_unit_request_info_t *req, const char *name, } +#if NXT_PHP8 +static void +nxt_php_log_message(const char *message, int syslog_type_int) +#else #ifdef NXT_HAVE_PHP_LOG_MESSAGE_WITH_SYSLOG_TYPE static void nxt_php_log_message(char *message, int syslog_type_int) @@ -1247,6 +1260,7 @@ nxt_php_log_message(char *message, int syslog_type_int) static void nxt_php_log_message(char *message TSRMLS_DC) #endif +#endif { nxt_php_run_ctx_t *ctx; -- cgit From 93146616cf56a94fc2979cb978c7b451c5592594 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Thu, 13 Aug 2020 02:46:54 +0300 Subject: Basic variables support. --- auto/sources | 2 + src/nxt_array.c | 2 +- src/nxt_array.h | 8 + src/nxt_conf_validation.c | 21 ++ src/nxt_http.h | 12 +- src/nxt_http_request.c | 15 +- src/nxt_http_route.c | 207 +++++++++------ src/nxt_http_variables.c | 59 +++++ src/nxt_main.h | 1 + src/nxt_mp.c | 14 ++ src/nxt_mp.h | 4 + src/nxt_router.c | 183 ++++++++++---- src/nxt_router.h | 4 +- src/nxt_runtime.c | 8 + src/nxt_runtime.h | 2 + src/nxt_upstream.c | 6 +- src/nxt_var.c | 616 +++++++++++++++++++++++++++++++++++++++++++++ src/nxt_var.h | 48 ++++ src/test/nxt_lvlhsh_test.c | 18 +- 19 files changed, 1086 insertions(+), 144 deletions(-) create mode 100644 src/nxt_http_variables.c create mode 100644 src/nxt_var.c create mode 100644 src/nxt_var.h diff --git a/auto/sources b/auto/sources index 0dd2cbd6..a61577dc 100644 --- a/auto/sources +++ b/auto/sources @@ -31,6 +31,7 @@ NXT_LIB_SRCS=" \ src/nxt_utf8.c \ src/nxt_parse.c \ src/nxt_sprintf.c \ + src/nxt_var.c \ src/nxt_file_name.c \ src/nxt_log.c \ src/nxt_djb_hash.c \ @@ -91,6 +92,7 @@ NXT_LIB_SRCS=" \ src/nxt_http_static.c \ src/nxt_http_proxy.c \ src/nxt_http_chunk_parse.c \ + src/nxt_http_variables.c \ src/nxt_application.c \ src/nxt_external.c \ src/nxt_port_hash.c \ diff --git a/src/nxt_array.c b/src/nxt_array.c index 6fe9ad6a..1e13c22a 100644 --- a/src/nxt_array.c +++ b/src/nxt_array.c @@ -51,7 +51,7 @@ nxt_array_add(nxt_array_t *array) if (nalloc < 16) { /* Allocate new array twice larger than current. */ - new_alloc = nalloc * 2; + new_alloc = (nalloc == 0) ? 
4 : nalloc * 2; } else { /* Allocate new array 1.5 times larger than current. */ diff --git a/src/nxt_array.h b/src/nxt_array.h index 5762ec27..8318fccd 100644 --- a/src/nxt_array.h +++ b/src/nxt_array.h @@ -18,6 +18,14 @@ typedef struct { } nxt_array_t; +nxt_inline void +nxt_array_init(nxt_array_t *array, nxt_mp_t *mp, size_t size) +{ + array->elts = nxt_pointer_to(array, sizeof(nxt_array_t)); + array->size = size; + array->mem_pool = mp; +} + NXT_EXPORT nxt_array_t *nxt_array_create(nxt_mp_t *mp, nxt_uint_t n, size_t size); NXT_EXPORT void nxt_array_destroy(nxt_array_t *array); diff --git a/src/nxt_conf_validation.c b/src/nxt_conf_validation.c index a5e0663f..b5530b85 100644 --- a/src/nxt_conf_validation.c +++ b/src/nxt_conf_validation.c @@ -64,6 +64,8 @@ static nxt_int_t nxt_conf_vldt_type(nxt_conf_validation_t *vldt, nxt_str_t *name, nxt_conf_value_t *value, nxt_conf_vldt_type_t type); static nxt_int_t nxt_conf_vldt_error(nxt_conf_validation_t *vldt, const char *fmt, ...); +static nxt_int_t nxt_conf_vldt_var(nxt_conf_validation_t *vldt, + const char *option, nxt_str_t *value); static nxt_int_t nxt_conf_vldt_mtypes(nxt_conf_validation_t *vldt, nxt_conf_value_t *value, void *data); @@ -1065,6 +1067,21 @@ nxt_conf_vldt_error(nxt_conf_validation_t *vldt, const char *fmt, ...) } +static nxt_int_t +nxt_conf_vldt_var(nxt_conf_validation_t *vldt, const char *option, + nxt_str_t *value) +{ + u_char error[NXT_MAX_ERROR_STR]; + + if (nxt_var_test(value, error) != NXT_OK) { + return nxt_conf_vldt_error(vldt, "%s in the \"%s\" value.", + error, option); + } + + return NXT_OK; +} + + typedef struct { nxt_mp_t *pool; nxt_str_t *type; @@ -1242,6 +1259,10 @@ nxt_conf_vldt_pass(nxt_conf_validation_t *vldt, nxt_conf_value_t *value, nxt_conf_get_string(value, &pass); + if (nxt_is_var(&pass)) { + return nxt_conf_vldt_var(vldt, "pass", &pass); + } + ret = nxt_http_pass_segments(vldt->pool, &pass, segments, 3); if (ret != NXT_OK) { diff --git a/src/nxt_http.h b/src/nxt_http.h index 6c84843f..08181520 100644 --- a/src/nxt_http.h +++ b/src/nxt_http.h @@ -164,6 +164,8 @@ struct nxt_http_request_s { nxt_timer_t timer; void *timer_data; + nxt_var_query_t *var_query; + void *req_rpc_data; nxt_http_peer_t *peer; @@ -202,6 +204,7 @@ struct nxt_http_action_s { nxt_upstream_t *upstream; uint32_t upstream_number; nxt_http_status_t return_code; + nxt_var_t *var; } u; nxt_str_t name; @@ -287,15 +290,16 @@ nxt_int_t nxt_http_routes_resolve(nxt_task_t *task, nxt_int_t nxt_http_pass_segments(nxt_mp_t *mp, nxt_str_t *pass, nxt_str_t *segments, nxt_uint_t n); nxt_http_action_t *nxt_http_pass_application(nxt_task_t *task, - nxt_router_temp_conf_t *tmcf, nxt_str_t *name); -void nxt_http_routes_cleanup(nxt_task_t *task, nxt_http_routes_t *routes); -void nxt_http_action_cleanup(nxt_task_t *task, nxt_http_action_t *action); + nxt_router_conf_t *rtcf, nxt_str_t *name); nxt_int_t nxt_upstreams_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_conf_value_t *conf); nxt_int_t nxt_upstreams_joint_create(nxt_router_temp_conf_t *tmcf, nxt_upstream_t ***upstream_joint); +void nxt_http_request_action(nxt_task_t *task, nxt_http_request_t *r, + nxt_http_action_t *action); + nxt_http_action_t *nxt_http_return_handler(nxt_task_t *task, nxt_http_request_t *r, nxt_http_action_t *action); @@ -309,7 +313,7 @@ nxt_str_t *nxt_http_static_mtypes_hash_find(nxt_lvlhsh_t *hash, nxt_http_action_t *nxt_http_application_handler(nxt_task_t *task, nxt_http_request_t *r, nxt_http_action_t *action); -void nxt_upstream_find(nxt_upstreams_t *upstreams, 
nxt_str_t *name, +nxt_int_t nxt_upstream_find(nxt_upstreams_t *upstreams, nxt_str_t *name, nxt_http_action_t *action); nxt_http_action_t *nxt_upstream_proxy_handler(nxt_task_t *task, nxt_http_request_t *r, nxt_upstream_t *upstream); diff --git a/src/nxt_http_request.c b/src/nxt_http_request.c index cc1ae17d..76fb3427 100644 --- a/src/nxt_http_request.c +++ b/src/nxt_http_request.c @@ -10,7 +10,7 @@ static nxt_int_t nxt_http_validate_host(nxt_str_t *host, nxt_mp_t *mp); static void nxt_http_request_start(nxt_task_t *task, void *obj, void *data); -static void nxt_http_request_action(nxt_task_t *task, void *obj, void *data); +static void nxt_http_request_ready(nxt_task_t *task, void *obj, void *data); static void nxt_http_request_proto_info(nxt_task_t *task, nxt_http_request_t *r); static void nxt_http_request_mem_buf_completion(nxt_task_t *task, void *obj, @@ -285,21 +285,28 @@ nxt_http_request_start(nxt_task_t *task, void *obj, void *data) static const nxt_http_request_state_t nxt_http_request_body_state nxt_aligned(64) = { - .ready_handler = nxt_http_request_action, + .ready_handler = nxt_http_request_ready, .error_handler = nxt_http_request_close_handler, }; static void -nxt_http_request_action(nxt_task_t *task, void *obj, void *data) +nxt_http_request_ready(nxt_task_t *task, void *obj, void *data) { nxt_http_action_t *action; nxt_http_request_t *r; r = obj; - action = r->conf->socket_conf->action; + nxt_http_request_action(task, r, action); +} + + +void +nxt_http_request_action(nxt_task_t *task, nxt_http_request_t *r, + nxt_http_action_t *action) +{ if (nxt_fast_path(action != NULL)) { do { diff --git a/src/nxt_http_route.c b/src/nxt_http_route.c index ae745f11..346629c3 100644 --- a/src/nxt_http_route.c +++ b/src/nxt_http_route.c @@ -221,9 +221,16 @@ static nxt_int_t nxt_http_route_resolve(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_http_route_t *route); static nxt_int_t nxt_http_action_resolve(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_http_action_t *action); -static void nxt_http_route_find(nxt_http_routes_t *routes, nxt_str_t *name, +static nxt_http_action_t *nxt_http_action_pass_var(nxt_task_t *task, + nxt_http_request_t *r, nxt_http_action_t *action); +static void nxt_http_action_pass_var_ready(nxt_task_t *task, void *obj, + void *data); +static void nxt_http_action_pass_var_error(nxt_task_t *task, void *obj, + void *data); +static nxt_int_t nxt_http_pass_find(nxt_task_t *task, nxt_mp_t *mp, + nxt_router_conf_t *rtcf, nxt_http_action_t *action); +static nxt_int_t nxt_http_route_find(nxt_http_routes_t *routes, nxt_str_t *name, nxt_http_action_t *action); -static void nxt_http_route_cleanup(nxt_task_t *task, nxt_http_route_t *routes); static nxt_http_action_t *nxt_http_route_handler(nxt_task_t *task, nxt_http_request_t *r, nxt_http_action_t *start); @@ -1353,10 +1360,8 @@ static nxt_int_t nxt_http_action_resolve(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_http_action_t *action) { - nxt_str_t *targets; - nxt_int_t ret; - nxt_uint_t i; - nxt_str_t segments[3]; + nxt_var_t *var; + nxt_int_t ret; if (action->handler != NULL) { if (action->handler == nxt_http_static_handler @@ -1368,14 +1373,118 @@ nxt_http_action_resolve(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, return NXT_OK; } - ret = nxt_http_pass_segments(tmcf->mem_pool, &action->name, segments, 3); + if (nxt_is_var(&action->name)) { + var = nxt_var_compile(&action->name, tmcf->router_conf->mem_pool); + if (nxt_slow_path(var == NULL)) { + return NXT_ERROR; + } + + action->u.var = var; + action->handler 
= nxt_http_action_pass_var; + return NXT_OK; + } + + ret = nxt_http_pass_find(task, tmcf->mem_pool, tmcf->router_conf, action); if (nxt_slow_path(ret != NXT_OK)) { return NXT_ERROR; } + return NXT_OK; +} + + +static nxt_http_action_t * +nxt_http_action_pass_var(nxt_task_t *task, nxt_http_request_t *r, + nxt_http_action_t *action) +{ + nxt_var_t *var; + nxt_int_t ret; + + ret = nxt_var_query_init(&r->var_query, r, r->mem_pool); + if (nxt_slow_path(ret != NXT_OK)) { + goto fail; + } + + var = action->u.var; + + action = nxt_mp_get(r->mem_pool, sizeof(nxt_http_action_t)); + if (nxt_slow_path(action == NULL)) { + goto fail; + } + + nxt_var_query(task, r->var_query, var, &action->name); + nxt_var_query_resolve(task, r->var_query, action, + nxt_http_action_pass_var_ready, + nxt_http_action_pass_var_error); + return NULL; + +fail: + + nxt_http_request_error(task, r, NXT_HTTP_INTERNAL_SERVER_ERROR); + return NULL; +} + + +static void +nxt_http_action_pass_var_ready(nxt_task_t *task, void *obj, void *data) +{ + nxt_int_t ret; + nxt_router_conf_t *rtcf; + nxt_http_action_t *action; + nxt_http_status_t status; + nxt_http_request_t *r; + + r = obj; + action = data; + rtcf = r->conf->socket_conf->router_conf; + + nxt_debug(task, "http pass lookup: %V", &action->name); + + ret = nxt_http_pass_find(task, r->mem_pool, rtcf, action); + + if (ret != NXT_OK) { + status = (ret == NXT_DECLINED) ? NXT_HTTP_NOT_FOUND + : NXT_HTTP_INTERNAL_SERVER_ERROR; + + nxt_http_request_error(task, r, status); + return; + } + + nxt_http_request_action(task, r, action); +} + + +static void +nxt_http_action_pass_var_error(nxt_task_t *task, void *obj, void *data) +{ + nxt_http_request_t *r; + + r = obj; + + nxt_http_request_error(task, r, NXT_HTTP_INTERNAL_SERVER_ERROR); +} + + +static nxt_int_t +nxt_http_pass_find(nxt_task_t *task, nxt_mp_t *mp, nxt_router_conf_t *rtcf, + nxt_http_action_t *action) +{ + nxt_str_t *targets; + nxt_int_t ret; + nxt_uint_t i; + nxt_str_t segments[3]; + + ret = nxt_http_pass_segments(mp, &action->name, segments, 3); + if (nxt_slow_path(ret != NXT_OK)) { + return ret; + } + if (nxt_str_eq(&segments[0], "applications", 12)) { - nxt_router_listener_application(tmcf, &segments[1], action); - nxt_router_app_use(task, action->u.application, 1); + ret = nxt_router_listener_application(rtcf, &segments[1], action); + + if (ret != NXT_OK) { + return ret; + } if (segments[2].length != 0) { targets = action->u.application->targets; @@ -1388,14 +1497,20 @@ nxt_http_action_resolve(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, action->target = 0; } - } else if (nxt_str_eq(&segments[0], "upstreams", 9)) { - nxt_upstream_find(tmcf->router_conf->upstreams, &segments[1], action); + return NXT_OK; + } - } else if (nxt_str_eq(&segments[0], "routes", 6)) { - nxt_http_route_find(tmcf->router_conf->routes, &segments[1], action); + if (segments[2].length == 0) { + if (nxt_str_eq(&segments[0], "upstreams", 9)) { + return nxt_upstream_find(rtcf->upstreams, &segments[1], action); + } + + if (nxt_str_eq(&segments[0], "routes", 6)) { + return nxt_http_route_find(rtcf->routes, &segments[1], action); + } } - return NXT_OK; + return NXT_DECLINED; } @@ -1451,7 +1566,7 @@ nxt_http_pass_segments(nxt_mp_t *mp, nxt_str_t *pass, nxt_str_t *segments, } -static void +static nxt_int_t nxt_http_route_find(nxt_http_routes_t *routes, nxt_str_t *name, nxt_http_action_t *action) { @@ -1465,11 +1580,13 @@ nxt_http_route_find(nxt_http_routes_t *routes, nxt_str_t *name, action->u.route = *route; action->handler = nxt_http_route_handler; - return; 
+ return NXT_OK; } route++; } + + return NXT_DECLINED; } @@ -1497,21 +1614,19 @@ nxt_http_action_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, /* COMPATIBILITY: listener application. */ nxt_http_action_t * -nxt_http_pass_application(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, +nxt_http_pass_application(nxt_task_t *task, nxt_router_conf_t *rtcf, nxt_str_t *name) { nxt_http_action_t *action; - action = nxt_mp_alloc(tmcf->router_conf->mem_pool, - sizeof(nxt_http_action_t)); + action = nxt_mp_alloc(rtcf->mem_pool, sizeof(nxt_http_action_t)); if (nxt_slow_path(action == NULL)) { return NULL; } action->name = *name; - nxt_router_listener_application(tmcf, name, action); - nxt_router_app_use(task, action->u.application, 1); + (void) nxt_router_listener_application(rtcf, name, action); action->target = 0; @@ -1519,56 +1634,6 @@ nxt_http_pass_application(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, } -void -nxt_http_routes_cleanup(nxt_task_t *task, nxt_http_routes_t *routes) -{ - nxt_http_route_t **route, **end; - - if (routes != NULL) { - route = &routes->route[0]; - end = route + routes->items; - - while (route < end) { - nxt_http_route_cleanup(task, *route); - - route++; - } - } -} - - -static void -nxt_http_route_cleanup(nxt_task_t *task, nxt_http_route_t *route) -{ - nxt_http_route_match_t **match, **end; - - match = &route->match[0]; - end = match + route->items; - - while (match < end) { - nxt_http_action_cleanup(task, &(*match)->action); - - match++; - } -} - - -void -nxt_http_action_cleanup(nxt_task_t *task, nxt_http_action_t *action) -{ - if (action->handler == nxt_http_application_handler) { - nxt_router_app_use(task, action->u.application, -1); - return; - } - - if (action->handler == nxt_http_static_handler - && action->u.fallback != NULL) - { - nxt_http_action_cleanup(task, action->u.fallback); - } -} - - static nxt_http_action_t * nxt_http_route_handler(nxt_task_t *task, nxt_http_request_t *r, nxt_http_action_t *start) diff --git a/src/nxt_http_variables.c b/src/nxt_http_variables.c new file mode 100644 index 00000000..222d717c --- /dev/null +++ b/src/nxt_http_variables.c @@ -0,0 +1,59 @@ + +/* + * Copyright (C) NGINX, Inc. 
+ */ + +#include +#include + + +static nxt_int_t nxt_http_var_method(nxt_task_t *task, nxt_var_query_t *query, + nxt_str_t *str, void *ctx); +static nxt_int_t nxt_http_var_uri(nxt_task_t *task, nxt_var_query_t *query, + nxt_str_t *str, void *ctx); + + +static nxt_var_decl_t nxt_http_vars[] = { + { nxt_string("method"), + &nxt_http_var_method, + 0 }, + + { nxt_string("uri"), + &nxt_http_var_uri, + 0 }, +}; + + +nxt_int_t +nxt_http_register_variables(void) +{ + return nxt_var_register(nxt_http_vars, nxt_nitems(nxt_http_vars)); +} + + +static nxt_int_t +nxt_http_var_method(nxt_task_t *task, nxt_var_query_t *query, nxt_str_t *str, + void *ctx) +{ + nxt_http_request_t *r; + + r = ctx; + + *str = *r->method; + + return NXT_OK; +} + + +static nxt_int_t +nxt_http_var_uri(nxt_task_t *task, nxt_var_query_t *query, nxt_str_t *str, + void *ctx) +{ + nxt_http_request_t *r; + + r = ctx; + + *str = *r->path; + + return NXT_OK; +} diff --git a/src/nxt_main.h b/src/nxt_main.h index 5914fbd1..7f812568 100644 --- a/src/nxt_main.h +++ b/src/nxt_main.h @@ -66,6 +66,7 @@ typedef uint16_t nxt_port_id_t; #include #include +#include /* TODO: remove unused */ diff --git a/src/nxt_mp.c b/src/nxt_mp.c index 5c1a4d00..4eaa16d0 100644 --- a/src/nxt_mp.c +++ b/src/nxt_mp.c @@ -1059,3 +1059,17 @@ nxt_mp_cleanup(nxt_mp_t *mp, nxt_work_handler_t handler, return NXT_OK; } + + +void * +nxt_mp_lvlhsh_alloc(void *pool, size_t size) +{ + return nxt_mp_align(pool, size, size); +} + + +void +nxt_mp_lvlhsh_free(void *pool, void *p) +{ + nxt_mp_free(pool, p); +} diff --git a/src/nxt_mp.h b/src/nxt_mp.h index 53d1f011..a5aaabd1 100644 --- a/src/nxt_mp.h +++ b/src/nxt_mp.h @@ -112,4 +112,8 @@ NXT_EXPORT nxt_int_t nxt_mp_cleanup(nxt_mp_t *mp, nxt_work_handler_t handler, NXT_EXPORT void nxt_mp_thread_adopt(nxt_mp_t *mp); + +NXT_EXPORT void *nxt_mp_lvlhsh_alloc(void *pool, size_t size); +NXT_EXPORT void nxt_mp_lvlhsh_free(void *pool, void *p); + #endif /* _NXT_MP_H_INCLUDED_ */ diff --git a/src/nxt_router.c b/src/nxt_router.c index 0ccf6593..1318eeb4 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -93,7 +93,16 @@ static nxt_int_t nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, u_char *start, u_char *end); static nxt_int_t nxt_router_conf_process_static(nxt_task_t *task, nxt_router_conf_t *rtcf, nxt_conf_value_t *conf); + static nxt_app_t *nxt_router_app_find(nxt_queue_t *queue, nxt_str_t *name); +static nxt_int_t nxt_router_apps_hash_test(nxt_lvlhsh_query_t *lhq, void *data); +static nxt_int_t nxt_router_apps_hash_add(nxt_router_conf_t *rtcf, + nxt_app_t *app); +static nxt_app_t *nxt_router_apps_hash_get(nxt_router_conf_t *rtcf, + nxt_str_t *name); +static void nxt_router_apps_hash_use(nxt_task_t *task, nxt_router_conf_t *rtcf, + int i); + static nxt_int_t nxt_router_app_queue_init(nxt_task_t *task, nxt_port_t *port); static nxt_int_t nxt_router_port_queue_init(nxt_task_t *task, @@ -198,6 +207,7 @@ static nxt_int_t nxt_router_app_shared_port_send(nxt_task_t *task, static void nxt_router_app_port_error(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data); +static void nxt_router_app_use(nxt_task_t *task, nxt_app_t *app, int i); static void nxt_router_app_unlink(nxt_task_t *task, nxt_app_t *app); static void nxt_router_app_port_release(nxt_task_t *task, nxt_port_t *port, @@ -954,6 +964,8 @@ nxt_router_conf_apply(nxt_task_t *task, void *obj, void *data) nxt_router_apps_sort(task, router, tmcf); + nxt_router_apps_hash_use(task, rtcf, 1); + nxt_router_engines_post(router, tmcf); 
nxt_queue_add(&router->sockets, &updating_sockets); @@ -1012,7 +1024,7 @@ nxt_router_conf_ready(nxt_task_t *task, nxt_router_temp_conf_t *tmcf) nxt_debug(task, "rtcf %p: %D", rtcf, count); if (count == 0) { - nxt_http_routes_cleanup(task, rtcf->routes); + nxt_router_apps_hash_use(task, rtcf, -1); nxt_router_access_log_release(task, lock, rtcf->access_log); @@ -1057,16 +1069,6 @@ nxt_router_conf_error(nxt_task_t *task, nxt_router_temp_conf_t *tmcf) rtcf = tmcf->router_conf; - nxt_http_routes_cleanup(task, rtcf->routes); - - nxt_queue_each(skcf, &new_socket_confs, nxt_socket_conf_t, link) { - - if (skcf->action != NULL) { - nxt_http_action_cleanup(task, skcf->action); - } - - } nxt_queue_loop; - nxt_queue_each(app, &tmcf->apps, nxt_app_t, link) { nxt_router_app_unlink(task, app); @@ -1406,6 +1408,12 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_queue_remove(&prev->link); nxt_queue_insert_tail(&tmcf->previous, &prev->link); + + ret = nxt_router_apps_hash_add(tmcf->router_conf, prev); + if (nxt_slow_path(ret != NXT_OK)) { + goto fail; + } + continue; } @@ -1543,6 +1551,11 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, nxt_queue_insert_tail(&tmcf->apps, &app->link); + ret = nxt_router_apps_hash_add(tmcf->router_conf, app); + if (nxt_slow_path(ret != NXT_OK)) { + goto app_fail; + } + nxt_router_app_use(task, app, 1); app->joint = app_joint; @@ -1717,7 +1730,8 @@ nxt_router_conf_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, /* COMPATIBILITY: listener application. */ } else if (lscf.application.length > 0) { - skcf->action = nxt_http_pass_application(task, tmcf, + skcf->action = nxt_http_pass_application(task, + tmcf->router_conf, &lscf.application); } } @@ -1959,20 +1973,106 @@ nxt_router_port_queue_map(nxt_task_t *task, nxt_port_t *port, nxt_fd_t fd) } -void -nxt_router_listener_application(nxt_router_temp_conf_t *tmcf, nxt_str_t *name, +static const nxt_lvlhsh_proto_t nxt_router_apps_hash_proto nxt_aligned(64) = { + NXT_LVLHSH_DEFAULT, + nxt_router_apps_hash_test, + nxt_mp_lvlhsh_alloc, + nxt_mp_lvlhsh_free, +}; + + +static nxt_int_t +nxt_router_apps_hash_test(nxt_lvlhsh_query_t *lhq, void *data) +{ + nxt_app_t *app; + + app = data; + + return nxt_strstr_eq(&lhq->key, &app->name) ? NXT_OK : NXT_DECLINED; +} + + +static nxt_int_t +nxt_router_apps_hash_add(nxt_router_conf_t *rtcf, nxt_app_t *app) +{ + nxt_lvlhsh_query_t lhq; + + lhq.key_hash = nxt_djb_hash(app->name.start, app->name.length); + lhq.replace = 0; + lhq.key = app->name; + lhq.value = app; + lhq.proto = &nxt_router_apps_hash_proto; + lhq.pool = rtcf->mem_pool; + + switch (nxt_lvlhsh_insert(&rtcf->apps_hash, &lhq)) { + + case NXT_OK: + return NXT_OK; + + case NXT_DECLINED: + nxt_thread_log_alert("router app hash adding failed: " + "\"%V\" is already in hash", &lhq.key); + /* Fall through. 
*/ + default: + return NXT_ERROR; + } +} + + +static nxt_app_t * +nxt_router_apps_hash_get(nxt_router_conf_t *rtcf, nxt_str_t *name) +{ + nxt_lvlhsh_query_t lhq; + + lhq.key_hash = nxt_djb_hash(name->start, name->length); + lhq.key = *name; + lhq.proto = &nxt_router_apps_hash_proto; + + if (nxt_lvlhsh_find(&rtcf->apps_hash, &lhq) != NXT_OK) { + return NULL; + } + + return lhq.value; +} + + +static void +nxt_router_apps_hash_use(nxt_task_t *task, nxt_router_conf_t *rtcf, int i) +{ + nxt_app_t *app; + nxt_lvlhsh_each_t lhe; + + nxt_lvlhsh_each_init(&lhe, &nxt_router_apps_hash_proto); + + for ( ;; ) { + app = nxt_lvlhsh_each(&rtcf->apps_hash, &lhe); + + if (app == NULL) { + break; + } + + nxt_router_app_use(task, app, i); + } +} + + + +nxt_int_t +nxt_router_listener_application(nxt_router_conf_t *rtcf, nxt_str_t *name, nxt_http_action_t *action) { nxt_app_t *app; - app = nxt_router_app_find(&tmcf->apps, name); + app = nxt_router_apps_hash_get(rtcf, name); if (app == NULL) { - app = nxt_router_app_find(&tmcf->previous, name); + return NXT_DECLINED; } action->u.application = app; action->handler = nxt_http_application_handler; + + return NXT_OK; } @@ -3201,24 +3301,18 @@ nxt_router_conf_release(nxt_task_t *task, nxt_socket_conf_joint_t *joint) nxt_thread_spin_unlock(lock); - if (skcf != NULL) { - if (skcf->action != NULL) { - nxt_http_action_cleanup(task, skcf->action); - } - #if (NXT_TLS) - if (skcf->tls != NULL) { - task->thread->runtime->tls->server_free(task, skcf->tls); - } -#endif + if (skcf != NULL && skcf->tls != NULL) { + task->thread->runtime->tls->server_free(task, skcf->tls); } +#endif /* TODO remove engine->port */ if (rtcf != NULL) { nxt_debug(task, "old router conf is destroyed"); - nxt_http_routes_cleanup(task, rtcf->routes); + nxt_router_apps_hash_use(task, rtcf, -1); nxt_router_access_log_release(task, lock, rtcf->access_log); @@ -4126,24 +4220,6 @@ nxt_router_app_port_error(nxt_task_t *task, nxt_port_recv_msg_t *msg, } -void -nxt_router_app_use(nxt_task_t *task, nxt_app_t *app, int i) -{ - int c; - - c = nxt_atomic_fetch_add(&app->use_count, i); - - if (i < 0 && c == -i) { - - if (task->thread->engine != app->engine) { - nxt_event_engine_post(app->engine, &app->joint->free_app_work); - - } else { - nxt_router_free_app(task, app->joint, NULL); - } - } -} - nxt_inline nxt_port_t * nxt_router_app_get_port_for_quit(nxt_task_t *task, nxt_app_t *app) @@ -4183,6 +4259,25 @@ nxt_router_app_get_port_for_quit(nxt_task_t *task, nxt_app_t *app) } +static void +nxt_router_app_use(nxt_task_t *task, nxt_app_t *app, int i) +{ + int c; + + c = nxt_atomic_fetch_add(&app->use_count, i); + + if (i < 0 && c == -i) { + + if (task->thread->engine != app->engine) { + nxt_event_engine_post(app->engine, &app->joint->free_app_work); + + } else { + nxt_router_free_app(task, app->joint, NULL); + } + } +} + + static void nxt_router_app_unlink(nxt_task_t *task, nxt_app_t *app) { diff --git a/src/nxt_router.h b/src/nxt_router.h index 0b1147f8..81b3538c 100644 --- a/src/nxt_router.h +++ b/src/nxt_router.h @@ -48,6 +48,7 @@ typedef struct { nxt_upstreams_t *upstreams; nxt_lvlhsh_t mtypes_hash; + nxt_lvlhsh_t apps_hash; nxt_router_access_log_t *access_log; } nxt_router_conf_t; @@ -221,9 +222,8 @@ struct nxt_router_access_log_s { void nxt_router_process_http_request(nxt_task_t *task, nxt_http_request_t *r, nxt_app_t *app); void nxt_router_app_port_close(nxt_task_t *task, nxt_port_t *port); -void nxt_router_listener_application(nxt_router_temp_conf_t *tmcf, +nxt_int_t 
nxt_router_listener_application(nxt_router_conf_t *rtcf, nxt_str_t *name, nxt_http_action_t *action); -void nxt_router_app_use(nxt_task_t *task, nxt_app_t *app, int i); void nxt_router_listen_event_release(nxt_task_t *task, nxt_listen_event_t *lev, nxt_socket_conf_joint_t *joint); void nxt_router_conf_release(nxt_task_t *task, nxt_socket_conf_joint_t *joint); diff --git a/src/nxt_runtime.c b/src/nxt_runtime.c index 5f4b3e58..435276a0 100644 --- a/src/nxt_runtime.c +++ b/src/nxt_runtime.c @@ -124,6 +124,14 @@ nxt_runtime_create(nxt_task_t *task) goto fail; } + if (nxt_slow_path(nxt_http_register_variables() != NXT_OK)) { + goto fail; + } + + if (nxt_slow_path(nxt_var_index_init() != NXT_OK)) { + goto fail; + } + nxt_work_queue_add(&task->thread->engine->fast_work_queue, nxt_runtime_start, task, rt, NULL); diff --git a/src/nxt_runtime.h b/src/nxt_runtime.h index d29b6b4d..0fb8c9a1 100644 --- a/src/nxt_runtime.h +++ b/src/nxt_runtime.h @@ -135,6 +135,8 @@ void nxt_cdecl nxt_log_time_handler(nxt_uint_t level, nxt_log_t *log, void nxt_stream_connection_init(nxt_task_t *task, void *obj, void *data); +nxt_int_t nxt_http_register_variables(void); + #define nxt_runtime_process_each(rt, process) \ do { \ diff --git a/src/nxt_upstream.c b/src/nxt_upstream.c index 66b6619a..c8ecbbe6 100644 --- a/src/nxt_upstream.c +++ b/src/nxt_upstream.c @@ -71,7 +71,7 @@ nxt_upstreams_create(nxt_task_t *task, nxt_router_temp_conf_t *tmcf, } -void +nxt_int_t nxt_upstream_find(nxt_upstreams_t *upstreams, nxt_str_t *name, nxt_http_action_t *action) { @@ -86,9 +86,11 @@ nxt_upstream_find(nxt_upstreams_t *upstreams, nxt_str_t *name, action->u.upstream_number = i; action->handler = nxt_upstream_handler; - return; + return NXT_DECLINED; } } + + return NXT_OK; } diff --git a/src/nxt_var.c b/src/nxt_var.c new file mode 100644 index 00000000..2731fd09 --- /dev/null +++ b/src/nxt_var.c @@ -0,0 +1,616 @@ + +/* + * Copyright (C) NGINX, Inc. 
+ */ + +#include + + +struct nxt_var_s { + size_t plain; + nxt_uint_t vars; + u_char data[]; + +/* + uint32_t indexes[vars]; + size_t positions[vars]; + u_char chars[plain]; +*/ +}; + + +typedef struct { + nxt_var_t *var; + nxt_str_t *value; +} nxt_var_value_t; + + +struct nxt_var_query_s { + nxt_array_t values; /* of nxt_var_value_t */ + nxt_array_t parts; /* of nxt_str_t * */ + + nxt_lvlhsh_t cache; + + nxt_str_t *spare; + nxt_uint_t waiting; + nxt_uint_t failed; /* 1 bit */ + + void *ctx; + void *data; + + nxt_work_handler_t ready; + nxt_work_handler_t error; +}; + + +#define nxt_var_indexes(var) ((uint32_t *) (var)->data) + +#define nxt_var_positions(var) \ + ((size_t *) ((var)->data + (var)->vars * sizeof(uint32_t))) + +#define nxt_var_plain_start(var) \ + ((var)->data + (var)->vars * (sizeof(uint32_t) + sizeof(size_t))) + + +static nxt_int_t nxt_var_hash_test(nxt_lvlhsh_query_t *lhq, void *data); +static nxt_var_decl_t *nxt_var_hash_find(nxt_str_t *name); + +static nxt_int_t nxt_var_cache_test(nxt_lvlhsh_query_t *lhq, void *data); +static nxt_str_t *nxt_var_cache_find(nxt_lvlhsh_t *lh, uint32_t index); +static nxt_int_t nxt_var_cache_add(nxt_lvlhsh_t *lh, uint32_t index, + nxt_str_t *value, nxt_mp_t *mp); + +static u_char *nxt_var_next_part(u_char *start, size_t length, nxt_str_t *part, + nxt_bool_t *is_var); + +static void nxt_var_query_finish(nxt_task_t *task, nxt_var_query_t *query); + + +static const nxt_lvlhsh_proto_t nxt_var_hash_proto nxt_aligned(64) = { + NXT_LVLHSH_DEFAULT, + nxt_var_hash_test, + nxt_lvlhsh_alloc, + nxt_lvlhsh_free, +}; + +static const nxt_lvlhsh_proto_t nxt_var_cache_proto nxt_aligned(64) = { + NXT_LVLHSH_DEFAULT, + nxt_var_cache_test, + nxt_mp_lvlhsh_alloc, + nxt_mp_lvlhsh_free, +}; + + +static nxt_lvlhsh_t nxt_var_hash; +static uint32_t nxt_var_count; + +static nxt_var_handler_t *nxt_var_index; + + +static nxt_int_t +nxt_var_hash_test(nxt_lvlhsh_query_t *lhq, void *data) +{ + nxt_var_decl_t *decl; + + decl = data; + + return nxt_strstr_eq(&lhq->key, &decl->name) ? 
NXT_OK : NXT_DECLINED; +} + + +static nxt_var_decl_t * +nxt_var_hash_find(nxt_str_t *name) +{ + nxt_lvlhsh_query_t lhq; + + lhq.key_hash = nxt_djb_hash(name->start, name->length); + lhq.key = *name; + lhq.proto = &nxt_var_hash_proto; + + if (nxt_lvlhsh_find(&nxt_var_hash, &lhq) != NXT_OK) { + return NULL; + } + + return lhq.value; +} + + +static nxt_int_t +nxt_var_cache_test(nxt_lvlhsh_query_t *lhq, void *data) +{ + return NXT_OK; +} + + +static nxt_str_t * +nxt_var_cache_find(nxt_lvlhsh_t *lh, uint32_t index) +{ + nxt_lvlhsh_query_t lhq; + + lhq.key_hash = nxt_murmur_hash2_uint32(&index); + lhq.key.length = sizeof(uint32_t); + lhq.key.start = (u_char *) &index; + lhq.proto = &nxt_var_cache_proto; + + if (nxt_lvlhsh_find(lh, &lhq) != NXT_OK) { + return NULL; + } + + return lhq.value; +} + + +static nxt_int_t +nxt_var_cache_add(nxt_lvlhsh_t *lh, uint32_t index, nxt_str_t *value, + nxt_mp_t *mp) +{ + nxt_lvlhsh_query_t lhq; + + lhq.key_hash = nxt_murmur_hash2_uint32(&index); + lhq.replace = 0; + lhq.key.length = sizeof(uint32_t); + lhq.key.start = (u_char *) &index; + lhq.value = value; + lhq.proto = &nxt_var_cache_proto; + lhq.pool = mp; + + return nxt_lvlhsh_insert(lh, &lhq); +} + + +nxt_int_t +nxt_var_register(nxt_var_decl_t *decl, size_t n) +{ + nxt_uint_t i; + nxt_lvlhsh_query_t lhq; + + lhq.replace = 0; + lhq.proto = &nxt_var_hash_proto; + + for (i = 0; i < n; i++) { + lhq.key = decl[i].name; + lhq.key_hash = nxt_djb_hash(lhq.key.start, lhq.key.length); + lhq.value = &decl[i]; + + if (nxt_slow_path(nxt_lvlhsh_insert(&nxt_var_hash, &lhq) != NXT_OK)) { + return NXT_ERROR; + } + } + + nxt_var_count += n; + + return NXT_OK; +} + + +nxt_int_t +nxt_var_index_init(void) +{ + nxt_uint_t i; + nxt_var_decl_t *decl; + nxt_var_handler_t *index; + nxt_lvlhsh_each_t lhe; + + index = nxt_memalign(64, nxt_var_count * sizeof(nxt_var_handler_t)); + if (index == NULL) { + return NXT_ERROR; + } + + nxt_lvlhsh_each_init(&lhe, &nxt_var_hash_proto); + + for (i = 0; i < nxt_var_count; i++) { + decl = nxt_lvlhsh_each(&nxt_var_hash, &lhe); + decl->index = i; + index[i] = decl->handler; + } + + nxt_var_index = index; + + return NXT_OK; +} + + +nxt_var_t * +nxt_var_compile(nxt_str_t *str, nxt_mp_t *mp) +{ + u_char *p, *end, *plain_pos; + size_t plain, size, *positions; + uint32_t *indexes; + nxt_var_t *var; + nxt_str_t part; + nxt_uint_t n; + nxt_bool_t is_var; + nxt_var_decl_t *decl; + + plain = 0; + n = 0; + + p = str->start; + end = p + str->length; + + while (p < end) { + p = nxt_var_next_part(p, end - p, &part, &is_var); + + if (nxt_slow_path(p == NULL)) { + return NULL; + } + + if (is_var) { + n++; + + } else { + plain += part.length; + } + } + + size = sizeof(nxt_var_t) + + n * (sizeof(nxt_var_handler_t) + sizeof (size_t)) + + plain; + + var = nxt_mp_get(mp, size); + if (nxt_slow_path(var == NULL)) { + return NULL; + } + + var->plain = plain; + var->vars = n; + + indexes = nxt_var_indexes(var); + positions = nxt_var_positions(var); + plain_pos = nxt_var_plain_start(var); + + plain = 0; + n = 0; + + p = str->start; + + while (p < end) { + p = nxt_var_next_part(p, end - p, &part, &is_var); + + if (is_var) { + decl = nxt_var_hash_find(&part); + + if (nxt_slow_path(decl == NULL)) { + return NULL; + } + + indexes[n] = decl->index; + positions[n] = plain; + + n++; + + } else { + plain_pos = nxt_cpymem(plain_pos, part.start, part.length); + plain += part.length; + } + } + + return var; +} + + +nxt_int_t +nxt_var_test(nxt_str_t *str, u_char *error) +{ + u_char *p, *end, *next; + nxt_str_t part; + nxt_bool_t 
is_var; + nxt_var_decl_t *decl; + + p = str->start; + end = p + str->length; + + while (p < end) { + next = nxt_var_next_part(p, end - p, &part, &is_var); + + if (next == NULL) { + nxt_sprintf(error, error + NXT_MAX_ERROR_STR, + "Invalid variable at position %uz%Z", p - str->start); + + return NXT_ERROR; + } + + if (is_var) { + decl = nxt_var_hash_find(&part); + + if (decl == NULL) { + nxt_sprintf(error, error + NXT_MAX_ERROR_STR, + "Unknown variable \"%V\"%Z", &part); + + return NXT_ERROR; + } + } + + p = next; + } + + return NXT_OK; +} + + +static u_char * +nxt_var_next_part(u_char *start, size_t length, nxt_str_t *part, + nxt_bool_t *is_var) +{ + u_char *p, *end, ch, c; + nxt_bool_t bracket; + + end = start + length; + + p = nxt_memchr(start, '$', length); + + if (p == start) { + *is_var = 1; + + p++; + + if (p == end) { + return NULL; + } + + if (*p == '{') { + bracket = 1; + + if (end - p < 2) { + return NULL; + } + + p++; + + } else { + bracket = 0; + } + + start = p; + + for ( ;; ) { + ch = *p; + + c = (u_char) (ch | 0x20); + if ((c < 'a' || c > 'z') && ch != '_') { + + if (bracket && ch != '}') { + return NULL; + } + + break; + } + + p++; + + if (p == end) { + if (bracket) { + return NULL; + } + + break; + } + } + + length = p - start; + end = p + bracket; + + } else { + *is_var = 0; + + if (p != NULL) { + length = p - start; + end = p; + } + } + + part->length = length; + part->start = start; + + return end; +} + + +nxt_int_t +nxt_var_query_init(nxt_var_query_t **query_p, void *ctx, nxt_mp_t *mp) +{ + nxt_var_query_t *query; + + query = *query_p; + + if (*query_p == NULL) { + query = nxt_mp_zget(mp, sizeof(nxt_var_query_t)); + if (nxt_slow_path(query == NULL)) { + return NXT_ERROR; + } + + nxt_array_init(&query->values, mp, sizeof(nxt_var_value_t)); + nxt_array_init(&query->parts, mp, sizeof(nxt_str_t *)); + + } else { + nxt_array_reset(&query->values); + } + + query->ctx = ctx; + + *query_p = query; + + return NXT_OK; +} + + +void +nxt_var_query(nxt_task_t *task, nxt_var_query_t *query, nxt_var_t *var, + nxt_str_t *str) +{ + uint32_t *indexes; + nxt_mp_t *mp; + nxt_str_t *value; + nxt_int_t ret; + nxt_uint_t i; + nxt_var_value_t *val; + + if (var->vars == 0) { + str->length = var->plain; + str->start = nxt_var_plain_start(var); + return; + } + + if (nxt_slow_path(query->failed)) { + return; + } + + mp = query->values.mem_pool; + indexes = nxt_var_indexes(var); + value = query->spare; + + for (i = 0; i < var->vars; i++) { + + if (value == NULL) { + value = nxt_mp_zget(mp, sizeof(nxt_str_t)); + if (nxt_slow_path(value == NULL)) { + goto fail; + } + } + + ret = nxt_var_cache_add(&query->cache, indexes[i], value, mp); + + if (ret != NXT_OK) { + if (nxt_slow_path(ret == NXT_ERROR)) { + goto fail; + } + + continue; /* NXT_DECLINED */ + } + + ret = nxt_var_index[indexes[i]](task, query, value, query->ctx); + + value = NULL; + + if (ret != NXT_OK) { + if (nxt_slow_path(ret != NXT_AGAIN)) { + goto fail; + } + + query->waiting++; + } + } + + query->spare = value; + + val = nxt_array_add(&query->values); + if (nxt_slow_path(val == NULL)) { + goto fail; + } + + val->var = var; + val->value = str; + + return; + +fail: + + query->failed = 1; +} + + +void +nxt_var_query_resolve(nxt_task_t *task, nxt_var_query_t *query, void *data, + nxt_work_handler_t ready, nxt_work_handler_t error) +{ + query->data = data; + query->ready = ready; + query->error = error; + + if (query->waiting == 0) { + nxt_var_query_finish(task, query); + } +} + + +void +nxt_var_query_handle(nxt_task_t *task, nxt_var_query_t 
*query, + nxt_bool_t failed) +{ + query->failed |= failed; + + if (--query->waiting == 0) { + nxt_var_query_finish(task, query); + } +} + + +static void +nxt_var_query_finish(nxt_task_t *task, nxt_var_query_t *query) +{ + u_char *p, *src; + size_t length, plain, next, *positions; + uint32_t *indexes; + nxt_str_t *str, **part; + nxt_var_t *var; + nxt_uint_t i, j; + nxt_var_value_t *val; + + if (query->failed) { + goto done; + } + + val = query->values.elts; + + for (i = 0; i < query->values.nelts; i++) { + var = val[i].var; + + length = var->plain; + indexes = nxt_var_indexes(var); + + for (j = 0; j < var->vars; j++) { + str = nxt_var_cache_find(&query->cache, indexes[j]); + + nxt_assert(str != NULL); + + part = nxt_array_add(&query->parts); + + if (nxt_slow_path(part == NULL)) { + query->failed = 1; + goto done; + } + + *part = str; + + length += str->length; + } + + p = nxt_mp_nget(query->values.mem_pool, length); + if (nxt_slow_path(p == NULL)) { + query->failed = 1; + goto done; + } + + val[i].value->length = length; + val[i].value->start = p; + + part = query->parts.elts; + positions = nxt_var_positions(var); + src = nxt_var_plain_start(var); + + plain = 0; + + for (j = 0; j < var->vars; j++) { + next = positions[j]; + + if (next != plain) { + p = nxt_cpymem(p, &src[plain], next - plain); + plain = next; + } + + p = nxt_cpymem(p, part[j]->start, part[j]->length); + } + + if (plain != var->plain) { + nxt_memcpy(p, &src[plain], var->plain - plain); + } + + nxt_array_reset(&query->parts); + } + +done: + + nxt_work_queue_add(&task->thread->engine->fast_work_queue, + query->failed ? query->error : query->ready, + task, query->ctx, query->data); +} diff --git a/src/nxt_var.h b/src/nxt_var.h new file mode 100644 index 00000000..7e0a2a21 --- /dev/null +++ b/src/nxt_var.h @@ -0,0 +1,48 @@ + +/* + * Copyright (C) NGINX, Inc. 
+ */ + +#ifndef _NXT_VAR_H_INCLUDED_ +#define _NXT_VAR_H_INCLUDED_ + + +typedef struct nxt_var_s nxt_var_t; +typedef struct nxt_var_query_s nxt_var_query_t; + + +typedef nxt_int_t (*nxt_var_handler_t)(nxt_task_t *task, + nxt_var_query_t *query, + nxt_str_t *str, + void *ctx); + +typedef struct { + nxt_str_t name; + nxt_var_handler_t handler; + uint32_t index; +} nxt_var_decl_t; + + +nxt_inline nxt_bool_t +nxt_is_var(nxt_str_t *str) +{ + return (nxt_memchr(str->start, '$', str->length) != NULL); +} + + +nxt_int_t nxt_var_register(nxt_var_decl_t *decl, size_t n); +nxt_int_t nxt_var_index_init(void); +nxt_var_t *nxt_var_compile(nxt_str_t *str, nxt_mp_t *mp); +nxt_int_t nxt_var_test(nxt_str_t *str, u_char *error); + +nxt_int_t nxt_var_query_init(nxt_var_query_t **query_p, void *ctx, + nxt_mp_t *mp); +void nxt_var_query(nxt_task_t *task, nxt_var_query_t *query, + nxt_var_t *var, nxt_str_t *str); +void nxt_var_query_resolve(nxt_task_t *task, nxt_var_query_t *query, void *data, + nxt_work_handler_t ready, nxt_work_handler_t error); +void nxt_var_query_handle(nxt_task_t *task, nxt_var_query_t *query, + nxt_bool_t failed); + + +#endif /* _NXT_VAR_H_INCLUDED_ */ diff --git a/src/test/nxt_lvlhsh_test.c b/src/test/nxt_lvlhsh_test.c index 2e1e0b20..baa6d0e1 100644 --- a/src/test/nxt_lvlhsh_test.c +++ b/src/test/nxt_lvlhsh_test.c @@ -19,20 +19,6 @@ nxt_lvlhsh_test_key_test(nxt_lvlhsh_query_t *lhq, void *data) } -static void * -nxt_lvlhsh_test_pool_alloc(void *pool, size_t size) -{ - return nxt_mp_align(pool, size, size); -} - - -static void -nxt_lvlhsh_test_pool_free(void *pool, void *p) -{ - nxt_mp_free(pool, p); -} - - static const nxt_lvlhsh_proto_t malloc_proto nxt_aligned(64) = { //NXT_LVLHSH_LARGE_MEMALIGN, NXT_LVLHSH_DEFAULT, @@ -44,8 +30,8 @@ static const nxt_lvlhsh_proto_t malloc_proto nxt_aligned(64) = { static const nxt_lvlhsh_proto_t pool_proto nxt_aligned(64) = { NXT_LVLHSH_LARGE_SLAB, nxt_lvlhsh_test_key_test, - nxt_lvlhsh_test_pool_alloc, - nxt_lvlhsh_test_pool_free, + nxt_mp_lvlhsh_alloc, + nxt_mp_lvlhsh_free, }; -- cgit From a58f224e26cf3bcb79adbd69ccc2c67733fe372f Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Thu, 13 Aug 2020 03:45:54 +0300 Subject: Fixed typo in return value check. Found by Coverity (CID 361277). --- src/nxt_http_route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nxt_http_route.c b/src/nxt_http_route.c index 346629c3..0b2103cd 100644 --- a/src/nxt_http_route.c +++ b/src/nxt_http_route.c @@ -1294,7 +1294,7 @@ nxt_http_route_pattern_slice(nxt_array_t *slices, } slice = nxt_array_add(slices); - if (nxt_slow_path(slices == NULL)) { + if (nxt_slow_path(slice == NULL)) { return NXT_ERROR; } -- cgit From b9ed3384cb620618c2622e387d1770bb8c43ff13 Mon Sep 17 00:00:00 2001 From: Tiago Natel de Moura Date: Thu, 13 Aug 2020 12:25:52 +0100 Subject: Fixed error handling of prefork callback. Previously, an error during the prefork phase triggered assert: src/nxt_port.c:27 assertion failed: port->pair[0] == -1 and resulted in exiting of the main process. This could be easily reproduced by pushing a configuration with "rootfs", when daemon is running without required permissions. 
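The diff below restructures the error path of nxt_main_start_process() into ordered cleanup labels, so that each label releases only what was acquired before the failure point and the success path drops just its port reference. As a rough illustration of that pattern only — plain C with malloc()/free() standing in for Unit's port and temporary memory pool; none of these helper names come from the patch:

    /* Simplified sketch of the cleanup-label pattern; not Unit source. */
    #include <stdlib.h>

    static int
    start_process_sketch(int prefork_fails)
    {
        int   ret = -1;
        char  *port, *pool;

        port = malloc(16);              /* stands in for the port pair */
        if (port == NULL) {
            return -1;
        }

        pool = malloc(64);              /* stands in for the temp mem pool */
        if (pool == NULL) {
            goto close_port;
        }

        if (prefork_fails) {            /* stands in for init->prefork() */
            goto free_mempool;
        }

        ret = 0;                        /* fork and port setup would go here */

    free_mempool:

        free(pool);                     /* destroyed unconditionally once created */

    close_port:

        if (ret != 0) {
            /* close the socket pair only on error, as nxt_port_close()
               does in the patch; the success path keeps it open */
        }

        /* "free_port": drop the last local reference on every path,
           as nxt_port_use(task, port, -1) does in the patch */
        free(port);

        return ret;
    }

    int
    main(void)
    {
        return start_process_sketch(0);
    }

The labels mirror the free_mempool/close_port/free_port chain introduced below: the memory pool is always destroyed once created, the port is closed only when the result ends up as NXT_ERROR, and the final reference drop runs on every path.
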
--- src/nxt_main_process.c | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/src/nxt_main_process.c b/src/nxt_main_process.c index a16e44d3..48eb2abb 100644 --- a/src/nxt_main_process.c +++ b/src/nxt_main_process.c @@ -605,25 +605,22 @@ nxt_main_start_process(nxt_task_t *task, nxt_process_t *process) nxt_process_port_add(task, process, port); - nxt_process_use(task, process, -1); - - ret = NXT_ERROR; - tmp_mp = NULL; - ret = nxt_port_socket_init(task, port, 0); if (nxt_slow_path(ret != NXT_OK)) { - goto fail; + goto free_port; } tmp_mp = nxt_mp_create(1024, 128, 256, 32); - if (tmp_mp == NULL) { - goto fail; + if (nxt_slow_path(tmp_mp == NULL)) { + ret = NXT_ERROR; + + goto close_port; } if (init->prefork) { ret = init->prefork(task, process, tmp_mp); if (nxt_slow_path(ret != NXT_OK)) { - goto fail; + goto free_mempool; } } @@ -632,18 +629,22 @@ nxt_main_start_process(nxt_task_t *task, nxt_process_t *process) switch (pid) { case -1: - nxt_port_close(task, port); + ret = NXT_ERROR; break; case 0: /* The child process: return to the event engine work queue loop. */ + nxt_process_use(task, process, -1); + ret = NXT_AGAIN; break; default: /* The main process created a new process. */ + nxt_process_use(task, process, -1); + nxt_port_read_close(port); nxt_port_write_enable(task, port); @@ -651,14 +652,20 @@ nxt_main_start_process(nxt_task_t *task, nxt_process_t *process) break; } -fail: +free_mempool: - nxt_port_use(task, port, -1); + nxt_mp_destroy(tmp_mp); + +close_port: - if (nxt_fast_path(tmp_mp != NULL)) { - nxt_mp_destroy(tmp_mp); + if (nxt_slow_path(ret == NXT_ERROR)) { + nxt_port_close(task, port); } +free_port: + + nxt_port_use(task, port, -1); + return ret; } -- cgit From b04b5ce430ef055a7552b9fc451ca23f7d5effb3 Mon Sep 17 00:00:00 2001 From: Max Romanov Date: Thu, 13 Aug 2020 16:08:38 +0300 Subject: Fixing router assertion in result of application prefork error. Buffer for application prefork request allocated from temp conf mem_pool. If error response from main process received before buffer completion handler, temp conf mem_pool destroyed and router may crash in completion handler. Assertion "src/nxt_buf.c:208 assertion failed: data == b->parent" triggered when NXT_DEBUG_ALLOC enabled in configure. This patch disables completion handler and memory allocated for buffer released with memory pool. 
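The hunks below add the declaration of nxt_router_dummy_buf_completion() and install it on the listen-socket, application, and access-log request buffers, but its body is not part of this excerpt; presumably it is simply a no-op (an assumption, not taken from the patch), so a completion callback that fires late touches nothing after the temporary configuration pool is gone:

    /* Assumed shape of the dummy completion handler; the definition is not
     * shown in the hunks below.  Relies on Unit's internal headers for
     * nxt_task_t. */
    static void
    nxt_router_dummy_buf_completion(nxt_task_t *task, void *obj, void *data)
    {
        /* Intentionally empty: the buffer memory is released together with
           the temporary configuration memory pool it was allocated from. */
    }

With the handler neutralized this way, the default completion logic that tripped the "data == b->parent" assertion can no longer run against an already destroyed pool.
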
--- src/nxt_router.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/nxt_router.c b/src/nxt_router.c index 1318eeb4..0e1de6fa 100644 --- a/src/nxt_router.c +++ b/src/nxt_router.c @@ -219,6 +219,8 @@ static void nxt_router_http_request_error(nxt_task_t *task, void *obj, static void nxt_router_http_request_done(nxt_task_t *task, void *obj, void *data); +static void nxt_router_dummy_buf_completion(nxt_task_t *task, void *obj, + void *data); static void nxt_router_app_prepare_request(nxt_task_t *task, nxt_request_rpc_data_t *req_rpc_data); static nxt_buf_t *nxt_router_prepare_msg(nxt_task_t *task, @@ -2218,6 +2220,8 @@ nxt_router_listen_socket_rpc_create(nxt_task_t *task, goto fail; } + b->completion_handler = nxt_router_dummy_buf_completion; + b->mem.free = nxt_cpymem(b->mem.free, skcf->listen->sockaddr, size); rt = task->thread->runtime; @@ -2446,6 +2450,8 @@ nxt_router_app_rpc_create(nxt_task_t *task, goto fail; } + b->completion_handler = nxt_router_dummy_buf_completion; + nxt_buf_cpystr(b, &app->name); *b->mem.free++ = '\0'; nxt_buf_cpystr(b, &app->conf); @@ -3469,6 +3475,8 @@ nxt_router_access_log_open(nxt_task_t *task, nxt_router_temp_conf_t *tmcf) goto fail; } + b->completion_handler = nxt_router_dummy_buf_completion; + nxt_buf_cpystr(b, &access_log->path); *b->mem.free++ = '\0'; -- cgit From c40e45344fa606984910a7e47db2b2f881cb963c Mon Sep 17 00:00:00 2001 From: Andrei Zeliankou Date: Thu, 13 Aug 2020 13:17:01 +0100 Subject: Tests: added variables tests. --- test/test_variables.py | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 test/test_variables.py diff --git a/test/test_variables.py b/test/test_variables.py new file mode 100644 index 00000000..805c5144 --- /dev/null +++ b/test/test_variables.py @@ -0,0 +1,94 @@ +from unit.applications.proto import TestApplicationProto + + +class TestVariables(TestApplicationProto): + prerequisites = {} + + def setUp(self): + super().setUp() + + self.assertIn( + 'success', + self.conf( + { + "listeners": {"*:7080": {"pass": "routes/$method"}}, + "routes": { + "GET": [{"action": {"return": 201}}], + "POST": [{"action": {"return": 202}}], + "3": [{"action": {"return": 203}}], + "4": [{"action": {"return": 204}}], + "blahGET}": [{"action": {"return": 205}}], + "5GET": [{"action": {"return": 206}}], + "GETGET": [{"action": {"return": 207}}], + }, + }, + ), + 'configure routes', + ) + + def conf_routes(self, routes): + self.assertIn( + 'success', + self.conf(routes, 'listeners/*:7080/pass') + ) + + def test_variables_method(self): + self.assertEqual(self.get()['status'], 201, 'method GET') + self.assertEqual(self.post()['status'], 202, 'method POST') + + def test_variables_uri(self): + self.conf_routes("\"routes$uri\"") + + self.assertEqual(self.get(url='/3')['status'], 203, 'uri') + self.assertEqual(self.get(url='/4')['status'], 204, 'uri 2') + + def test_variables_many(self): + self.conf_routes("\"routes$uri$method\"") + self.assertEqual(self.get(url='/5')['status'], 206, 'many') + + self.conf_routes("\"routes${uri}${method}\"") + self.assertEqual(self.get(url='/5')['status'], 206, 'many 2') + + self.conf_routes("\"routes${uri}$method\"") + self.assertEqual(self.get(url='/5')['status'], 206, 'many 3') + + self.conf_routes("\"routes/$method$method\"") + self.assertEqual(self.get()['status'], 207, 'many 4') + + self.conf_routes("\"routes/$method$uri\"") + self.assertEqual(self.get()['status'], 404, 'no route') + self.assertEqual(self.get(url='/blah')['status'], 404, 'no route 2') + 
+ def test_variables_replace(self): + self.assertEqual(self.get()['status'], 201) + + self.conf_routes("\"routes$uri\"") + self.assertEqual(self.get(url='/3')['status'], 203) + + self.conf_routes("\"routes/${method}\"") + self.assertEqual(self.post()['status'], 202) + + self.conf_routes("\"routes${uri}\"") + self.assertEqual(self.get(url='/4')['status'], 204) + + self.conf_routes("\"routes/blah$method}\"") + self.assertEqual(self.get()['status'], 205) + + def test_variables_invalid(self): + def check_variables(routes): + self.assertIn( + 'error', + self.conf(routes, 'listeners/*:7080/pass'), + 'invalid variables', + ) + + check_variables("\"routes$\"") + check_variables("\"routes${\"") + check_variables("\"routes${}\"") + check_variables("\"routes$ur\"") + check_variables("\"routes$uriblah\"") + check_variables("\"routes${uri\"") + check_variables("\"routes${{uri}\"") + +if __name__ == '__main__': + TestVariables.main() -- cgit From 8032686a57c739d673e5554ba8517a4076f9e742 Mon Sep 17 00:00:00 2001 From: Andrei Zeliankou Date: Thu, 13 Aug 2020 13:17:27 +0100 Subject: Tests: added test with error on loading application. --- test/test_python_application.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/test_python_application.py b/test/test_python_application.py index 8bd3f750..4b8983ff 100644 --- a/test/test_python_application.py +++ b/test/test_python_application.py @@ -579,6 +579,17 @@ last line: 987654321 self.assertEqual(self.get()['status'], 500, 'syntax error') + def test_python_application_loading_error(self): + self.skip_alerts.append(r'Python failed to import module "blah"') + + self.load('empty') + + self.assertIn( + 'success', self.conf('"blah"', 'applications/empty/module'), + ) + + self.assertEqual(self.get()['status'], 503, 'loading error') + def test_python_application_close(self): self.load('close') -- cgit From 479fdff39d3d213c6067ca34ef3b7476bd2b7826 Mon Sep 17 00:00:00 2001 From: Tiago Natel de Moura Date: Thu, 13 Aug 2020 12:17:15 +0100 Subject: Tests: set root by unprivilaged user. --- test/test_configuration.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/test/test_configuration.py b/test/test_configuration.py index fec1b3dc..0b0c9c78 100644 --- a/test/test_configuration.py +++ b/test/test_configuration.py @@ -409,6 +409,33 @@ class TestConfiguration(TestControl): self.assertIn('success', self.conf(conf)) + def test_unprivileged_user_error(self): + self.skip_alerts.extend( + [ + r'cannot set user "root"', + r'failed to apply new conf', + ] + ) + if self.is_su: + print('unprivileged tests, skip this') + raise unittest.SkipTest() + + self.assertIn( + 'error', + self.conf( + { + "app": { + "type": "external", + "processes": 1, + "executable": "/app", + "user": "root", + } + }, + 'applications', + ), + 'setting user', + ) + if __name__ == '__main__': TestConfiguration.main() -- cgit From b8c7bc5a47c422cf6e3147e567bd386bcfdd46a3 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Thu, 13 Aug 2020 19:22:41 +0300 Subject: Added version 1.19.0 CHANGES. --- CHANGES | 38 ++++++++++++++++++ docs/changes.xml | 118 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+) diff --git a/CHANGES b/CHANGES index 5e49979f..236371e1 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,42 @@ +Changes with Unit 1.19.0 13 Aug 2020 + + *) Feature: reworked IPC between the router process and the applications + to lower latencies, increase performance, and improve scalability. 
+ + *) Feature: support for an arbitrary number of wildcards in route + matching patterns. + + *) Feature: chunked transfer encoding in proxy responses. + + *) Feature: basic variables support in the "pass" option. + + *) Feature: compatibility with PHP 8 Beta 1. Thanks to Remi Collet. + + *) Bugfix: the router process could crash while passing requests to an + application under high load. + + *) Bugfix: a number of language modules failed to build on some systems; + the bug had appeared in 1.18.0. + + *) Bugfix: time in error log messages from PHP applications could lag. + + *) Bugfix: reconfiguration requests could hang if an application had + failed to start; the bug had appeared in 1.18.0. + + *) Bugfix: memory leak during reconfiguration. + + *) Bugfix: the daemon didn't start without language modules; the bug had + appeared in 1.18.0. + + *) Bugfix: the router process could crash at exit. + + *) Bugfix: Node.js applications could crash at exit. + + *) Bugfix: the Ruby module could be linked against a wrong library + version. + + Changes with Unit 1.18.0 28 May 2020 *) Feature: the "rootfs" isolation option for changing root filesystem diff --git a/docs/changes.xml b/docs/changes.xml index f3ec7518..b3b8a201 100644 --- a/docs/changes.xml +++ b/docs/changes.xml @@ -5,6 +5,124 @@ + + + + +NGINX Unit updated to 1.19.0. + + + + + + + + + + +reworked IPC between the router process and the applications to lower latencies, +increase performance, and improve scalability. + + + + + +support for an arbitrary number of wildcards in route matching patterns. + + + + + +chunked transfer encoding in proxy responses. + + + + + +basic variables support in the "pass" option. + + + + + +compatibility with PHP 8 Beta 1. +Thanks to Remi Collet. + + + + + +the router process could crash while passing requests to an application under +high load. + + + + + +a number of language modules failed to build on some systems; +the bug had appeared in 1.18.0. + + + + + +time in error log messages from PHP applications could lag. + + + + + +reconfiguration requests could hang if an application had failed to start; +the bug had appeared in 1.18.0. + + + + + +memory leak during reconfiguration. + + + + + +the daemon didn't start without language modules; +the bug had appeared in 1.18.0. + + + + + +the router process could crash at exit. + + + + + +Node.js applications could crash at exit. + + + + + +the Ruby module could be linked against a wrong library version. 
+ + + + + + " -ENV UNIT_VERSION 1.18.0-1~buster +ENV UNIT_VERSION 1.19.0-1~buster RUN set -x \ && apt-get update \ diff --git a/pkg/docker/Dockerfile.go1.11-dev b/pkg/docker/Dockerfile.go1.11-dev index ee51f83e..7c3f234e 100644 --- a/pkg/docker/Dockerfile.go1.11-dev +++ b/pkg/docker/Dockerfile.go1.11-dev @@ -2,7 +2,7 @@ FROM debian:buster-slim LABEL maintainer="NGINX Docker Maintainers " -ENV UNIT_VERSION 1.18.0-1~buster +ENV UNIT_VERSION 1.19.0-1~buster RUN set -x \ && apt-get update \ diff --git a/pkg/docker/Dockerfile.minimal b/pkg/docker/Dockerfile.minimal index 08be78bc..48f1864c 100644 --- a/pkg/docker/Dockerfile.minimal +++ b/pkg/docker/Dockerfile.minimal @@ -2,7 +2,7 @@ FROM debian:buster-slim LABEL maintainer="NGINX Docker Maintainers " -ENV UNIT_VERSION 1.18.0-1~buster +ENV UNIT_VERSION 1.19.0-1~buster RUN set -x \ && apt-get update \ diff --git a/pkg/docker/Dockerfile.perl5.28 b/pkg/docker/Dockerfile.perl5.28 index a8f84c17..bff0ba0c 100644 --- a/pkg/docker/Dockerfile.perl5.28 +++ b/pkg/docker/Dockerfile.perl5.28 @@ -2,7 +2,7 @@ FROM debian:buster-slim LABEL maintainer="NGINX Docker Maintainers " -ENV UNIT_VERSION 1.18.0-1~buster +ENV UNIT_VERSION 1.19.0-1~buster RUN set -x \ && apt-get update \ diff --git a/pkg/docker/Dockerfile.php7.3 b/pkg/docker/Dockerfile.php7.3 index fe60cf44..832baa5d 100644 --- a/pkg/docker/Dockerfile.php7.3 +++ b/pkg/docker/Dockerfile.php7.3 @@ -2,7 +2,7 @@ FROM debian:buster-slim LABEL maintainer="NGINX Docker Maintainers " -ENV UNIT_VERSION 1.18.0-1~buster +ENV UNIT_VERSION 1.19.0-1~buster RUN set -x \ && apt-get update \ diff --git a/pkg/docker/Dockerfile.python2.7 b/pkg/docker/Dockerfile.python2.7 index f80ca098..85f0add6 100644 --- a/pkg/docker/Dockerfile.python2.7 +++ b/pkg/docker/Dockerfile.python2.7 @@ -2,7 +2,7 @@ FROM debian:buster-slim LABEL maintainer="NGINX Docker Maintainers " -ENV UNIT_VERSION 1.18.0-1~buster +ENV UNIT_VERSION 1.19.0-1~buster RUN set -x \ && apt-get update \ diff --git a/pkg/docker/Dockerfile.python3.7 b/pkg/docker/Dockerfile.python3.7 index 8d5b4d9b..cefd15c1 100644 --- a/pkg/docker/Dockerfile.python3.7 +++ b/pkg/docker/Dockerfile.python3.7 @@ -2,7 +2,7 @@ FROM debian:buster-slim LABEL maintainer="NGINX Docker Maintainers " -ENV UNIT_VERSION 1.18.0-1~buster +ENV UNIT_VERSION 1.19.0-1~buster RUN set -x \ && apt-get update \ diff --git a/pkg/docker/Dockerfile.ruby2.5 b/pkg/docker/Dockerfile.ruby2.5 index 365d71ac..36f9594f 100644 --- a/pkg/docker/Dockerfile.ruby2.5 +++ b/pkg/docker/Dockerfile.ruby2.5 @@ -2,7 +2,7 @@ FROM debian:buster-slim LABEL maintainer="NGINX Docker Maintainers " -ENV UNIT_VERSION 1.18.0-1~buster +ENV UNIT_VERSION 1.19.0-1~buster RUN set -x \ && apt-get update \ -- cgit From 6473d4b65a99aa10d509220fb99d8c4f65631ed0 Mon Sep 17 00:00:00 2001 From: Valentin Bartenev Date: Thu, 13 Aug 2020 19:22:57 +0300 Subject: Added tag 1.19.0 for changeset 86cdf66f8274 --- .hgtags | 1 + 1 file changed, 1 insertion(+) diff --git a/.hgtags b/.hgtags index f123c0f6..a75d1a97 100644 --- a/.hgtags +++ b/.hgtags @@ -25,3 +25,4 @@ b391df5f0102aa6afe660cfc863729c1b1111c9e 1.12.0 8bab088952dd9d7caa3d04fd4b3026cef26fcf7d 1.16.0 4b13438632bc37ca599113be90af64f6e2f09d83 1.17.0 9e14c63773be52613dd47dea9fd113037f15a3eb 1.18.0 +86cdf66f82745d8db35345368dcdb38c79a4f03a 1.19.0 -- cgit
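
Taken together, the nxt_var patches above introduce a small template engine: nxt_var_register() and nxt_var_index_init() set up the known variables, nxt_var_compile() parses a string such as "routes/$method" into an nxt_var_t, and nxt_var_query() plus nxt_var_query_resolve() fill in the values per request, memoizing each resolved variable in the query's level-hash cache by index. A rough caller-side sketch against the nxt_var.h prototypes above — the demo_* names, the handler body, and the use of Unit's nxt_string() helper are illustrative assumptions, not code from these patches:

    /* Illustrative only: register one variable and compile a template with
     * the nxt_var API added above.  Assumes Unit's internal headers for
     * nxt_str_t, nxt_mp_t, NXT_OK/NXT_ERROR and the nxt_string() macro. */

    static nxt_int_t
    demo_method_var(nxt_task_t *task, nxt_var_query_t *query, nxt_str_t *str,
        void *ctx)
    {
        static nxt_str_t  method = nxt_string("GET");

        /* A real handler would take the value from the request in ctx. */
        *str = method;

        return NXT_OK;
    }


    static nxt_var_decl_t  demo_vars[] = {
        { nxt_string("method"), &demo_method_var, 0 },
    };


    static nxt_int_t
    demo_var_setup(nxt_mp_t *mp)
    {
        nxt_str_t  tmpl = nxt_string("routes/$method");
        nxt_var_t  *var;

        if (nxt_var_register(demo_vars, 1) != NXT_OK
            || nxt_var_index_init() != NXT_OK)
        {
            return NXT_ERROR;
        }

        /* The compiled template is resolved later, per request, with
           nxt_var_query() followed by nxt_var_query_resolve(). */
        var = nxt_var_compile(&tmpl, mp);

        return (var != NULL) ? NXT_OK : NXT_ERROR;
    }

This is what the test_variables.py cases above exercise through the configuration: a "pass" value such as "routes/$method" or "routes${uri}${method}" is compiled once and resolved against each request, while nxt_var_test() is what rejects the invalid forms like "routes${" at configuration time.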