# optimizations
#NGX_GCC_OPT="-O2"
-#NGX_GCC_OPT="-Os"
-NGX_GCC_OPT="-O"
+NGX_GCC_OPT="-Os"
+#NGX_GCC_OPT="-O"
#CFLAGS="$CFLAGS -fomit-frame-pointer"
|| cp conf/nginx.conf '\$(DESTDIR)$NGX_CONF_PATH'
cp conf/nginx.conf '\$(DESTDIR)$NGX_CONF_PREFIX/nginx.conf.default'
+ test -f '\$(DESTDIR)$NGX_CONF_PREFIX/f-stack.conf' \\
+ || cp conf/f-stack.conf '\$(DESTDIR)$NGX_CONF_PREFIX'
+
test -d '\$(DESTDIR)`dirname "$NGX_PID_PATH"`' \\
|| mkdir -p '\$(DESTDIR)`dirname "$NGX_PID_PATH"`'
ngx_use_pch=`echo $NGX_USE_PCH | sed -e "s/\//$ngx_regex_dirsep/g"`
+if [ -z "$FF_PATH" ]; then
+ FF_PATH=/usr/local
+ echo "FF_PATH environment variable not defined, default FF_PATH=$FF_PATH"
+fi
+
+if ! pkg-config --exists libdpdk; then
+    echo "error: no installation of DPDK found, maybe you should export the \`PKG_CONFIG_PATH\` environment variable"
+ exit 1
+fi
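+
+# Example environment for building with F-Stack (paths are illustrative,
+# adjust them to your own installation):
+#   export FF_PATH=/data/f-stack
+#   export PKG_CONFIG_PATH=/usr/lib64/pkgconfig:/usr/local/lib64/pkgconfig
+#   ./configure --with-ff_module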
+
+CORE_LIBS="$CORE_LIBS \$(shell pkg-config --static --libs libdpdk)"
+CORE_LIBS="$CORE_LIBS -L$FF_PATH/lib -Wl,--whole-archive,-lfstack,--no-whole-archive"
+CORE_LIBS="$CORE_LIBS -Wl,--no-whole-archive -lrt -lm -ldl -lcrypto -lpthread -lnuma"
+
cat << END > $NGX_MAKEFILE
CC = $CC
CPP = $CPP
LINK = $LINK
+CFLAGS += -I$FF_PATH/lib
+
END
CORE_SRCS="$CORE_SRCS $SOLARIS_SENDFILEV_SRCS"
fi
+if [ $USE_FSTACK = YES ]; then
+ have=NGX_HAVE_FSTACK . auto/have
+ have=NGX_HAVE_FSTACK . auto/have_headers
+ have=SOCK_FSTACK value=0x1000 . auto/define
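+    # F-Stack exposes a kqueue/kevent-compatible API (wrapped by ngx_ff_module.c),
+    # so the kqueue event module sources, extended in this patch with the
+    # ngx_ff_* modules, are reused here.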
+ CORE_SRCS="$CORE_SRCS $KQUEUE_SRCS"
+ EVENT_MODULES="$EVENT_MODULES $KQUEUE_MODULE"
+fi
if [ $HTTP = YES ]; then
HTTP_MODULES=
NGX_POST_CONF_MSG=
+USE_FSTACK=NO
+
opt=
for option
--with-poll_module) EVENT_POLL=YES ;;
--without-poll_module) EVENT_POLL=NONE ;;
+ --with-ff_module) USE_FSTACK=YES ;;
+
--with-threads) USE_THREADS=YES ;;
--with-file-aio) NGX_FILE_AIO=YES ;;
--with-poll_module enable poll module
--without-poll_module disable poll module
+ --with-ff_module enable F-Stack module
+
--with-threads enable thread pool support
--with-file-aio enable file AIO support
ee.events = EPOLLIN|EPOLLEXCLUSIVE;
ee.data.ptr = NULL;
epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ee)"
- . auto/feature
+ #. auto/feature
# eventfd()
attr.map_type = BPF_MAP_TYPE_SOCKHASH;
syscall(__NR_bpf, 0, &attr, 0);"
-. auto/feature
-
-if [ $ngx_found = yes ]; then
- CORE_SRCS="$CORE_SRCS src/core/ngx_bpf.c"
- CORE_DEPS="$CORE_DEPS src/core/ngx_bpf.h"
-
- if [ $QUIC_BPF != NONE ]; then
- QUIC_BPF=YES
- fi
-fi
+#. auto/feature
+#
+#if [ $ngx_found = yes ]; then
+# CORE_SRCS="$CORE_SRCS src/core/ngx_bpf.c"
+# CORE_DEPS="$CORE_DEPS src/core/ngx_bpf.h"
+#
+# if [ $QUIC_BPF != NONE ]; then
+# QUIC_BPF=YES
+# fi
+#fi
ngx_feature="SO_COOKIE"
ngx_feature_test="socklen_t optlen = sizeof(uint64_t);
uint64_t cookie;
getsockopt(0, SOL_SOCKET, SO_COOKIE, &cookie, &optlen)"
-. auto/feature
-
-if [ $ngx_found = yes ]; then
- SO_COOKIE_FOUND=YES
-fi
+#. auto/feature
+#
+#if [ $ngx_found = yes ]; then
+# SO_COOKIE_FOUND=YES
+#fi
# UDP segmentation offloading
ngx_feature_test="socklen_t optlen = sizeof(int);
int val;
getsockopt(0, SOL_UDP, UDP_SEGMENT, &val, &optlen)"
-. auto/feature
+#. auto/feature
CC_AUX_FLAGS="$cc_aux_flags -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64"
POLL_SRCS=src/event/modules/ngx_poll_module.c
WIN32_POLL_SRCS=src/event/modules/ngx_win32_poll_module.c
-KQUEUE_MODULE=ngx_kqueue_module
-KQUEUE_SRCS=src/event/modules/ngx_kqueue_module.c
+KQUEUE_MODULE="ngx_kqueue_module ngx_ff_host_event_module"
+KQUEUE_SRCS="src/event/modules/ngx_kqueue_module.c src/event/modules/ngx_ff_module.c src/event/modules/ngx_ff_host_event_module.c"
DEVPOLL_MODULE=ngx_devpoll_module
DEVPOLL_SRCS=src/event/modules/ngx_devpoll_module.c
ngx_feature_path=
ngx_feature_libs=
ngx_feature_test="setsockopt(0, IPPROTO_IP, IP_RECVDSTADDR, NULL, 0)"
-. auto/feature
+#. auto/feature
+have=NGX_HAVE_IP_RECVDSTADDR . auto/have
+have=IP_RECVDSTADDR value=7 . auto/define
# BSD way to set IPv4 datagram source address
ngx_feature_path=
ngx_feature_libs=
ngx_feature_test="setsockopt(0, IPPROTO_IP, IP_SENDSRCADDR, NULL, 0)"
-. auto/feature
+#. auto/feature
+have=NGX_HAVE_IP_SENDSRCADDR . auto/have
+have=IP_SENDSRCADDR value=7 . auto/define
# Linux way to get IPv4 datagram destination address
pkt.ipi_spec_dst.s_addr = INADDR_ANY;
(void) pkt;
setsockopt(0, IPPROTO_IP, IP_PKTINFO, NULL, 0)"
-. auto/feature
+#. auto/feature
# RFC 3542 way to get IPv6 datagram destination address
ngx_feature_libs=
ngx_feature_test="(void) IP_PMTUDISC_DO;
setsockopt(0, IPPROTO_IP, IP_MTU_DISCOVER, NULL, 0)"
-. auto/feature
+#. auto/feature
ngx_feature="IPV6_MTU_DISCOVER"
ngx_feature_libs=
ngx_feature_test="(void) IPV6_PMTUDISC_DO;
setsockopt(0, IPPROTO_IPV6, IPV6_MTU_DISCOVER, NULL, 0)"
-. auto/feature
+#. auto/feature
ngx_feature="IP_DONTFRAG"
ngx_feature_path=
ngx_feature_libs=
ngx_feature_test="setsockopt(0, IPPROTO_IP, IP_DONTFRAG, NULL, 0)"
-. auto/feature
+#. auto/feature
+have=NGX_HAVE_IP_DONTFRAG . auto/have
+have=IP_DONTFRAG value=67 . auto/define
ngx_feature="IPV6_DONTFRAG"
ngx_feature_path=
ngx_feature_libs=
ngx_feature_test="setsockopt(0, IPPROTO_IP, IPV6_DONTFRAG, NULL, 0)"
-. auto/feature
+#. auto/feature
+have=NGX_HAVE_IPV6_DONTFRAG . auto/have
+have=IPV6_DONTFRAG value=62 . auto/define
ngx_feature="TCP_DEFER_ACCEPT"
ngx_feature_path=
ngx_feature_libs=
ngx_feature_test="accept4(0, NULL, NULL, SOCK_NONBLOCK)"
-. auto/feature
+#. auto/feature
if [ $NGX_FILE_AIO = YES ]; then
--- /dev/null
+[dpdk]
+# Hexadecimal bitmask of cores to run on.
+lcore_mask=1
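+# e.g. (illustrative) lcore_mask=3 enables cores 0 and 1, lcore_mask=ff enables
+# cores 0-7, and lcore_mask=c enables cores 2 and 3.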
+
+# Number of memory channels.
+channel=4
+
+# Specify base virtual address to map.
+#base_virtaddr=0x7f0000000000
+
+# Promiscuous mode of NIC, default: enabled.
+promiscuous=1
+numa_on=1
+
+# TX checksum offload skip, default: disabled.
+# We need this switch enabled in the following cases:
+# -> The application wants to enforce a wrong checksum for testing purposes.
+# -> Some cards advertise the offload capability but do not actually calculate the checksum.
+tx_csum_offoad_skip=0
+
+# TCP segmentation offload (TSO), default: disabled.
+tso=0
+
+# HW vlan strip, default: enabled.
+vlan_strip=1
+
+# Sleep when there are no incoming packets.
+# unit: microseconds
+idle_sleep=0
+
+# TX packet delay time (0-100) when fewer than 32 packets are queued to send.
+# default: 100 us.
+# If set to 0, packets are sent immediately.
+# If set to more than 100, the delay is capped at 100 us.
+# unit: microseconds
+pkt_tx_delay=100
+
+# use symmetric Receive-side Scaling(RSS) key, default: disabled.
+symmetric_rss=0
+
+# enabled port list
+#
+# EBNF grammar:
+#
+# exp ::= num_list {"," num_list}
+# num_list ::= <num> | <range>
+# range ::= <num>"-"<num>
+# num ::= '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9'
+#
+# examples
+# 0-3 ports 0,1,2,3 are enabled
+# 1-3,4,7 ports 1,2,3,4,7 are enabled
+#
+# If bonding is used, configure the bonding port id in port_list,
+# and do not configure the slave port ids in port_list.
+# For example, if port 0 and port 1 are trunked into bonding port 2,
+# set `port_list=2` and configure a `[port2]` section.
+
+port_list=0
+
+# Number of vdev.
+nb_vdev=0
+
+# Number of bond.
+nb_bond=0
+
+# Each core writes to its own pcap file, which is opened once and closed once it is full.
+# Supports dumping the first snaplen bytes of each packet.
+# If a pcap file grows larger than savelen bytes, it is closed and dumping continues in the next file.
+[pcap]
+enable = 0
+snaplen= 96
+savelen= 16777216
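+# e.g. with the defaults above, the first 96 bytes of each packet are captured
+# and each per-core file is rotated at 16777216 bytes (16 MiB).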
+
+# Port config section
+# Correspond to dpdk.port_list's index: port0, port1...
+[port0]
+addr=192.168.1.2
+netmask=255.255.255.0
+broadcast=192.168.1.255
+gateway=192.168.1.1
+# IPv6 net addr
+# Optional parameters
+#addr6=ff::02
+#prefix_len=64
+#gateway6=ff::01
+
+# lcore list used to handle this port
+# the format is same as port_list
+#lcore_list=0
+
+# bonding slave port list used to handle this port
+# need to config while this port is a bonding port
+# the format is same as port_list
+#slave_port_list=0,1
+
+# Packet capture path, this will hurt performance
+#pcap=./a.pcap
+
+# Vdev config section
+# Correspond to dpdk.nb_vdev's index: vdev0, vdev1...
+# iface : Does not always need to be set.
+# path : The vuser device path in the container. Required.
+# queues : The max queues of vuser. Optional, default 1; must be greater than or equal to the number of processes.
+# queue_size : Queue size. Optional, default 256.
+# mac : The MAC address of vuser. Optional, default random; if vhost uses a physical NIC, it should be set to that NIC's MAC.
+# cq : Optional, if queues = 1, default 0; if queues > 1 default 1.
+#[vdev0]
+##iface=/usr/local/var/run/openvswitch/vhost-user0
+#path=/var/run/openvswitch/vhost-user0
+#queues=1
+#queue_size=256
+#mac=00:00:00:00:00:01
+#cq=0
+
+# bond config section
+# See http://doc.dpdk.org/guides/prog_guide/link_bonding_poll_mode_drv_lib.html
+#[bond0]
+#mode=4
+#slave=0000:0a:00.0,slave=0000:0a:00.1
+#primary=0000:0a:00.0
+#mac=f0:98:38:xx:xx:xx
+## opt argument
+#socket_id=0
+#xmit_policy=l23
+#lsc_poll_period_ms=100
+#up_delay=10
+#down_delay=50
+
+# KNI config: if enabled and method=reject,
+# all packets that do not belong to the following tcp_port and udp_port
+# lists are passed to the kernel; if method=accept, all packets that belong to
+# the following tcp_port and udp_port lists are passed to the kernel.
+#[kni]
+#enable=1
+#method=reject
+# The format is same as port_list
+#tcp_port=80,443
+#udp_port=53
+
+# FreeBSD network performance tuning configurations.
+# Most native FreeBSD configurations are supported.
+[freebsd.boot]
+hz=100
+
+# Block out a range of descriptors to avoid overlap
+# with the kernel's descriptor space.
+# You can increase this value according to your app.
+fd_reserve=1024
+
+kern.ipc.maxsockets=262144
+
+net.inet.tcp.syncache.hashsize=4096
+net.inet.tcp.syncache.bucketlimit=100
+
+net.inet.tcp.tcbhashsize=65536
+
+kern.ncallout=262144
+
+kern.features.inet6=1
+net.inet6.ip6.auto_linklocal=1
+net.inet6.ip6.accept_rtadv=2
+net.inet6.icmp6.rediraccept=1
+net.inet6.ip6.forwarding=0
+
+[freebsd.sysctl]
+kern.ipc.somaxconn=32768
+kern.ipc.maxsockbuf=16777216
+
+net.link.ether.inet.maxhold=5
+
+net.inet.tcp.fast_finwait2_recycle=1
+net.inet.tcp.sendspace=16384
+net.inet.tcp.recvspace=8192
+#net.inet.tcp.nolocaltimewait=1
+net.inet.tcp.cc.algorithm=cubic
+net.inet.tcp.sendbuf_max=16777216
+net.inet.tcp.recvbuf_max=16777216
+net.inet.tcp.sendbuf_auto=1
+net.inet.tcp.recvbuf_auto=1
+net.inet.tcp.sendbuf_inc=16384
+net.inet.tcp.recvbuf_inc=524288
+net.inet.tcp.sack.enable=1
+net.inet.tcp.blackhole=1
+net.inet.tcp.msl=2000
+net.inet.tcp.delayed_ack=0
+
+net.inet.udp.blackhole=1
+net.inet.ip.redirect=0
+net.inet.ip.forwarding=0
-
-#user nobody;
+# root account is necessary.
+user root;
+# Should equal the number of lcores enabled in `dpdk.lcore_mask` in f-stack.conf.
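+# e.g. (illustrative) lcore_mask=3 enables 2 lcores, so use "worker_processes 2;".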
worker_processes 1;
#error_log logs/error.log;
#pid logs/nginx.pid;
+# path of f-stack configuration file, default: $NGX_PREFIX/conf/f-stack.conf.
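+# An absolute path also works, e.g. (illustrative):
+#fstack_conf /usr/local/nginx_fstack/conf/f-stack.conf;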
+fstack_conf f-stack.conf;
events {
- worker_connections 1024;
+ worker_connections 102400;
+ use kqueue;
}
#access_log logs/access.log main;
- sendfile on;
+ sendfile off;
#tcp_nopush on;
#keepalive_timeout 0;
listen 80;
server_name localhost;
+        # build this server on the kernel network stack
+ #
+ #kernel_network_stack on;
+
#charset koi8-r;
+ access_log /dev/null;
#access_log logs/host.access.log main;
location / {
- root html;
- index index.html index.htm;
+ #root html;
+ #index index.html index.htm;
+ return 200 "<title>Welcome to F-Stack Nginx!</title>\r\n pad data:0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789";
}
#error_page 404 /404.html;
static void ngx_unload_module(void *data);
#endif
+#if (NGX_HAVE_FSTACK)
+static char *ngx_set_fstack_conf(ngx_conf_t *cf, ngx_command_t *cmd,
+ void *conf);
+#endif
static ngx_conf_enum_t ngx_debug_points[] = {
{ ngx_string("stop"), NGX_DEBUG_POINTS_STOP },
0,
NULL },
+#if (NGX_HAVE_FSTACK)
+ { ngx_string("fstack_conf"),
+ NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
+ ngx_set_fstack_conf,
+ 0,
+ offsetof(ngx_core_conf_t, fstack_conf),
+ NULL },
+
+ { ngx_string("schedule_timeout"),
+ NGX_MAIN_CONF|NGX_DIRECT_CONF|NGX_CONF_TAKE1,
+ ngx_conf_set_msec_slot,
+ 0,
+ offsetof(ngx_core_conf_t, schedule_timeout),
+ NULL },
+#endif
+
ngx_null_command
};
ccf->user = (ngx_uid_t) NGX_CONF_UNSET_UINT;
ccf->group = (ngx_gid_t) NGX_CONF_UNSET_UINT;
+#if (NGX_HAVE_FSTACK)
+ ccf->schedule_timeout = NGX_CONF_UNSET_MSEC;
+#endif
+
if (ngx_array_init(&ccf->env, cycle->pool, 1, sizeof(ngx_str_t))
!= NGX_OK)
{
ngx_conf_init_value(ccf->worker_processes, 1);
ngx_conf_init_value(ccf->debug_points, 0);
+#if (NGX_HAVE_FSTACK)
+ ngx_conf_init_msec_value(ccf->schedule_timeout, 30);
+#endif
+
#if (NGX_HAVE_CPU_AFFINITY)
if (!ccf->cpu_affinity_auto
}
#endif
+
+#if (NGX_HAVE_FSTACK)
+static char *
+ngx_set_fstack_conf(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
+{
+ char *p = conf;
+
+ ngx_str_t *field, *value;
+ ngx_str_t full;
+
+ field = (ngx_str_t *)(p + cmd->offset);
+
+ if (field->data) {
+ return "is duplicate";
+ }
+
+ value = cf->args->elts;
+ full = value[1];
+
+ if (ngx_conf_full_name(cf->cycle, &full, 1) != NGX_OK) {
+ return NGX_CONF_ERROR;
+ }
+
+ *field = full;
+
+ return NGX_CONF_OK;
+}
+#endif
+
#endif
+#if (NGX_HAVE_FSTACK)
+#include "ff_api.h"
+#endif
#ifndef NGX_HAVE_SO_SNDLOWAT
#define NGX_HAVE_SO_SNDLOWAT 1
#include <ngx_event.h>
+#if (NGX_HAVE_FSTACK)
+extern int fstack_territory(int domain, int type, int protocol);
+extern int is_fstack_fd(int sockfd);
+
+/*
+ * ngx_ff_skip_listening_socket() decides whether to skip `ls`.
+ * If the current process is NGX_PROCESS_WORKER and `ls` belongs to fstack,
+ * then `*type` will be set to ls->type|SOCK_FSTACK when `type` is not NULL.
+ */
+static ngx_inline int
+ngx_ff_skip_listening_socket(ngx_cycle_t *cycle, const ngx_listening_t *ls, int *type)
+{
+ if (ngx_ff_process == NGX_FF_PROCESS_NONE) {
+
+ /* process master, kernel network stack*/
+ if (!ls->belong_to_host) {
+            /* Continue to process this listening socket here
+               only if it is not supported by fstack. */
+ if (fstack_territory(ls->sockaddr->sa_family, ls->type, 0)) {
+ return 1;
+ }
+ }
+ } else {
+ /* process worker, fstack */
+ if (ls->belong_to_host) {
+ return 1;
+ }
+
+ if (!fstack_territory(ls->sockaddr->sa_family, ls->type, 0)) {
+ return 1;
+ }
+
+ if(type) {
+ *type |= SOCK_FSTACK;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+
ngx_os_io_t ngx_io;
ngx_socket_t s;
ngx_listening_t *ls;
+#if (NGX_HAVE_FSTACK)
+ int type;
+#endif
+
reuseaddr = 1;
#if (NGX_SUPPRESS_WARN)
failed = 0;
ls = cycle->listening.elts;
for (i = 0; i < cycle->listening.nelts; i++) {
+#if (NGX_HAVE_FSTACK)
+ type = ls[i].type;
+ if(ngx_ff_skip_listening_socket(cycle, &ls[i], &type)){
+ continue;
+ }
+#endif
+
if (ls[i].ignore) {
continue;
}
continue;
}
+#if (NGX_HAVE_FSTACK)
+ s = ngx_socket(ls[i].sockaddr->sa_family, type, 0);
+#else
s = ngx_socket(ls[i].sockaddr->sa_family, ls[i].type, 0);
+#endif
+
if (s == (ngx_socket_t) -1) {
ngx_log_error(NGX_LOG_EMERG, log, ngx_socket_errno,
ls = cycle->listening.elts;
for (i = 0; i < cycle->listening.nelts; i++) {
+#if (NGX_HAVE_FSTACK)
+ if(ngx_ff_skip_listening_socket(cycle, &ls[i], NULL)){
+ continue;
+ }
+#endif
+
ls[i].log = *ls[i].logp;
if (ls[i].rcvbuf != -1) {
ngx_log_debug2(NGX_LOG_DEBUG_CORE, cycle->log, 0,
"close listening %V #%d ", &ls[i].addr_text, ls[i].fd);
+#if (NGX_HAVE_FSTACK)
+ if(ls[i].fd != (ngx_socket_t) -1) {
+ if (ngx_close_socket(ls[i].fd) == -1) {
+ ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
+ ngx_close_socket_n " %V failed", &ls[i].addr_text);
+ }
+ }
+#else
if (ngx_close_socket(ls[i].fd) == -1) {
ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
ngx_close_socket_n " %V failed", &ls[i].addr_text);
}
+#endif /* NGX_HAVE_FSTACK */
#if (NGX_HAVE_UNIX_DOMAIN)
wev->write = 1;
+#if (NGX_HAVE_FSTACK)
+ rev->belong_to_host = wev->belong_to_host = is_fstack_fd(s) ? 0 : 1;
+#endif
+
return c;
}
}
if (!c->shared) {
+#if (NGX_HAVE_FSTACK)
+ if (ngx_event_actions.del_conn) {
+#else
if (ngx_del_conn) {
+#endif
ngx_del_conn(c, NGX_CLOSE_EVENT);
} else {
int fastopen;
#endif
+#if (NGX_HAVE_FSTACK)
+ unsigned belong_to_host:1;
+#endif
};
char **environment;
ngx_uint_t transparent; /* unsigned transparent:1; */
+
+#if (NGX_HAVE_FSTACK)
+ ngx_str_t fstack_conf;
+ ngx_msec_t schedule_timeout;
+#endif
} ngx_core_conf_t;
ngx_value(NGX_SIG_ATOMIC_T_SIZE) "," \
ngx_value(NGX_TIME_T_SIZE) ","
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
#define NGX_MODULE_SIGNATURE_1 "1"
#else
#define NGX_MODULE_SIGNATURE_1 "0"
c->start_time = ngx_current_msec;
+#if (NGX_HAVE_FSTACK)
+ if (ngx_event_actions.add_conn) {
+#else
if (ngx_add_conn) {
+#endif
if (ngx_add_conn(c) == NGX_ERROR) {
goto failed;
}
}
}
+#if (NGX_HAVE_FSTACK)
+ if (ngx_event_actions.add_conn) {
+#else
if (ngx_add_conn) {
+#endif
if (rc == -1) {
/* NGX_EINPROGRESS */
--- /dev/null
+ /*
+ * Copyright (C) 2017-2021 THL A29 Limited, a Tencent company.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+ #include <ngx_config.h>
+ #include <ngx_core.h>
+ #include <ngx_event.h>
+ #include <ngx_channel.h>
+ #include <ngx_cycle.h>
+
+ #include <pthread.h>
+
+ #if (NGX_HAVE_FSTACK)
+ static void * ngx_ff_host_event_create_conf(ngx_cycle_t *cycle);
+ static char * ngx_ff_host_event_init_conf(ngx_cycle_t *cycle,
+ void *conf);
+ static ngx_int_t ngx_ff_epoll_init(ngx_cycle_t *cycle, ngx_msec_t timer);
+ static ngx_int_t ngx_ff_epoll_add_event(ngx_event_t *ev,
+ ngx_int_t event, ngx_uint_t flags);
+ static ngx_int_t ngx_ff_epoll_del_event(ngx_event_t *ev,
+ ngx_int_t event, ngx_uint_t flags);
+ static ngx_int_t ngx_ff_epoll_process_events(ngx_cycle_t *cycle,
+ ngx_msec_t timer, ngx_uint_t flags);
+
+ static int ep = -1;
+ static struct epoll_event *event_list;
+ static ngx_uint_t nevents;
+
+ typedef struct {
+ ngx_uint_t events;
+ } ngx_ff_host_event_conf_t;
+
+
+ static ngx_command_t ngx_ff_host_event_commands[] = {
+ ngx_null_command
+ };
+
+ ngx_core_module_t ngx_ff_host_event_module_ctx = {
+ ngx_string("ff_host_event"),
+ ngx_ff_host_event_create_conf, /* create configuration */
+ ngx_ff_host_event_init_conf, /* init configuration */
+ };
+
+ ngx_module_t ngx_ff_host_event_module = {
+ NGX_MODULE_V1,
+ &ngx_ff_host_event_module_ctx, /* module context */
+ ngx_ff_host_event_commands, /* module directives */
+ NGX_CORE_MODULE, /* module type */
+ NULL, /* init master */
+ NULL, /* init module */
+ NULL, /* init process */
+ NULL, /* init thread */
+ NULL, /* exit thread */
+ NULL, /* exit process */
+ NULL, /* exit master */
+ NGX_MODULE_V1_PADDING
+ };
+
+ static void *
+ ngx_ff_host_event_create_conf(ngx_cycle_t *cycle)
+ {
+ ngx_ff_host_event_conf_t *cf;
+ cf = ngx_palloc(cycle->pool, sizeof(ngx_ff_host_event_conf_t));
+ if (cf == NULL) {
+ return NULL;
+ }
+ cf->events = NGX_CONF_UNSET;
+ return cf;
+ }
+
+ static char *
+ ngx_ff_host_event_init_conf(ngx_cycle_t *cycle, void *conf)
+ {
+ ngx_ff_host_event_conf_t *cf = conf;
+ cf->events = 8;
+ return NGX_CONF_OK;
+ }
+
+
+ static ngx_int_t
+ ngx_ff_epoll_init(ngx_cycle_t *cycle, ngx_msec_t timer)
+ {
+ if (ep == -1) {
+ /* The size is just a hint */
+ ep = epoll_create(100);
+
+ if (ep == -1) {
+ ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
+ "epoll_create() failed");
+ return NGX_ERROR;
+ }
+ }
+
+ if (event_list) {
+ ngx_free(event_list);
+ }
+
+ nevents = 64;
+
+ event_list = ngx_alloc(sizeof(struct epoll_event) * nevents, cycle->log);
+ if (event_list == NULL) {
+ return NGX_ERROR;
+ }
+
+ return NGX_OK;
+ }
+
+ static void
+ ngx_ff_epoll_done(ngx_cycle_t *cycle)
+ {
+ if (close(ep) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
+ "epoll close() failed");
+ }
+
+ ep = -1;
+
+ ngx_free(event_list);
+
+ event_list = NULL;
+ nevents = 0;
+ }
+
+ static ngx_int_t
+ ngx_ff_epoll_add_event(ngx_event_t *ev, ngx_int_t event,
+ ngx_uint_t flags)
+ {
+ int op;
+ uint32_t events, prev;
+ ngx_event_t *e;
+ ngx_connection_t *c;
+ struct epoll_event ee;
+
+ c = ev->data;
+
+ events = (uint32_t) event;
+
+ if (event == NGX_READ_EVENT) {
+ e = c->write;
+ prev = EPOLLOUT;
+ #if (NGX_READ_EVENT != EPOLLIN|EPOLLRDHUP)
+ events = EPOLLIN|EPOLLRDHUP;
+ #endif
+
+ } else {
+ e = c->read;
+ prev = EPOLLIN|EPOLLRDHUP;
+ #if (NGX_WRITE_EVENT != EPOLLOUT)
+ events = EPOLLOUT;
+ #endif
+ }
+
+ if (e->active) {
+ op = EPOLL_CTL_MOD;
+ events |= prev;
+
+ } else {
+ op = EPOLL_CTL_ADD;
+ }
+
+ ee.events = events | (uint32_t) flags;
+ ee.data.ptr = (void *) ((uintptr_t) c | ev->instance);
+
+ ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
+ "epoll add event: fd:%d op:%d ev:%08XD",
+ c->fd, op, ee.events);
+
+ if (epoll_ctl(ep, op, c->fd, &ee) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno,
+ "epoll_ctl(%d, %d) failed", op, c->fd);
+ return NGX_ERROR;
+ }
+
+ ev->active = 1;
+ #if 0
+ ev->oneshot = (flags & NGX_ONESHOT_EVENT) ? 1 : 0;
+ #endif
+
+ return NGX_OK;
+ }
+
+ static ngx_int_t
+ ngx_ff_epoll_del_event(ngx_event_t *ev, ngx_int_t event,
+ ngx_uint_t flags)
+ {
+ int op;
+ uint32_t prev;
+ ngx_event_t *e;
+ ngx_connection_t *c;
+ struct epoll_event ee;
+
+ /*
+ * when the file descriptor is closed, the epoll automatically deletes
+ * it from its queue, so we do not need to delete explicitly the event
+ * before the closing the file descriptor
+ */
+
+ if (flags & NGX_CLOSE_EVENT) {
+ ev->active = 0;
+ return NGX_OK;
+ }
+
+ c = ev->data;
+
+ if (event == NGX_READ_EVENT) {
+ e = c->write;
+ prev = EPOLLOUT;
+
+ } else {
+ e = c->read;
+ prev = EPOLLIN|EPOLLRDHUP;
+ }
+
+ if (e->active) {
+ op = EPOLL_CTL_MOD;
+ ee.events = prev | (uint32_t) flags;
+ ee.data.ptr = (void *) ((uintptr_t) c | ev->instance);
+
+ } else {
+ op = EPOLL_CTL_DEL;
+ ee.events = 0;
+ ee.data.ptr = NULL;
+ }
+
+ ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
+ "epoll del event: fd:%d op:%d ev:%08XD",
+ c->fd, op, ee.events);
+
+ if (epoll_ctl(ep, op, c->fd, &ee) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno,
+ "epoll_ctl(%d, %d) failed", op, c->fd);
+ return NGX_ERROR;
+ }
+
+ ev->active = 0;
+
+ return NGX_OK;
+ }
+
+ static ngx_int_t
+ ngx_ff_epoll_add_connection(ngx_connection_t *c)
+ {
+ struct epoll_event ee;
+
+ ee.events = EPOLLIN|EPOLLOUT|EPOLLET|EPOLLRDHUP;
+ ee.data.ptr = (void *) ((uintptr_t) c | c->read->instance);
+
+ ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0,
+ "epoll add connection: fd:%d ev:%08XD", c->fd, ee.events);
+
+ if (epoll_ctl(ep, EPOLL_CTL_ADD, c->fd, &ee) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, c->log, ngx_errno,
+ "epoll_ctl(EPOLL_CTL_ADD, %d) failed", c->fd);
+ return NGX_ERROR;
+ }
+
+ c->read->active = 1;
+ c->write->active = 1;
+
+ return NGX_OK;
+ }
+
+
+ static ngx_int_t
+ ngx_ff_epoll_del_connection(ngx_connection_t *c, ngx_uint_t flags)
+ {
+ int op;
+ struct epoll_event ee;
+
+ /*
+ * when the file descriptor is closed the epoll automatically deletes
+ * it from its queue so we do not need to delete explicitly the event
+ * before the closing the file descriptor
+ */
+
+ if (flags & NGX_CLOSE_EVENT) {
+ c->read->active = 0;
+ c->write->active = 0;
+ return NGX_OK;
+ }
+
+ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0,
+ "epoll del connection: fd:%d", c->fd);
+
+ op = EPOLL_CTL_DEL;
+ ee.events = 0;
+ ee.data.ptr = NULL;
+
+ if (epoll_ctl(ep, op, c->fd, &ee) == -1) {
+ ngx_log_error(NGX_LOG_ALERT, c->log, ngx_errno,
+ "epoll_ctl(%d, %d) failed", op, c->fd);
+ return NGX_ERROR;
+ }
+
+ c->read->active = 0;
+ c->write->active = 0;
+
+ return NGX_OK;
+ }
+
+ static ngx_int_t
+ ngx_ff_epoll_process_events(ngx_cycle_t *cycle,
+ ngx_msec_t timer, ngx_uint_t flags)
+ {
+ int events;
+ uint32_t revents;
+ ngx_int_t instance, i;
+ ngx_uint_t level;
+ ngx_err_t err;
+ ngx_event_t *rev, *wev;
+ ngx_connection_t *c;
+
+ /* NGX_TIMER_INFINITE == INFTIM */
+ /*
+ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
+ "epoll timer: %M", timer);
+ */
+ events = epoll_wait(ep, event_list, (int) nevents, timer);
+
+ err = (events == -1) ? ngx_errno : 0;
+
+ if (flags & NGX_UPDATE_TIME || ngx_event_timer_alarm) {
+ ngx_time_update();
+ }
+
+ if (err) {
+ if (err == NGX_EINTR) {
+ level = NGX_LOG_INFO;
+ } else {
+ level = NGX_LOG_ALERT;
+ }
+
+ ngx_log_error(level, cycle->log, err, "epoll_wait() failed");
+ return NGX_ERROR;
+ }
+
+ if (events == 0) {
+ if (timer != NGX_TIMER_INFINITE) {
+ return NGX_OK;
+ }
+
+ ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
+ "epoll_wait() returned no events without timeout");
+ return NGX_ERROR;
+ }
+
+ for (i = 0; i < events; i++) {
+ c = event_list[i].data.ptr;
+
+ instance = (uintptr_t) c & 1;
+ c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1);
+
+ rev = c->read;
+
+ if (c->fd == -1 || rev->instance != instance) {
+
+ /*
+ * the stale event from a file descriptor
+ * that was just closed in this iteration
+ */
+
+ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
+ "epoll: stale event %p", c);
+ continue;
+ }
+
+ revents = event_list[i].events;
+
+ ngx_log_debug3(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
+ "epoll: fd:%d ev:%04XD d:%p",
+ c->fd, revents, event_list[i].data.ptr);
+
+ if (revents & (EPOLLERR|EPOLLHUP)) {
+ ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
+ "epoll_wait() error on fd:%d ev:%04XD",
+ c->fd, revents);
+
+ /*
+ * if the error events were returned, add EPOLLIN and EPOLLOUT
+ * to handle the events at least in one active handler
+ */
+
+ revents |= EPOLLIN|EPOLLOUT;
+ }
+
+ if ((revents & EPOLLIN) && rev->active) {
+ rev->ready = 1;
+ rev->available = -1;
+ rev->handler(rev);
+ }
+
+ wev = c->write;
+
+ if ((revents & EPOLLOUT) && wev->active) {
+
+ if (c->fd == -1 || wev->instance != instance) {
+
+ /*
+ * the stale event from a file descriptor
+ * that was just closed in this iteration
+ */
+
+ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
+ "epoll: stale event %p", c);
+ continue;
+ }
+
+ wev->ready = 1;
+ #if (NGX_THREADS)
+ wev->complete = 1;
+ #endif
+ wev->handler(wev);
+ }
+ }
+
+ return NGX_OK;
+ }
+
+ ngx_event_actions_t ngx_ff_host_event_actions = {
+ ngx_ff_epoll_add_event, /* add an event */
+ ngx_ff_epoll_del_event, /* delete an event */
+ ngx_ff_epoll_add_event, /* enable an event */
+ ngx_ff_epoll_add_event, /* disable an event */
+ ngx_ff_epoll_add_connection, /* add an connection */
+ ngx_ff_epoll_del_connection, /* delete an connection */
+ NULL, /* trigger a notify */
+ ngx_ff_epoll_process_events, /* process the events */
+ ngx_ff_epoll_init, /* init the events */
+ ngx_ff_epoll_done, /* done the events */
+ };
+
+ #endif
+
\ No newline at end of file
--- /dev/null
+/*
+ * Inspired by opendp/dpdk-nginx's ans_module.c.
+ * License of opendp:
+ *
+ BSD LICENSE
+ Copyright(c) 2015-2017 Ansyun anssupport@163.com. All rights reserved.
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ Neither the name of Ansyun anssupport@163.com nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ Author: JiaKai (jiakai1000@gmail.com) and Bluestar (anssupport@163.com)
+ */
+
+/*
+ * Copyright (C) 2017-2021 THL A29 Limited, a Tencent company.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <string.h>
+ #include <stdlib.h>
+ #include <stdarg.h>
+ #include <errno.h>
+ #include <netinet/in.h>
+ #include <assert.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+ #include <sys/socket.h>
+ #include <arpa/inet.h>
+ #include <sys/time.h>
+
+ #include <ngx_auto_config.h>
+ #include "ff_api.h"
+
+ #define _GNU_SOURCE
+ #define __USE_GNU
+
+ #include <unistd.h>
+ #include <sched.h>
+ #include <sys/types.h>
+ #include <fcntl.h>
+ #include <sys/syscall.h>
+ #include <dlfcn.h>
+ #include <limits.h>
+
+ #ifndef likely
+ #define likely(x) __builtin_expect((x),1)
+ #endif
+
+ #ifndef unlikely
+ #define unlikely(x) __builtin_expect((x),0)
+ #endif
+
+ static int (*real_close)(int);
+ static int (*real_socket)(int, int, int);
+ static int (*real_bind)(int, const struct sockaddr*, socklen_t);
+ static int (*real_connect)(int, const struct sockaddr*, socklen_t);
+ static int (*real_listen)(int, int);
+
+ static int (*real_getsockopt)(int, int, int, void *, socklen_t*);
+ static int (*real_setsockopt)(int, int, int, const void *, socklen_t);
+
+ static int (*real_accept)(int, struct sockaddr *, socklen_t *);
+ static int (*real_accept4)(int, struct sockaddr *, socklen_t *, int);
+ static ssize_t (*real_recv)(int, void *, size_t, int);
+ static ssize_t (*real_send)(int, const void *, size_t, int);
+ static ssize_t (*real_sendto)(int, const void *, size_t, int,
+ const struct sockaddr*, socklen_t);
+ static ssize_t (*real_sendmsg)(int, const struct msghdr*, int);
+ static ssize_t (*real_recvmsg)(int, struct msghdr *, int);
+ static ssize_t (*real_writev)(int, const struct iovec *, int);
+ static ssize_t (*real_readv)(int, const struct iovec *, int);
+
+ static ssize_t (*real_read)(int, void *, size_t);
+ static ssize_t (*real_write)(int, const void *, size_t);
+
+ static int (*real_shutdown)(int, int);
+
+ static int (*real_ioctl)(int, int, void *);
+
+ static int (*real_gettimeofday)(struct timeval *tv, struct timezone *tz);
+
+ static int (*real_getpeername)(int sockfd, struct sockaddr * name, socklen_t *namelen);
+ static int (*real_getsockname)(int s, struct sockaddr *name, socklen_t *namelen);
+
+ static __thread int inited;
+
+ #define SYSCALL(func) \
+ ({ \
+ if (unlikely(!real_##func)) { \
+ real_##func = dlsym(RTLD_NEXT, #func); \
+ } \
+ real_##func; \
+ })
+
+ extern intptr_t ngx_max_sockets;
+
+ /*-
+  * Offset each sockfd returned by fstack by the maximum number of kernel
+  * sockets, so kernel fds and fstack fds occupy disjoint ranges and can be
+  * told apart at the application layer, which owns both kinds of fds.
+  * fstack tries to achieve this with 'fd_reserve', but unfortunately that
+  * does not work well.
+  */
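+ /*
+  * For illustration (hypothetical value): with ngx_max_sockets == 1024, an
+  * fstack socket fd 5 is exposed to nginx as 5 + 1024 == 1029, while kernel
+  * fds stay below 1024; is_fstack_fd() simply compares against ngx_max_sockets.
+  */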
+ static inline int convert_fstack_fd(int sockfd) {
+ return sockfd + ngx_max_sockets;
+ }
+
+ /* Restore socket fd. */
+ static inline int restore_fstack_fd(int sockfd) {
+ if(sockfd <= ngx_max_sockets) {
+ return sockfd;
+ }
+
+ return sockfd - ngx_max_sockets;
+ }
+
+ /* Tell whether a 'sockfd' belongs to fstack. */
+ int is_fstack_fd(int sockfd) {
+ if (unlikely(inited == 0)) {
+ return 0;
+ }
+
+ return sockfd >= ngx_max_sockets;
+ }
+
+ // proc_type, 1: primary, 0: secondary.
+ int
+ ff_mod_init(const char *conf, int proc_id, int proc_type) {
+ int rc, i;
+ int ff_argc = 4;
+
+ char **ff_argv = malloc(sizeof(char *)*ff_argc);
+ for (i = 0; i < ff_argc; i++) {
+ ff_argv[i] = malloc(sizeof(char)*PATH_MAX);
+ }
+
+ sprintf(ff_argv[0], "nginx");
+ sprintf(ff_argv[1], "--conf=%s", conf);
+ sprintf(ff_argv[2], "--proc-id=%d", proc_id);
+ if (proc_type == 1) {
+ sprintf(ff_argv[3], "--proc-type=primary");
+ } else {
+ sprintf(ff_argv[3], "--proc-type=secondary");
+ }
+
+ rc = ff_init(ff_argc, ff_argv);
+ if (rc == 0) {
+ /* Ensure that the socket we converted
+ does not exceed the maximum value of 'int' */
+
+ if(ngx_max_sockets + (unsigned)ff_getmaxfd() > INT_MAX)
+ {
+ rc = -1;
+ }
+
+ inited = 1;
+ }
+
+ for (i = 0; i < ff_argc; i++) {
+ free(ff_argv[i]);
+ }
+
+ free(ff_argv);
+
+ return rc;
+ }
+
+ /*-
+ * Verify whether the socket is supported by fstack or not.
+ */
+ int
+ fstack_territory(int domain, int type, int protocol)
+ {
+ /* Remove creation flags */
+ type &= ~SOCK_CLOEXEC;
+ type &= ~SOCK_NONBLOCK;
+ type &= ~SOCK_FSTACK;
+
+ if ((AF_INET != domain && AF_INET6 != domain) || (SOCK_STREAM != type && SOCK_DGRAM != type)) {
+ return 0;
+ }
+
+ return 1;
+ }
+
+ int
+ socket(int domain, int type, int protocol)
+ {
+ int sock;
+ if (unlikely(inited == 0)) {
+ return SYSCALL(socket)(domain, type, protocol);
+ }
+
+ if (unlikely(fstack_territory(domain, type, protocol) == 0)) {
+ return SYSCALL(socket)(domain, type, protocol);
+ }
+
+ if (unlikely((type & SOCK_FSTACK) == 0)) {
+ return SYSCALL(socket)(domain, type, protocol);
+ }
+
+ type &= ~SOCK_FSTACK;
+ sock = ff_socket(domain, type, protocol);
+
+ if (sock != -1) {
+ sock = convert_fstack_fd(sock);
+ }
+
+ return sock;
+ }
+
+ int
+ bind(int sockfd, const struct sockaddr *addr, socklen_t addrlen)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_bind(sockfd, (struct linux_sockaddr *)addr, addrlen);
+ }
+
+ return SYSCALL(bind)(sockfd, addr, addrlen);
+ }
+
+ int
+ connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_connect(sockfd, (struct linux_sockaddr *)addr, addrlen);
+ }
+
+ return SYSCALL(connect)(sockfd, addr, addrlen);
+ }
+
+ int
+ getpeername(int sockfd, struct sockaddr * name,
+ socklen_t *namelen)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_getpeername(sockfd,
+ (struct linux_sockaddr *)name, namelen);
+ }
+
+ return SYSCALL(getpeername)(sockfd, name, namelen);
+ }
+
+ int
+ getsockname(int sockfd, struct sockaddr *name,
+ socklen_t *namelen)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_getsockname(sockfd,
+ (struct linux_sockaddr *)name, namelen);
+ }
+
+ return SYSCALL(getsockname)(sockfd, name, namelen);
+ }
+
+ ssize_t
+ send(int sockfd, const void *buf, size_t len, int flags)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_send(sockfd, buf, len, flags);
+ }
+
+ return SYSCALL(send)(sockfd, buf, len, flags);
+ }
+
+ ssize_t
+ sendto(int sockfd, const void *buf, size_t len, int flags,
+ const struct sockaddr *dest_addr, socklen_t addrlen)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_sendto(sockfd, buf, len, flags,
+ (struct linux_sockaddr *)dest_addr, addrlen);
+ }
+
+ return SYSCALL(sendto)(sockfd, buf, len, flags, dest_addr, addrlen);
+ }
+
+ ssize_t
+ sendmsg(int sockfd, const struct msghdr *msg, int flags)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_sendmsg(sockfd, msg, flags);
+ }
+
+ return SYSCALL(sendmsg)(sockfd, msg, flags);
+ }
+
+ ssize_t recvmsg(int sockfd, struct msghdr *msg, int flags)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_recvmsg(sockfd, msg, flags);
+ }
+
+ return SYSCALL(recvmsg)(sockfd, msg, flags);
+ }
+
+ ssize_t
+ recv(int sockfd, void *buf, size_t len, int flags)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_recv(sockfd, buf, len, flags);
+ }
+
+ return SYSCALL(recv)(sockfd, buf, len, flags);
+ }
+
+ ssize_t
+ __recv_chk (int fd, void *buf, size_t n, size_t buflen, int flags)
+ {
+ /*
+ if (n > buflen)
+ __chk_fail ();
+ */
+ return recv (fd, buf, n, flags);
+ }
+
+ int
+ listen(int sockfd, int backlog)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_listen(sockfd, backlog);
+ }
+
+ return SYSCALL(listen)(sockfd, backlog);
+ }
+
+ int
+ getsockopt(int sockfd, int level, int optname,
+ void *optval, socklen_t *optlen)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_getsockopt(sockfd, level, optname, optval, optlen);
+ }
+
+ return SYSCALL(getsockopt)(sockfd, level, optname, optval, optlen);
+ }
+
+ int
+ setsockopt (int sockfd, int level, int optname,
+ const void *optval, socklen_t optlen)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_setsockopt(sockfd, level, optname, optval, optlen);
+ }
+
+ return SYSCALL(setsockopt)(sockfd, level, optname, optval, optlen);
+ }
+
+ int
+ accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen)
+ {
+ int rc;
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ rc = ff_accept(sockfd, (struct linux_sockaddr *)addr, addrlen);
+ if (rc != -1) {
+ rc = convert_fstack_fd(rc);
+ }
+
+ return rc;
+ }
+
+ return SYSCALL(accept)(sockfd, addr, addrlen);
+ }
+
+ int
+ accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen, int flags)
+ {
+ int rc;
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ rc = ff_accept(sockfd, (struct linux_sockaddr *)addr, addrlen);
+ if (rc != -1) {
+ rc = convert_fstack_fd(rc);
+ }
+
+ return rc;
+ }
+
+ return SYSCALL(accept4)(sockfd, addr, addrlen, flags);
+ }
+
+ int
+ close(int sockfd)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_close(sockfd);
+ }
+
+ return SYSCALL(close)(sockfd);
+ }
+
+ int
+ shutdown(int sockfd, int how)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_shutdown(sockfd, how);
+ }
+
+ return SYSCALL(shutdown)(sockfd, how);
+ }
+
+ ssize_t
+ writev(int sockfd, const struct iovec *iov, int iovcnt)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_writev(sockfd, iov, iovcnt);
+ }
+
+ return SYSCALL(writev)(sockfd, iov, iovcnt);
+ }
+
+ ssize_t
+ readv(int sockfd, const struct iovec *iov, int iovcnt)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_readv(sockfd, iov, iovcnt);
+ }
+
+ return SYSCALL(readv)(sockfd, iov, iovcnt);
+ }
+
+ ssize_t
+ read(int sockfd, void *buf, size_t count)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_read(sockfd, buf, count);
+ }
+
+ return SYSCALL(read)(sockfd, buf, count);
+ }
+
+ ssize_t
+ write(int sockfd, const void *buf, size_t count)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_write(sockfd, buf, count);
+ }
+
+ return SYSCALL(write)(sockfd, buf, count);
+ }
+
+ int
+ ioctl(int sockfd, int request, void *p)
+ {
+ if(is_fstack_fd(sockfd)){
+ sockfd = restore_fstack_fd(sockfd);
+ return ff_ioctl(sockfd, request, p);
+ }
+
+ return SYSCALL(ioctl)(sockfd, request, p);
+ }
+
+ int
+ kqueue(void)
+ {
+ return ff_kqueue();
+ }
+
+ int
+ kevent(int kq, const struct kevent *changelist, int nchanges,
+ struct kevent *eventlist, int nevents, const struct timespec *timeout)
+ {
+ struct kevent *kev;
+ int i = 0;
+ for(i = 0; i < nchanges; i++) {
+ kev = (struct kevent *)&changelist[i];
+ switch (kev->filter) {
+
+ case EVFILT_READ:
+ case EVFILT_WRITE:
+ case EVFILT_VNODE:
+ kev->ident = restore_fstack_fd(kev->ident);
+ break;
+ case EVFILT_AIO:
+ case EVFILT_PROC:
+ case EVFILT_SIGNAL:
+ case EVFILT_TIMER:
+ case EVFILT_USER:
+ default:
+ break;
+ }
+ }
+ return ff_kevent(kq, changelist, nchanges, eventlist, nevents, timeout);
+ }
+
+ int
+ #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 31)
+ gettimeofday(struct timeval *tv, void *tz)
+ #else
+ gettimeofday(struct timeval *tv, struct timezone *tz)
+ #endif
+ {
+ if (unlikely(inited == 0)) {
+ return SYSCALL(gettimeofday)(tv, tz);
+ }
+
+ return ff_gettimeofday(tv, tz);
+ }
+
\ No newline at end of file
static struct kevent notify_kev;
#endif
+#if (NGX_HAVE_FSTACK)
+extern int kqueue(void);
+extern int kevent(int kq, const struct kevent *changelist, int nchanges,
+ struct kevent *eventlist, int nevents, const struct timespec *timeout);
+#endif
static ngx_str_t kqueue_name = ngx_string("kqueue");
struct kevent kev;
#endif
+#if (NGX_HAVE_FSTACK)
+ if(ngx_ff_process == NGX_FF_PROCESS_NONE) {
+ return NGX_OK;
+ }
+#endif
+
kcf = ngx_event_get_conf(cycle->conf_ctx, ngx_kqueue_module);
if (ngx_kqueue == -1) {
tp = &ts;
}
+#if (NGX_HAVE_FSTACK)
+ if (n > 0) {
+ ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
+ "kevent timer: %M, changes: %d", timer, n);
+ }
+#else
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"kevent timer: %M, changes: %d", timer, n);
+#endif
events = kevent(ngx_kqueue, change_list, n, event_list, (int) nevents, tp);
ngx_time_update();
}
+#if !(NGX_HAVE_FSTACK)
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"kevent events: %d", events);
+#endif
if (err) {
if (err == NGX_EINTR) {
}
if (events == 0) {
+#if (NGX_HAVE_FSTACK)
+ return NGX_OK;
+#else
if (timer != NGX_TIMER_INFINITE) {
return NGX_OK;
}
ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
"kevent() returned no events without timeout");
return NGX_ERROR;
+#endif
}
for (i = 0; i < events; i++) {
ngx_uint_t flags);
static ngx_int_t ngx_select_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
ngx_uint_t flags);
+#if !(NGX_HAVE_FSTACK)
static void ngx_select_repair_fd_sets(ngx_cycle_t *cycle);
+#endif
static char *ngx_select_init_conf(ngx_cycle_t *cycle, void *conf);
ngx_log_error(level, cycle->log, err, "select() failed");
if (err == NGX_EBADF) {
+#if !(NGX_HAVE_FSTACK)
ngx_select_repair_fd_sets(cycle);
+#endif
}
return NGX_ERROR;
return NGX_OK;
}
+#if !(NGX_HAVE_FSTACK)
ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
"select() returned no events without timeout");
+#endif
return NGX_ERROR;
}
ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
"select ready != events: %d:%d", ready, nready);
+#if !(NGX_HAVE_FSTACK)
ngx_select_repair_fd_sets(cycle);
+#endif
}
return NGX_OK;
}
-
+#if !(NGX_HAVE_FSTACK)
static void
ngx_select_repair_fd_sets(ngx_cycle_t *cycle)
{
max_fd = -1;
}
-
+#endif
static char *
ngx_select_init_conf(ngx_cycle_t *cycle, void *conf)
static void *ngx_event_core_create_conf(ngx_cycle_t *cycle);
static char *ngx_event_core_init_conf(ngx_cycle_t *cycle, void *conf);
+#if (NGX_HAVE_FSTACK)
+extern ngx_int_t ngx_ff_epoll_process_events(ngx_cycle_t *cycle,
+ ngx_msec_t timer, ngx_uint_t flags);
+#endif
static ngx_uint_t ngx_timer_resolution;
sig_atomic_t ngx_event_timer_alarm;
ngx_int_t ngx_accept_disabled;
ngx_uint_t ngx_use_exclusive_accept;
+#if (NGX_HAVE_FSTACK)
+static ngx_msec_t ngx_schedule_timeout;
+#endif
#if (NGX_STAT_STUB)
{
ngx_uint_t flags;
ngx_msec_t timer, delta;
+#if (NGX_HAVE_FSTACK)
+ static ngx_msec_t initial; //msec
+#endif
if (ngx_timer_resolution) {
timer = NGX_TIMER_INFINITE;
delta = ngx_current_msec;
+#if (NGX_HAVE_FSTACK)
+    /*
+     * Processes with an NGX_FF_PROCESS_* role (workers) poll both fstack and
+     * the kernel stack; others (e.g. the cache manager/loader) only use the
+     * kernel stack.
+     */
+ if(!!ngx_ff_process) {
+ (void) ngx_process_events(cycle, timer, flags);
+
+        /*
+         * handle messages from the kernel (e.g. signals)
+         * in case of network inactivity
+         */
+ if (ngx_current_msec - initial >= ngx_schedule_timeout) {
+ (void) ngx_ff_process_host_events(cycle, 0, flags);
+
+ /* Update timer*/
+ initial = ngx_current_msec;
+ }
+ } else {
+ (void) ngx_ff_process_host_events(cycle, timer, flags);
+ }
+
+ delta = ngx_current_msec - delta;
+#else
(void) ngx_process_events(cycle, timer, flags);
delta = ngx_current_msec - delta;
"timer delta: %M", delta);
ngx_event_process_posted(cycle, &ngx_posted_accept_events);
+#endif
if (ngx_accept_mutex_held) {
ngx_shmtx_unlock(&ngx_accept_mutex);
#endif
+#if (NGX_HAVE_FSTACK)
+ ngx_schedule_timeout = ccf->schedule_timeout;
+#endif
+
return NGX_OK;
}
#endif
+#if (NGX_HAVE_FSTACK)
+
+extern ngx_event_actions_t ngx_ff_host_event_actions;
+
+#endif
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
break;
}
+#if (NGX_HAVE_FSTACK)
+ if (ngx_ff_host_event_actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
+ /* fatal */
+ exit(2);
+ }
+#endif
+
#if !(NGX_WIN32)
if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;
+#if (NGX_HAVE_FSTACK)
+ cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n * 2,
+ cycle->log);
+#else
cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
cycle->log);
+#endif
if (cycle->files == NULL) {
return NGX_ERROR;
}
rev->log = c->log;
rev->accept = 1;
+#if (NGX_HAVE_FSTACK)
+        /* Note: when nginx runs on fstack, make sure
+           the right fd is added to kqueue. */
+ c->read->belong_to_host = c->write->belong_to_host = ls[i].belong_to_host;
+#endif
+
#if (NGX_HAVE_DEFERRED_ACCEPT)
rev->deferred_accept = ls[i].deferred_accept;
#endif
#endif
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
module = &ngx_kqueue_module;
unsigned cancelable:1;
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
unsigned kq_vnode:1;
/* the pending errno reported by kqueue */
uint32_t padding[NGX_EVENT_T_PADDING];
#endif
#endif
+
+#if (NGX_HAVE_FSTACK)
+ unsigned belong_to_host:1;
+#endif
};
#if (NGX_HAVE_EPOLLRDHUP)
extern ngx_uint_t ngx_use_epoll_rdhup;
#endif
+#if (NGX_HAVE_FSTACK)
+extern ngx_event_actions_t ngx_ff_host_event_actions;
+#endif
/*
#endif
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
#define NGX_READ_EVENT EVFILT_READ
#define NGX_WRITE_EVENT EVFILT_WRITE
#define NGX_CLEAR_EVENT 0 /* dummy declaration */
#endif
+#if (NGX_HAVE_FSTACK)
+
+static inline ngx_int_t
+ngx_add_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags) {
+ if (1 == ev->belong_to_host) {
+ return ngx_ff_host_event_actions.add(ev, event, flags);
+ } else {
+ return ngx_event_actions.add(ev, event, flags);
+ }
+}
+
+static inline ngx_int_t
+ngx_del_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags) {
+ if (1 == ev->belong_to_host) {
+ return ngx_ff_host_event_actions.del(ev, event, flags);
+ } else {
+ return ngx_event_actions.del(ev, event, flags);
+ }
+}
+
+static inline ngx_int_t ngx_add_conn(ngx_connection_t *c)
+{
+ return ngx_event_actions.add_conn(c);
+}
+
+static inline ngx_int_t ngx_del_conn(
+ ngx_connection_t *c, ngx_uint_t flags) {
+ return ngx_event_actions.del_conn(c, flags);
+}
+
+static inline ngx_int_t ngx_notify(ngx_event_handler_pt handler) {
+ return ngx_event_actions.notify(handler);
+}
+
+static inline ngx_int_t ngx_process_events(
+ ngx_cycle_t *cycle, ngx_msec_t timer, ngx_uint_t flags)
+{
+ return ngx_event_actions.process_events(cycle, timer, flags);
+}
+
+#define ngx_ff_process_host_events ngx_ff_host_event_actions.process_events
+
+#else
#define ngx_process_events ngx_event_actions.process_events
#define ngx_done_events ngx_event_actions.done
#define ngx_notify ngx_event_actions.notify
+#endif /* NGX_HAVE_FSTACK */
+
#define ngx_add_timer ngx_event_add_timer
#define ngx_del_timer ngx_event_del_timer
rev = c->read;
wev = c->write;
+#if (NGX_HAVE_FSTACK)
+ rev->belong_to_host = wev->belong_to_host = ev->belong_to_host;
+#endif
+
wev->ready = 1;
if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
if (ev->deferred_accept) {
rev->ready = 1;
-#if (NGX_HAVE_KQUEUE || NGX_HAVE_EPOLLRDHUP)
+#if (NGX_HAVE_KQUEUE || NGX_HAVE_EPOLLRDHUP) || (NGX_HAVE_FSTACK)
rev->available = 1;
#endif
}
}
#endif
+#if (NGX_HAVE_FSTACK)
+ if (ngx_event_actions.add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
+#else
if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
+#endif
if (ngx_add_conn(c) == NGX_ERROR) {
ngx_close_accepted_connection(c);
return;
type = (pc->type ? pc->type : SOCK_STREAM);
+#if (NGX_HAVE_FSTACK)
+    /*
+     * We use a creation flag defined by fstack's adaptation layer
+     * to explicitly select the needed socket() implementation.
+     */
+ if (!pc->belong_to_host) {
+ s = ngx_socket(pc->sockaddr->sa_family, type | SOCK_FSTACK, 0);
+ } else {
+ s = ngx_socket(pc->sockaddr->sa_family, type, 0);
+ }
+#else
s = ngx_socket(pc->sockaddr->sa_family, type, 0);
+#endif
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, "%s socket %d",
(type == SOCK_STREAM) ? "stream" : "dgram", s);
c->start_time = ngx_current_msec;
+#if (NGX_HAVE_FSTACK)
+ if (ngx_event_actions.add_conn) {
+#else
if (ngx_add_conn) {
+#endif
if (ngx_add_conn(c) == NGX_ERROR) {
goto failed;
}
}
}
+#if (NGX_HAVE_FSTACK)
+ if (ngx_event_actions.add_conn) {
+#else
if (ngx_add_conn) {
+#endif
if (rc == -1) {
/* NGX_EINPROGRESS */
#if (NGX_HAVE_TRANSPARENT_PROXY)
+#if (NGX_HAVE_FSTACK)
+extern int is_fstack_fd(int sockfd);
+#ifndef IP_BINDANY
+#define IP_BINDANY 24
+#endif
+#endif
+
static ngx_int_t
ngx_event_connect_set_transparent(ngx_peer_connection_t *pc, ngx_socket_t s)
{
int value;
+#if defined(NGX_HAVE_FSTACK)
+ int optname;
+#endif
value = 1;
case AF_INET:
-#if defined(IP_TRANSPARENT)
+#if defined(NGX_HAVE_FSTACK)
+    /*
+     * FreeBSD defines IP_BINDANY in freebsd/netinet/in.h;
+     * fstack only supports IP_BINDANY, so kernel sockets keep using IP_TRANSPARENT.
+     */
+ if(is_fstack_fd(s)){
+ optname = IP_BINDANY;
+ } else {
+ optname = IP_TRANSPARENT;
+ }
- if (setsockopt(s, IPPROTO_IP, IP_TRANSPARENT,
+ if (setsockopt(s, IPPROTO_IP, optname,
(const void *) &value, sizeof(int)) == -1)
{
ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
- "setsockopt(IP_TRANSPARENT) failed");
+ "setsockopt(IP_BINDANY/IP_TRANSPARENT) failed");
return NGX_ERROR;
}
-#elif defined(IP_BINDANY)
-
- if (setsockopt(s, IPPROTO_IP, IP_BINDANY,
+#elif defined(IP_TRANSPARENT)
+ if (setsockopt(s, IPPROTO_IP, IP_TRANSPARENT,
(const void *) &value, sizeof(int)) == -1)
{
ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
- "setsockopt(IP_BINDANY) failed");
+ "setsockopt(IP_TRANSPARENT) failed");
return NGX_ERROR;
}
/* ngx_connection_log_error_e */
unsigned log_error:2;
+#if (NGX_HAVE_FSTACK)
+ unsigned belong_to_host:1;
+#endif
+
NGX_COMPAT_BEGIN(2)
NGX_COMPAT_END
};
} else {
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
/*
* kqueue notifies about the end of file or a pending error.
*valp = segment;
#if (NGX_HAVE_ADDRINFO_CMSG)
+#if (NGX_HAVE_FSTACK)
+    if (c->listening && !c->listening->belong_to_host && c->listening->wildcard && c->local_sockaddr) {
+#else
if (c->listening && c->listening->wildcard && c->local_sockaddr) {
+#endif
cmsg = CMSG_NXTHDR(&msg, cmsg);
clen += ngx_set_srcaddr_cmsg(cmsg, c->local_sockaddr);
}
msg.msg_namelen = socklen;
#if (NGX_HAVE_ADDRINFO_CMSG)
+#if (NGX_HAVE_FSTACK)
+    if (c->listening && !c->listening->belong_to_host && c->listening->wildcard && c->local_sockaddr) {
+#else
if (c->listening && c->listening->wildcard && c->local_sockaddr) {
+#endif
msg.msg_control = msg_control;
msg.msg_controllen = sizeof(msg_control);
ngx_str_t ssl_crl;
ngx_array_t *ssl_conf_commands;
#endif
+
+#if (NGX_HAVE_FSTACK)
+ ngx_flag_t kernel_network_stack;
+#endif
} ngx_http_proxy_loc_conf_t;
offsetof(ngx_http_proxy_loc_conf_t, ssl_conf_commands),
&ngx_http_proxy_ssl_conf_command_post },
+#endif
+
+#if (NGX_HAVE_FSTACK)
+
+ { ngx_string("proxy_kernel_network_stack"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_flag_slot,
+ NGX_HTTP_LOC_CONF_OFFSET,
+ offsetof(ngx_http_proxy_loc_conf_t, kernel_network_stack),
+ NULL },
+
#endif
ngx_null_command
u->accel = 1;
+#if (NGX_HAVE_FSTACK)
+ u->peer.belong_to_host = plcf->kernel_network_stack;
+#endif
+
if (!plcf->upstream.request_buffering
&& plcf->body_values == NULL && plcf->upstream.pass_request_body
&& (!r->headers_in.chunked
ngx_str_set(&conf->upstream.module, "proxy");
+#if (NGX_HAVE_FSTACK)
+ conf->kernel_network_stack = NGX_CONF_UNSET;
+#endif
+
return conf;
}
#endif
}
+#if (NGX_HAVE_FSTACK)
+ /* By default, we set up a proxy on fstack */
+ ngx_conf_merge_value(conf->kernel_network_stack,
+ prev->kernel_network_stack, 0);
+#endif
+
return NGX_CONF_OK;
}
ls->quic = addr->opt.quic;
#endif
+#if (NGX_HAVE_FSTACK)
+ ls->belong_to_host = cscf->kernel_network_stack;
+#endif
+
return ls;
}
0,
NULL },
+#if (NGX_HAVE_FSTACK)
+ { ngx_string("kernel_network_stack"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_flag_slot,
+ NGX_HTTP_SRV_CONF_OFFSET,
+ offsetof(ngx_http_core_srv_conf_t, kernel_network_stack),
+ NULL },
+#endif
+
{ ngx_string("types_hash_max_size"),
NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
ngx_conf_set_num_slot,
cscf->file_name = cf->conf_file->file.name.data;
cscf->line = cf->conf_file->line;
+#if (NGX_HAVE_FSTACK)
+ cscf->kernel_network_stack = NGX_CONF_UNSET;
+#endif
+
return cscf;
}
ngx_conf_merge_value(conf->underscores_in_headers,
prev->underscores_in_headers, 0);
+#if (NGX_HAVE_FSTACK)
+ /* By default, we set up a server on fstack */
+ ngx_conf_merge_value(conf->kernel_network_stack,
+ prev->kernel_network_stack, 0);
+#endif
+
if (conf->server_names.nelts == 0) {
/* the array has 4 empty preallocated elements, so push cannot fail */
sn = ngx_array_push(&conf->server_names);
ngx_flag_t merge_slashes;
ngx_flag_t underscores_in_headers;
+#if (NGX_HAVE_FSTACK)
+ ngx_flag_t kernel_network_stack; /* kernel_network_stack */
+#endif
+
unsigned listen:1;
#if (NGX_PCRE)
unsigned captures:1;
#endif
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
return;
}
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
if (rev->pending_eof) {
#endif
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
int err;
socklen_t len;
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
if (c->write->pending_eof || c->read->pending_eof) {
return;
}
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
if (rev->pending_eof) {
ls->ipv6only = addr[i].opt.ipv6only;
#endif
+#if (NGX_HAVE_FSTACK)
+ ls->belong_to_host = cscf->kernel_network_stack;
+#endif
+
mport = ngx_palloc(cf->pool, sizeof(ngx_mail_port_t));
if (mport == NULL) {
return NGX_CONF_ERROR;
ngx_str_t server_name;
+#if (NGX_HAVE_FSTACK)
+ ngx_flag_t kernel_network_stack; /* listen via the kernel network stack */
+#endif
+
u_char *file_name;
ngx_uint_t line;
offsetof(ngx_mail_core_srv_conf_t, server_name),
NULL },
+#if (NGX_HAVE_FSTACK)
+ { ngx_string("kernel_network_stack"),
+ NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_flag_slot,
+ NGX_MAIL_SRV_CONF_OFFSET,
+ offsetof(ngx_mail_core_srv_conf_t, kernel_network_stack),
+ NULL },
+#endif
+
{ ngx_string("error_log"),
NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_1MORE,
ngx_mail_core_error_log,
cscf->file_name = cf->conf_file->file.name.data;
cscf->line = cf->conf_file->line;
+#if (NGX_HAVE_FSTACK)
+ cscf->kernel_network_stack = NGX_CONF_UNSET;
+#endif
+
return cscf;
}
ngx_conf_merge_ptr_value(conf->resolver, prev->resolver, NULL);
+#if (NGX_HAVE_FSTACK)
+ /* by default, the mail server's listening sockets are handled by F-Stack */
+ ngx_conf_merge_value(conf->kernel_network_stack,
+ prev->kernel_network_stack, 0);
+#endif
+
return NGX_CONF_OK;
}
ngx_flag_t proxy_protocol;
size_t buffer_size;
ngx_msec_t timeout;
+
+#if (NGX_HAVE_FSTACK)
+ ngx_flag_t kernel_network_stack;
+#endif
} ngx_mail_proxy_conf_t;
rev = c->read;
wev = c->write;
+#if (NGX_HAVE_FSTACK)
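+ /* these events are serviced by the kernel (host) stack, not by F-Stack */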
+ rev->belong_to_host = wev->belong_to_host = 1;
+#endif
+
rev->log = cycle->log;
wev->log = cycle->log;
ev->handler = handler;
+#if (NGX_HAVE_FSTACK)
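+ /* presumably ngx_add_conn is wrapped by the ff module here, so test the underlying ngx_event_actions.add_conn pointer directly */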
+ if (ngx_event_actions.add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
+#else
if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
+#endif
if (ngx_add_conn(c) == NGX_ERROR) {
ngx_free_connection(c);
return NGX_ERROR;
return in;
}
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if ((ngx_event_flags & NGX_USE_KQUEUE_EVENT) && wev->pending_eof) {
(void) ngx_connection_error(c, wev->kq_errno,
aio->aiocb.aio_offset = offset;
aio->aiocb.aio_buf = buf;
aio->aiocb.aio_nbytes = size;
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
aio->aiocb.aio_sigevent.sigev_notify_kqueue = ngx_kqueue;
aio->aiocb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
aio->aiocb.aio_sigevent.sigev_value.sival_ptr = ev;
return in;
}
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if ((ngx_event_flags & NGX_USE_KQUEUE_EVENT) && wev->pending_eof) {
(void) ngx_connection_error(c, wev->kq_errno,
static void ngx_cache_manager_process_handler(ngx_event_t *ev);
static void ngx_cache_loader_process_handler(ngx_event_t *ev);
+#if (NGX_HAVE_FSTACK)
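+/* F-Stack init hook: ff_mod_init(conf path, DPDK proc_id, proc_type); the third argument is passed as 1 for the primary process below */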
+extern int ff_mod_init(const char *conf, int proc_id, int proc_type);
+ngx_int_t ngx_ff_process;
+#endif
ngx_uint_t ngx_process;
ngx_uint_t ngx_worker;
static ngx_log_t ngx_exit_log;
static ngx_open_file_t ngx_exit_log_file;
+#if (NGX_HAVE_FSTACK)
+static sem_t *ngx_ff_worker_sem;
+#endif
+
void
ngx_master_process_cycle(ngx_cycle_t *cycle)
size_t size;
ngx_int_t i;
ngx_uint_t sigio;
+#if (NGX_HAVE_FSTACK)
+ ngx_uint_t sig_worker_quit = 0;
+#endif
sigset_t set;
struct itimerval itv;
ngx_uint_t live;
if (ngx_quit) {
ngx_signal_worker_processes(cycle,
ngx_signal_value(NGX_SHUTDOWN_SIGNAL));
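+ /* with F-Stack, listening sockets are opened by each worker after ff_mod_init (see ngx_worker_process_init), so the master presumably has none to close */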
+#if (!NGX_HAVE_FSTACK)
ngx_close_listening_sockets(cycle);
-
+#endif
continue;
}
if (ngx_reconfigure) {
+#if (NGX_HAVE_FSTACK)
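+ /* on reload, first shut down all running workers and wait until none are live; presumably the old F-Stack/DPDK workers must fully exit before new ones can be forked */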
+ if (!sig_worker_quit) {
+ sig_worker_quit = 1;
+ ngx_signal_worker_processes(cycle,
+ ngx_signal_value(NGX_SHUTDOWN_SIGNAL));
+ continue;
+ }
+
+ if (live) {
+ continue;
+ }
+
+ sig_worker_quit = 0;
+#endif
ngx_reconfigure = 0;
if (ngx_new_binary) {
}
}
+#if (NGX_HAVE_FSTACK)
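+/*
+ * One iteration of the single-process cycle; ff_run() (F-Stack's main loop)
+ * invokes this callback repeatedly in place of nginx's own for ( ;; ) loop.
+ */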
+static int
+ngx_single_process_cycle_loop(void *arg)
+{
+ ngx_uint_t i;
+ ngx_cycle_t *cycle = (ngx_cycle_t *)arg;
+
+ //ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "worker cycle");
+
+ ngx_process_events_and_timers(cycle);
+
+ if (ngx_terminate || ngx_quit) {
+
+ for (i = 0; cycle->modules[i]; i++) {
+ if (cycle->modules[i]->exit_process) {
+ cycle->modules[i]->exit_process(cycle);
+ }
+ }
+
+ ngx_master_process_exit(cycle);
+ }
+
+ if (ngx_reconfigure) {
+ ngx_reconfigure = 0;
+ ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "reconfiguring");
+
+ cycle = ngx_init_cycle(cycle);
+ if (cycle == NULL) {
+ cycle = (ngx_cycle_t *) ngx_cycle;
+ return 0;
+ }
+
+ ngx_cycle = cycle;
+ }
+
+ if (ngx_reopen) {
+ ngx_reopen = 0;
+ ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "reopening logs");
+ ngx_reopen_files(cycle, (ngx_uid_t) -1);
+ }
+
+ return 0;
+}
+#endif
void
ngx_single_process_cycle(ngx_cycle_t *cycle)
exit(2);
}
+#if (NGX_HAVE_FSTACK)
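+ /* in single-process mode this process is the DPDK primary (proc_id 0); listening sockets are opened only after F-Stack is initialized, since they presumably must be created on the F-Stack stack */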
+ ngx_core_conf_t *ccf;
+ ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
+ if (ccf->fstack_conf.len == 0) {
+ ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
+ "fstack_conf null");
+ exit(2);
+ }
+
+ ngx_ff_process = NGX_FF_PROCESS_PRIMARY;
+
+ if (ff_mod_init((const char *)ccf->fstack_conf.data, 0,
+ ngx_ff_process == NGX_FF_PROCESS_PRIMARY)) {
+ ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
+ "ff_mod_init failed");
+ exit(2);
+ }
+
+ if (ngx_open_listening_sockets(cycle) != NGX_OK) {
+ ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
+ "ngx_open_listening_sockets failed");
+ exit(2);
+ }
+
+ if (!ngx_test_config) {
+ ngx_configure_listening_sockets(cycle);
+ }
+#endif
+
for (i = 0; cycle->modules[i]; i++) {
if (cycle->modules[i]->init_process) {
if (cycle->modules[i]->init_process(cycle) == NGX_ERROR) {
}
}
+#if (NGX_HAVE_FSTACK)
+ ff_run(ngx_single_process_cycle_loop, (void *)cycle);
+#else
for ( ;; ) {
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "worker cycle");
ngx_reopen_files(cycle, (ngx_uid_t) -1);
}
}
+#endif
}
{
ngx_int_t i;
+#if (NGX_HAVE_FSTACK)
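+ /*
+ * A process-shared semaphore in POSIX shared memory: worker 0 (the DPDK
+ * primary) posts it once its F-Stack initialization completes, and the
+ * master waits on it before forking the remaining secondary workers.
+ */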
+ const char *shm_name = "ff_shm";
+ int shm_fd, r;
+
+ shm_fd = shm_open(shm_name, O_CREAT|O_TRUNC|O_RDWR, 0666);
+ if (shm_fd == -1) {
+ ngx_log_error(NGX_LOG_ERR, cycle->log, ngx_errno,
+ "start worker processes shm_open");
+ exit(2);
+ }
+ r = ftruncate(shm_fd, sizeof(sem_t));
+ if (r == -1) {
+ ngx_log_error(NGX_LOG_ERR, cycle->log, ngx_errno,
+ "start worker processes ftruncate");
+ exit(2);
+ }
+ ngx_ff_worker_sem = (sem_t *) mmap(NULL, sizeof(sem_t),
+ PROT_READ|PROT_WRITE, MAP_SHARED, shm_fd, 0);
+ if (ngx_ff_worker_sem == MAP_FAILED) {
+ ngx_log_error(NGX_LOG_ERR, cycle->log, ngx_errno,
+ "start worker processes mmap");
+ shm_unlink(shm_name);
+ exit(2);
+ }
+ if (sem_init(ngx_ff_worker_sem, 1, 0) != 0) {
+ ngx_log_error(NGX_LOG_ERR, cycle->log, ngx_errno,
+ "start worker processes sem_init");
+
+ munmap(ngx_ff_worker_sem, sizeof(sem_t));
+ shm_unlink(shm_name);
+ exit(2);
+ }
+#endif
+
ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "start worker processes");
for (i = 0; i < n; i++) {
(void *) (intptr_t) i, "worker process", type);
ngx_pass_open_channel(cycle);
+
+#if (NGX_HAVE_FSTACK)
+
+ /* wait for the primary F-Stack worker process to finish starting up */
+ if (i == 0) {
+ struct timespec ts;
+ (void) clock_gettime(CLOCK_REALTIME, &ts);
+
+ ts.tv_sec += 15; /* 15 second timeout */
+ while ((r = sem_timedwait(ngx_ff_worker_sem, &ts)) == -1
+ && errno == EINTR)
+ {
+ continue; /* Restart if interrupted by signal handler */
+ }
+
+ if (r == -1) {
+ ngx_log_error(NGX_LOG_ERR, cycle->log, ngx_errno,
+ "primary worker process failed to initialize");
+ exit(2);
+ }
+
+ sem_destroy(ngx_ff_worker_sem);
+ munmap(ngx_ff_worker_sem, sizeof(sem_t));
+ shm_unlink(shm_name);
+ }
+#endif
+
}
}
if (ngx_processes[i].respawn
&& !ngx_processes[i].exiting
&& !ngx_terminate
- && !ngx_quit)
+ && !ngx_quit
+#if (NGX_HAVE_FSTACK)
+ && !ngx_reconfigure
+#endif
+ )
{
if (ngx_spawn_process(cycle, ngx_processes[i].proc,
ngx_processes[i].data,
exit(0);
}
+#if (NGX_HAVE_FSTACK)
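+/*
+ * One iteration of the worker cycle; under F-Stack, ff_run() drives this
+ * callback instead of the for ( ;; ) loop below.
+ */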
+static int
+ngx_worker_process_cycle_loop(void *arg)
+{
+ ngx_cycle_t *cycle = (ngx_cycle_t *)arg;
+
+ if (ngx_exiting) {
+ if (ngx_event_no_timers_left() == NGX_OK) {
+ ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "exiting");
+ ngx_worker_process_exit(cycle);
+ }
+ }
+
+ //ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "worker cycle");
+
+ ngx_process_events_and_timers(cycle);
+
+ if (ngx_terminate) {
+ ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "exiting");
+ ngx_worker_process_exit(cycle);
+ }
+
+ if (ngx_quit) {
+ ngx_quit = 0;
+ ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0,
+ "gracefully shutting down");
+ ngx_setproctitle("worker process is shutting down");
+
+ if (!ngx_exiting) {
+ ngx_exiting = 1;
+ ngx_close_listening_sockets(cycle);
+ ngx_close_idle_connections(cycle);
+ }
+ }
+
+ if (ngx_reopen) {
+ ngx_reopen = 0;
+ ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "reopening logs");
+ ngx_reopen_files(cycle, -1);
+ }
+
+ return 0;
+}
+#endif
static void
ngx_worker_process_cycle(ngx_cycle_t *cycle, void *data)
ngx_setproctitle("worker process");
+#if (NGX_HAVE_FSTACK)
+ ff_run(ngx_worker_process_cycle_loop, (void *)cycle);
+#else
+
for ( ;; ) {
if (ngx_exiting) {
ngx_reopen_files(cycle, -1);
}
}
+#endif
}
tp = ngx_timeofday();
srandom(((unsigned) ngx_pid << 16) ^ tp->sec ^ tp->msec);
+#if (NGX_HAVE_FSTACK)
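+ /* helper processes (cache manager/loader, worker == -1) skip F-Stack; worker 0 runs as the DPDK primary, the others attach as secondaries, and each worker opens its own listening sockets after ff_mod_init */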
+ if (worker >= 0) {
+ if (ccf->fstack_conf.len == 0) {
+ ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
+ "fstack_conf null");
+ exit(2);
+ }
+
+ if (worker == 0) {
+ ngx_ff_process = NGX_FF_PROCESS_PRIMARY;
+ } else {
+ ngx_ff_process = NGX_FF_PROCESS_SECONDARY;
+ }
+
+ if (ff_mod_init((const char *)ccf->fstack_conf.data, worker,
+ ngx_ff_process == NGX_FF_PROCESS_PRIMARY)) {
+ ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
+ "ff_mod_init failed");
+ exit(2);
+ }
+
+ if (worker == 0) {
+ (void) sem_post(ngx_ff_worker_sem);
+ }
+
+ if (ngx_open_listening_sockets(cycle) != NGX_OK) {
+ ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
+ "ngx_open_listening_sockets failed");
+ exit(2);
+ }
+
+ if (!ngx_test_config) {
+ ngx_configure_listening_sockets(cycle);
+ }
+ }
+#endif
+
for (i = 0; cycle->modules[i]; i++) {
if (cycle->modules[i]->init_process) {
if (cycle->modules[i]->init_process(cycle) == NGX_ERROR) {
ngx_log_error(NGX_LOG_NOTICE, ngx_cycle->log, 0, "exit");
+#if (NGX_HAVE_FSTACK)
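+ /* presumably the DPDK primary has to outlive the secondary processes that attach to its shared state */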
+ if (ngx_ff_process == NGX_FF_PROCESS_PRIMARY) {
+ /* give the secondary worker processes time to exit first */
+ ngx_msleep(500);
+ }
+#endif
+
exit(0);
}
void ngx_master_process_cycle(ngx_cycle_t *cycle);
void ngx_single_process_cycle(ngx_cycle_t *cycle);
+#if (NGX_HAVE_FSTACK)
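+/* role of this process within the F-Stack/DPDK multi-process setup */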
+#define NGX_FF_PROCESS_NONE 0
+#define NGX_FF_PROCESS_PRIMARY 1
+#define NGX_FF_PROCESS_SECONDARY 2
+extern ngx_int_t ngx_ff_process;
+#endif
extern ngx_uint_t ngx_process;
extern ngx_uint_t ngx_worker;
rev = c->read;
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
rev->ready = 0;
rev->eof = 1;
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
/*
* on FreeBSD readv() may return 0 on closed socket
if (n > 0) {
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
rev->available -= n;
rev = c->read;
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
rev->ready = 0;
rev->eof = 1;
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
/*
* on FreeBSD recv() may return 0 on closed socket
if (n > 0) {
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
rev->available -= n;
wev = c->write;
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if ((ngx_event_flags & NGX_USE_KQUEUE_EVENT) && wev->pending_eof) {
(void) ngx_connection_error(c, wev->kq_errno,
if (n >= 0) {
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
rev->available -= n;
return in;
}
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if ((ngx_event_flags & NGX_USE_KQUEUE_EVENT) && wev->pending_eof) {
(void) ngx_connection_error(c, wev->kq_errno,
msg.msg_iovlen = vec->count;
#if (NGX_HAVE_ADDRINFO_CMSG)
+#if (NGX_HAVE_FSTACK)
+ if (c->listening && !c->listening->belong_to_host && c->listening->wildcard && c->local_sockaddr) {
+#else
if (c->listening && c->listening->wildcard && c->local_sockaddr) {
+#endif
msg.msg_control = msg_control;
msg.msg_controllen = sizeof(msg_control);
#if (NGX_DEBUG)
size_t size;
ngx_uint_t i;
+
+#if (NGX_HAVE_FSTACK)
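+ /* the total size for the debug log is computed before the send, and the original post-send loop is compiled out below, presumably because the F-Stack send path may modify the iovec */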
+ for (i = 0, size = 0; i < (size_t) msg->msg_iovlen; i++) {
+ size += msg->msg_iov[i].iov_len;
+ }
+#endif
#endif
eintr:
}
#if (NGX_DEBUG)
+#if (!NGX_HAVE_FSTACK)
for (i = 0, size = 0; i < (size_t) msg->msg_iovlen; i++) {
size += msg->msg_iov[i].iov_len;
}
+#endif
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0,
"sendmsg: %z of %uz", n, size);
return in;
}
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if ((ngx_event_flags & NGX_USE_KQUEUE_EVENT) && wev->pending_eof) {
(void) ngx_connection_error(c, wev->kq_errno,
ls->reuseport = addr->opt.reuseport;
#endif
+#if (NGX_HAVE_FSTACK)
+ ls->belong_to_host = cscf->kernel_network_stack;
+#endif
+
ls->wildcard = addr->opt.wildcard;
return ls;
ngx_msec_t proxy_protocol_timeout;
+#if (NGX_HAVE_FSTACK)
+ ngx_flag_t kernel_network_stack; /* listen via the kernel network stack */
+#endif
+
unsigned listen:1;
#if (NGX_PCRE)
unsigned captures:1;
0,
NULL },
+#if (NGX_HAVE_FSTACK)
+ { ngx_string("kernel_network_stack"),
+ NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_flag_slot,
+ NGX_STREAM_SRV_CONF_OFFSET,
+ offsetof(ngx_stream_core_srv_conf_t, kernel_network_stack),
+ NULL },
+#endif
+
{ ngx_string("server_name"),
NGX_STREAM_SRV_CONF|NGX_CONF_1MORE,
ngx_stream_core_server_name,
cscf->preread_buffer_size = NGX_CONF_UNSET_SIZE;
cscf->preread_timeout = NGX_CONF_UNSET_MSEC;
+#if (NGX_HAVE_FSTACK)
+ cscf->kernel_network_stack = NGX_CONF_UNSET;
+#endif
+
return cscf;
}
ngx_conf_merge_msec_value(conf->preread_timeout,
prev->preread_timeout, 30000);
+#if (NGX_HAVE_FSTACK)
+ /* by default, the stream server's listening sockets are handled by F-Stack */
+ ngx_conf_merge_value(conf->kernel_network_stack,
+ prev->kernel_network_stack, 0);
+#endif
+
if (conf->server_names.nelts == 0) {
/* the array has 4 empty preallocated elements, so push cannot fail */
sn = ngx_array_push(&conf->server_names);
ngx_ssl_t *ssl;
#endif
+#if (NGX_HAVE_FSTACK)
+ ngx_flag_t kernel_network_stack;
+#endif
+
ngx_stream_upstream_srv_conf_t *upstream;
ngx_stream_complex_value_t *upstream_value;
} ngx_stream_proxy_srv_conf_t;
#endif
+#if (NGX_HAVE_FSTACK)
+ { ngx_string("proxy_kernel_network_stack"),
+ NGX_STREAM_MAIN_CONF|NGX_STREAM_SRV_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_flag_slot,
+ NGX_STREAM_SRV_CONF_OFFSET,
+ offsetof(ngx_stream_proxy_srv_conf_t, kernel_network_stack),
+ NULL },
+#endif
+
ngx_null_command
};
u->peer.type = c->type;
u->start_sec = ngx_time();
+#if (NGX_HAVE_FSTACK)
+ u->peer.belong_to_host = pscf->kernel_network_stack;
+#endif
+
c->write->handler = ngx_stream_proxy_downstream_handler;
c->read->handler = ngx_stream_proxy_downstream_handler;
int err;
socklen_t len;
-#if (NGX_HAVE_KQUEUE)
+#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_FSTACK)
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
err = c->write->kq_errno ? c->write->kq_errno : c->read->kq_errno;
conf->ssl_conf_commands = NGX_CONF_UNSET_PTR;
#endif
+#if (NGX_HAVE_FSTACK)
+ conf->kernel_network_stack = NGX_CONF_UNSET;
+#endif
+
return conf;
}
#endif
+#if (NGX_HAVE_FSTACK)
+ /* by default, proxied upstream connections go through F-Stack */
+ ngx_conf_merge_value(conf->kernel_network_stack,
+ prev->kernel_network_stack, 0);
+#endif
+
return NGX_CONF_OK;
}