环境:

Distributor ID:	Ubuntu
Description:	Ubuntu 16.04.2 LTS
Release:	16.04
Codename:	xenial

nginx version: openresty/1.11.2.5

8核心 Intel(R) Xeon(R) Platinum 8163 CPU @ 2.50GHz
16GB 物理内存

ulimit

$ ulimit -a

core file size          (blocks, -c) 0
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 64045
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 65535
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 64045
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited

nginx.conf

worker_processes  auto;
worker_cpu_affinity auto;


events {
    use epoll; # Usually unnecessary: by default Nginx automatically selects the most efficient method.  http://nginx.org/en/docs/events.html
    worker_connections 4096; 	# Testing showed that values both much larger and much smaller perform worse; 4096 is a good fit here
    multi_accept off; # Keep the default (off) unless your own testing confirms a performance gain with on
    accept_mutex off; # Turn this off when using epoll or listen reuseport; it is off by default anyway. http://nginx.org/en/docs/ngx_core_module.html#events
}


http {
    keepalive_timeout  65;
    gzip  on;

    upstream bid_server {
        # The bidding backend (placeholder: replace with your internal IP and port).
        # FIX: the terminating ';' was missing here, which makes nginx fail to
        # parse the configuration ('unexpected end of parameter, expecting ";"').
        server 内网IP:端口;

        # Size this according to your bid server's connection capacity and the nginx load.
        keepalive 2000;
    }

    server {
        listen 888 backlog=65535 reuseport;
        server_name localhost 你的服务器网址或IP;

        location / {
            # Required for the upstream keepalive pool to work (HTTP/1.1 with an
            # empty Connection header). See https://www.nginx.com/blog/performance-tuning-tips-tricks/
            proxy_http_version 1.1;
            proxy_set_header Connection "";

            proxy_pass http://bid_server;
            proxy_redirect off;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            client_max_body_size 10m;
            client_body_buffer_size 512k;
            proxy_buffer_size 256k;
            proxy_buffers 4 256k;
            proxy_busy_buffers_size 512k;
            proxy_temp_file_write_size 512k;
        }
    }
}

OS 层参数

sudo sysctl -w net.core.netdev_max_backlog=2000

# 为了配合 listen 的 backlog=65535 参数
sudo sysctl -w net.core.somaxconn=65535

关于 backlog 的参数, 可以参考 Socket参数资料收集与整理

参考资料

https://www.nginx.com/blog/performance-tuning-tips-tricks/