Feature #14435

add SOCKET_BACKLOG setting in ceph.conf

Added by Diluga Salome about 8 years ago. Updated about 2 months ago.

Status: Resolved
Priority: High
Assignee: -
Target version: -
% Done: 0%
Source: Community (user)
Tags:
Backport:
Reviewed:
Affected Versions:
Pull request ID:

Description

I use nginx + radosgw and tested with COSBench. In one of my test cases, with 2000 workers, a lot of 502 errors appeared in the log. I tried changing my kernel and nginx settings, but that did not help. Eventually I found that the radosgw socket backlog is 1024, hard-coded in "rgw_main.cc" (#define SOCKET_BACKLOG 1024). Is there any way to change it, or could a feature be added to define it in ceph.conf? It would be a very useful setting for improving rgw performance!

ss -ln |grep sock
u_str LISTEN 0 1024 /home/ceph/var/run/ceph-client.radosgw.cn-zone1.sock 669841332 * 0
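
The following is a minimal sketch, not the actual rgw_main.cc code, of how the hard-coded backlog ends up on the FastCGI listening socket and how a value read from ceph.conf could replace it. The configured_backlog parameter (and whatever option would feed it) is an assumption for illustration only.

// Sketch only: FCGX_OpenSocket() takes the backlog that is later passed to
// listen(), so making the value configurable means threading a ceph.conf
// setting into this call instead of the #define.
#include <fcgiapp.h>

#define SOCKET_BACKLOG 1024   // the hard-coded value reported above

int open_rgw_fcgi_socket(const char *socket_path, int configured_backlog)
{
  // Prefer a configured value if one is set; otherwise fall back to the #define.
  int backlog = configured_backlog > 0 ? configured_backlog : SOCKET_BACKLOG;

  // FCGX_OpenSocket() calls listen(fd, backlog) internally; the kernel also
  // caps the effective queue length at net.core.somaxconn (see listen(2)).
  return FCGX_OpenSocket(socket_path, backlog);
}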

1. sysctl settings:

net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_keepalive_time = 180
net.ipv4.tcp_keepalive_intvl = 10
net.ipv4.tcp_keepalive_probes = 9
net.ipv4.icmp_ignore_bogus_error_responses = 1
net.ipv4.conf.all.log_martians = 1
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_synack_retries = 3
net.ipv4.tcp_syn_retries = 3
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 40960
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 0
net.core.somaxconn = 81920
net.core.netdev_max_backlog = 65535
net.ipv4.tcp_max_syn_backlog = 65535
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.wmem_default = 2097152
kernel.threads-max = 100000
net.ipv4.ip_forward=1
net.ipv4.tcp_rmem=4096 87380 16777216
net.ipv4.tcp_wmem=4096 65536 16777216
net.netfilter.nf_conntrack_max=1000000
vm.swappiness=0
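
Note that none of these sysctls raise the backlog of the radosgw unix socket: listen(2) silently truncates the backlog requested by the application to net.core.somaxconn, so with SOCKET_BACKLOG fixed at 1024 the queue stays at 1024 no matter how large somaxconn is. A small standalone sketch (not Ceph code; the socket path is just an example) that makes this visible with ss -ln:

// Listen on a unix socket with a chosen backlog and keep it open so the
// queue length reported by `ss -ln` can be inspected and compared against
// net.core.somaxconn.
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>

int main(int argc, char **argv)
{
  int backlog = argc > 1 ? std::atoi(argv[1]) : 1024;

  int fd = socket(AF_UNIX, SOCK_STREAM, 0);
  sockaddr_un addr{};
  addr.sun_family = AF_UNIX;
  std::strncpy(addr.sun_path, "/tmp/backlog-test.sock", sizeof(addr.sun_path) - 1);
  unlink(addr.sun_path);

  if (bind(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)) < 0 ||
      listen(fd, backlog) < 0) {      // backlog is capped at net.core.somaxconn
    perror("bind/listen");
    return 1;
  }
  std::printf("listening on %s with backlog %d; check: ss -ln | grep backlog-test\n",
              addr.sun_path, backlog);
  pause();  // keep the listening socket open for inspection
  return 0;
}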

2. nginx settings:

user  nginx;
worker_processes auto;
error_log /home/ceph/log/nginx/error.log debug;
pid /var/run/nginx.pid;
worker_rlimit_nofile 65535;

events {
    worker_connections 4000;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /home/ceph/log/nginx/access.log main;
    sendfile on;
    include /etc/nginx/conf.d/*.conf;
}

server {
    listen 80 backlog=10240;
    #ssl on;
    gzip off;
    #client_body_buffer_size 2000M;
    client_max_body_size 0;
    server_name *.s3.xxx.com;

    location / {
        #fastcgi_buffers 64 128k;
        #fastcgi_buffer_size 128k;
        #fastcgi_busy_buffers_size 128k;
        #fastcgi_temp_file_write_size 128k;
        #fastcgi_max_temp_file_size 1024m;
        fastcgi_pass_header Authorization;
        fastcgi_pass_request_headers on;
        fastcgi_param QUERY_STRING $query_string;
        fastcgi_param REQUEST_METHOD $request_method;
        fastcgi_param CONTENT_LENGTH $content_length;
        fastcgi_param CONTENT_TYPE $content_type;
        if ($request_method = PUT) {
            rewrite ^ /PUT$request_uri;
        }
        include fastcgi_params;
        fastcgi_pass unix:/home/ceph/var/run/ceph-client.radosgw.cn-zone1.sock;
        #fastcgi_pass localhost:9000;
    }

    location /PUT/ {
        #fastcgi_buffers 64 128k;
        #fastcgi_buffer_size 128k;
        internal;
        #fastcgi_max_temp_file_size 1024m;
        fastcgi_pass_header Authorization;
        fastcgi_pass_request_headers on;
        include fastcgi_params;
        fastcgi_param QUERY_STRING $query_string;
        fastcgi_param REQUEST_METHOD $request_method;
        fastcgi_param CONTENT_LENGTH $content_length;
        fastcgi_param CONTENT_TYPE $content_type;
        fastcgi_pass unix:/home/ceph/var/run/ceph-client.radosgw.cn-zone1.sock;
        #fastcgi_pass localhost:9000;
    }
}

3. ceph.conf

[client.radosgw.cn-zone1]
rgw dns name = s3.xxx.com
rgw frontends = fastcgi
host = xxx
rgw region = cn
rgw region root pool = .cn.rgw.root
rgw zone = cn-zone1
rgw zone root pool = .cn-zone1.rgw.root
keyring = /etc/ceph/ceph.client.radosgw.keyring
rgw socket path = /home/ceph/var/run/ceph-client.radosgw.cn-zone1.sock
log file = /home/ceph/log/radosgw.cn-zone1.log
rgw print continue = false
rgw content length compat = true
rgw ops log rados = false
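
If the backlog were exposed through ceph.conf as requested, it would presumably live in the same RGW client section as the settings above. The option name below is an assumption for illustration; the actual name and availability depend on the Ceph release.

[client.radosgw.cn-zone1]
# hypothetical option; not confirmed to exist under this name
rgw fcgi socket backlog = 8192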

4. COSBench workload

<workload name="buckets*objects=100*10000 size=4k num_workers=2000 read:write=100:0" description="buckets*objects=100*10000 size=4k num_workers=2000 read:write=100:0">
  <storage type="s3" config="accesskey=xxx;secretkey=xxx;proxyhost=;proxyport=;endpoint=http://xxx:80"/>
  <!-- Small Objects, Write & Read -->
  <workflow>
    <!-- Create buckets -->
    <workstage name="create-buckets">
      <work type="init" workers="2000" config="cprefix=g3test;containers=r(1,100)"/>
    </workstage>
    <!-- Prepare objects for read -->
    <workstage name="prepare">
      <work type="prepare" workers="2000" config="cprefix=g3test;containers=r(1,100);objects=r(1,10000);sizes=c(4)KB"/>
    </workstage>
    <!-- Read and write -->
    <workstage name="main">
      <work name="main" workers="2000" runtime="3600">
        <operation type="read" ratio="100" config="cprefix=g3test;containers=r(1,100);objects=u(1,10000)"/>
      </work>
    </workstage>
    <!-- Delete objects -->
    <workstage name="cleanup">
      <work type="cleanup" workers="2000" config="cprefix=g3test;containers=r(1,100);objects=r(1,10000)"/>
    </workstage>
    <!-- Delete buckets -->
    <workstage name="dispose">
      <work type="dispose" workers="2000" config="cprefix=g3test;containers=r(1,100)"/>
    </workstage>
  </workflow>
</workload>


Related issues

Related to rgw - Feature #43952: Beast frontend option to configure the maximum number of connections (Resolved)

History

#1 Updated by Casey Bodley about 2 months ago

  • Related to Feature #43952: Beast frontend option to configure the maximum number of connections added

#2 Updated by Casey Bodley about 2 months ago

  • Status changed from New to Resolved
