Project

General

Profile

Bug #14525 » fio performance test result after adding cache pool.txt

chuanhong wang, 01/27/2016 10:33 AM

 
[root@ceph102 ~]# ceph osd dump |grep pool
pool 0 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 1 flags hashpspool stripe_width 0
pool 1 'volumes' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 1024 pgp_num 1024 last_change 3309 lfor 3309 flags hashpspool tiers 4 read_tier 4 write_tier 4 stripe_width 0
pool 2 'images' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 128 pgp_num 128 last_change 2416 flags hashpspool stripe_width 0
pool 3 'vms' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 128 pgp_num 128 last_change 1824 flags hashpspool stripe_width 0
pool 4 'ssd-pool' replicated size 3 min_size 2 crush_ruleset 1 object_hash rjenkins pg_num 64 pgp_num 64 last_change 3310 flags hashpspool,incomplete_clones tier_of 1 cache_mode writeback hit_set bloom{false_positive_probability: 0.05, target_size: 0, seed: 0} 0s x0 stripe_width 0

[root@ceph102 ~]# ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-5 1.07996 root ssd-root
30 0.35999 osd.30 up 1.00000 1.00000
31 0.35999 osd.31 up 1.00000 1.00000
32 0.35999 osd.32 up 1.00000 1.00000
-1 8.09967 root default
-2 2.69989 host ceph104
1 0.26999 osd.1 up 1.00000 1.00000
4 0.26999 osd.4 up 1.00000 1.00000
7 0.26999 osd.7 up 1.00000 1.00000
10 0.26999 osd.10 up 1.00000 1.00000
14 0.26999 osd.14 up 1.00000 1.00000
17 0.26999 osd.17 up 1.00000 1.00000
20 0.26999 osd.20 up 1.00000 1.00000
23 0.26999 osd.23 up 1.00000 1.00000
25 0.26999 osd.25 up 1.00000 1.00000
27 0.26999 osd.27 up 1.00000 1.00000
-3 2.69989 host ceph103
2 0.26999 osd.2 up 1.00000 1.00000
5 0.26999 osd.5 up 1.00000 1.00000
9 0.26999 osd.9 up 1.00000 1.00000
12 0.26999 osd.12 up 1.00000 1.00000
16 0.26999 osd.16 up 1.00000 1.00000
19 0.26999 osd.19 up 1.00000 1.00000
22 0.26999 osd.22 up 1.00000 1.00000
24 0.26999 osd.24 up 1.00000 1.00000
28 0.26999 osd.28 up 1.00000 1.00000
0 0.26999 osd.0 up 1.00000 1.00000
-4 2.69989 host ceph102
3 0.26999 osd.3 up 1.00000 1.00000
6 0.26999 osd.6 up 1.00000 1.00000
8 0.26999 osd.8 up 1.00000 1.00000
11 0.26999 osd.11 up 1.00000 1.00000
13 0.26999 osd.13 up 1.00000 1.00000
15 0.26999 osd.15 up 1.00000 1.00000
18 0.26999 osd.18 up 1.00000 1.00000
21 0.26999 osd.21 up 1.00000 1.00000
26 0.26999 osd.26 up 1.00000 1.00000
29 0.26999 osd.29 up 1.00000 1.00000
[root@ceph102 ~]# ceph osd crush rule dump
[
{
"rule_id": 0,
"rule_name": "replicated_ruleset",
"ruleset": 0,
"type": 1,
"min_size": 1,
"max_size": 10,
"steps": [
{
"op": "take",
"item": -1,
"item_name": "default"
},
{
"op": "chooseleaf_firstn",
"num": 0,
"type": "host"
},
{
"op": "emit"
}
]
},
{
"rule_id": 1,
"rule_name": "ssd-rule",
"ruleset": 1,
"type": 1,
"min_size": 1,
"max_size": 10,
"steps": [
{
"op": "take",
"item": -5,
"item_name": "ssd-root"
},
{
"op": "choose_firstn",
"num": 0,
"type": "osd"
},
{
"op": "emit"
}
]
}
]

[root@ceph102 ~]# ceph osd df
ID WEIGHT REWEIGHT SIZE USE AVAIL %USE VAR
30 0.35999 1.00000 372G 220G 152G 59.15 1.10
31 0.35999 1.00000 372G 220G 152G 59.15 1.10
32 0.35999 1.00000 372G 220G 152G 59.15 1.10
1 0.26999 1.00000 279G 149G 130G 53.37 0.99
4 0.26999 1.00000 279G 138G 141G 49.50 0.92
7 0.26999 1.00000 279G 148G 131G 53.00 0.98
10 0.26999 1.00000 279G 146G 132G 52.58 0.98
14 0.26999 1.00000 279G 131G 147G 47.25 0.88
17 0.26999 1.00000 279G 149G 129G 53.62 0.99
20 0.26999 1.00000 279G 174G 104G 62.65 1.16
23 0.26999 1.00000 279G 139G 139G 50.12 0.93
25 0.26999 1.00000 279G 151G 127G 54.19 1.00
27 0.26999 1.00000 279G 150G 128G 54.06 1.00
2 0.26999 1.00000 279G 144G 134G 51.92 0.96
5 0.26999 1.00000 279G 159G 119G 57.25 1.06
9 0.26999 1.00000 279G 134G 144G 48.29 0.90
12 0.26999 1.00000 279G 147G 131G 52.81 0.98
16 0.26999 1.00000 279G 154G 124G 55.24 1.02
19 0.26999 1.00000 279G 133G 145G 47.83 0.89
22 0.26999 1.00000 279G 131G 148G 46.91 0.87
24 0.26999 1.00000 279G 152G 126G 54.72 1.01
28 0.26999 1.00000 279G 152G 126G 54.76 1.02
0 0.26999 1.00000 279G 177G 101G 63.50 1.18
3 0.26999 1.00000 279G 127G 151G 45.76 0.85
6 0.26999 1.00000 279G 153G 125G 54.99 1.02
8 0.26999 1.00000 279G 152G 126G 54.66 1.01
11 0.26999 1.00000 279G 137G 141G 49.28 0.91
13 0.26999 1.00000 279G 177G 101G 63.68 1.18
15 0.26999 1.00000 279G 140G 139G 50.14 0.93
18 0.26999 1.00000 279G 144G 135G 51.61 0.96
21 0.26999 1.00000 279G 152G 126G 54.63 1.01
26 0.26999 1.00000 279G 159G 119G 57.14 1.06
29 0.26999 1.00000 279G 143G 135G 51.44 0.95
TOTAL 9495G 5120G 4374G 53.93
MIN/MAX VAR: 0.85/1.18 STDDEV: 4.57
[root@ceph102 ~]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
9495G 4374G 5120G 53.93
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
rbd 0 303G 3.20 1014G 120040
volumes 1 2010G 21.17 1014G 735455
images 2 57340M 0.59 1014G 7184
vms 3 25544M 0.26 1014G 3291
ssd-pool 4 216G 2.28 152G 69380
[root@ceph102 ~]#

[root@centos7-server1 ~]# fio -filename=/dev/vdc -direct=1 -iodepth 128 -thread -rw=randwrite -ioengine=libaio -bs=4k -numjobs=32 -runtime=60 -group_reporting -name=vmtest
vmtest: (g=0): rw=randwrite, bs=4K-4K/4K-4K/4K-4K, ioengine=libaio, iodepth=128
...
fio-2.2.10
Starting 32 threads
Jobs: 32 (f=32): [w(32)] [0.0% done] [0KB/8500KB/0KB /s] [0/2125/0 iops] [eta 05d:22h:45m:46s]
vmtest: (groupid=0, jobs=32): err= 0: pid=2214: Wed Jan 27 04:53:53 2016
write: io=466172KB, bw=7739.7KB/s, iops=1934, runt= 60232msec
slat (usec): min=3, max=1451.8K, avg=16443.02, stdev=85975.27
clat (msec): min=9, max=7108, avg=2055.04, stdev=770.59
lat (msec): min=9, max=7108, avg=2071.49, stdev=772.77
clat percentiles (msec):
| 1.00th=[ 529], 5.00th=[ 1074], 10.00th=[ 1221], 20.00th=[ 1418],
| 30.00th=[ 1614], 40.00th=[ 1778], 50.00th=[ 1942], 60.00th=[ 2147],
| 70.00th=[ 2343], 80.00th=[ 2638], 90.00th=[ 3032], 95.00th=[ 3458],
| 99.00th=[ 4490], 99.50th=[ 5080], 99.90th=[ 5604], 99.95th=[ 5735],
| 99.99th=[ 6259]
bw (KB /s): min= 1, max= 787, per=3.20%, avg=247.92, stdev=133.23
lat (msec) : 10=0.01%, 20=0.01%, 50=0.02%, 100=0.12%, 250=0.34%
lat (msec) : 500=0.48%, 750=0.59%, 1000=1.88%, 2000=49.66%, >=2000=46.90%
cpu : usr=0.05%, sys=0.05%, ctx=6570, majf=0, minf=9
IO depths : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.2%, 16=0.4%, 32=0.9%, >=64=98.3%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.1%
issued : total=r=0/w=116543/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0
latency : target=0, window=0, percentile=100.00%, depth=128

Run status group 0 (all jobs):
WRITE: io=466172KB, aggrb=7739KB/s, minb=7739KB/s, maxb=7739KB/s, mint=60232msec, maxt=60232msec

Disk stats (read/write):
vdc: ios=186/115996, merge=0/0, ticks=439/9224398, in_queue=9229402, util=99.63%
[root@centos7-server1 ~]#


[root@centos7-server1 ~]# fio -filename=/dev/vdc -direct=1 -iodepth 128 -thread -rw=randread -ioengine=libaio -bs=4k -numjobs=32 -runtime=60 -group_reporting -name=vmtest
vmtest: (g=0): rw=randread, bs=4K-4K/4K-4K/4K-4K, ioengine=libaio, iodepth=128
...
fio-2.2.10
Starting 32 threads
Jobs: 31 (f=31): [r(1),_(1),r(30)] [50.4% done] [71164KB/0KB/0KB /s] [17.8K/0/0 iops] [eta 01m:00s]
vmtest: (groupid=0, jobs=32): err= 0: pid=2258: Wed Jan 27 04:55:04 2016
read : io=4121.6MB, bw=70278KB/s, iops=17569, runt= 60054msec
slat (usec): min=2, max=395943, avg=1814.30, stdev=10100.76
clat (usec): min=970, max=764228, avg=231074.85, stdev=50232.78
lat (msec): min=1, max=819, avg=232.89, stdev=50.91
clat percentiles (msec):
| 1.00th=[ 137], 5.00th=[ 174], 10.00th=[ 188], 20.00th=[ 200],
| 30.00th=[ 208], 40.00th=[ 215], 50.00th=[ 223], 60.00th=[ 231],
| 70.00th=[ 243], 80.00th=[ 260], 90.00th=[ 289], 95.00th=[ 318],
| 99.00th=[ 392], 99.50th=[ 453], 99.90th=[ 619], 99.95th=[ 644],
| 99.99th=[ 725]
bw (KB /s): min= 288, max= 3528, per=3.11%, avg=2188.55, stdev=300.93
lat (usec) : 1000=0.01%
lat (msec) : 2=0.01%, 4=0.02%, 10=0.02%, 20=0.03%, 50=0.18%
lat (msec) : 100=0.22%, 250=75.33%, 500=23.81%, 750=0.39%, 1000=0.01%
cpu : usr=0.21%, sys=0.40%, ctx=37551, majf=0, minf=4106
IO depths : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=0.1%, >=64=99.8%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.1%
issued : total=r=1055115/w=0/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0
latency : target=0, window=0, percentile=100.00%, depth=128

Run status group 0 (all jobs):
READ: io=4121.6MB, aggrb=70277KB/s, minb=70277KB/s, maxb=70277KB/s, mint=60054msec, maxt=60054msec

Disk stats (read/write):
vdc: ios=1052317/0, merge=2/0, ticks=8968586/0, in_queue=8970112, util=99.97%
[root@centos7-server1 ~]#
[root@centos7-server1 ~]#
[root@centos7-server1 ~]#
[root@centos7-server1 ~]# fio -filename=/dev/vdc -direct=1 -iodepth 128 -thread -rw=randread -ioengine=libaio -bs=512k -numjobs=32 -runtime=60 -group_reporting -name=vmtest
vmtest: (g=0): rw=randread, bs=512K-512K/512K-512K/512K-512K, ioengine=libaio, iodepth=128
...
fio-2.2.10
Starting 32 threads
Jobs: 11 (f=11): [_(11),r(2),_(4),r(2),_(1),r(1),_(1),r(2),_(1),r(4),E(1),_(2)] [4.9% done] [2288MB/0KB/0KB /s] [4575/0/0 iops] [eta 19m:59s]
vmtest: (groupid=0, jobs=32): err= 0: pid=2294: Wed Jan 27 04:56:52 2016
read : io=64512MB, bw=1066.7MB/s, iops=2133, runt= 60481msec
slat (usec): min=16, max=981822, avg=14902.46, stdev=82830.07
clat (msec): min=2, max=3039, avg=1869.38, stdev=309.44
lat (msec): min=5, max=3441, avg=1884.28, stdev=314.55
clat percentiles (msec):
| 1.00th=[ 297], 5.00th=[ 1418], 10.00th=[ 1778], 20.00th=[ 1827],
| 30.00th=[ 1860], 40.00th=[ 1893], 50.00th=[ 1909], 60.00th=[ 1926],
| 70.00th=[ 1958], 80.00th=[ 1991], 90.00th=[ 2024], 95.00th=[ 2057],
| 99.00th=[ 2638], 99.50th=[ 2835], 99.90th=[ 2933], 99.95th=[ 2999],
| 99.99th=[ 3032]
bw (KB /s): min= 219, max=380342, per=3.08%, avg=33686.80, stdev=12398.29
lat (msec) : 4=0.01%, 10=0.01%, 20=0.01%, 50=0.16%, 100=0.16%
lat (msec) : 250=0.54%, 500=0.91%, 750=0.59%, 1000=0.58%, 2000=82.38%
lat (msec) : >=2000=14.67%
cpu : usr=0.02%, sys=0.29%, ctx=4642, majf=0, minf=12805
IO depths : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.2%, 16=0.4%, 32=0.8%, >=64=98.4%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.1%
issued : total=r=129023/w=0/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0
latency : target=0, window=0, percentile=100.00%, depth=128

Run status group 0 (all jobs):
READ: io=64512MB, aggrb=1066.7MB/s, minb=1066.7MB/s, maxb=1066.7MB/s, mint=60481msec, maxt=60481msec

Disk stats (read/write):
vdc: ios=128902/0, merge=129023/0, ticks=8897009/0, in_queue=8906779, util=99.94%
[root@centos7-server1 ~]#
[root@centos7-server1 ~]#
[root@centos7-server1 ~]# fio -filename=/dev/vdc -direct=1 -iodepth 128 -thread -rw=randwrite -ioengine=libaio -bs=512k -numjobs=32 -runtime=60 -group_reporting -name=vmtest
vmtest: (g=0): rw=randwrite, bs=512K-512K/512K-512K/512K-512K, ioengine=libaio, iodepth=128
...
fio-2.2.10
Starting 32 threads
Jobs: 30 (f=30): [w(1),_(2),w(29)] [34.1% done] [0KB/264.3MB/0KB /s] [0/528/0 iops] [eta 02m:00s]
vmtest: (groupid=0, jobs=32): err= 0: pid=2330: Wed Jan 27 04:58:11 2016
write: io=12389MB, bw=205677KB/s, iops=401, runt= 61681msec
slat (usec): min=27, max=5172.5K, avg=78284.77, stdev=365112.73
clat (msec): min=125, max=24993, avg=9165.50, stdev=4566.77
lat (msec): min=186, max=25323, avg=9243.79, stdev=4582.34
clat percentiles (msec):
| 1.00th=[ 420], 5.00th=[ 2573], 10.00th=[ 3916], 20.00th=[ 5342],
| 30.00th=[ 6128], 40.00th=[ 7177], 50.00th=[ 8717], 60.00th=[10159],
| 70.00th=[11338], 80.00th=[12780], 90.00th=[15401], 95.00th=[16712],
| 99.00th=[16712], 99.50th=[16712], 99.90th=[16712], 99.95th=[16712],
| 99.99th=[16712]
bw (KB /s): min= 25, max=93391, per=3.86%, avg=7936.56, stdev=10695.74
lat (msec) : 250=0.20%, 500=1.01%, 750=0.08%, 1000=0.34%, 2000=1.78%
lat (msec) : >=2000=96.60%
cpu : usr=0.15%, sys=0.05%, ctx=3316, majf=0, minf=9
IO depths : 1=0.1%, 2=0.3%, 4=0.5%, 8=1.0%, 16=2.1%, 32=4.1%, >=64=91.9%
submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
complete : 0=0.0%, 4=99.8%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.2%
issued : total=r=0/w=24778/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0
latency : target=0, window=0, percentile=100.00%, depth=128

Run status group 0 (all jobs):
WRITE: io=12389MB, aggrb=205676KB/s, minb=205676KB/s, maxb=205676KB/s, mint=61681msec, maxt=61681msec

Disk stats (read/write):
vdc: ios=182/24663, merge=0/24778, ticks=128/9671785, in_queue=9789391, util=99.42%
[root@centos7-server1 ~]#
(1-1/2)