# Bug #15314 » runtests.tiered.xfs.yaml
# CBT (Ceph Benchmarking Tool) tiered-pool benchmark configuration
# Author: Mark Nelson, 03/30/2016 12:02 AM
# NOTE(review): indentation was lost when this file was extracted from the
# Redmine attachment page; it has been reconstructed per the CBT config schema.
# Cluster definition: nodes, filesystem, binaries, and pool/crush/erasure
# profiles used by CBT to build and drive the test cluster.
cluster:
  user: 'perf'
  head: "incerta05"
  clients: ["incerta05", "incerta06", "incerta07", "incerta08"]
  osds: ["incerta05", "incerta06", "incerta07", "incerta08"]
  mons:
    incerta05.front.sepia.ceph.com:
      a: "10.0.10.105:6789"
  osds_per_node: 10
  fs: 'xfs'
  mkfs_opts: '-f -i size=2048'
  mount_opts: '-o inode64,noatime'
  conf_file: '/home/perf/incerta/ceph.conf.bluestore.tiered'
  iterations: 1
  # CBT deploys a fresh cluster for each run rather than reusing one.
  use_existing: false
  newstore_block: true
  clusterid: "ceph"
  tmp_dir: "/home/perf/tmp/cbt"
  # ceph-osd_cmd: "env -i TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 /usr/bin/ceph-osd"
  ceph-osd_cmd: "/usr/local/bin/ceph-osd"
  ceph-mon_cmd: "/usr/local/bin/ceph-mon"
  ceph-run_cmd: "/usr/local/bin/ceph-run"
  rados_cmd: "/usr/local/bin/rados"
  ceph_cmd: "/usr/local/bin/ceph"

  # CRUSH profile restricting the cache tier to a subset of OSD ids.
  crush_profiles:
    cache:
      osds: [6, 7, 8, 9, 16, 17, 18, 19, 26, 27, 28, 29, 36, 37, 38, 39]

  pool_profiles:
    # radosbench:
    #   pg_size: 1024
    #   pgp_size: 1024
    #   replication: 3
    #   replication: 'erasure'
    #   erasure_profile: 'ec62'

    # Base pool; 'cache_profile' attaches the rbd_cache pool as a cache tier.
    rbd:
      pg_size: 8192
      pgp_size: 8192
      replication: 3
      cache_profile: 'rbd_cache'

    # Writeback cache-tier pool placed on the 'cache' CRUSH profile above.
    rbd_cache:
      crush_profile: 'cache'
      cache_mode: 'writeback'
      pg_size: 8192
      pgp_size: 8192
      replication: 3
      hit_set_type: 'bloom'
      hit_set_count: 8
      hit_set_period: 60
      # target_max_objects: 32768
      # target_max_bytes: 137438953472
      target_max_objects: 262144
      target_max_bytes: 549755813888
      min_read_recency_for_promote: 2
      min_write_recency_for_promote: 2

  # Referenced by the commented-out radosbench pool profile above.
  erasure_profiles:
    ec62:
      erasure_k: 6
      erasure_m: 2

# Benchmarks to run against the cluster; only librbdfio is active.
benchmarks:
  # mysqlsysbench:
  #   num-threads: 16
  #   cmd_path: "/home/perf/packages/sysbench/sysbench/sysbench"
  #   test-path: "/home/perf/packages/sysbench/sysbench/tests/db/oltp.lua"
  #   prepare-path: "/home/perf/packages/sysbench/sysbench/tests/db/parallel_prepare.lua"
  #   max-time: 600

  # nullbench:
  #   none:
  # radosbench:
  #   op_size: [4194304, 131072, 4096]
  #   write_only: false
  #   time: 300
  #   concurrent_ops: [32]
  #   concurrent_procs: 4
  #   osd_ra: [4096]

  # fio via librbd: sweeps every mode over op sizes from 4 MiB down to 4 KiB.
  librbdfio:
    time: 300
    vol_size: 131072
    mode: ['read', 'write', 'randread', 'randwrite', 'rw', 'randrw']
    rwmixread: 50
    op_size: [4194304, 2097152, 1048576, 524288, 262144, 131072, 65536, 32768, 16384, 8192, 4096]
    procs_per_volume: [1]
    volumes_per_client: [2]
    iodepth: [32]
    osd_ra: [4096]
    cmd_path: '/home/perf/src/fio/fio'
    pool_profile: 'rbd'
    log_avg_msec: 100

# (3-3/4) — Redmine pagination residue; not part of the configuration.