[global]

log file = /var/log/ceph/$type.$id/log
pid file = /var/run/ceph/$type.$id/pid
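#note: $type and $id are Ceph metavariables that expand per daemon,
#so for osd.0 these paths become /var/log/ceph/osd.0/log and
#/var/run/ceph/osd.0/pid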

#logging options
#debug ms = 1/5
#debug osd = 1/5
#debug filestore = 1/5
#debug journal = 1
#debug monc = 5/20

#debugging all off
debug lockdep = 0/0
debug context = 0/0
debug crush = 0/0
debug mds = 0/0
debug mds balancer = 0/0
debug mds locker = 0/0
debug mds log = 0/0
debug mds log expire = 0/0
debug mds migrator = 0/0
debug buffer = 0/0
debug timer = 0/0
debug filer = 0/0
debug objecter = 0/0
debug rados = 0/0
debug rbd = 0/0
debug journaler = 0/0
debug objectcacher = 0/0
debug client = 0/0
debug osd = 0/0
debug optracker = 0/0
debug objclass = 0/0
debug filestore = 0/0
debug journal = 0/0
debug ms = 0/0
debug mon = 0/0
debug monc = 0/0
debug paxos = 0/0
debug tp = 0/0
debug auth = 0/0
debug finisher = 0/0
debug heartbeatmap = 0/0
debug perfcounter = 0/0
debug rgw = 0/0
debug hadoop = 0/0
debug asok = 0/0
debug throttle = 0/0
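#debug levels can also be raised at runtime without a restart, e.g.
#(daemon name is illustrative):
#ceph tell osd.0 injectargs '--debug-osd 1/5'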

#define 2 networks because we like to over-complicate shit
public network = 10.0.1.0/24
cluster network = 10.0.0.0/24

#cluster UUID
fsid = 41ea6be5-aa11-47fb-ab3c-02254a775aa0
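#a new cluster needs a fresh UUID; one way to generate it:
#uuidgen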

#authentication mechanism
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
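#with cephx on, each client needs a key; a minimal sketch (the client
#name and caps here are examples, not from this cluster):
#ceph auth get-or-create client.backup mon 'allow r' osd 'allow rwx pool=backup'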

#older CephFS kernel clients may have trouble when signatures are required
cephx require signatures = true

#default number of placement groups
#this is set during pool creation
#to change an existing pool, you must run a command against it (see below)
#you cannot reduce the number of placement groups in an existing pool;
#only increases are supported

#read the docs carefully to understand how to set this
#the number you choose depends heavily on how many OSDs (disks) you have

#9 bits = 512 placement groups (recommended for 5-10 OSDs)
#12 bits = 4096 placement groups (recommended for 10-50 OSDs)
osd pg bits = 9
osd pgp bits = 9
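#example of raising pg_num on an existing pool (the pool name "rbd" is
#illustrative; pgp_num should be raised to match):
#ceph osd pool set rbd pg_num 512
#ceph osd pool set rbd pgp_num 512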

#replicas
#replica count is set when you create a pool
#to change it on existing pools, you must run a command against each pool (see below)
osd pool default size = 2
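#example against an existing pool (the pool name "rbd" is illustrative):
#ceph osd pool set rbd size 2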

#chooseleaf type
#type 0 allows replicas on a single osd or single host (bad idea on multi-node setups)
#osd crush chooseleaf type = 0

#require replicas to be split across hosts
osd crush chooseleaf type = 1

#prevent the startup script from mucking up a non-standard crush map
#set this to false if you make nonstandard modifications to the crush map
osd crush update on start = true
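#with this disabled, OSDs must be placed in the crush map by hand, e.g.
#(the weight and location here are illustrative):
#ceph osd crush set osd.0 1.0 root=default host=elara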

#primary affinity weight for osds
#enable the use of primary affinity to steer reads toward faster disks
#(commented out because we aren't mixing slow and fast disks)
#mon osd allow primary affinity = true
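#once enabled, affinity is set per osd, e.g. (the value is illustrative):
#ceph osd primary-affinity osd.0 0.5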

[mon]
mon data = /srv/ceph/mon/$type.$id
mon osd full ratio = .99
mon osd nearfull ratio = .50
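#these ratios can also be changed at runtime, e.g. (value illustrative):
#ceph tell mon.* injectargs '--mon-osd-nearfull-ratio .85'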

[mon.elara]
host = elara
mon data = /srv/ceph/mon/mon.elara
mon addr = 10.0.1.42:6789

[osd]
osd data = /srv/ceph/osd/$type.$id
osd journal size = 2000

osd op threads = 4
osd disk threads = 1
osd max backfills = 1
osd recovery max active = 1

osd mkfs type = xfs
#osd mkfs type = btrfs
osd mkfs options xfs = "-f -i size=2048"
osd mount options xfs = "noatime,nodiratime,logbsize=256k,logbufs=8,allocsize=4M"
#osd mkfs options btrfs = "-l 16k -n 16k"
#osd mount options btrfs = "noatime,space_cache,user_subvol_rm_allowed"

#osd journal = /dev/disk/by-partlabel/Ceph_OSD.$id.BLKjournal
osd journal = /dev/disk/by-partlabel/Ceph_SSD_journal.$id
devs = /dev/disk/by-partlabel/Ceph_OSD.$id.XFSdata
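#the by-partlabel paths assume GPT partitions named to match; a sketch
#with sgdisk (the disk and partition number are examples):
#sgdisk --change-name=2:Ceph_SSD_journal.0 /dev/sda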

#osd journal = /srv/ceph/osd/$type.$id/journal
#devs = /dev/disk/by-partlabel/Ceph_OSD.$id.BTRFSdata

#tuning
filestore flusher = false
filestore max sync interval = 100
filestore min sync interval = 50
filestore queue max ops = 5000
filestore queue committing max ops = 5000
filestore op threads = 4
journal max write entries = 1000
journal queue max ops = 5000
objecter inflight ops = 8192

[osd.0]
host = elara

[osd.1]
host = elara

[osd.2]
host = elara

[osd.3]
host = elara

[osd.4]
host = elara

[osd.5]
host = elara

[osd.6]
host = elara

[osd.7]
host = titan

[osd.8]
host = titan

[osd.9]
host = titan

[osd.10]
host = titan

#[osd.11]
# host = titan

#[osd.12]
# host = titan

#[osd.13]
# host = titan

#[osd.14]
# host = titan

#[osd.17]
# host = titan

#[osd.18]
# host = titan

#[osd.15]
# host = elara
# osd journal = /srv/ceph/osd/$type.$id/journal
# osd journal size = 500
# devs = /dev/disk/by-partlabel/Ceph_SSD_cacheOSD.$id
# osd mkfs type = btrfs

#[osd.16]
# host = titan
# osd journal = /srv/ceph/osd/$type.$id/journal
# osd journal size = 500
# devs = /dev/disk/by-partlabel/Ceph_SSD_cacheOSD.$id
# osd mkfs type = btrfs

[mds]
mds data = /srv/ceph/mds/$type.$id
max mds = 1
mds standby replay = true

[mds.fs1]
host = elara