; --- Redmine attachment page header (not part of the Ceph configuration) ---
; Project
; General
; Profile
; Bug #38033 » ceph.conf.txt
; Laura Paduano, 01/24/2019 11:35 AM
;generated by vstart.sh on Thu Jan 24 09:43:54 UTC 2019
[client.vstart.sh]
num mon = 3
num osd = 3
num mds = 3
num mgr = 1
num rgw = 1

[global]
fsid = d2e01c83-7c13-464d-a053-debfb239c0f4
osd failsafe full ratio = .99
mon osd full ratio = .99
mon osd nearfull ratio = .99
mon osd backfillfull ratio = .99
erasure code dir = /ceph/build/lib
plugin dir = /ceph/build/lib
filestore fd cache size = 32
run dir = /ceph/build/out
crash dir = /ceph/build/out
enable experimental unrecoverable data corrupting features = *
osd crush chooseleaf type = 0
debug asok assert abort = true
ms bind msgr2 = true
ms bind msgr1 = true


lockdep = true
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
[client]
keyring = /ceph/build/keyring
log file = /ceph/build/out/$name.$pid.log
admin socket = /tmp/ceph-asok.7PseA2/$name.$pid.asok

[client.rgw]
rgw frontends = civetweb port=8000
admin socket = /ceph/build/out/radosgw.8000.asok
; needed for s3tests
rgw crypt s3 kms encryption keys = testkey-1=<key_here> testkey-2=<key_here>
rgw crypt require ssl = false
; uncomment the following to treat LC expiration days as seconds;
; needed for passing time-based lifecycle (LC) s3-tests (can be verbose)
; rgw lc debug interval = 10
[mds]

log file = /ceph/build/out/$name.log
admin socket = /tmp/ceph-asok.7PseA2/$name.asok
chdir = ""
pid file = /ceph/build/out/$name.pid
heartbeat file = /ceph/build/out/$name.heartbeat

mds data = /ceph/build/dev/mds.$id
mds root ino uid = 0
mds root ino gid = 0

[mgr]

mgr data = /ceph/build/dev/mgr.$id
mgr module path = /ceph/src/pybind/mgr

log file = /ceph/build/out/$name.log
admin socket = /tmp/ceph-asok.7PseA2/$name.asok
chdir = ""
pid file = /ceph/build/out/$name.pid
heartbeat file = /ceph/build/out/$name.heartbeat


[osd]

log file = /ceph/build/out/$name.log
admin socket = /tmp/ceph-asok.7PseA2/$name.asok
chdir = ""
pid file = /ceph/build/out/$name.pid
heartbeat file = /ceph/build/out/$name.heartbeat

osd check max object name len on startup = false
osd data = /ceph/build/dev/osd$id
osd journal = /ceph/build/dev/osd$id/journal
osd journal size = 100
osd class tmp = out
osd class dir = /ceph/build/lib
osd class load list = *
osd class default list = *

filestore wbthrottle xfs ios start flusher = 10
filestore wbthrottle xfs ios hard limit = 20
filestore wbthrottle xfs inodes hard limit = 30
filestore wbthrottle btrfs ios start flusher = 10
filestore wbthrottle btrfs ios hard limit = 20
filestore wbthrottle btrfs inodes hard limit = 30
bluestore fsck on mount = true
bluestore block create = true
bluestore block db path = /ceph/build/dev/osd$id/block.db.file
bluestore block db size = 67108864
bluestore block db create = true
bluestore block wal path = /ceph/build/dev/osd$id/block.wal.file
bluestore block wal size = 1048576000
bluestore block wal create = true

; kstore
kstore fsck on mount = true
osd objectstore = bluestore

[mon]
mgr initial modules = dashboard restful iostat

log file = /ceph/build/out/$name.log
admin socket = /tmp/ceph-asok.7PseA2/$name.asok
chdir = ""
pid file = /ceph/build/out/$name.pid
heartbeat file = /ceph/build/out/$name.heartbeat


debug mon = 10
debug ms = 1

mon cluster log file = /ceph/build/out/cluster.mon.$id.log
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
[mon.a]
host = ceph-dev
mon data = /ceph/build/dev/mon.a
[mon.b]
host = ceph-dev
mon data = /ceph/build/dev/mon.b
[mon.c]
host = ceph-dev
mon data = /ceph/build/dev/mon.c
; NOTE: duplicate [global] header (appended by vstart.sh after the mon ports
; are known); Ceph's config parser merges repeated sections, so "mon host"
; is added to the [global] settings defined earlier in this file.
[global]
mon host = [v2:localhost:40353,v1:localhost:40354] [v2:localhost:40355,v1:localhost:40356] [v2:localhost:40357,v1:localhost:40358]
[mgr.x]
host = ceph-dev
[osd.0]
host = ceph-dev
[osd.1]
host = ceph-dev
[osd.2]
host = ceph-dev
[mds.a]
host = ceph-dev
[mds.b]
host = ceph-dev
[mds.c]
host = ceph-dev

; --- Redmine pagination artifact (not part of the Ceph configuration) ---
; (2-2/2)