;
; Sample ceph.conf file.
;
; This file defines cluster membership, the various locations
; that Ceph stores data, and any other runtime options.

; If a 'host' is defined for a daemon, the start/stop script will
; verify that it matches the hostname (or else ignore it).  If it is
; not defined, it is assumed that the daemon is intended to start on
; the current host (e.g., in a setup with a startup.conf on each
; node).

; The variables $type, $id and $name are available to use in paths.
; $type = the type of daemon; possible values: mon, mds and osd
; $id   = the ID of the daemon; for mon.alpha, $id will be alpha
; $name = $type.$id
;
; For example:
;   osd.0
;     $type = osd
;     $id   = 0
;     $name = osd.0
;   mon.beta
;     $type = mon
;     $id   = beta
;     $name = mon.beta

; global
[global]
    ; allow ourselves to open a lot of files
    max open files = 131072

    ; secure authentication (cephx) is disabled in this sample;
    ; uncomment the cephx lines and remove the 'none' lines below
    ; to enable it
    ; auth cluster required = cephx
    ; auth client required = cephx
    ; auth service required = cephx
    auth cluster required = none
    auth client required = none
    auth service required = none

    ; set log file
    log file = /var/log/ceph/$name.log
    ; log_to_syslog = true        ; uncomment this line to log to syslog

    ; set up pid files
    pid file = /var/run/ceph/$name.pid

    ; If you want to run an IPv6 cluster, set this to true.
    ; Dual-stack is not possible.
    ;ms bind ipv6 = true

; monitors
;  You need at least one.  You need at least three if you want to
;  tolerate any node failures.  Always create an odd number.
[mon]
    mon data = /data/$name
    ; debug mon = 20

    ; If you are using, for example, the RADOS Gateway and want your
    ; newly created pools to have a higher replication level, you can
    ; set a default here.
    osd pool default size = 3

    ; You can also specify a CRUSH rule for new pools.
    ; Wiki: http://ceph.newdream.net/wiki/Custom_data_placement_with_CRUSH
    ;osd pool default crush rule = 0

    ; Timing is critical for monitors, but if you want to allow the
    ; clocks to drift a bit more, you can specify the max drift.
    ;mon clock drift allowed = 1

    ; Tell the monitor to back off from this warning for 30 seconds.
    ;mon clock drift warn backoff = 30

    ; logging, for debugging monitor crashes, in order of
    ; their likelihood of being helpful :)
    debug ms = 1
    ;debug mon = 20
    ;debug paxos = 20
    ;debug auth = 20

[mon.0]
    host = ceph1
    mon addr = 192.168.2.80:6789

[mon.1]
    host = ceph2
    mon addr = 192.168.2.81:6789

[mon.2]
    host = ceph3
    mon addr = 192.168.2.82:6789

; mds
;  You need at least one.  Define two to get a standby.
[mds]
    ; where the mds keeps its secret encryption keys
    keyring = /data/keyring.$name

    ; mds logging to debug issues
    ;debug ms = 1
    debug mds = 20

[mds.alpha]
    host = ceph1

[mds.baker]
    host = ceph2

[mds.charlie]
    host = ceph3

; osd
;  You need at least one.  Two if you want data to be replicated.
;  Define as many as you like.
[osd]
    ; This is where the btrfs volume will be mounted.
    osd data = /data/$name

    ; Ideally, make this a separate disk or partition.  A few
    ; hundred MB should be enough; more if you have fast or many
    ; disks.  You can use a file under the osd data dir if need be
    ; (e.g. /data/$name/journal), but it will be slower than a
    ; separate disk or partition.
    ; This is an example of a file-based journal.
    osd journal = /data/$name/journal
    osd journal size = 10000    ; journal size, in megabytes

    ; If you want to run the journal on a tmpfs, disable DirectIO.
    ;journal dio = false

    ; You can change the number of recovery operations to speed up
    ; recovery, or slow it down if your machines can't handle it.
    osd recovery max active = 1

    ; osd logging to debug osd issues, in order of likelihood of
    ; being helpful
    ;debug ms = 1
    ;debug osd = 20
    ;debug auth = 20
    ;debug filestore = 20
    ;debug journal = 20

[osd.0]
    host = ceph1

    ; If 'btrfs devs' is not specified, you're responsible for
    ; setting up the 'osd data' dir.  If it is not btrfs, things
    ; will behave correctly up until you try to recover from a crash
    ; (which is usually fine for basic testing).
    btrfs devs = /dev/sda3

    ; If you want to specify some other mount options, you can do so.
    ; The default values are rw,noatime.
    ;btrfs options = rw,noatime

[osd.1]
    host = ceph1
    btrfs devs = /dev/sdb3

[osd.2]
    host = ceph2
    btrfs devs = /dev/sda3

[osd.3]
    host = ceph2
    btrfs devs = /dev/sdb3

[osd.4]
    host = ceph3
    btrfs devs = /dev/sda3

[osd.5]
    host = ceph3
    btrfs devs = /dev/sdb3
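
; ----------------------------------------------------------------------
; Usage sketch (an assumption, not part of the sample file itself): with
; the classic mkcephfs-style deployment this config dates from, a cluster
; using this file could be initialized and started roughly like this,
; run from one node with passwordless ssh to the others:
;
;   mkcephfs -a -c /etc/ceph/ceph.conf --mkbtrfs   ; build fs/keys on all hosts
;   service ceph -a start                          ; start mon/mds/osd everywhere
;   ceph -s                                        ; check cluster status
;
; Exact commands vary by Ceph version and distribution; treat this as a
; sketch rather than a definitive procedure.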