[root@ceph-client ceph-config]# ceph-deploy --version
1.5.33
[root@ceph-client ceph-config]# ceph-deploy purge ceph-client
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.33): /usr/bin/ceph-deploy purge ceph-client
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ]  username                      : None
[ceph_deploy.cli][INFO ]  verbose                       : False
[ceph_deploy.cli][INFO ]  overwrite_conf                : False
[ceph_deploy.cli][INFO ]  quiet                         : False
[ceph_deploy.cli][INFO ]  cd_conf                       :
[ceph_deploy.cli][INFO ]  cluster                       : ceph
[ceph_deploy.cli][INFO ]  host                          : ['ceph-client']
[ceph_deploy.cli][INFO ]  func                          :
[ceph_deploy.cli][INFO ]  ceph_conf                     : None
[ceph_deploy.cli][INFO ]  default_release               : False
[ceph_deploy.install][INFO ] note that some dependencies *will not* be removed because they can cause issues with qemu-kvm
[ceph_deploy.install][INFO ] like: librbd1 and librados2
[ceph_deploy.install][DEBUG ] Purging on cluster ceph hosts ceph-client
[ceph_deploy.install][DEBUG ] Detecting platform for host ceph-client ...
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph_deploy.install][INFO ] Distro info: Redhat Linux Server 7.2
[ceph-client][INFO ] Purging Ceph on ceph-client
[ceph-client][INFO ] Running command: yum -y -q remove ceph ceph-common ceph-mon ceph-osd ceph-radosgw
[ceph-client][WARNIN] No Match for argument: ceph
[ceph-client][INFO ] Running command: yum clean all
[ceph-client][DEBUG ] Loaded plugins: ulninfo
[ceph-client][DEBUG ] Cleaning repos: cephLocal epel ol7_UEKR3 ol7_UEKR3_OFED20 ol7_UEKR4
[ceph-client][DEBUG ]               : ol7_UEKR4_OFED ol7_addons ol7_ceph10 ol7_latest ol7_openstack20
[ceph-client][DEBUG ]               : ol7_optional_latest ol7_software_collections
[ceph-client][DEBUG ]               : ol7_spacewalk22_client ol7_u0_base ol7_u1_base ol7_u2_base
[ceph-client][DEBUG ] Cleaning up everything
[root@ceph-client ceph-config]# ceph-deploy purgedata ceph-client
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.33): /usr/bin/ceph-deploy purgedata ceph-client
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ]  username                      : None
[ceph_deploy.cli][INFO ]  verbose                       : False
[ceph_deploy.cli][INFO ]  overwrite_conf                : False
[ceph_deploy.cli][INFO ]  quiet                         : False
[ceph_deploy.cli][INFO ]  cd_conf                       :
[ceph_deploy.cli][INFO ]  cluster                       : ceph
[ceph_deploy.cli][INFO ]  host                          : ['ceph-client']
[ceph_deploy.cli][INFO ]  func                          :
[ceph_deploy.cli][INFO ]  ceph_conf                     : None
[ceph_deploy.cli][INFO ]  default_release               : False
[ceph_deploy.install][DEBUG ] Purging data from cluster ceph hosts ceph-client
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph-client][DEBUG ] find the location of an executable
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph_deploy.install][INFO ] Distro info: Redhat Linux Server 7.2
[ceph-client][INFO ] purging data on ceph-client
[ceph-client][INFO ] Running command: rm -rf --one-file-system -- /var/lib/ceph
[ceph-client][WARNIN] OSDs may still be mounted, trying to unmount them
[ceph-client][INFO ] Running command: find /var/lib/ceph -mindepth 1 -maxdepth 2 -type d -exec umount {} ;
[ceph-client][WARNIN] umount: /var/lib/ceph/osd: not mounted
[ceph-client][INFO ] Running command: rm -rf --one-file-system -- /var/lib/ceph
[ceph-client][INFO ] Running command: rm -rf --one-file-system -- /etc/ceph/
[root@ceph-client ceph-config]# ls
ceph.bootstrap-mds.keyring  ceph.bootstrap-osd.keyring  ceph.bootstrap-rgw.keyring
ceph.client.admin.keyring   ceph.conf                   ceph-deploy-ceph.log
ceph.mon.keyring
[root@ceph-client ceph-config]# rm -rf *
[root@ceph-client ceph-config]# ceph-deploy new ceph-client
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.33): /usr/bin/ceph-deploy new ceph-client
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ]  username                      : None
[ceph_deploy.cli][INFO ]  func                          :
[ceph_deploy.cli][INFO ]  verbose                       : False
[ceph_deploy.cli][INFO ]  overwrite_conf                : False
[ceph_deploy.cli][INFO ]  quiet                         : False
[ceph_deploy.cli][INFO ]  cd_conf                       :
[ceph_deploy.cli][INFO ]  cluster                       : ceph
[ceph_deploy.cli][INFO ]  ssh_copykey                   : True
[ceph_deploy.cli][INFO ]  mon                           : ['ceph-client']
[ceph_deploy.cli][INFO ]  public_network                : None
[ceph_deploy.cli][INFO ]  ceph_conf                     : None
[ceph_deploy.cli][INFO ]  cluster_network               : None
[ceph_deploy.cli][INFO ]  default_release               : False
[ceph_deploy.cli][INFO ]  fsid                          : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph-client][DEBUG ] find the location of an executable
[ceph-client][INFO ] Running command: /usr/sbin/ip link show
[ceph-client][INFO ] Running command: /usr/sbin/ip addr show
[ceph-client][DEBUG ] IP addresses found: ['10.211.55.93']
[ceph_deploy.new][DEBUG ] Resolving host ceph-client
[ceph_deploy.new][DEBUG ] Monitor ceph-client at 10.211.55.93
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph-client']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['10.211.55.93']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
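At this point ceph-deploy new has written an initial ceph.conf into the working directory. For this host it should look roughly like the following (the fsid and monitor address are the ones that appear in the log output; exact contents can vary between ceph-deploy versions):

[global]
fsid = 7475f68c-420a-46b5-a45a-395d2197d37e
mon_initial_members = ceph-client
mon_host = 10.211.55.93
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

The next command appends osd pool default size = 1. With only one OSD in the cluster, the default replica count of 3 can never be satisfied, so without this setting the placement groups would never reach active+clean.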
[root@ceph-client ceph-config]# echo "osd pool default size = 1" >> ceph.conf [root@ceph-client ceph-config]# ceph-deploy install ceph-client [ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf [ceph_deploy.cli][INFO ] Invoked (1.5.33): /usr/bin/ceph-deploy install ceph-client [ceph_deploy.cli][INFO ] ceph-deploy options: [ceph_deploy.cli][INFO ] verbose : False [ceph_deploy.cli][INFO ] testing : None [ceph_deploy.cli][INFO ] cd_conf : [ceph_deploy.cli][INFO ] cluster : ceph [ceph_deploy.cli][INFO ] dev_commit : None [ceph_deploy.cli][INFO ] install_mds : False [ceph_deploy.cli][INFO ] stable : None [ceph_deploy.cli][INFO ] default_release : False [ceph_deploy.cli][INFO ] username : None [ceph_deploy.cli][INFO ] adjust_repos : True [ceph_deploy.cli][INFO ] func : [ceph_deploy.cli][INFO ] install_all : False [ceph_deploy.cli][INFO ] repo : False [ceph_deploy.cli][INFO ] host : ['ceph-client'] [ceph_deploy.cli][INFO ] install_rgw : False [ceph_deploy.cli][INFO ] install_tests : False [ceph_deploy.cli][INFO ] repo_url : None [ceph_deploy.cli][INFO ] ceph_conf : None [ceph_deploy.cli][INFO ] install_osd : False [ceph_deploy.cli][INFO ] version_kind : stable [ceph_deploy.cli][INFO ] install_common : False [ceph_deploy.cli][INFO ] overwrite_conf : False [ceph_deploy.cli][INFO ] quiet : False [ceph_deploy.cli][INFO ] dev : master [ceph_deploy.cli][INFO ] local_mirror : None [ceph_deploy.cli][INFO ] release : None [ceph_deploy.cli][INFO ] install_mon : False [ceph_deploy.cli][INFO ] gpg_url : None [ceph_deploy.install][DEBUG ] Installing stable version jewel on cluster ceph hosts ceph-client [ceph_deploy.install][DEBUG ] Detecting platform for host ceph-client ... [ceph-client][DEBUG ] connected to host: ceph-client [ceph-client][DEBUG ] detect platform information from remote host [ceph-client][DEBUG ] detect machine type [ceph_deploy.install][INFO ] Distro info: Redhat Linux Server 7.2 [ceph-client][INFO ] installing Ceph on ceph-client [ceph-client][INFO ] Running command: yum clean all [ceph-client][DEBUG ] Loaded plugins: ulninfo [ceph-client][DEBUG ] Cleaning repos: cephLocal epel ol7_UEKR3 ol7_UEKR3_OFED20 ol7_UEKR4 [ceph-client][DEBUG ] : ol7_UEKR4_OFED ol7_addons ol7_ceph10 ol7_latest ol7_openstack20 [ceph-client][DEBUG ] : ol7_optional_latest ol7_software_collections [ceph-client][DEBUG ] : ol7_spacewalk22_client ol7_u0_base ol7_u1_base ol7_u2_base [ceph-client][DEBUG ] Cleaning up everything [ceph-client][INFO ] Running command: yum -y install ceph-osd ceph-mds ceph-mon ceph-radosgw [ceph-client][DEBUG ] Loaded plugins: ulninfo [ceph-client][DEBUG ] Resolving Dependencies [ceph-client][DEBUG ] --> Running transaction check [ceph-client][DEBUG ] ---> Package ceph-mds.x86_64 1:10.2.1-0.el7 will be installed [ceph-client][DEBUG ] --> Processing Dependency: ceph-base = 1:10.2.1-0.el7 for package: 1:ceph-mds-10.2.1-0.el7.x86_64 [ceph-client][DEBUG ] ---> Package ceph-mon.x86_64 1:10.2.1-0.el7 will be installed [ceph-client][DEBUG ] ---> Package ceph-osd.x86_64 1:10.2.1-0.el7 will be installed [ceph-client][DEBUG ] ---> Package ceph-radosgw.x86_64 1:10.2.1-0.el7 will be installed [ceph-client][DEBUG ] --> Processing Dependency: ceph-selinux = 1:10.2.1-0.el7 for package: 1:ceph-radosgw-10.2.1-0.el7.x86_64 [ceph-client][DEBUG ] --> Processing Dependency: ceph-common = 1:10.2.1-0.el7 for package: 1:ceph-radosgw-10.2.1-0.el7.x86_64 [ceph-client][DEBUG ] --> Running transaction check [ceph-client][DEBUG ] ---> Package ceph-base.x86_64 1:10.2.1-0.el7 will be installed 
[ceph-client][DEBUG ] ---> Package ceph-common.x86_64 1:10.2.1-0.el7 will be installed
[ceph-client][DEBUG ] ---> Package ceph-selinux.x86_64 1:10.2.1-0.el7 will be installed
[ceph-client][DEBUG ] --> Finished Dependency Resolution
[ceph-client][DEBUG ]
[ceph-client][DEBUG ] Dependencies Resolved
[ceph-client][DEBUG ]
[ceph-client][DEBUG ] ================================================================================
[ceph-client][DEBUG ]  Package        Arch      Version           Repository     Size
[ceph-client][DEBUG ] ================================================================================
[ceph-client][DEBUG ] Installing:
[ceph-client][DEBUG ]  ceph-mds       x86_64    1:10.2.1-0.el7    cephLocal     2.8 M
[ceph-client][DEBUG ]  ceph-mon       x86_64    1:10.2.1-0.el7    cephLocal     2.8 M
[ceph-client][DEBUG ]  ceph-osd       x86_64    1:10.2.1-0.el7    cephLocal     9.0 M
[ceph-client][DEBUG ]  ceph-radosgw   x86_64    1:10.2.1-0.el7    cephLocal     245 k
[ceph-client][DEBUG ] Installing for dependencies:
[ceph-client][DEBUG ]  ceph-base      x86_64    1:10.2.1-0.el7    cephLocal     4.2 M
[ceph-client][DEBUG ]  ceph-common    x86_64    1:10.2.1-0.el7    cephLocal      15 M
[ceph-client][DEBUG ]  ceph-selinux   x86_64    1:10.2.1-0.el7    cephLocal      19 k
[ceph-client][DEBUG ]
[ceph-client][DEBUG ] Transaction Summary
[ceph-client][DEBUG ] ================================================================================
[ceph-client][DEBUG ] Install  4 Packages (+3 Dependent packages)
[ceph-client][DEBUG ]
[ceph-client][DEBUG ] Total download size: 34 M
[ceph-client][DEBUG ] Installed size: 128 M
[ceph-client][DEBUG ] Downloading packages:
[ceph-client][DEBUG ] --------------------------------------------------------------------------------
[ceph-client][DEBUG ] Total                                        103 MB/s |  34 MB  00:00
[ceph-client][DEBUG ] Running transaction check
[ceph-client][DEBUG ] Running transaction test
[ceph-client][DEBUG ] Transaction test succeeded
[ceph-client][DEBUG ] Running transaction
[ceph-client][DEBUG ]   Installing : 1:ceph-common-10.2.1-0.el7.x86_64    1/7
[ceph-client][DEBUG ]   Installing : 1:ceph-base-10.2.1-0.el7.x86_64      2/7
[ceph-client][DEBUG ]   Installing : 1:ceph-selinux-10.2.1-0.el7.x86_64   3/7
[ceph-client][DEBUG ]   Installing : 1:ceph-radosgw-10.2.1-0.el7.x86_64   4/7
[ceph-client][DEBUG ]   Installing : 1:ceph-mon-10.2.1-0.el7.x86_64       5/7
[ceph-client][DEBUG ]   Installing : 1:ceph-mds-10.2.1-0.el7.x86_64       6/7
[ceph-client][DEBUG ]   Installing : 1:ceph-osd-10.2.1-0.el7.x86_64       7/7
[ceph-client][DEBUG ]   Verifying  : 1:ceph-radosgw-10.2.1-0.el7.x86_64   1/7
[ceph-client][DEBUG ]   Verifying  : 1:ceph-mon-10.2.1-0.el7.x86_64       2/7
[ceph-client][DEBUG ]   Verifying  : 1:ceph-mds-10.2.1-0.el7.x86_64       3/7
[ceph-client][DEBUG ]   Verifying  : 1:ceph-osd-10.2.1-0.el7.x86_64       4/7
[ceph-client][DEBUG ]   Verifying  : 1:ceph-selinux-10.2.1-0.el7.x86_64   5/7
[ceph-client][DEBUG ]   Verifying  : 1:ceph-base-10.2.1-0.el7.x86_64      6/7
[ceph-client][DEBUG ]   Verifying  : 1:ceph-common-10.2.1-0.el7.x86_64    7/7
[ceph-client][DEBUG ]
[ceph-client][DEBUG ] Installed:
[ceph-client][DEBUG ]   ceph-mds.x86_64 1:10.2.1-0.el7        ceph-mon.x86_64 1:10.2.1-0.el7
[ceph-client][DEBUG ]   ceph-osd.x86_64 1:10.2.1-0.el7        ceph-radosgw.x86_64 1:10.2.1-0.el7
[ceph-client][DEBUG ]
[ceph-client][DEBUG ] Dependency Installed:
[ceph-client][DEBUG ]   ceph-base.x86_64 1:10.2.1-0.el7       ceph-common.x86_64 1:10.2.1-0.el7
[ceph-client][DEBUG ]   ceph-selinux.x86_64 1:10.2.1-0.el7
[ceph-client][DEBUG ]
[ceph-client][DEBUG ] Complete!
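ceph-deploy resolved the release to the current stable series (jewel) on its own. If a host needs a particular release, it can be pinned explicitly on the command line, for example:

ceph-deploy install --release jewel ceph-client

The session below also opens /usr/lib/python2.7/site-packages/ceph_disk/main.py in vi; the change itself, if any, is not shown. Edits to that file at the time were typically local workarounds for ceph-disk partition-activation problems (the same partprobe behavior that shows up again near the end of this session), so treat anything changed there as a version-specific, unsupported workaround.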
[ceph-client][INFO ] Running command: ceph --version
[ceph-client][DEBUG ] ceph version 10.2.1 (3a66dd4f30852819c1bdaa8ec23c795d4ad77269)
[root@ceph-client ceph-config]# vi /usr/lib/python2.7/site-packages/ceph_
ceph_argparse.py    ceph_daemon.pyc    ceph_detect_init/                       ceph_rest_api.py    ceph_volume_client.pyc
ceph_argparse.pyc   ceph_daemon.pyo    ceph_detect_init-1.0.1-py2.7.egg-info/  ceph_rest_api.pyc   ceph_volume_client.pyo
ceph_argparse.pyo   ceph_deploy/       ceph_disk/                              ceph_rest_api.pyo
ceph_daemon.py      ceph_deploy-1.5.33-py2.7.egg-info/  ceph_disk-1.0.0-py2.7.egg-info/           ceph_volume_client.py
[root@ceph-client ceph-config]# vi /usr/lib/python2.7/site-packages/ceph_disk/
__init__.py   __init__.pyc   __init__.pyo   main.py   main.pyc   main.pyo
[root@ceph-client ceph-config]# vi /usr/lib/python2.7/site-packages/ceph_disk/main.py
[root@ceph-client ceph-config]# ceph-deploy mon create-initial
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.33): /usr/bin/ceph-deploy mon create-initial
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ]  username                      : None
[ceph_deploy.cli][INFO ]  verbose                       : False
[ceph_deploy.cli][INFO ]  overwrite_conf                : False
[ceph_deploy.cli][INFO ]  subcommand                    : create-initial
[ceph_deploy.cli][INFO ]  quiet                         : False
[ceph_deploy.cli][INFO ]  cd_conf                       :
[ceph_deploy.cli][INFO ]  cluster                       : ceph
[ceph_deploy.cli][INFO ]  func                          :
[ceph_deploy.cli][INFO ]  ceph_conf                     : None
[ceph_deploy.cli][INFO ]  default_release               : False
[ceph_deploy.cli][INFO ]  keyrings                      : None
[ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts ceph-client
[ceph_deploy.mon][DEBUG ] detecting platform for host ceph-client ...
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph-client][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO ] distro info: Redhat Linux Server 7.2
[ceph-client][DEBUG ] determining if provided host has same hostname in remote
[ceph-client][DEBUG ] get remote short hostname
[ceph-client][DEBUG ] deploying mon to ceph-client
[ceph-client][DEBUG ] get remote short hostname
[ceph-client][DEBUG ] remote hostname: ceph-client
[ceph-client][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph-client][DEBUG ] create the mon path if it does not exist
[ceph-client][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-ceph-client/done
[ceph-client][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-ceph-client/done
[ceph-client][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-ceph-client.mon.keyring
[ceph-client][DEBUG ] create the monitor keyring file
[ceph-client][INFO ] Running command: ceph-mon --cluster ceph --mkfs -i ceph-client --keyring /var/lib/ceph/tmp/ceph-ceph-client.mon.keyring --setuser 167 --setgroup 167
[ceph-client][DEBUG ] ceph-mon: mon.noname-a 10.211.55.93:6789/0 is local, renaming to mon.ceph-client
[ceph-client][DEBUG ] ceph-mon: set fsid to 7475f68c-420a-46b5-a45a-395d2197d37e
[ceph-client][DEBUG ] ceph-mon: created monfs at /var/lib/ceph/mon/ceph-ceph-client for mon.ceph-client
[ceph-client][INFO ] unlinking keyring file /var/lib/ceph/tmp/ceph-ceph-client.mon.keyring
[ceph-client][DEBUG ] create a done file to avoid re-doing the mon deployment
[ceph-client][DEBUG ] create the init path if it does not exist
[ceph-client][INFO ] Running command: systemctl enable ceph.target
[ceph-client][INFO ] Running command: systemctl enable ceph-mon@ceph-client
[ceph-client][INFO ] Running command: systemctl start ceph-mon@ceph-client
[ceph-client][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-client.asok mon_status
[ceph-client][DEBUG ] ********************************************************************************
[ceph-client][DEBUG ] status for monitor: mon.ceph-client
[ceph-client][DEBUG ] {
[ceph-client][DEBUG ]   "election_epoch": 3,
[ceph-client][DEBUG ]   "extra_probe_peers": [],
[ceph-client][DEBUG ]   "monmap": {
[ceph-client][DEBUG ]     "created": "2016-05-27 09:53:21.881989",
[ceph-client][DEBUG ]     "epoch": 1,
[ceph-client][DEBUG ]     "fsid": "7475f68c-420a-46b5-a45a-395d2197d37e",
[ceph-client][DEBUG ]     "modified": "2016-05-27 09:53:21.881989",
[ceph-client][DEBUG ]     "mons": [
[ceph-client][DEBUG ]       {
[ceph-client][DEBUG ]         "addr": "10.211.55.93:6789/0",
[ceph-client][DEBUG ]         "name": "ceph-client",
[ceph-client][DEBUG ]         "rank": 0
[ceph-client][DEBUG ]       }
[ceph-client][DEBUG ]     ]
[ceph-client][DEBUG ]   },
[ceph-client][DEBUG ]   "name": "ceph-client",
[ceph-client][DEBUG ]   "outside_quorum": [],
[ceph-client][DEBUG ]   "quorum": [
[ceph-client][DEBUG ]     0
[ceph-client][DEBUG ]   ],
[ceph-client][DEBUG ]   "rank": 0,
[ceph-client][DEBUG ]   "state": "leader",
[ceph-client][DEBUG ]   "sync_provider": []
[ceph-client][DEBUG ] }
[ceph-client][DEBUG ] ********************************************************************************
[ceph-client][INFO ] monitor: mon.ceph-client is running
[ceph-client][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-client.asok mon_status
[ceph_deploy.mon][INFO ] processing monitor mon.ceph-client
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph-client][DEBUG ] find the location of an executable
[ceph-client][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph-client.asok mon_status
[ceph_deploy.mon][INFO ] mon.ceph-client monitor has reached quorum!
[ceph_deploy.mon][INFO ] all initial monitors are running and have formed quorum
[ceph_deploy.mon][INFO ] Running gatherkeys...
[ceph_deploy.gatherkeys][DEBUG ] Checking ceph-client for /etc/ceph/ceph.client.admin.keyring
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph-client][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.client.admin.keyring key from ceph-client.
[ceph_deploy.gatherkeys][DEBUG ] Have ceph.mon.keyring
[ceph_deploy.gatherkeys][DEBUG ] Checking ceph-client for /var/lib/ceph/bootstrap-osd/ceph.keyring
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph-client][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.bootstrap-osd.keyring key from ceph-client.
[ceph_deploy.gatherkeys][DEBUG ] Checking ceph-client for /var/lib/ceph/bootstrap-mds/ceph.keyring
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph-client][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.bootstrap-mds.keyring key from ceph-client.
[ceph_deploy.gatherkeys][DEBUG ] Checking ceph-client for /var/lib/ceph/bootstrap-rgw/ceph.keyring
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph-client][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.bootstrap-rgw.keyring key from ceph-client.
[root@ceph-client ceph-config]# ceph-deploy mon create-initial ceph-client
usage: ceph-deploy [-h] [-v | -q] [--version] [--username USERNAME]
                   [--overwrite-conf] [--cluster NAME] [--ceph-conf CEPH_CONF]
                   COMMAND ...
ceph-deploy: error: unrecognized arguments: ceph-client
[root@ceph-client ceph-config]# ceph-deploy gatherkeys ceph-client
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.33): /usr/bin/ceph-deploy gatherkeys ceph-client
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ]  username                      : None
[ceph_deploy.cli][INFO ]  verbose                       : False
[ceph_deploy.cli][INFO ]  overwrite_conf                : False
[ceph_deploy.cli][INFO ]  quiet                         : False
[ceph_deploy.cli][INFO ]  cd_conf                       :
[ceph_deploy.cli][INFO ]  cluster                       : ceph
[ceph_deploy.cli][INFO ]  mon                           : ['ceph-client']
[ceph_deploy.cli][INFO ]  func                          :
[ceph_deploy.cli][INFO ]  ceph_conf                     : None
[ceph_deploy.cli][INFO ]  default_release               : False
[ceph_deploy.gatherkeys][DEBUG ] Have ceph.client.admin.keyring
[ceph_deploy.gatherkeys][DEBUG ] Have ceph.mon.keyring
[ceph_deploy.gatherkeys][DEBUG ] Have ceph.bootstrap-osd.keyring
[ceph_deploy.gatherkeys][DEBUG ] Have ceph.bootstrap-mds.keyring
[ceph_deploy.gatherkeys][DEBUG ] Have ceph.bootstrap-rgw.keyring
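With the monitor in quorum and all keyrings gathered, the next command turns the spare disk into an OSD. Note that --zap-disk destroys the existing partition table on /dev/sdb, so it is worth confirming the device name first; a quick pre-flight check, assuming /dev/sdb is the intended target:

lsblk /dev/sdb
ceph-disk list

ceph-disk, installed above with the ceph packages, is the same tool ceph-deploy drives remotely in the output that follows: it creates a journal partition and a data partition, builds an xfs filesystem on the data partition, and tags both with GPT type codes so udev can activate the OSD.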
[root@ceph-client ceph-config]# ceph-deploy osd create --zap-disk --fs-type xfs ceph-client:sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.33): /usr/bin/ceph-deploy osd create --zap-disk --fs-type xfs ceph-client:sdb
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ]  username                      : None
[ceph_deploy.cli][INFO ]  disk                          : [('ceph-client', '/dev/sdb', None)]
[ceph_deploy.cli][INFO ]  dmcrypt                       : False
[ceph_deploy.cli][INFO ]  verbose                       : False
[ceph_deploy.cli][INFO ]  bluestore                     : None
[ceph_deploy.cli][INFO ]  overwrite_conf                : False
[ceph_deploy.cli][INFO ]  subcommand                    : create
[ceph_deploy.cli][INFO ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ]  quiet                         : False
[ceph_deploy.cli][INFO ]  cd_conf                       :
[ceph_deploy.cli][INFO ]  cluster                       : ceph
[ceph_deploy.cli][INFO ]  fs_type                       : xfs
[ceph_deploy.cli][INFO ]  func                          :
[ceph_deploy.cli][INFO ]  ceph_conf                     : None
[ceph_deploy.cli][INFO ]  default_release               : False
[ceph_deploy.cli][INFO ]  zap_disk                      : True
[ceph_deploy.osd][DEBUG ] Preparing cluster ceph disks ceph-client:/dev/sdb:
[ceph-client][DEBUG ] connected to host: ceph-client
[ceph-client][DEBUG ] detect platform information from remote host
[ceph-client][DEBUG ] detect machine type
[ceph-client][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: Redhat Linux Server 7.2
[ceph_deploy.osd][DEBUG ] Deploying osd to ceph-client
[ceph-client][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.osd][DEBUG ] Preparing host ceph-client disk /dev/sdb journal None activate True
[ceph-client][DEBUG ] find the location of an executable
[ceph-client][INFO ] Running command: /usr/sbin/ceph-disk -v prepare --zap-disk --cluster ceph --fs-type xfs -- /dev/sdb
[ceph-client][WARNIN] command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[ceph-client][WARNIN] command: Running command: /usr/bin/ceph-osd --check-allows-journal -i 0 --cluster ceph
[ceph-client][WARNIN] command: Running command: /usr/bin/ceph-osd --check-wants-journal -i 0 --cluster ceph
[ceph-client][WARNIN] command: Running command: /usr/bin/ceph-osd --check-needs-journal -i 0 --cluster ceph
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] set_type: Will colocate journal with data on /dev/sdb
[ceph-client][WARNIN] command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=osd_journal_size
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb2 uuid path is /sys/dev/block/8:18/dm/uuid
[ceph-client][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
[ceph-client][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mkfs_options_xfs
[ceph-client][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
[ceph-client][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] zap: Zapping partition table on /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/sbin/sgdisk --zap-all -- /dev/sdb
[ceph-client][WARNIN] Caution: invalid backup GPT header, but valid main header; regenerating
[ceph-client][WARNIN] backup header from main header.
[ceph-client][WARNIN]
[ceph-client][WARNIN] Warning! Main and backup partition tables differ! Use the 'c' and 'e' options
[ceph-client][WARNIN] on the recovery & transformation menu to examine the two tables.
[ceph-client][WARNIN]
[ceph-client][WARNIN] Warning! One or more CRCs don't match. You should repair the disk!
[ceph-client][WARNIN]
[ceph-client][DEBUG ] ****************************************************************************
[ceph-client][DEBUG ] Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk
[ceph-client][DEBUG ] verification and recovery are STRONGLY recommended.
[ceph-client][DEBUG ] ****************************************************************************
[ceph-client][DEBUG ] GPT data structures destroyed! You may now partition the disk using fdisk or
[ceph-client][DEBUG ] other utilities.
[ceph-client][WARNIN] command_check_call: Running command: /usr/sbin/sgdisk --clear --mbrtogpt -- /dev/sdb
[ceph-client][DEBUG ] Creating new GPT entries.
[ceph-client][DEBUG ] The operation has completed successfully.
[ceph-client][WARNIN] update_partition: Calling partprobe on zapped device /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[ceph-client][WARNIN] command: Running command: /usr/bin/flock -s /dev/sdb /usr/sbin/partprobe /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] ptype_tobe_for_name: name = journal
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] create_partition: Creating journal partition num 2 size 5120 on /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/sbin/sgdisk --new=2:0:+5120M --change-name=2:ceph journal --partition-guid=2:348f2de1-0fc7-40f3-8619-58c3e13f9a40 --typecode=2:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- /dev/sdb
[ceph-client][DEBUG ] The operation has completed successfully.
[ceph-client][WARNIN] update_partition: Calling partprobe on created device /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[ceph-client][WARNIN] command: Running command: /usr/bin/flock -s /dev/sdb /usr/sbin/partprobe /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb2 uuid path is /sys/dev/block/8:18/dm/uuid
[ceph-client][WARNIN] prepare_device: Journal is GPT partition /dev/disk/by-partuuid/348f2de1-0fc7-40f3-8619-58c3e13f9a40
[ceph-client][WARNIN] prepare_device: Journal is GPT partition /dev/disk/by-partuuid/348f2de1-0fc7-40f3-8619-58c3e13f9a40
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] set_data_partition: Creating osd partition on /dev/sdb
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] ptype_tobe_for_name: name = data
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] create_partition: Creating data partition num 1 size 0 on /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:e79fb746-b631-46b9-94ec-13f30e5ad57d --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be --mbrtogpt -- /dev/sdb
[ceph-client][DEBUG ] The operation has completed successfully.
[ceph-client][WARNIN] update_partition: Calling partprobe on created device /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[ceph-client][WARNIN] command: Running command: /usr/bin/flock -s /dev/sdb /usr/sbin/partprobe /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[ceph-client][WARNIN] populate_data_path_device: Creating xfs fs on /dev/sdb1
[ceph-client][WARNIN] command_check_call: Running command: /usr/sbin/mkfs -t xfs -f -i size=2048 -- /dev/sdb1
[ceph-client][DEBUG ] meta-data=/dev/sdb1              isize=2048   agcount=4, agsize=458687 blks
[ceph-client][DEBUG ]          =                       sectsz=512   attr=2, projid32bit=1
[ceph-client][DEBUG ]          =                       crc=0        finobt=0
[ceph-client][DEBUG ] data     =                       bsize=4096   blocks=1834747, imaxpct=25
[ceph-client][DEBUG ]          =                       sunit=0      swidth=0 blks
[ceph-client][DEBUG ] naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
[ceph-client][DEBUG ] log      =internal log           bsize=4096   blocks=2560, version=2
[ceph-client][DEBUG ]          =                       sectsz=512   sunit=0 blks, lazy-count=1
[ceph-client][DEBUG ] realtime =none                   extsz=4096   blocks=0, rtextents=0
[ceph-client][WARNIN] mount: Mounting /dev/sdb1 on /var/lib/ceph/tmp/mnt.GfW1ir with options noatime,inode64
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/mount -t xfs -o noatime,inode64 -- /dev/sdb1 /var/lib/ceph/tmp/mnt.GfW1ir
[ceph-client][WARNIN] command: Running command: /usr/sbin/restorecon /var/lib/ceph/tmp/mnt.GfW1ir
[ceph-client][WARNIN] populate_data_path: Preparing osd data dir /var/lib/ceph/tmp/mnt.GfW1ir
[ceph-client][WARNIN] command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.GfW1ir/ceph_fsid.8175.tmp
[ceph-client][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.GfW1ir/ceph_fsid.8175.tmp
[ceph-client][WARNIN] command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.GfW1ir/fsid.8175.tmp
[ceph-client][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.GfW1ir/fsid.8175.tmp
[ceph-client][WARNIN] command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.GfW1ir/magic.8175.tmp
[ceph-client][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.GfW1ir/magic.8175.tmp
[ceph-client][WARNIN] command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.GfW1ir/journal_uuid.8175.tmp
[ceph-client][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.GfW1ir/journal_uuid.8175.tmp
[ceph-client][WARNIN] adjust_symlink: Creating symlink /var/lib/ceph/tmp/mnt.GfW1ir/journal -> /dev/disk/by-partuuid/348f2de1-0fc7-40f3-8619-58c3e13f9a40
[ceph-client][WARNIN] command: Running command: /usr/sbin/restorecon -R /var/lib/ceph/tmp/mnt.GfW1ir
[ceph-client][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.GfW1ir
[ceph-client][WARNIN] unmount: Unmounting /var/lib/ceph/tmp/mnt.GfW1ir
[ceph-client][WARNIN] command_check_call: Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.GfW1ir
[ceph-client][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[ceph-client][WARNIN] command_check_call: Running command: /usr/sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/sdb
[ceph-client][DEBUG ] Warning: The kernel is still using the old partition table.
[ceph-client][DEBUG ] The new table will be used at the next reboot.
[ceph-client][DEBUG ] The operation has completed successfully.
[ceph-client][WARNIN] update_partition: Calling partprobe on prepared device /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[ceph-client][WARNIN] command: Running command: /usr/bin/flock -s /dev/sdb /usr/sbin/partprobe /dev/sdb
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[ceph-client][WARNIN] command_check_call: Running command: /usr/bin/udevadm trigger --action=add --sysname-match sdb1
[ceph-client][INFO ] Running command: systemctl enable ceph.target
[ceph-client][INFO ] checking OSD status...
[ceph-client][DEBUG ] find the location of an executable
[ceph-client][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph-client][WARNIN] there is 1 OSD down
[ceph-client][WARNIN] there is 1 OSD out
[ceph_deploy.osd][DEBUG ] Host ceph-client is now ready for osd use.
[root@ceph-client ceph-config]# ceph status
    cluster 7475f68c-420a-46b5-a45a-395d2197d37e
     health HEALTH_OK
     monmap e1: 1 mons at {ceph-client=10.211.55.93:6789/0}
            election epoch 3, quorum 0 ceph-client
     osdmap e2: 1 osds: 0 up, 0 in
            flags sortbitwise
      pgmap v3: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
[root@ceph-client ceph-config]# partprobe /dev/sdb
[root@ceph-client ceph-config]# ceph status
    cluster 7475f68c-420a-46b5-a45a-395d2197d37e
     health HEALTH_ERR
            64 pgs are stuck inactive for more than 300 seconds
            64 pgs stuck inactive
     monmap e1: 1 mons at {ceph-client=10.211.55.93:6789/0}
            election epoch 3, quorum 0 ceph-client
     osdmap e2: 1 osds: 0 up, 0 in
            flags sortbitwise
      pgmap v3: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
[root@ceph-client ceph-config]# ceph status
    cluster 7475f68c-420a-46b5-a45a-395d2197d37e
     health HEALTH_ERR
            64 pgs are stuck inactive for more than 300 seconds
            64 pgs stuck inactive
     monmap e1: 1 mons at {ceph-client=10.211.55.93:6789/0}
            election epoch 3, quorum 0 ceph-client
     osdmap e2: 1 osds: 0 up, 0 in
            flags sortbitwise
      pgmap v3: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
[root@ceph-client ceph-config]# reboot
Connection to 10.211.55.93 closed by remote host.
Connection to 10.211.55.93 closed.
[muislam@muminul-dev ~]$ ssh root@10.211.55.93
root@10.211.55.93's password:
Last login: Fri May 27 09:45:32 2016 from 10.211.55.4
[root@ceph-client ~]# cd ceph-config/
[root@ceph-client ceph-config]# ceph status
    cluster 7475f68c-420a-46b5-a45a-395d2197d37e
     health HEALTH_ERR
            64 pgs are stuck inactive for more than 300 seconds
            64 pgs stuck inactive
     monmap e1: 1 mons at {ceph-client=10.211.55.93:6789/0}
            election epoch 4, quorum 0 ceph-client
     osdmap e5: 1 osds: 1 up, 1 in
            flags sortbitwise
      pgmap v6: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
[root@ceph-client ceph-config]# ceph status
    cluster 7475f68c-420a-46b5-a45a-395d2197d37e
     health HEALTH_OK
     monmap e1: 1 mons at {ceph-client=10.211.55.93:6789/0}
            election epoch 4, quorum 0 ceph-client
     osdmap e5: 1 osds: 1 up, 1 in
            flags sortbitwise
      pgmap v7: 64 pgs, 1 pools, 0 bytes data, 0 objects
            33928 kB used, 7123 MB / 7156 MB avail
                  64 active+clean
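After the reboot the OSD comes up, the placement groups go active+clean, and the cluster reports HEALTH_OK. The kernel had been running on the stale partition table (sgdisk warned about this above), which is why the OSD stayed down until the reboot forced a re-read. As a final smoke test you can push an object through RADOS; a minimal sketch, assuming the default rbd pool that jewel creates (the single pool visible in the pgmap above) and arbitrary object and file names:

echo hello > /tmp/hello.txt
rados -p rbd put hello-object /tmp/hello.txt
rados -p rbd ls
rados -p rbd get hello-object /tmp/hello.out
ceph df

ceph df should then show a small amount of data in the pool, confirming that writes actually reach the new OSD.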