Project

General

Profile

Bug #5920 » ceph.txt

Dmitry Panov, 08/09/2013 05:43 AM

 
root@sn2:/var/log/ceph# ceph status
2013-08-09 15:00:44.072210 7f8f08b5e700 0 -- :/26213 >> 10.202.1.52:6789/0 pipe(0x1789540 sd=3 :0 s=1 pgs=0 cs=0 l=1).fault
health HEALTH_WARN 893 pgs degraded; 406 pgs stuck unclean; recovery 357/1088 degraded (32.812%); recovering 15E o/s, 15EB/s; 2/6 in osds are down; 1 mons down, quorum 0,2 a,c
monmap e1: 3 mons at {a=10.202.1.51:6789/0,b=10.202.1.52:6789/0,c=10.202.1.53:6789/0}, election epoch 4160, quorum 0,2 a,c
osdmap e12413: 6 osds: 4 up, 6 in
pgmap v23595: 1344 pgs: 451 active+clean, 893 active+degraded; 2099 MB data, 11081 MB used, 10860 GB / 10871 GB avail; 357/1088 degraded (32.812%); recovering 15E o/s, 15EB/s
mdsmap e1: 0/0/1 up

root@sn2:/var/log/ceph# service ceph start   [command line garbled in capture; prior edits included "echo -n > ceph-osd.3.log" and "service ceph stop"]
=== mon.b ===
Starting Ceph mon.b on sn2...
[26880]: (33) Numerical argument out of domain
failed: 'ulimit -n 8192; /usr/bin/ceph-mon -i b --pid-file /var/run/ceph/mon.b.pid -c /etc/ceph/ceph.conf '
Starting ceph-create-keys on sn2...
=== osd.2 ===
2013-08-09 15:01:08.667301 7fb22a69e700 0 -- :/27036 >> 10.202.1.52:6789/0 pipe(0x17f5580 sd=3 :0 s=1 pgs=0 cs=0 l=1).fault
create-or-move updated item id 2 name 'osd.2' weight 1.77 at location {host=sn2,root=default} to crush map
Starting Ceph osd.2 on sn2...
starting osd.2 at :/0 osd_data /var/lib/ceph/osd/ceph-2 /var/lib/ceph/osd/ceph-2/journal
=== osd.3 ===
create-or-move updated item id 3 name 'osd.3' weight 1.77 at location {host=sn2,root=default} to crush map
Starting Ceph osd.3 on sn2...
starting osd.3 at :/0 osd_data /var/lib/ceph/osd/ceph-3 /var/lib/ceph/osd/ceph-3/journal
root@sn2:/var/log/ceph# ceph status
health HEALTH_WARN 893 pgs degraded; 406 pgs stuck unclean; recovery 357/1088 degraded (32.812%); 1 mons down, quorum 0,2 a,c
monmap e1: 3 mons at {a=10.202.1.51:6789/0,b=10.202.1.52:6789/0,c=10.202.1.53:6789/0}, election epoch 4160, quorum 0,2 a,c
osdmap e12416: 6 osds: 6 up, 6 in
pgmap v23597: 1344 pgs: 451 active+clean, 893 active+degraded; 2099 MB data, 11081 MB used, 10860 GB / 10871 GB avail; 357/1088 degraded (32.812%)
mdsmap e1: 0/0/1 up

root@sn2:/var/log/ceph# ceph -w
health HEALTH_WARN 487 pgs degraded; recovery 210/1088 degraded (19.301%); recovering 40 o/s, 156MB/s; 1 mons down, quorum 0,2 a,c
monmap e1: 3 mons at {a=10.202.1.51:6789/0,b=10.202.1.52:6789/0,c=10.202.1.53:6789/0}, election epoch 4160, quorum 0,2 a,c
osdmap e12416: 6 osds: 6 up, 6 in
pgmap v23600: 1344 pgs: 139 active, 718 active+clean, 487 active+degraded; 2099 MB data, 11082 MB used, 10860 GB / 10871 GB avail; 210/1088 degraded (19.301%); recovering 40 o/s, 156MB/s
mdsmap e1: 0/0/1 up

2013-08-09 15:01:19.717619 mon.0 [INF] pgmap v23599: 1344 pgs: 35 active, 606 active+clean, 703 active+degraded; 2099 MB data, 11082 MB used, 10860 GB / 10871 GB avail; 295/1088 degraded (27.114%); recovering 15 o/s, 58449KB/s
2013-08-09 15:01:20.970029 mon.0 [INF] pgmap v23600: 1344 pgs: 139 active, 718 active+clean, 487 active+degraded; 2099 MB data, 11082 MB used, 10860 GB / 10871 GB avail; 210/1088 degraded (19.301%); recovering 40 o/s, 156MB/s
2013-08-09 15:01:22.299153 mon.0 [INF] pgmap v23601: 1344 pgs: 139 active, 1205 active+clean; 2099 MB data, 11078 MB used, 10860 GB / 10871 GB avail; recovering 27 o/s, 110MB/s
2013-08-09 15:01:27.728202 mon.0 [INF] pgmap v23602: 1344 pgs: 1344 active+clean; 2099 MB data, 11079 MB used, 10860 GB / 10871 GB avail
^C
root@sn2:/var/log/ceph# ceph status
health HEALTH_WARN 1 mons down, quorum 0,2 a,c
monmap e1: 3 mons at {a=10.202.1.51:6789/0,b=10.202.1.52:6789/0,c=10.202.1.53:6789/0}, election epoch 4160, quorum 0,2 a,c
osdmap e12416: 6 osds: 6 up, 6 in
pgmap v23602: 1344 pgs: 1344 active+clean; 2099 MB data, 11079 MB used, 10860 GB / 10871 GB avail
mdsmap e1: 0/0/1 up

(7-7/14)