OSD 10 will not start

Last login: Wed Jul 10 15:55:02 on ttys001
Amandas-MacBook-Pro:~ ekalk$ pwd
/Users/ekalk
Amandas-MacBook-Pro:~ ekalk$ ls -lah
total 64
drwxr-xr-x+   21 ekalk  staff   672B Jul 10 07:41 .
drwxr-xr-x     6 root   admin   192B Jun  1 19:27 ..
-r--------     1 ekalk  staff     7B Jun  2 16:06 .CFUserTextEncoding
-rw-r--r--@    1 ekalk  staff    10K Jul  1 14:51 .DS_Store
drwx------     2 ekalk  staff    64B Jul 10 07:41 .Trash
drwxr-xr-x    16 ekalk  staff   512B Jul 10 08:11 .atom
-rw-------     1 ekalk  staff   9.8K Jul 11 08:04 .bash_history
drwx------   259 ekalk  staff   8.1K Jul 11 08:04 .bash_sessions
drwx------     3 ekalk  staff    96B Jun 21 09:51 .cups
drwx------     6 ekalk  staff   192B Jun  7 13:39 .ssh
-rw-------     1 root   staff   891B Jun  4 07:58 .viminfo
drwx------     5 ekalk  staff   160B Jun  5 11:02 .vnc
drwx------@    5 ekalk  staff   160B Jun  3 15:51 Applications
drwx------+    9 ekalk  staff   288B Jul 10 15:22 Desktop
drwx------+    3 ekalk  staff    96B Jun  1 19:13 Documents
drwx------+   37 ekalk  staff   1.2K Jul 10 15:22 Downloads
drwx------@   64 ekalk  staff   2.0K Jun 24 09:52 Library
drwx------+    3 ekalk  staff    96B Jun  1 19:13 Movies
drwx------+    4 ekalk  staff   128B Jun  4 07:54 Music
drwx------+    4 ekalk  staff   128B Jul  9 08:58 Pictures
drwxr-xr-x+    5 ekalk  staff   160B Jun  1 19:13 Public
Amandas-MacBook-Pro:~ ekalk$ cd .ssh
Amandas-MacBook-Pro:.ssh ekalk$ ls
id_rsa id_rsa.pub known_hosts
Amandas-MacBook-Pro:.ssh ekalk$ scp id_rsa.pub root@synergy0:/root/.ssh/authorized_keys/
root@synergy0's password:
scp: /root/.ssh/authorized_keys/: Is a directory
Amandas-MacBook-Pro:.ssh ekalk$ scp id_rsa.pub root@synergy0:/root/.ssh/authorized_keys/id_rsa.pub
root@synergy0's password:
scp: /root/.ssh/authorized_keys/id_rsa.pub: No such file or directory
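[Note: authorized_keys on the server is normally a single file that public keys get appended to, not a directory, so the two scp invocations above were never going to work cleanly; ssh-copy-id below automates the correct append. For reference, a sketch of the usual manual equivalent (standard OpenSSH paths assumed):

    # append the local public key to the remote authorized_keys file, creating it if needed
    cat ~/.ssh/id_rsa.pub | ssh root@synergy0 'mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys'
]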
Amandas-MacBook-Pro:.ssh ekalk$ ls
id_rsa id_rsa.pub known_hosts
Amandas-MacBook-Pro:.ssh ekalk$ ssh-copy-id root@synergy0
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/Users/ekalk/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@synergy0's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'root@synergy0'"
and check to make sure that only the key(s) you wanted were added.

Amandas-MacBook-Pro:.ssh ekalk$ ssh-copy-id root@synergy1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/Users/ekalk/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@synergy1's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'root@synergy1'"
and check to make sure that only the key(s) you wanted were added.

Amandas-MacBook-Pro:.ssh ekalk$ ssh-copy-id root@synergy2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/Users/ekalk/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@synergy2's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'root@synergy2'"
and check to make sure that only the key(s) you wanted were added.

Amandas-MacBook-Pro:.ssh ekalk$ ssh-copy-id root@synergy3
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/Users/ekalk/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@synergy3's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'root@synergy3'"
and check to make sure that only the key(s) you wanted were added.

Amandas-MacBook-Pro:.ssh ekalk$ ssh-copy-id synergy@216.106.44.209
Amandas-MacBook-Pro:.ssh ekalk$
Amandas-MacBook-Pro:.ssh ekalk$
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy0
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)

 * Documentation:  https://help.ubuntu.com
 * Management:     https://landscape.canonical.com
 * Support:        https://ubuntu.com/advantage

75 packages can be updated.
51 updates are security updates.

New release '18.04.2 LTS' available.
Run 'do-release-upgrade' to upgrade to it.


Last login: Wed Jul 10 13:54:14 2019 from 216.106.0.188
root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 17h)
    mgr: synergy2(active, since 18h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 16h), 24 in (since 16h)
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     537 active+clean

  io:
    client:   341 B/s rd, 0 op/s rd, 0 op/s wr

root@synergy0:~# ceph pg 0.5 query
Error ENOENT: problem getting command descriptions from pg.0.5
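[Note: a placement-group id has the form <pool-id>.<pg-seq>, and the id 0.5 resolves to no existing PG in this cluster, hence the ENOENT; pg 6.0 queried next is a valid id. A sketch for locating valid pgids with the stock CLI:

    # list pool ids, then sample real pgids from the pg table
    ceph osd lspools
    ceph pg ls | head
]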
root@synergy0:~# ceph pg 6.0 query
{
    "state": "active+clean",
    "snap_trimq": "[]",
    "snap_trimq_len": 0,
    "epoch": 43692,
    "up": [
        22,
        3
    ],
    "acting": [
        22,
        3
    ],
    "acting_recovery_backfill": [
        "3",
        "22"
    ],
    "info": {
        "pgid": "6.0",
        "last_update": "11519'71",
        "last_complete": "11519'71",
        "log_tail": "0'0",
        "last_user_version": 48,
        "last_backfill": "MAX",
        "last_backfill_bitwise": 0,
        "purged_snaps": [],
        "history": {
            "epoch_created": 11223,
            "epoch_pool_created": 11223,
            "last_epoch_started": 43423,
            "last_interval_started": 43420,
            "last_epoch_clean": 43423,
            "last_interval_clean": 43420,
            "last_epoch_split": 0,
            "last_epoch_marked_full": 0,
            "same_up_since": 43408,
            "same_interval_since": 43420,
            "same_primary_since": 43026,
            "last_scrub": "11519'71",
            "last_scrub_stamp": "2019-07-11 06:52:54.377482",
            "last_deep_scrub": "11519'71",
            "last_deep_scrub_stamp": "2019-07-11 06:52:54.377482",
            "last_clean_scrub_stamp": "2019-07-11 06:52:54.377482"
        },
        "stats": {
            "version": "11519'71",
            "reported_seq": "1092",
            "reported_epoch": "43692",
            "state": "active+clean",
            "last_fresh": "2019-07-11 06:52:54.377593",
            "last_change": "2019-07-11 06:52:54.377593",
            "last_active": "2019-07-11 06:52:54.377593",
            "last_peered": "2019-07-11 06:52:54.377593",
            "last_clean": "2019-07-11 06:52:54.377593",
            "last_became_active": "2019-07-10 15:57:00.349376",
            "last_became_peered": "2019-07-10 15:57:00.349376",
            "last_unstale": "2019-07-11 06:52:54.377593",
            "last_undegraded": "2019-07-11 06:52:54.377593",
            "last_fullsized": "2019-07-11 06:52:54.377593",
            "mapping_epoch": 43420,
            "log_start": "0'0",
            "ondisk_log_start": "0'0",
            "created": 11223,
            "last_epoch_clean": 43423,
            "parent": "0.0",
            "parent_split_bits": 0,
            "last_scrub": "11519'71",
            "last_scrub_stamp": "2019-07-11 06:52:54.377482",
            "last_deep_scrub": "11519'71",
            "last_deep_scrub_stamp": "2019-07-11 06:52:54.377482",
            "last_clean_scrub_stamp": "2019-07-11 06:52:54.377482",
            "log_size": 71,
            "ondisk_log_size": 71,
            "stats_invalid": false,
            "dirty_stats_invalid": false,
            "omap_stats_invalid": false,
            "hitset_stats_invalid": false,
            "hitset_bytes_stats_invalid": false,
            "pin_stats_invalid": false,
            "manifest_stats_invalid": false,
            "snaptrimq_len": 0,
            "stat_sum": {
                "num_bytes": 0,
                "num_objects": 23,
                "num_object_clones": 0,
                "num_object_copies": 46,
                "num_objects_missing_on_primary": 0,
                "num_objects_missing": 0,
                "num_objects_degraded": 0,
                "num_objects_misplaced": 0,
                "num_objects_unfound": 0,
                "num_objects_dirty": 23,
                "num_whiteouts": 0,
                "num_read": 0,
                "num_read_kb": 0,
                "num_write": 0,
                "num_write_kb": 0,
                "num_scrub_errors": 0,
                "num_shallow_scrub_errors": 0,
                "num_deep_scrub_errors": 0,
                "num_objects_recovered": 0,
                "num_bytes_recovered": 0,
                "num_keys_recovered": 0,
                "num_objects_omap": 23,
                "num_objects_hit_set_archive": 0,
                "num_bytes_hit_set_archive": 0,
                "num_flush": 0,
                "num_flush_kb": 0,
                "num_evict": 0,
                "num_evict_kb": 0,
                "num_promote": 0,
                "num_flush_mode_high": 0,
                "num_flush_mode_low": 0,
                "num_evict_mode_some": 0,
                "num_evict_mode_full": 0,
                "num_objects_pinned": 0,
                "num_legacy_snapsets": 0,
                "num_large_omap_objects": 0,
                "num_objects_manifest": 0,
                "num_omap_bytes": 23184,
                "num_omap_keys": 92,
                "num_objects_repaired": 0
            },
            "up": [
                22,
                3
            ],
            "acting": [
                22,
                3
            ],
            "blocked_by": [],
            "up_primary": 22,
            "acting_primary": 22,
            "purged_snaps": []
        },
        "empty": 0,
        "dne": 0,
        "incomplete": 0,
        "last_epoch_started": 43423,
        "hit_set_history": {
            "current_last_update": "0'0",
            "history": []
        }
    },
    "peer_info": [
        {
            "peer": "3",
            "pgid": "6.0",
            "last_update": "11519'71",
            "last_complete": "11519'71",
            "log_tail": "0'0",
            "last_user_version": 48,
            "last_backfill": "MAX",
            "last_backfill_bitwise": 1,
            "purged_snaps": [],
            "history": {
                "epoch_created": 11223,
                "epoch_pool_created": 11223,
                "last_epoch_started": 43423,
                "last_interval_started": 43420,
                "last_epoch_clean": 43423,
                "last_interval_clean": 43420,
                "last_epoch_split": 0,
                "last_epoch_marked_full": 0,
                "same_up_since": 43408,
                "same_interval_since": 43420,
                "same_primary_since": 43026,
                "last_scrub": "11519'71",
                "last_scrub_stamp": "2019-07-11 06:52:54.377482",
                "last_deep_scrub": "11519'71",
                "last_deep_scrub_stamp": "2019-07-11 06:52:54.377482",
                "last_clean_scrub_stamp": "2019-07-11 06:52:54.377482"
            },
            "stats": {
                "version": "11519'71",
                "reported_seq": "783",
                "reported_epoch": "43419",
                "state": "active+remapped+backfilling",
                "last_fresh": "2019-07-10 15:56:54.451100",
                "last_change": "2019-07-10 15:56:46.180179",
                "last_active": "2019-07-10 15:56:54.451100",
                "last_peered": "2019-07-10 15:56:54.451100",
                "last_clean": "2019-07-10 15:56:33.184001",
                "last_became_active": "2019-07-10 15:56:46.168090",
                "last_became_peered": "2019-07-10 15:56:46.168090",
                "last_unstale": "2019-07-10 15:56:54.451100",
                "last_undegraded": "2019-07-10 15:56:54.451100",
                "last_fullsized": "2019-07-10 15:56:54.451100",
                "mapping_epoch": 43420,
                "log_start": "0'0",
                "ondisk_log_start": "0'0",
                "created": 11223,
                "last_epoch_clean": 43382,
                "parent": "0.0",
                "parent_split_bits": 0,
                "last_scrub": "11519'71",
                "last_scrub_stamp": "2019-07-09 22:21:06.499081",
                "last_deep_scrub": "11519'71",
                "last_deep_scrub_stamp": "2019-07-09 22:21:06.499081",
                "last_clean_scrub_stamp": "2019-07-09 22:21:06.499081",
                "log_size": 71,
                "ondisk_log_size": 71,
                "stats_invalid": false,
                "dirty_stats_invalid": false,
                "omap_stats_invalid": false,
                "hitset_stats_invalid": false,
                "hitset_bytes_stats_invalid": false,
                "pin_stats_invalid": false,
                "manifest_stats_invalid": false,
                "snaptrimq_len": 0,
                "stat_sum": {
                    "num_bytes": 0,
                    "num_objects": 24,
                    "num_object_clones": 0,
                    "num_object_copies": 48,
                    "num_objects_missing_on_primary": 0,
                    "num_objects_missing": 0,
                    "num_objects_degraded": 0,
                    "num_objects_misplaced": 0,
                    "num_objects_unfound": 0,
                    "num_objects_dirty": 24,
                    "num_whiteouts": 0,
                    "num_read": 24,
                    "num_read_kb": 24,
                    "num_write": 48,
                    "num_write_kb": 48,
                    "num_scrub_errors": 0,
                    "num_shallow_scrub_errors": 0,
                    "num_deep_scrub_errors": 0,
                    "num_objects_recovered": 98,
                    "num_bytes_recovered": 0,
                    "num_keys_recovered": 196,
                    "num_objects_omap": 24,
                    "num_objects_hit_set_archive": 0,
                    "num_bytes_hit_set_archive": 0,
                    "num_flush": 0,
                    "num_flush_kb": 0,
                    "num_evict": 0,
                    "num_evict_kb": 0,
                    "num_promote": 0,
                    "num_flush_mode_high": 0,
                    "num_flush_mode_low": 0,
                    "num_evict_mode_some": 0,
                    "num_evict_mode_full": 0,
                    "num_objects_pinned": 0,
                    "num_legacy_snapsets": 0,
                    "num_large_omap_objects": 0,
                    "num_objects_manifest": 0,
                    "num_omap_bytes": 0,
                    "num_omap_keys": 0,
                    "num_objects_repaired": 0
                },
                "up": [
                    22,
                    3
                ],
                "acting": [
                    22,
                    3
                ],
                "blocked_by": [],
                "up_primary": 22,
                "acting_primary": 22,
                "purged_snaps": []
            },
            "empty": 0,
            "dne": 0,
            "incomplete": 0,
            "last_epoch_started": 43423,
            "hit_set_history": {
                "current_last_update": "0'0",
                "history": []
            }
        }
    ],
    "recovery_state": [
        {
            "name": "Started/Primary/Active",
            "enter_time": "2019-07-10 15:57:00.147016",
            "might_have_unfound": [
                {
                    "osd": "10",
                    "status": "not queried"
                },
                {
                    "osd": "15",
                    "status": "not queried"
                }
            ],
            "recovery_progress": {
                "backfill_targets": [],
                "waiting_on_backfill": [],
                "last_backfill_started": "MIN",
                "backfill_info": {
                    "begin": "MIN",
                    "end": "MIN",
                    "objects": []
                },
                "peer_backfill_info": [],
                "backfills_in_flight": [],
                "recovering": [],
                "pg_backend": {
                    "pull_from_peer": [],
                    "pushing": []
                }
            },
            "scrub": {
                "scrubber.epoch_start": "43420",
                "scrubber.active": false,
                "scrubber.state": "INACTIVE",
                "scrubber.start": "MIN",
                "scrubber.end": "MIN",
                "scrubber.max_end": "MIN",
                "scrubber.subset_last_update": "0'0",
                "scrubber.deep": false,
                "scrubber.waiting_on_whom": []
            }
        },
        {
            "name": "Started",
            "enter_time": "2019-07-10 15:56:56.733406"
        }
    ],
    "agent_state": {}
}
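[Note: the part of this dump relevant to the title is "might_have_unfound" under recovery_state, which lists osd "10" and osd "15" as "not queried". The usual first checks for an OSD that will not start, run on whichever node hosts osd.10 (the ceph-osd@10 systemd unit name below is the stock one for package installs):

    ceph osd tree                                # which host osd.10 maps to, and its up/down state
    systemctl status ceph-osd@10                 # is the daemon running or crash-looping?
    journalctl -u ceph-osd@10 --no-pager -n 50   # last lines of the OSD's log
]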
root@synergy0:~# ^C
root@synergy0:~#
root@synergy0:~#
root@synergy0:~#
root@synergy0:~#
root@synergy0:~#
root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 112323/3104528 objects degraded (3.618%), 38 pgs degraded

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 18h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 12s), 23 in (since 54s); 42 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     112323/3104528 objects degraded (3.618%)
             12398/3104528 objects misplaced (0.399%)
             495 active+clean
             38  active+undersized+degraded+remapped+backfilling
             4   active+remapped+backfilling

  io:
    recovery: 266 MiB/s, 66 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [====..........................]
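[Note: the HEALTH_WARN at this point is driven by osd.3 having just been marked out (24 up, 23 in), so the degraded/misplaced counts and the backfilling PGs are expected and should drain as recovery proceeds; the ceph -w stream below confirms that. A lighter-weight way to follow progress than tailing the cluster log:

    # poll the summary every 30 s instead of streaming ceph -w
    watch -n 30 ceph -s
]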
root@synergy0:~# ceph -w
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 108056/3104528 objects degraded (3.481%), 38 pgs degraded, 38 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 18h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 70s), 23 in (since 112s); 42 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     108056/3104528 objects degraded (3.481%)
             12249/3104528 objects misplaced (0.395%)
             495 active+clean
             38  active+undersized+degraded+remapped+backfilling
             4   active+remapped+backfilling

  io:
    client:   2.7 KiB/s wr, 0 op/s rd, 0 op/s wr
    recovery: 278 MiB/s, 69 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [=====.........................]

2019-07-11 08:20:52.278245 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 108056/3104528 objects degraded (3.481%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:20:57.279303 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 107743/3104528 objects degraded (3.471%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:02.280497 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 107415/3104528 objects degraded (3.460%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:07.281852 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 107025/3104528 objects degraded (3.447%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:12.283042 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 106631/3104528 objects degraded (3.435%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:17.284419 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 106311/3104528 objects degraded (3.424%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:22.285524 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 105845/3104528 objects degraded (3.409%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:27.286667 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 105580/3104528 objects degraded (3.401%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:32.287932 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 105181/3104528 objects degraded (3.388%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:37.288926 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 104863/3104528 objects degraded (3.378%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:42.290092 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 104414/3104528 objects degraded (3.363%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:47.291171 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 104099/3104528 objects degraded (3.353%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:52.292282 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 103569/3104528 objects degraded (3.336%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:21:57.293715 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 103336/3104528 objects degraded (3.329%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:02.295149 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 102895/3104528 objects degraded (3.314%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:07.296561 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 102605/3104528 objects degraded (3.305%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:12.297590 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 102188/3104528 objects degraded (3.292%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:17.298609 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 101913/3104528 objects degraded (3.283%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:22.300002 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 101599/3104528 objects degraded (3.273%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:27.301247 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 101089/3104528 objects degraded (3.256%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:32.302382 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 100607/3104528 objects degraded (3.241%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:37.303390 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 100355/3104528 objects degraded (3.233%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:42.304300 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 100077/3104528 objects degraded (3.224%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:47.305467 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 99683/3104528 objects degraded (3.211%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:52.306932 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 99435/3104528 objects degraded (3.203%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:22:57.308097 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 98942/3104528 objects degraded (3.187%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:02.309462 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 98603/3104528 objects degraded (3.176%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:07.310624 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 98130/3104528 objects degraded (3.161%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:12.311641 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 97995/3104528 objects degraded (3.157%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:17.312691 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 97537/3104528 objects degraded (3.142%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:22.314032 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 97255/3104528 objects degraded (3.133%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:27.315135 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 96713/3104528 objects degraded (3.115%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:32.316421 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 96485/3104528 objects degraded (3.108%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:37.317526 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 95934/3104528 objects degraded (3.090%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:42.318780 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 95769/3104528 objects degraded (3.085%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:47.319882 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 95351/3104528 objects degraded (3.071%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:52.321040 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 95024/3104528 objects degraded (3.061%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:23:57.322398 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 94551/3104528 objects degraded (3.046%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:02.323428 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 94278/3104528 objects degraded (3.037%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:07.324538 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 93897/3104528 objects degraded (3.025%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:12.325646 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 93473/3104528 objects degraded (3.011%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:17.326674 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 93082/3104528 objects degraded (2.998%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:22.327898 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 92817/3104528 objects degraded (2.990%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:27.328990 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 92387/3104528 objects degraded (2.976%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:32.329966 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 92010/3104528 objects degraded (2.964%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:37.331013 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 91607/3104528 objects degraded (2.951%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:42.332173 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 91247/3104528 objects degraded (2.939%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:47.333175 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 90871/3104528 objects degraded (2.927%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:52.334202 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 90498/3104528 objects degraded (2.915%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:24:57.335780 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 90297/3104528 objects degraded (2.909%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:02.336989 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 89917/3104528 objects degraded (2.896%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:07.338032 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 89488/3104528 objects degraded (2.882%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:12.339466 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 89099/3104528 objects degraded (2.870%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:17.340472 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 88838/3104528 objects degraded (2.862%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:22.341545 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 88368/3104528 objects degraded (2.846%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:27.342804 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 87977/3104528 objects degraded (2.834%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:32.344290 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 87603/3104528 objects degraded (2.822%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:37.345820 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 87290/3104528 objects degraded (2.812%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:42.353549 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 86834/3104528 objects degraded (2.797%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:47.354591 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 86418/3104528 objects degraded (2.784%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:52.355892 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 86213/3104528 objects degraded (2.777%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:25:57.357188 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 85941/3104528 objects degraded (2.768%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:02.358165 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 85503/3104528 objects degraded (2.754%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:07.359226 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 85144/3104528 objects degraded (2.743%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:12.360225 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 84803/3104528 objects degraded (2.732%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:17.361181 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 84427/3104528 objects degraded (2.719%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:22.362246 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 83984/3104528 objects degraded (2.705%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:27.363268 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 83662/3104528 objects degraded (2.695%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:32.364288 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 83272/3104528 objects degraded (2.682%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:37.365636 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 82891/3104528 objects degraded (2.670%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:42.366941 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 82495/3104528 objects degraded (2.657%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:47.368067 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 82128/3104528 objects degraded (2.645%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:52.369172 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 81722/3104528 objects degraded (2.632%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:26:57.370610 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 81471/3104528 objects degraded (2.624%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:02.371912 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 81038/3104528 objects degraded (2.610%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:07.373056 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 80769/3104528 objects degraded (2.602%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:12.384524 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 80348/3104528 objects degraded (2.588%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:17.385613 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 80124/3104528 objects degraded (2.581%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:22.387022 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 79703/3104528 objects degraded (2.567%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:27.388489 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 79390/3104528 objects degraded (2.557%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:32.389924 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 78912/3104528 objects degraded (2.542%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:37.391098 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 78625/3104528 objects degraded (2.533%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:42.392222 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 78267/3104528 objects degraded (2.521%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:47.393255 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 78037/3104528 objects degraded (2.514%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:52.394297 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 77463/3104528 objects degraded (2.495%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:27:57.395309 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 77185/3104528 objects degraded (2.486%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:02.396868 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 76806/3104528 objects degraded (2.474%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:07.397953 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 76433/3104528 objects degraded (2.462%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:12.399632 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 76013/3104528 objects degraded (2.448%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:17.400659 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 75743/3104528 objects degraded (2.440%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:22.401806 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 75290/3104528 objects degraded (2.425%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:27.403263 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 74979/3104528 objects degraded (2.415%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:32.404291 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 74538/3104528 objects degraded (2.401%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:37.405330 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 74322/3104528 objects degraded (2.394%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:42.406504 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 73805/3104528 objects degraded (2.377%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:47.407668 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 73550/3104528 objects degraded (2.369%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:52.408714 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 73110/3104528 objects degraded (2.355%), 38 pgs degraded, 38 pgs undersized (PG_DEGRADED)
2019-07-11 08:28:57.409730 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 72760/3104528 objects degraded (2.344%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:02.410737 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 72411/3104528 objects degraded (2.332%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:07.411855 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 72134/3104528 objects degraded (2.324%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:12.413102 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 71723/3104528 objects degraded (2.310%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:17.414589 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 71451/3104528 objects degraded (2.302%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:22.415669 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 70983/3104528 objects degraded (2.286%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:27.416808 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 70783/3104528 objects degraded (2.280%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:32.417830 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 70327/3104528 objects degraded (2.265%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:37.418939 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 70031/3104528 objects degraded (2.256%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:42.420035 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 69552/3104528 objects degraded (2.240%), 37 pgs degraded, 37 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:47.421117 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 69227/3104528 objects degraded (2.230%), 36 pgs degraded, 36 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:52.422289 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 68889/3104528 objects degraded (2.219%), 36 pgs degraded, 36 pgs undersized (PG_DEGRADED)
2019-07-11 08:29:57.423503 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 68705/3104528 objects degraded (2.213%), 36 pgs degraded, 36 pgs undersized (PG_DEGRADED)
root@synergy0:~# eph -s
No command 'eph' found, did you mean:
Command 'ceph' from package 'ceph-common' (main)
Command 'epm' from package 'epm' (universe)
eph: command not found
root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 67485/3104528 objects degraded (2.174%), 35 pgs degraded, 35 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 18h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 10m), 23 in (since 11m); 39 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     67485/3104528 objects degraded (2.174%)
             10475/3104528 objects misplaced (0.337%)
             498 active+clean
             35  active+undersized+degraded+remapped+backfilling
             4   active+remapped+backfilling

  io:
    client:   0 B/s rd, 1.3 KiB/s wr, 0 op/s rd, 0 op/s wr
    recovery: 248 MiB/s, 62 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============................]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 51593/3104528 objects degraded (1.662%), 33 pgs degraded, 33 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 18h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 14m), 23 in (since 15m); 37 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     51593/3104528 objects degraded (1.662%)
             8732/3104528 objects misplaced (0.281%)
             500 active+clean
             33  active+undersized+degraded+remapped+backfilling
             4   active+remapped+backfilling

  io:
    client:   255 B/s rd, 0 B/s wr, 0 op/s rd, 0 op/s wr
    recovery: 278 MiB/s, 69 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==================............]

root@synergy0:~# ceph -w
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 46137/3104528 objects degraded (1.486%), 32 pgs degraded, 32 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 18h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 16m), 23 in (since 17m); 36 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     46137/3104528 objects degraded (1.486%)
             8082/3104528 objects misplaced (0.260%)
             501 active+clean
             32  active+undersized+degraded+remapped+backfilling
             4   active+remapped+backfilling

  io:
    client:   170 B/s rd, 0 B/s wr, 0 op/s rd, 0 op/s wr
    recovery: 250 MiB/s, 62 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [===================...........]

2019-07-11 08:36:17.548840 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 46387/3104528 objects degraded (1.494%), 32 pgs degraded, 32 pgs undersized (PG_DEGRADED)
2019-07-11 08:36:22.550300 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 46135/3104528 objects degraded (1.486%), 32 pgs degraded, 32 pgs undersized (PG_DEGRADED)
2019-07-11 08:36:27.551302 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 45894/3104528 objects degraded (1.478%), 32 pgs degraded, 32 pgs undersized (PG_DEGRADED)
2019-07-11 08:36:32.552341 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 45579/3104528 objects degraded (1.468%), 32 pgs degraded, 32 pgs undersized (PG_DEGRADED)
2019-07-11 08:36:37.553399 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 45298/3104528 objects degraded (1.459%), 32 pgs degraded, 32 pgs undersized (PG_DEGRADED)
2019-07-11 08:36:42.557202 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 44962/3104528 objects degraded (1.448%), 30 pgs degraded, 30 pgs undersized (PG_DEGRADED)
2019-07-11 08:36:47.559133 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 44733/3104528 objects degraded (1.441%), 30 pgs degraded, 30 pgs undersized (PG_DEGRADED)
2019-07-11 08:36:52.560171 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 44509/3104528 objects degraded (1.434%), 30 pgs degraded, 30 pgs undersized (PG_DEGRADED)
2019-07-11 08:36:57.562568 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 44254/3104528 objects degraded (1.425%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:02.563725 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 44030/3104528 objects degraded (1.418%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:07.564739 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 43831/3104528 objects degraded (1.412%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:12.566271 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 43557/3104528 objects degraded (1.403%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:17.567603 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 43312/3104528 objects degraded (1.395%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:22.568696 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 43084/3104528 objects degraded (1.388%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:27.569988 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 42933/3104528 objects degraded (1.383%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:32.570984 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 42698/3104528 objects degraded (1.375%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:37.572032 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 42476/3104528 objects degraded (1.368%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:42.573215 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 42165/3104528 objects degraded (1.358%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:47.574382 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 41919/3104528 objects degraded (1.350%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:52.575734 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 41708/3104528 objects degraded (1.343%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:37:57.577182 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 41510/3104528 objects degraded (1.337%), 29 pgs degraded, 29 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:02.578146 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 41167/3104528 objects degraded (1.326%), 28 pgs degraded, 28 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:07.580499 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 41021/3104528 objects degraded (1.321%), 28 pgs degraded, 28 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:12.581647 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 40834/3104528 objects degraded (1.315%), 28 pgs degraded, 28 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:17.582621 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 40679/3104528 objects degraded (1.310%), 28 pgs degraded, 28 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:22.583662 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 40420/3104528 objects degraded (1.302%), 28 pgs degraded, 28 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:27.584620 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 40245/3104528 objects degraded (1.296%), 28 pgs degraded, 28 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:32.585647 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 39993/3104528 objects degraded (1.288%), 28 pgs degraded, 28 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:37.586653 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 39745/3104528 objects degraded (1.280%), 27 pgs degraded, 28 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:42.590467 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 39488/3104528 objects degraded (1.272%), 26 pgs degraded, 26 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:47.594146 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 39302/3104528 objects degraded (1.266%), 24 pgs degraded, 25 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:52.595137 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 39148/3104528 objects degraded (1.261%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:38:57.596198 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 39046/3104528 objects degraded (1.258%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:02.597192 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 38859/3104528 objects degraded (1.252%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:07.598245 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 38767/3104528 objects degraded (1.249%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:12.599775 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 38618/3104528 objects degraded (1.244%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:17.600843 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 38539/3104528 objects degraded (1.241%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:22.602306 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 38376/3104528 objects degraded (1.236%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:27.603466 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 38269/3104528 objects degraded (1.233%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:32.604446 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 38060/3104528 objects degraded (1.226%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:37.605452 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 37938/3104528 objects degraded (1.222%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:42.606531 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 37777/3104528 objects degraded (1.217%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:47.607640 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 37675/3104528 objects degraded (1.214%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:52.608638 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 37506/3104528 objects degraded (1.208%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:39:57.609701 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 37382/3104528 objects degraded (1.204%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:02.610719 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 37259/3104528 objects degraded (1.200%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:07.611896 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 37132/3104528 objects degraded (1.196%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:12.612948 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 36975/3104528 objects degraded (1.191%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:17.614033 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 36821/3104528 objects degraded (1.186%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:22.615104 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 36648/3104528 objects degraded (1.180%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:27.616188 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 36478/3104528 objects degraded (1.175%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:32.617142 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 36349/3104528 objects degraded (1.171%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:37.618120 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 36191/3104528 objects degraded (1.166%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:42.619212 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 36054/3104528 objects degraded (1.161%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:47.620675 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 35941/3104528 objects degraded (1.158%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:52.621811 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 35799/3104528 objects degraded (1.153%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:40:57.622727 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 35721/3104528 objects degraded (1.151%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:02.623807 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 35529/3104528 objects degraded (1.144%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:07.624817 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 35425/3104528 objects degraded (1.141%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:12.625911 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 35266/3104528 objects degraded (1.136%), 24 pgs degraded, 24 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:17.626977 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 35182/3104528 objects degraded (1.133%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:22.628140 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 35005/3104528 objects degraded (1.128%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:27.629198 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 34909/3104528 objects degraded (1.124%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:32.630233 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 34759/3104528 objects degraded (1.120%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:37.631275 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 34662/3104528 objects degraded (1.116%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:42.632224 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 34515/3104528 objects degraded (1.112%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:47.633263 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 34367/3104528 objects degraded (1.107%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:52.634420 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 34217/3104528 objects degraded (1.102%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:41:57.635790 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 34120/3104528 objects degraded (1.099%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:02.637085 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 33967/3104528 objects degraded (1.094%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:07.638100 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 33909/3104528 objects degraded (1.092%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:12.639136 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 33667/3104528 objects degraded (1.084%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:17.640146 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 33632/3104528 objects degraded (1.083%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:22.641114 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 33424/3104528 objects degraded (1.077%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:27.642126 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 33357/3104528 objects degraded (1.074%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:32.643215 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 33221/3104528 objects degraded (1.070%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:37.644306 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 33108/3104528 objects degraded (1.066%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:42.645286 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 32979/3104528 objects degraded (1.062%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:47.646252 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 32857/3104528 objects degraded (1.058%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:52.647204 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 32737/3104528 objects degraded (1.054%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:42:57.648172 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 32619/3104528 objects degraded (1.051%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:43:02.649188 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 32426/3104528 objects degraded (1.044%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:43:07.650193 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 32320/3104528 objects degraded (1.041%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:43:12.651298 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 32181/3104528 objects degraded (1.037%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:43:17.652254 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 32115/3104528 objects degraded (1.034%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:43:22.653361 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 31954/3104528 objects degraded (1.029%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
2019-07-11 08:43:27.654324 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 31847/3104528 objects degraded (1.026%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:43:32.655319 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 31660/3104528 objects degraded (1.020%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:43:37.656319 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 31521/3104528 objects degraded (1.015%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:43:42.657421 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 31417/3104528 objects degraded (1.012%), 23 pgs degraded, 23 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:43:47.658486 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 31287/3104528 objects degraded (1.008%), 22 pgs degraded, 22 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:43:52.659496 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 31107/3104528 objects degraded (1.002%), 22 pgs degraded, 22 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:43:57.668443 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 31008/3104528 objects degraded (0.999%), 22 pgs degraded, 22 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:02.669459 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 30902/3104528 objects degraded (0.995%), 22 pgs degraded, 22 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:07.670482 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 30754/3104528 objects degraded (0.991%), 22 pgs degraded, 22 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:12.671545 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 30629/3104528 objects degraded (0.987%), 22 pgs degraded, 22 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:17.672546 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 30429/3104528 objects degraded (0.980%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:22.673550 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 30273/3104528 objects degraded (0.975%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:27.674557 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 30203/3104528 objects degraded (0.973%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:32.675520 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 30043/3104528 objects degraded (0.968%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:37.676487 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 29934/3104528 objects degraded (0.964%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:42.677485 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 29786/3104528 objects degraded (0.959%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:47.707837 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 29633/3104528 objects degraded (0.955%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:52.708865 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 29528/3104528 objects degraded (0.951%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:44:57.709851 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 29332/3104528 objects degraded (0.945%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:02.710870 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 29223/3104528 objects degraded (0.941%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:07.711955 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 29086/3104528 objects degraded (0.937%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:12.713138 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 28948/3104528 objects degraded (0.932%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:17.714111 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 28848/3104528 objects degraded (0.929%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:22.715140 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 28673/3104528 objects degraded (0.924%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:27.716179 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 28538/3104528 objects degraded (0.919%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:32.717298 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 28388/3104528 objects degraded (0.914%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:37.718286 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 28227/3104528 objects degraded (0.909%), 21 pgs degraded, 21 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:42.719304 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 27996/3104528 objects degraded (0.902%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:47.720386 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 27887/3104528 objects degraded (0.898%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:52.721776 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 27723/3104528 objects degraded (0.893%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:45:57.723011 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 27573/3104528 objects degraded (0.888%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:02.724126 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 27466/3104528 objects degraded (0.885%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:07.725118 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 27263/3104528 objects degraded (0.878%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:12.726290 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 27156/3104528 objects degraded (0.875%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:17.727886 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 27037/3104528 objects degraded (0.871%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:22.729424 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 26847/3104528 objects degraded (0.865%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:27.730468 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 26724/3104528 objects degraded (0.861%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:32.731982 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 26584/3104528 objects degraded (0.856%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:37.733039 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 26451/3104528 objects degraded (0.852%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:42.734188 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 26228/3104528 objects degraded (0.845%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:47.735531 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 26115/3104528 objects degraded (0.841%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:52.736564 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 25949/3104528 objects degraded (0.836%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:46:57.737578 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 25753/3104528 objects degraded (0.830%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:02.738753 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 25615/3104528 objects degraded (0.825%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:07.739863 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 25443/3104528 objects degraded (0.820%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:12.740810 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 25322/3104528 objects degraded (0.816%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:17.741774 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 25127/3104528 objects degraded (0.809%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:22.742741 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 25003/3104528 objects degraded (0.805%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:27.743792 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 24835/3104528 objects degraded (0.800%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:32.744791 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 24784/3104528 objects degraded (0.798%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:37.745760 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 24518/3104528 objects degraded (0.790%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:42.746747 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 24396/3104528 objects degraded (0.786%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:47.747925 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 24200/3104528 objects degraded (0.780%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:52.749083 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 24072/3104528 objects degraded (0.775%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:47:57.750073 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 23956/3104528 objects degraded (0.772%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:02.751116 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 23754/3104528 objects degraded (0.765%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:07.752230 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 23629/3104528 objects degraded (0.761%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:12.753217 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 23507/3104528 objects degraded (0.757%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:17.754190 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 23419/3104528 objects degraded (0.754%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:22.755161 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 23182/3104528 objects degraded (0.747%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:27.756125 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 23114/3104528 objects degraded (0.745%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:32.757103 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 22920/3104528 objects degraded (0.738%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:37.758182 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 22836/3104528 objects degraded (0.736%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:42.759346 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 22632/3104528 objects degraded (0.729%), 20 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:47.760416 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 22526/3104528 objects degraded (0.726%), 19 pgs degraded, 20 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:52.761704 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 22326/3104528 objects degraded (0.719%), 19 pgs degraded, 19 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:48:57.762833 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 22236/3104528 objects degraded (0.716%), 19 pgs degraded, 19 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:02.763924 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 22030/3104528 objects degraded (0.710%), 19 pgs degraded, 19 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:07.764912 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 21914/3104528 objects degraded (0.706%), 19 pgs degraded, 19 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:12.765931 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 21691/3104528 objects degraded (0.699%), 19 pgs degraded, 19 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:17.766957 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 21584/3104528 objects degraded (0.695%), 19 pgs degraded, 19 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:22.767970 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 21396/3104528 objects degraded (0.689%), 19 pgs degraded, 19 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:27.768932 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 21323/3104528 objects degraded (0.687%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:32.770067 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 21093/3104528 objects degraded (0.679%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:37.771011 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 21075/3104528 objects degraded (0.679%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:42.772040 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 20871/3104528 objects degraded (0.672%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:47.773000 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 20765/3104528 objects degraded (0.669%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:52.774001 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 20530/3104528 objects degraded (0.661%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:49:57.775010 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 20444/3104528 objects degraded (0.659%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:02.776000 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 20238/3104528 objects degraded (0.652%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:07.776960 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 20200/3104528 objects degraded (0.651%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:12.777992 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 20078/3104528 objects degraded (0.647%), 18 pgs degraded, 18 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:17.779117 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 19872/3104528 objects degraded (0.640%), 17 pgs degraded, 17 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:22.780469 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 19622/3104528 objects degraded (0.632%), 16 pgs degraded, 16 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:27.781709 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 19606/3104528 objects degraded (0.632%), 16 pgs degraded, 16 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:32.782669 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 19411/3104528 objects degraded (0.625%), 16 pgs degraded, 16 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:37.783697 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 19315/3104528 objects degraded (0.622%), 16 pgs degraded, 16 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:42.784687 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 19192/3104528 objects degraded (0.618%), 16 pgs degraded, 16 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:47.785633 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 19041/3104528 objects degraded (0.613%), 16 pgs degraded, 16 pgs undersized (PG_DEGRADED)
|
|
2019-07-11 08:50:52.786608 mon.synergy0 [WRN] Health check update: Degraded data redundancy: 18931/3104528 objects degraded (0.610%), 16 pgs degraded, 16 pgs undersized (PG_DEGRADED)
|
|
^Croot@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 18686/3104528 objects degraded (0.602%), 16 pgs degraded, 16 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 18h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 31m), 23 in (since 31m); 18 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     18686/3104528 objects degraded (0.602%)
             3945/3104528 objects misplaced (0.127%)
             519 active+clean
             16  active+undersized+degraded+remapped+backfilling
             2   active+remapped+backfilling

  io:
    recovery: 132 MiB/s, 33 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==========================....]

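Rather than re-running ceph -s by hand as below, the recovery can be followed automatically. A minimal sketch using standard Ceph and coreutils commands (the 5-second interval is an arbitrary choice):

    # Re-print the status summary every 5 seconds
    watch -n 5 ceph -s

    # Print the status once, then stream cluster log messages (Ctrl-C to stop)
    ceph -w

    # One-line PG/degradation summary, convenient for scripting
    ceph pg stat
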
root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 13872/3104528 objects degraded (0.447%), 16 pgs degraded, 16 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 18h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 34m), 23 in (since 35m); 17 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     13872/3104528 objects degraded (0.447%)
             3489/3104528 objects misplaced (0.112%)
             519 active+clean
             16  active+undersized+degraded+remapped+backfilling
             2   active+remapped+backfilling

  io:
    recovery: 100 MiB/s, 25 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [===========================...]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 11170/3104528 objects degraded (0.360%), 15 pgs degraded, 15 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 36m), 23 in (since 36m); 17 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     11170/3104528 objects degraded (0.360%)
             3226/3104528 objects misplaced (0.104%)
             520 active+clean
             15  active+undersized+degraded+remapped+backfilling
             2   active+remapped+backfilling

  io:
    recovery: 101 MiB/s, 25 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [===========================...]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 5516/3104528 objects degraded (0.178%), 12 pgs degraded, 12 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 39m), 23 in (since 40m); 14 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     5516/3104528 objects degraded (0.178%)
             2667/3104528 objects misplaced (0.086%)
             523 active+clean
             12  active+undersized+degraded+remapped+backfilling
             2   active+remapped+backfilling

  io:
    client:   0 B/s rd, 639 B/s wr, 0 op/s rd, 0 op/s wr
    recovery: 115 MiB/s, 28 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [=============================.]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 1942/3104528 objects degraded (0.063%), 4 pgs degraded, 4 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 42m), 23 in (since 42m); 6 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     1942/3104528 objects degraded (0.063%)
             2242/3104528 objects misplaced (0.072%)
             531 active+clean
             4   active+undersized+degraded+remapped+backfilling
             2   active+remapped+backfilling

  io:
    recovery: 95 MiB/s, 23 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph-volume lvm list


====== osd.0 =======

  [block]       /dev/ceph-d7857769-de28-4ca6-9e87-85be6da466d8/osd-block-e6055f6c-99a7-4488-84d1-5e4a42b8ed95

      block device              /dev/ceph-d7857769-de28-4ca6-9e87-85be6da466d8/osd-block-e6055f6c-99a7-4488-84d1-5e4a42b8ed95
      block uuid                8bmixp-yzZj-z1Ug-SsmU-IGM0-BdWM-dZeSTW
      cephx lockbox secret
      cluster fsid              d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
      cluster name              ceph
      crush device class        None
      encrypted                 0
      osd fsid                  e6055f6c-99a7-4488-84d1-5e4a42b8ed95
      osd id                    0
      type                      block
      vdo                       0
      devices                   /dev/sdb

====== osd.15 ======

  [block]       /dev/ceph-e9a1422d-0254-46fa-9008-81e2be273589/osd-block-c072c3db-e99a-43ad-8cfc-b75dc0b1aa50

      block device              /dev/ceph-e9a1422d-0254-46fa-9008-81e2be273589/osd-block-c072c3db-e99a-43ad-8cfc-b75dc0b1aa50
      block uuid                eP4S4X-Gb5c-H3hv-qBGA-6MwZ-SkUy-7LdAGN
      cephx lockbox secret
      cluster fsid              d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
      cluster name              ceph
      crush device class        None
      encrypted                 0
      osd fsid                  c072c3db-e99a-43ad-8cfc-b75dc0b1aa50
      osd id                    15
      type                      block
      vdo                       0
      devices                   /dev/sdg

====== osd.3 =======

  [block]       /dev/ceph-da809eb4-ab05-46d0-8ec8-2dd969bed20c/osd-block-5ac261bf-2968-473a-bed7-a73d7f0afccb

      block device              /dev/ceph-da809eb4-ab05-46d0-8ec8-2dd969bed20c/osd-block-5ac261bf-2968-473a-bed7-a73d7f0afccb
      block uuid                5SfLSS-uyFb-k2Ed-9gFt-0aWa-dhFD-b4vdl3
      cephx lockbox secret
      cluster fsid              d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
      cluster name              ceph
      crush device class        None
      encrypted                 0
      osd fsid                  5ac261bf-2968-473a-bed7-a73d7f0afccb
      osd id                    3
      type                      block
      vdo                       0
      devices                   /dev/sdc

====== osd.6 =======

  [block]       /dev/ceph-a22bcc3c-35b4-48c2-87f0-eac910005c42/osd-block-fb52c5c4-0ecb-465f-8565-a39493feafa8

      block device              /dev/ceph-a22bcc3c-35b4-48c2-87f0-eac910005c42/osd-block-fb52c5c4-0ecb-465f-8565-a39493feafa8
      block uuid                hBjwpl-PIiE-yrQp-x2eS-q3zb-eSgy-MGG2We
      cephx lockbox secret
      cluster fsid              d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
      cluster name              ceph
      crush device class        None
      encrypted                 0
      osd fsid                  fb52c5c4-0ecb-465f-8565-a39493feafa8
      osd id                    6
      type                      block
      vdo                       0
      devices                   /dev/sdd

====== osd.7 =======

  [block]       /dev/ceph-b8e17aa2-4050-4f81-818a-34e5902ae708/osd-block-b58a638f-eb76-46fe-bab5-177a44cebe43

      block device              /dev/ceph-b8e17aa2-4050-4f81-818a-34e5902ae708/osd-block-b58a638f-eb76-46fe-bab5-177a44cebe43
      block uuid                1eC7Qn-5qcA-mZq2-NdQk-0TAz-58cB-zXfvI3
      cephx lockbox secret
      cluster fsid              d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
      cluster name              ceph
      crush device class        None
      encrypted                 0
      osd fsid                  b58a638f-eb76-46fe-bab5-177a44cebe43
      osd id                    7
      type                      block
      vdo                       0
      devices                   /dev/sdf

====== osd.9 =======

  [block]       /dev/ceph-833a21b5-0af0-4a1b-b462-3f0e2f034470/osd-block-3a03d301-e36a-4100-8675-b808e4a14838

      block device              /dev/ceph-833a21b5-0af0-4a1b-b462-3f0e2f034470/osd-block-3a03d301-e36a-4100-8675-b808e4a14838
      block uuid                yOYgfm-kPkM-xzrX-BR9f-y44K-2Sgh-B5LUn2
      cephx lockbox secret
      cluster fsid              d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
      cluster name              ceph
      crush device class        None
      encrypted                 0
      osd fsid                  3a03d301-e36a-4100-8675-b808e4a14838
      osd id                    9
      type                      block
      vdo                       0
      devices                   /dev/sde

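To map a single OSD to its backing device without scanning the whole host, ceph-volume accepts a device argument, and the cluster keeps the same mapping in the OSD metadata. A minimal sketch (osd.3 and /dev/sdc are just the examples from this session):

    # LVM inventory for one device only
    ceph-volume lvm list /dev/sdc

    # Cluster-side view of what backs osd.3
    ceph osd metadata 3 | grep -E '"devices"|"bluestore_bdev_partition_path"'
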
root@synergy0:~# ^C
root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 40/3104528 objects degraded (0.001%), 1 pg degraded, 1 pg undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 44m), 23 in (since 45m); 2 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     40/3104528 objects degraded (0.001%)
             1514/3104528 objects misplaced (0.049%)
             533 active+clean
             2   active+remapped+backfilling
             1   active+undersized+degraded+remapped+backfilling
             1   active+clean+scrubbing

  io:
    recovery: 53 MiB/s, 13 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 44m), 23 in (since 45m); 2 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     1484/3104528 objects misplaced (0.048%)
             534 active+clean
             2   active+remapped+backfilling
             1   active+clean+scrubbing

  io:
    recovery: 37 MiB/s, 9 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

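Health flips back to HEALTH_OK here even though two PGs are still backfilling, because those PGs are only misplaced, not degraded: every object still has its full replica count, just not yet on the CRUSH-preferred OSDs. If needed, the two groups can be listed separately by PG state:

    # Fully replicated PGs sitting on the wrong OSDs
    ceph pg ls remapped

    # PGs actually short on replicas (none remain at this point)
    ceph pg ls degraded
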
root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 44m), 23 in (since 45m); 2 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     1435/3104528 objects misplaced (0.046%)
             535 active+clean
             2   active+remapped+backfilling

  io:
    client:   1022 B/s wr, 0 op/s rd, 0 op/s wr
    recovery: 29 MiB/s, 7 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 44m), 23 in (since 45m); 2 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     1435/3104528 objects misplaced (0.046%)
             535 active+clean
             2   active+remapped+backfilling

  io:
    client:   1022 B/s wr, 0 op/s rd, 0 op/s wr
    recovery: 20 MiB/s, 5 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 44m), 23 in (since 45m); 2 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     1375/3104528 objects misplaced (0.044%)
             535 active+clean
             2   active+remapped+backfilling

  io:
    recovery: 23 MiB/s, 5 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 45m), 23 in (since 45m); 2 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     1297/3104528 objects misplaced (0.042%)
             535 active+clean
             2   active+remapped+backfilling

  io:
    client:   17 KiB/s rd, 1.1 KiB/s wr, 16 op/s rd, 11 op/s wr
    recovery: 18 MiB/s, 4 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 45m), 23 in (since 46m); 2 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     1243/3104528 objects misplaced (0.040%)
             535 active+clean
             2   active+remapped+backfilling

  io:
    recovery: 18 MiB/s, 4 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 46m), 23 in (since 46m); 1 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     0.186% pgs not active
             1011/3104528 objects misplaced (0.033%)
             535 active+clean
             1   peering
             1   active+remapped+backfilling

  io:
    client:   0 B/s rd, 5.1 KiB/s wr, 0 op/s rd, 1 op/s wr
    recovery: 12 MiB/s, 2 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 46m), 23 in (since 47m); 1 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     951/3104528 objects misplaced (0.031%)
             536 active+clean
             1   active+remapped+backfilling

  io:
    recovery: 16 MiB/s, 4 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -w
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 46m), 23 in (since 47m); 1 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     927/3104528 objects misplaced (0.030%)
             536 active+clean
             1   active+remapped+backfilling

  io:
    recovery: 15 MiB/s, 3 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]


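The ^C below is expected: unlike ceph -s, ceph -w does not exit on its own. It prints the same snapshot and then blocks, streaming cluster log entries (the [INF]/[WRN] lines captured earlier) until interrupted. The stream can also be narrowed, e.g.:

    # Only warnings and above
    ceph -w --watch-warn

    # Only a single log channel
    ceph -w --watch-channel=audit
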
^Croot@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 46m), 23 in (since 47m); 1 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     857/3104528 objects misplaced (0.028%)
             536 active+clean
             1   active+remapped+backfilling

  io:
    recovery: 17 MiB/s, 4 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# man ceph
root@synergy0:~#
root@synergy0:~#
root@synergy0:~#
root@synergy0:~# ceph -w
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 47m), 23 in (since 48m); 1 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     611/3104528 objects misplaced (0.020%)
             536 active+clean
             1   active+remapped+backfilling

  io:
    client:   0 B/s rd, 1.1 KiB/s wr, 0 op/s rd, 0 op/s wr
    recovery: 17 MiB/s, 4 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]


^Croot@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 47m), 23 in (since 48m); 1 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     485/3104528 objects misplaced (0.016%)
             536 active+clean
             1   active+remapped+backfilling

  io:
    client:   1022 B/s wr, 0 op/s rd, 0 op/s wr
    recovery: 20 MiB/s, 4 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 49m), 23 in (since 49m); 1 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     152/3104528 objects misplaced (0.005%)
             536 active+clean
             1   active+remapped+backfilling

  io:
    client:   340 B/s wr, 0 op/s rd, 0 op/s wr
    recovery: 17 MiB/s, 4 objects/s

  progress:
    Rebalancing after osd.3 marked out
      [==============================]

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 24 up (since 51m), 23 in (since 52m)
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     536 active+clean
             1   active+clean+scrubbing+deep

  io:
    client:   14 KiB/s rd, 2.3 KiB/s wr, 12 op/s rd, 0 op/s wr

root@synergy0:~# ceph osd down osd.3
marked down osd.3.
root@synergy0:~# sudo systemctl stop ceph-osd@3
root@synergy0:~# ceph osd rm osd.3
removed osd.3
root@synergy0:~# ceph osd crush remove osd.3
removed item id 3 name 'osd.3' from crush map
root@synergy0:~# ceph auth del osd.3
updated
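Since Luminous, the three bookkeeping commands above are bundled into one, and the usual order is: mark the OSD out, wait for the data to drain, stop the daemon, then purge. A minimal sketch of the consolidated form (same osd.3 as above):

    ceph osd out osd.3                         # begin draining (osd.3 was already out here)
    sudo systemctl stop ceph-osd@3             # on the host that runs the daemon
    ceph osd purge 3 --yes-i-really-mean-it    # crush remove + auth del + osd rm in one step

Using purge also sidesteps ordering slips such as running "osd rm" before "osd crush remove", as happened above.
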
root@synergy0:~# sudo gdisk /dev/sdc
GPT fdisk (gdisk) version 1.0.1

Partition table scan:
  MBR: not present
  BSD: not present
  APM: not present
  GPT: not present

Creating new GPT entries.

Command (? for help): x

Expert command (? for help): z
About to wipe out GPT on /dev/sdc. Proceed? (Y/N): y
GPT data structures destroyed! You may now partition the disk using fdisk or
other utilities.
Blank out MBR? (Y/N): y
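The interactive gdisk zap works, but ceph-volume ships a non-interactive equivalent that also tears down the LVM volume group it created, which would make the dmsetup cleanup below unnecessary. A minimal sketch (destructive; device name from this session):

    # Tear down the ceph LVs/VG on the device and wipe its labels
    sudo ceph-volume lvm zap /dev/sdc --destroy

    # Plain-disk alternative: wipe GPT and MBR structures without prompts
    sudo sgdisk --zap-all /dev/sdc
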
root@synergy0:~# lsblk
NAME                                                   MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
sda                                                      8:0    0 465.7G  0 disk
├─sda1                                                   8:1    0 464.7G  0 part /
├─sda2                                                   8:2    0     1K  0 part
└─sda5                                                   8:5    0   975M  0 part [SWAP]
sdb                                                      8:16   0 931.5G  0 disk
└─ceph--d7857769--de28--4ca6--9e87--85be6da466d8-osd--block--e6055f6c--99a7--4488--84d1--5e4a42b8ed95
                                                       252:0    0 931.5G  0 lvm
sdc                                                      8:32   0 931.5G  0 disk
└─ceph--da809eb4--ab05--46d0--8ec8--2dd969bed20c-osd--block--5ac261bf--2968--473a--bed7--a73d7f0afccb
                                                       252:1    0 931.5G  0 lvm
sdd                                                      8:48   0 931.5G  0 disk
└─ceph--a22bcc3c--35b4--48c2--87f0--eac910005c42-osd--block--fb52c5c4--0ecb--465f--8565--a39493feafa8
                                                       252:3    0 931.5G  0 lvm
sde                                                      8:64   0 931.5G  0 disk
└─ceph--833a21b5--0af0--4a1b--b462--3f0e2f034470-osd--block--3a03d301--e36a--4100--8675--b808e4a14838
                                                       252:5    0 931.5G  0 lvm
sdf                                                      8:80   0 931.5G  0 disk
└─ceph--b8e17aa2--4050--4f81--818a--34e5902ae708-osd--block--b58a638f--eb76--46fe--bab5--177a44cebe43
                                                       252:2    0   931G  0 lvm
sdg                                                      8:96   0 931.5G  0 disk
└─ceph--e9a1422d--0254--46fa--9008--81e2be273589-osd--block--c072c3db--e99a--43ad--8cfc--b75dc0b1aa50
                                                       252:4    0 931.5G  0 lvm
root@synergy0:~# dmsetup remove ceph--da809eb4--ab05--46d0--8ec8--2dd969bed20c-osd--block--5ac261bf--2968--473a--bed7--a73d7f0afccb
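dmsetup remove only deletes the in-kernel device-mapper node; before re-provisioning the disk it is worth confirming that no stale LVM records or on-disk signatures remain. A minimal sketch:

    sudo dmsetup ls               # the ceph--da809eb4-... entry should be gone
    sudo pvs && sudo vgs          # look for leftover PV/VG records pointing at /dev/sdc
    sudo wipefs -a /dev/sdc       # clear any surviving filesystem/LVM signatures
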
root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 38748/3104528 objects degraded (1.248%), 13 pgs degraded

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 23 osds: 23 up (since 23s), 23 in (since 53m); 68 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 26 TiB avail
    pgs:     38748/3104528 objects degraded (1.248%)
             180936/3104528 objects misplaced (5.828%)
             469 active+clean
             55  active+remapped+backfilling
             13  active+undersized+degraded+remapped+backfilling

  io:
    recovery: 445 MiB/s, 111 objects/s

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 38371/3104528 objects degraded (1.236%), 13 pgs degraded, 13 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 23 osds: 23 up (since 39s), 23 in (since 54m); 68 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 26 TiB avail
    pgs:     38371/3104528 objects degraded (1.236%)
             179283/3104528 objects misplaced (5.775%)
             469 active+clean
             55  active+remapped+backfilling
             13  active+undersized+degraded+remapped+backfilling

  io:
    client:   0 B/s rd, 3.1 KiB/s wr, 0 op/s rd, 0 op/s wr
    recovery: 474 MiB/s, 118 objects/s

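Backfill rates like the 445-474 MiB/s above are capped by per-OSD throttles, which can be changed at runtime on this release; raising them trades client I/O for faster recovery. A minimal sketch (the value 2 is an arbitrary example; the default osd_max_backfills is 1):

    # Persist via the monitor config store (Mimic and later)
    ceph config set osd osd_max_backfills 2

    # Or inject into the running daemons only
    ceph tell 'osd.*' injectargs '--osd-max-backfills=2'
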
root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 38132/3104528 objects degraded (1.228%), 13 pgs degraded, 13 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 23 osds: 23 up (since 52s), 23 in (since 54m); 68 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 14 TiB / 26 TiB avail
    pgs:     38132/3104528 objects degraded (1.228%)
             177994/3104528 objects misplaced (5.733%)
             469 active+clean
             55  active+remapped+backfilling
             13  active+undersized+degraded+remapped+backfilling

  io:
    client:   2.0 KiB/s wr, 0 op/s rd, 0 op/s wr
    recovery: 432 MiB/s, 108 objects/s

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            Degraded data redundancy: 38059/3104528 objects degraded (1.226%), 13 pgs degraded, 13 pgs undersized

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 23 osds: 23 up (since 56s), 23 in (since 54m); 68 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 14 TiB / 26 TiB avail
    pgs:     38059/3104528 objects degraded (1.226%)
             177674/3104528 objects misplaced (5.723%)
             469 active+clean
             55  active+remapped+backfilling
             13  active+undersized+degraded+remapped+backfilling

  io:
    client:   2.0 KiB/s wr, 0 op/s rd, 0 op/s wr
    recovery: 432 MiB/s, 108 objects/s

root@synergy0:~# ceph -s
  cluster:
    id:     d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
    health: HEALTH_WARN
            3 osds down
            Reduced data availability: 1 pg inactive, 5 pgs peering
            Degraded data redundancy: 327064/3104528 objects degraded (10.535%), 107 pgs degraded

  services:
    mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 18h)
    mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
    mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
    osd: 24 osds: 21 up (since 2s), 24 in (since 13s); 43 remapped pgs
    rgw: 1 daemon active (synergy0)

  data:
    pools:   8 pools, 537 pgs
    objects: 1.55M objects, 5.9 TiB
    usage:   12 TiB used, 15 TiB / 27 TiB avail
    pgs:     3.166% pgs not active
             327064/3104528 objects degraded (10.535%)
             103202/3104528 objects misplaced (3.324%)
             362 active+clean
             106 active+undersized+degraded
             27  active+remapped+backfilling
             17  stale+active+clean
             7   active+remapped+backfill_wait
             6   peering
             5   activating+undersized+remapped
             3   remapped+peering
             2   stale+peering
             1   activating+undersized+degraded+remapped
             1   active+undersized

  io:
    client:   198 MiB/s rd, 1.6 GiB/s wr, 1.10k op/s rd, 2.22k op/s wr
    recovery: 8.6 GiB/s, 7 keys/s, 2.22k objects/s

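With three OSDs suddenly reported down, the first step is to find out which ones and on which host. The OSD tree accepts a state filter, and health detail names the daemons explicitly:

    ceph osd tree down    # show only down OSDs, grouped by host
    ceph health detail    # per-check detail, including the affected PGs
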
root@synergy0:~#
root@synergy0:~#
root@synergy0:~#
root@synergy0:~#
root@synergy0:~# cd /var/log/ceph
root@synergy0:/var/log/ceph# ls
ceph.audit.log                     ceph.log.1.gz               ceph-mgr.synergy0.log.2.gz  ceph-osd.0.log.3.gz   ceph-osd.15.log.4.gz  ceph-osd.6.log.5.gz  ceph-volume.log.4.gz
ceph.audit.log.1.gz                ceph.log.2.gz               ceph-mgr.synergy0.log.3.gz  ceph-osd.0.log.4.gz   ceph-osd.15.log.5.gz  ceph-osd.6.log.6.gz  ceph-volume.log.5.gz
ceph.audit.log.2.gz                ceph.log.3.gz               ceph-mgr.synergy0.log.4.gz  ceph-osd.0.log.5.gz   ceph-osd.15.log.6.gz  ceph-osd.6.log.7.gz  ceph-volume.log.6.gz
ceph.audit.log.3.gz                ceph.log.4.gz               ceph-mgr.synergy0.log.5.gz  ceph-osd.0.log.6.gz   ceph-osd.15.log.7.gz  ceph-osd.7.log       ceph-volume.log.7.gz
ceph.audit.log.4.gz                ceph.log.5.gz               ceph-mgr.synergy0.log.6.gz  ceph-osd.0.log.7.gz   ceph-osd.3.log        ceph-osd.7.log.1.gz  ceph-volume-systemd.log
ceph.audit.log.5.gz                ceph.log.6.gz               ceph-mgr.synergy0.log.7.gz  ceph-osd.12.log       ceph-osd.3.log.1.gz   ceph-osd.9.log       ceph-volume-systemd.log.1.gz
ceph.audit.log.6.gz                ceph.log.7.gz               ceph-mon.synergy0.log       ceph-osd.12.log.1.gz  ceph-osd.3.log.2.gz   ceph-osd.9.log.1.gz  ceph-volume-systemd.log.2.gz
ceph.audit.log.7.gz                ceph-mds.synergy0.log       ceph-mon.synergy0.log.1.gz  ceph-osd.12.log.2.gz  ceph-osd.3.log.3.gz   ceph-osd.9.log.2.gz  ceph-volume-systemd.log.3.gz
ceph-client.rgw.synergy0.log       ceph-mds.synergy0.log.1.gz  ceph-mon.synergy0.log.2.gz  ceph-osd.12.log.3.gz  ceph-osd.3.log.4.gz   ceph-osd.9.log.3.gz  ceph-volume-systemd.log.4.gz
ceph-client.rgw.synergy0.log.1.gz  ceph-mds.synergy0.log.2.gz  ceph-mon.synergy0.log.3.gz  ceph-osd.12.log.4.gz  ceph-osd.3.log.5.gz   ceph-osd.9.log.4.gz  ceph-volume-systemd.log.5.gz
ceph-client.rgw.synergy0.log.2.gz  ceph-mds.synergy0.log.3.gz  ceph-mon.synergy0.log.4.gz  ceph-osd.12.log.5.gz  ceph-osd.3.log.6.gz   ceph-osd.9.log.5.gz  ceph-volume-systemd.log.6.gz
ceph-client.rgw.synergy0.log.3.gz  ceph-mds.synergy0.log.4.gz  ceph-mon.synergy0.log.5.gz  ceph-osd.12.log.6.gz  ceph-osd.3.log.7.gz   ceph-osd.9.log.6.gz  ceph-volume-systemd.log.7.gz
ceph-client.rgw.synergy0.log.4.gz  ceph-mds.synergy0.log.5.gz  ceph-mon.synergy0.log.6.gz  ceph-osd.12.log.7.gz  ceph-osd.6.log        ceph-osd.9.log.7.gz
ceph-client.rgw.synergy0.log.5.gz  ceph-mds.synergy0.log.6.gz  ceph-mon.synergy0.log.7.gz  ceph-osd.15.log       ceph-osd.6.log.1.gz   ceph-volume.log
ceph-client.rgw.synergy0.log.6.gz  ceph-mds.synergy0.log.7.gz  ceph-osd.0.log              ceph-osd.15.log.1.gz  ceph-osd.6.log.2.gz   ceph-volume.log.1.gz
ceph-client.rgw.synergy0.log.7.gz  ceph-mgr.synergy0.log       ceph-osd.0.log.1.gz         ceph-osd.15.log.2.gz  ceph-osd.6.log.3.gz   ceph-volume.log.2.gz
ceph.log                           ceph-mgr.synergy0.log.1.gz  ceph-osd.0.log.2.gz         ceph-osd.15.log.3.gz  ceph-osd.6.log.4.gz   ceph-volume.log.3.gz
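The .1.gz through .7.gz suffixes are rotated (gzipped) copies, so a plain grep over *.log only covers the current files. zgrep searches compressed and plain files alike:

    # Search the current log and all rotations of it in one pass
    zgrep -h "start_boot" ceph-osd.3.log*
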
root@synergy0:/var/log/ceph# grep boot *.log
ceph.audit.log:2019-07-11 09:14:36.928992 mon.synergy0 (mon.0) 4033 : audit [INF] from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]: dispatch
ceph.audit.log:2019-07-11 09:14:36.947349 mon.synergy0 (mon.0) 4034 : audit [INF] from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]': finished
ceph.audit.log:2019-07-11 09:14:51.715675 mon.synergy0 (mon.0) 4041 : audit [DBG] from='client.? 216.106.44.200:0/3242824165' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
ceph.log:2019-07-11 08:19:19.490252 mon.synergy0 (mon.0) 3105 : cluster [INF] osd.10 [v2:216.106.44.201:6818/23308,v1:216.106.44.201:6820/23308] boot
ceph.log:2019-07-11 08:19:20.530818 mon.synergy0 (mon.0) 3112 : cluster [INF] osd.13 [v2:216.106.44.201:6802/23324,v1:216.106.44.201:6803/23324] boot
ceph.log:2019-07-11 08:19:24.600922 mon.synergy0 (mon.0) 3121 : cluster [INF] osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
ceph.log:2019-07-11 08:19:31.090956 mon.synergy0 (mon.0) 3130 : cluster [INF] osd.10 [v2:216.106.44.201:6802/23690,v1:216.106.44.201:6803/23690] boot
ceph.log:2019-07-11 08:19:43.060292 mon.synergy0 (mon.0) 3141 : cluster [INF] osd.13 [v2:216.106.44.201:6818/23747,v1:216.106.44.201:6820/23747] boot
ceph.log:2019-07-11 09:11:39.102500 mon.synergy0 (mon.0) 3824 : cluster [INF] osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
ceph.log:2019-07-11 09:12:10.592171 mon.synergy0 (mon.0) 3968 : cluster [INF] osd.10 [v2:216.106.44.201:6802/24938,v1:216.106.44.201:6803/24938] boot
ceph.log:2019-07-11 09:12:16.717812 mon.synergy0 (mon.0) 3979 : cluster [INF] osd.13 [v2:216.106.44.201:6818/24941,v1:216.106.44.201:6820/24941] boot
ceph.log:2019-07-11 09:12:34.277218 mon.synergy0 (mon.0) 3997 : cluster [INF] osd.15 [v2:216.106.44.200:6802/16161,v1:216.106.44.200:6803/16161] boot
ceph.log:2019-07-11 09:15:04.320287 mon.synergy0 (mon.0) 4052 : cluster [INF] osd.3 [v2:216.106.44.200:6810/16987,v1:216.106.44.200:6811/16987] boot
ceph.log:2019-07-11 09:15:15.501601 mon.synergy0 (mon.0) 4248 : cluster [INF] osd.10 [v2:216.106.44.201:6802/25418,v1:216.106.44.201:6803/25418] boot
ceph.log:2019-07-11 09:15:20.561861 mon.synergy0 (mon.0) 4257 : cluster [INF] osd.13 [v2:216.106.44.201:6818/25427,v1:216.106.44.201:6820/25427] boot
ceph.log:2019-07-11 09:15:23.620936 mon.synergy0 (mon.0) 4263 : cluster [INF] osd.15 [v2:216.106.44.200:6802/17179,v1:216.106.44.200:6803/17179] boot
ceph.log:2019-07-11 09:15:39.440562 mon.synergy0 (mon.0) 4277 : cluster [INF] osd.9 [v2:216.106.44.200:6814/17315,v1:216.106.44.200:6815/17315] boot
ceph-mon.synergy0.log:2019-07-11 08:19:19.486 7f2266efe700  0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6818/23308,v1:216.106.44.201:6820/23308] boot
ceph-mon.synergy0.log:2019-07-11 08:19:20.526 7f2266efe700  0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6802/23324,v1:216.106.44.201:6803/23324] boot
ceph-mon.synergy0.log:2019-07-11 08:19:24.599 7f2266efe700  0 log_channel(cluster) log [INF] : osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
ceph-mon.synergy0.log:2019-07-11 08:19:31.083 7f2266efe700  0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6802/23690,v1:216.106.44.201:6803/23690] boot
ceph-mon.synergy0.log:2019-07-11 08:19:43.055 7f2266efe700  0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6818/23747,v1:216.106.44.201:6820/23747] boot
ceph-mon.synergy0.log:2019-07-11 09:11:39.095 7f2266efe700  0 log_channel(cluster) log [INF] : osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
ceph-mon.synergy0.log:2019-07-11 09:12:10.589 7f2266efe700  0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6802/24938,v1:216.106.44.201:6803/24938] boot
ceph-mon.synergy0.log:2019-07-11 09:12:16.713 7f2266efe700  0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6818/24941,v1:216.106.44.201:6820/24941] boot
ceph-mon.synergy0.log:2019-07-11 09:12:34.274 7f2266efe700  0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6802/16161,v1:216.106.44.200:6803/16161] boot
ceph-mon.synergy0.log:2019-07-11 09:14:36.924 7f226a705700  0 log_channel(audit) log [INF] : from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]: dispatch
ceph-mon.synergy0.log:2019-07-11 09:14:36.940 7f2266efe700  0 log_channel(audit) log [INF] : from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]': finished
ceph-mon.synergy0.log:2019-07-11 09:14:51.712 7f226a705700  0 log_channel(audit) log [DBG] : from='client.? 216.106.44.200:0/3242824165' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
ceph-mon.synergy0.log:2019-07-11 09:15:04.317 7f2266efe700  0 log_channel(cluster) log [INF] : osd.3 [v2:216.106.44.200:6810/16987,v1:216.106.44.200:6811/16987] boot
ceph-mon.synergy0.log:2019-07-11 09:15:15.497 7f2266efe700  0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6802/25418,v1:216.106.44.201:6803/25418] boot
ceph-mon.synergy0.log:2019-07-11 09:15:20.558 7f2266efe700  0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6818/25427,v1:216.106.44.201:6820/25427] boot
ceph-mon.synergy0.log:2019-07-11 09:15:23.618 7f2266efe700  0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6802/17179,v1:216.106.44.200:6803/17179] boot
ceph-mon.synergy0.log:2019-07-11 09:15:39.438 7f2266efe700  0 log_channel(cluster) log [INF] : osd.9 [v2:216.106.44.200:6814/17315,v1:216.106.44.200:6815/17315] boot
ceph-osd.15.log:2019-07-11 09:12:33.198 7fbe6881bf00  0 osd.15 43812 done with init, starting boot process
ceph-osd.15.log:2019-07-11 09:12:33.202 7fbe6881bf00  1 osd.15 43812 start_boot
ceph-osd.15.log:2019-07-11 09:12:34.306 7fbe51ba1700  1 osd.15 43822 state: booting -> active
ceph-osd.15.log: -2810> 2019-07-11 09:12:21.553 7fbe6881bf00  2 osd.15 0 boot
ceph-osd.15.log: -2569> 2019-07-11 09:12:33.198 7fbe6881bf00  0 osd.15 43812 done with init, starting boot process
ceph-osd.15.log: -2560> 2019-07-11 09:12:33.202 7fbe6881bf00  1 osd.15 43812 start_boot
ceph-osd.15.log: -2078> 2019-07-11 09:12:34.306 7fbe51ba1700  1 osd.15 43822 state: booting -> active
ceph-osd.15.log:2019-07-11 09:15:22.482 7f702769ff00  0 osd.15 43834 done with init, starting boot process
ceph-osd.15.log:2019-07-11 09:15:22.482 7f702769ff00  1 osd.15 43834 start_boot
ceph-osd.15.log:2019-07-11 09:15:23.654 7f7010a25700  1 osd.15 43843 state: booting -> active
ceph-osd.3.log:2019-07-11 08:19:24.023 7f3b59bbdf00  0 osd.3 43702 done with init, starting boot process
ceph-osd.3.log:2019-07-11 08:19:24.023 7f3b59bbdf00  1 osd.3 43702 start_boot
ceph-osd.3.log:2019-07-11 08:19:24.627 7f3b42f43700  1 osd.3 43710 state: booting -> active
ceph-osd.3.log:2019-07-11 09:11:38.215 7f3b42f43700  1 osd.3 43802 start_boot
ceph-osd.3.log:2019-07-11 09:11:39.107 7f3b42f43700  1 osd.3 43803 state: booting -> active
ceph-osd.3.log:2019-07-11 09:15:02.425 7f1dff155f00  0 osd.3 0 done with init, starting boot process
ceph-osd.3.log:2019-07-11 09:15:02.425 7f1dff155f00  1 osd.3 0 start_boot
ceph-osd.3.log:2019-07-11 09:15:04.337 7f1de84db700  1 osd.3 43831 state: booting -> active
ceph-osd.9.log:2019-07-11 09:15:38.310 7fc965877f00  0 osd.9 43837 done with init, starting boot process
ceph-osd.9.log:2019-07-11 09:15:38.310 7fc965877f00  1 osd.9 43837 start_boot
ceph-osd.9.log:2019-07-11 09:15:39.482 7fc94ebfd700  1 osd.9 43846 state: booting -> active
ceph-volume.log:[2019-07-11 09:14:35,929][ceph_volume.process][INFO  ] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 14da6185-b74e-4313-b0a5-4476554eeb92
ceph-volume.log:[2019-07-11 09:14:50,725][ceph_volume.process][INFO  ] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
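Grepping after the fact works, but a flapping OSD is easier to diagnose by following its output live while it starts. A minimal sketch (osd.10, one of the daemons seen booting repeatedly above, runs on synergy1):

    sudo systemctl restart ceph-osd@10        # run on the host that owns the OSD
    journalctl -u ceph-osd@10 -f              # follow the unit's stdout/stderr
    tail -f /var/log/ceph/ceph-osd.10.log     # or follow the daemon log directly
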
root@synergy0:/var/log/ceph#
|
|
root@synergy0:/var/log/ceph#
|
|
root@synergy0:/var/log/ceph#
|
|
root@synergy0:/var/log/ceph# ping backup2
|
|
PING backup2.socket.net (216.106.44.36) 56(84) bytes of data.
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=1 ttl=64 time=1.40 ms
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=2 ttl=64 time=0.315 ms
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=3 ttl=64 time=0.511 ms
|
|
^C
|
|
--- backup2.socket.net ping statistics ---
|
|
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
|
|
rtt min/avg/max/mdev = 0.315/0.744/1.407/0.475 ms
|
|
root@synergy0:/var/log/ceph#
|
|
root@synergy0:/var/log/ceph# grep "Directory not empty" *.log
|
|
ceph-osd.15.log:2019-07-11 09:12:06.061 7f0e3a44b700 -1 bluestore(/var/lib/ceph/osd/ceph-15) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.15.log: -5> 2019-07-11 09:12:06.061 7f0e3a44b700 -1 bluestore(/var/lib/ceph/osd/ceph-15) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.15.log:2019-07-11 09:15:08.593 7fbe46b8b700 -1 bluestore(/var/lib/ceph/osd/ceph-15) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.15.log: -3> 2019-07-11 09:15:08.593 7fbe46b8b700 -1 bluestore(/var/lib/ceph/osd/ceph-15) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.3.log:2019-07-11 08:19:11.710 7f4fedd22700 -1 bluestore(/var/lib/ceph/osd/ceph-3) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.3.log: -3> 2019-07-11 08:19:11.710 7f4fedd22700 -1 bluestore(/var/lib/ceph/osd/ceph-3) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.9.log:2019-07-11 09:15:13.569 7fc7b8243700 -1 bluestore(/var/lib/ceph/osd/ceph-9) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.9.log: -3> 2019-07-11 09:15:13.569 7fc7b8243700 -1 bluestore(/var/lib/ceph/osd/ceph-9) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
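The `_txc_add_transaction error (39) Directory not empty` line is the common thread: BlueStore hit an unexpected ENOTEMPTY while applying a transaction and the OSD aborts rather than continue, which is why osd.3, osd.9 and osd.15 keep restarting. To tally how many times each OSD hit it, rotated logs included, something like this works (a sketch; zgrep reads both plain and gzipped files):

    # count "Directory not empty" aborts per OSD log, rotated logs included
    zgrep -c "Directory not empty" /var/log/ceph/ceph-osd.*.log* | grep -v ':0$'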
|
|
root@synergy0:/var/log/ceph# ceph^C
|
|
root@synergy0:/var/log/ceph# exit
|
|
logout
|
|
Connection to synergy0 closed.
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy1
|
|
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)
|
|
|
|
* Documentation: https://help.ubuntu.com
|
|
* Management: https://landscape.canonical.com
|
|
* Support: https://ubuntu.com/advantage
|
|
|
|
66 packages can be updated.
|
|
51 updates are security updates.
|
|
|
|
|
|
Last login: Thu Jul 11 05:18:04 2019 from 216.106.88.5
|
|
root@synergy1:~# cd /var/log/ceph
|
|
root@synergy1:/var/log/ceph# ls
|
|
ceph.audit.log ceph.log.6.gz ceph-mgr.synergy1.log.4.gz ceph-osd.10.log.2.gz ceph-osd.13.log.6.gz ceph-osd.1.log.4.gz ceph-osd.7.log.2.gz ceph-volume-systemd.log
|
|
ceph.audit.log.1.gz ceph.log.7.gz ceph-mgr.synergy1.log.5.gz ceph-osd.10.log.3.gz ceph-osd.13.log.7.gz ceph-osd.1.log.5.gz ceph-osd.7.log.3.gz ceph-volume-systemd.log.1.gz
|
|
ceph.audit.log.2.gz ceph-mds.synergy1.log ceph-mgr.synergy1.log.6.gz ceph-osd.10.log.4.gz ceph-osd.16.log ceph-osd.1.log.6.gz ceph-osd.7.log.4.gz ceph-volume-systemd.log.2.gz
|
|
ceph.audit.log.3.gz ceph-mds.synergy1.log.1.gz ceph-mgr.synergy1.log.7.gz ceph-osd.10.log.5.gz ceph-osd.16.log.1.gz ceph-osd.1.log.7.gz ceph-osd.7.log.5.gz ceph-volume-systemd.log.3.gz
|
|
ceph.audit.log.4.gz ceph-mds.synergy1.log.2.gz ceph-mon.synergy1.log ceph-osd.10.log.6.gz ceph-osd.16.log.2.gz ceph-osd.4.log ceph-osd.7.log.6.gz ceph-volume-systemd.log.4.gz
|
|
ceph.audit.log.5.gz ceph-mds.synergy1.log.3.gz ceph-mon.synergy1.log.1.gz ceph-osd.10.log.7.gz ceph-osd.16.log.3.gz ceph-osd.4.log.1.gz ceph-osd.7.log.7.gz ceph-volume-systemd.log.5.gz
|
|
ceph.audit.log.6.gz ceph-mds.synergy1.log.4.gz ceph-mon.synergy1.log.2.gz ceph-osd.12.log ceph-osd.16.log.4.gz ceph-osd.4.log.2.gz ceph-volume.log ceph-volume-systemd.log.6.gz
|
|
ceph.audit.log.7.gz ceph-mds.synergy1.log.5.gz ceph-mon.synergy1.log.3.gz ceph-osd.12.log.1.gz ceph-osd.16.log.5.gz ceph-osd.4.log.3.gz ceph-volume.log.1.gz ceph-volume-systemd.log.7.gz
|
|
ceph.log ceph-mds.synergy1.log.6.gz ceph-mon.synergy1.log.4.gz ceph-osd.13.log ceph-osd.16.log.6.gz ceph-osd.4.log.4.gz ceph-volume.log.2.gz
|
|
ceph.log.1.gz ceph-mds.synergy1.log.7.gz ceph-mon.synergy1.log.5.gz ceph-osd.13.log.1.gz ceph-osd.16.log.7.gz ceph-osd.4.log.5.gz ceph-volume.log.3.gz
|
|
ceph.log.2.gz ceph-mgr.synergy1.log ceph-mon.synergy1.log.6.gz ceph-osd.13.log.2.gz ceph-osd.1.log ceph-osd.4.log.6.gz ceph-volume.log.4.gz
|
|
ceph.log.3.gz ceph-mgr.synergy1.log.1.gz ceph-mon.synergy1.log.7.gz ceph-osd.13.log.3.gz ceph-osd.1.log.1.gz ceph-osd.4.log.7.gz ceph-volume.log.5.gz
|
|
ceph.log.4.gz ceph-mgr.synergy1.log.2.gz ceph-osd.10.log ceph-osd.13.log.4.gz ceph-osd.1.log.2.gz ceph-osd.7.log ceph-volume.log.6.gz
|
|
ceph.log.5.gz ceph-mgr.synergy1.log.3.gz ceph-osd.10.log.1.gz ceph-osd.13.log.5.gz ceph-osd.1.log.3.gz ceph-osd.7.log.1.gz ceph-volume.log.7.gz
|
|
root@synergy1:/var/log/ceph# grep "Directory not empty" *.log
|
|
ceph-osd.10.log:2019-07-11 08:19:05.097 7f26db3de700 -1 bluestore(/var/lib/ceph/osd/ceph-10) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.10.log: -3> 2019-07-11 08:19:05.097 7f26db3de700 -1 bluestore(/var/lib/ceph/osd/ceph-10) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.10.log:2019-07-11 08:19:19.813 7f976dbc3700 -1 bluestore(/var/lib/ceph/osd/ceph-10) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.10.log: -15> 2019-07-11 08:19:19.813 7f976dbc3700 -1 bluestore(/var/lib/ceph/osd/ceph-10) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.10.log:2019-07-11 09:12:00.911 7f9c09d75700 -1 bluestore(/var/lib/ceph/osd/ceph-10) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.10.log: -3> 2019-07-11 09:12:00.911 7f9c09d75700 -1 bluestore(/var/lib/ceph/osd/ceph-10) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.10.log:2019-07-11 09:15:08.543 7f0ef2a03700 -1 bluestore(/var/lib/ceph/osd/ceph-10) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.10.log: -3> 2019-07-11 09:15:08.543 7f0ef2a03700 -1 bluestore(/var/lib/ceph/osd/ceph-10) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.13.log:2019-07-11 08:19:05.205 7eff7a42f700 -1 bluestore(/var/lib/ceph/osd/ceph-13) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.13.log: -3> 2019-07-11 08:19:05.205 7eff7a42f700 -1 bluestore(/var/lib/ceph/osd/ceph-13) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.13.log:2019-07-11 08:19:20.693 7f64ce363700 -1 bluestore(/var/lib/ceph/osd/ceph-13) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.13.log: -3> 2019-07-11 08:19:20.693 7f64ce363700 -1 bluestore(/var/lib/ceph/osd/ceph-13) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.13.log:2019-07-11 09:12:00.927 7fa7a4af9700 -1 bluestore(/var/lib/ceph/osd/ceph-13) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.13.log: -3> 2019-07-11 09:12:00.927 7fa7a4af9700 -1 bluestore(/var/lib/ceph/osd/ceph-13) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.13.log:2019-07-11 09:15:08.627 7fc575850700 -1 bluestore(/var/lib/ceph/osd/ceph-13) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
ceph-osd.13.log: -3> 2019-07-11 09:15:08.627 7fc575850700 -1 bluestore(/var/lib/ceph/osd/ceph-13) _txc_add_transaction error (39) Directory not empty not handled on operation 21 (op 1, counting from 0)
|
|
root@synergy1:/var/log/ceph# exit
|
|
logout
|
|
Connection to synergy1 closed.
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy0
|
|
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)
|
|
|
|
* Documentation: https://help.ubuntu.com
|
|
* Management: https://landscape.canonical.com
|
|
* Support: https://ubuntu.com/advantage
|
|
|
|
75 packages can be updated.
|
|
51 updates are security updates.
|
|
|
|
New release '18.04.2 LTS' available.
|
|
Run 'do-release-upgrade' to upgrade to it.
|
|
|
|
|
|
Last login: Thu Jul 11 08:47:22 2019 from 216.106.0.188
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Degraded data redundancy: 9167/3104528 objects degraded (0.295%), 8 pgs degraded, 8 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 19h)
|
|
mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 24 up (since 34m), 24 in (since 35m); 39 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9167/3104528 objects degraded (0.295%)
|
|
64970/3104528 objects misplaced (2.093%)
|
|
497 active+clean
|
|
23 active+remapped+backfilling
|
|
9 active+remapped+backfill_wait
|
|
8 active+undersized+degraded+remapped+backfilling
|
|
|
|
io:
|
|
client: 2.0 KiB/s rd, 45 MiB/s wr, 0 op/s rd, 18 op/s wr
|
|
recovery: 104 MiB/s, 26 objects/s
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Degraded data redundancy: 7857/3104528 objects degraded (0.253%), 7 pgs degraded, 7 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 19h)
|
|
mgr: synergy2(active, since 19h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 24 up (since 37m), 24 in (since 38m); 38 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 7857/3104528 objects degraded (0.253%)
|
|
61696/3104528 objects misplaced (1.987%)
|
|
499 active+clean
|
|
23 active+remapped+backfilling
|
|
8 active+remapped+backfill_wait
|
|
7 active+undersized+degraded+remapped+backfilling
|
|
|
|
io:
|
|
recovery: 112 MiB/s, 27 objects/s
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Degraded data redundancy: 5601/3104528 objects degraded (0.180%), 5 pgs degraded, 5 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 19h)
|
|
mgr: synergy2(active, since 20h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 24 up (since 44m), 24 in (since 44m); 34 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 5601/3104528 objects degraded (0.180%)
|
|
53959/3104528 objects misplaced (1.738%)
|
|
503 active+clean
|
|
25 active+remapped+backfilling
|
|
5 active+undersized+degraded+remapped+backfilling
|
|
4 active+remapped+backfill_wait
|
|
|
|
io:
|
|
recovery: 102 MiB/s, 25 objects/s
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Degraded data redundancy: 3601/3104528 objects degraded (0.116%), 3 pgs degraded, 3 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 19h)
|
|
mgr: synergy2(active, since 20h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 24 up (since 52m), 24 in (since 52m); 31 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 3601/3104528 objects degraded (0.116%)
|
|
43082/3104528 objects misplaced (1.388%)
|
|
506 active+clean
|
|
27 active+remapped+backfilling
|
|
3 active+undersized+degraded+remapped+backfilling
|
|
1 active+remapped+backfill_wait
|
|
|
|
io:
|
|
client: 1.3 KiB/s wr, 0 op/s rd, 0 op/s wr
|
|
recovery: 103 MiB/s, 25 objects/s
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Degraded data redundancy: 3451/3104528 objects degraded (0.111%), 3 pgs degraded, 3 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 19h)
|
|
mgr: synergy2(active, since 20h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 24 up (since 53m), 24 in (since 53m); 30 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 3451/3104528 objects degraded (0.111%)
|
|
41650/3104528 objects misplaced (1.342%)
|
|
507 active+clean
|
|
27 active+remapped+backfilling
|
|
3 active+undersized+degraded+remapped+backfilling
|
|
|
|
io:
|
|
recovery: 108 MiB/s, 26 objects/s
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Degraded data redundancy: 2339/3104528 objects degraded (0.075%), 2 pgs degraded, 2 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 20h)
|
|
mgr: synergy2(active, since 20h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 24 up (since 65m), 24 in (since 66m); 22 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 2339/3104528 objects degraded (0.075%)
|
|
22893/3104528 objects misplaced (0.737%)
|
|
515 active+clean
|
|
20 active+remapped+backfilling
|
|
2 active+undersized+degraded+remapped+backfilling
|
|
|
|
io:
|
|
recovery: 102 MiB/s, 25 objects/s
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_OK
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 20h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 24 up (since 103m), 24 in (since 104m)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 536 active+clean
|
|
1 active+clean+scrubbing+deep
|
|
|
|
root@synergy0:~#
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_OK
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 20h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 24 up (since 107m), 24 in (since 107m)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 537 active+clean
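The repeated `ceph -s` runs above were tracking the backfill down to zero by hand. A small poll loop would do the same waiting; a minimal sketch, not part of the original session:

    # print the interesting lines every minute until the cluster is healthy
    while ! ceph health | grep -q HEALTH_OK; do
        ceph -s | grep -E 'health|degraded|misplaced|backfill'
        sleep 60
    done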
|
|
|
|
root@synergy0:~# ssh synergy3
|
|
root@synergy3's password:
|
|
|
|
root@synergy0:~# ^C
|
|
root@synergy0:~# exit
|
|
logout
|
|
Connection to synergy0 closed.
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh synergy3
|
|
ekalk@synergy3's password:
|
|
|
|
Amandas-MacBook-Pro:.ssh ekalk$
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy3
|
|
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)
|
|
|
|
* Documentation: https://help.ubuntu.com
|
|
* Management: https://landscape.canonical.com
|
|
* Support: https://ubuntu.com/advantage
|
|
|
|
67 packages can be updated.
|
|
51 updates are security updates.
|
|
|
|
|
|
Last login: Wed Jul 10 13:37:51 2019 from 216.106.0.188
|
|
root@synergy3:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_OK
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 20h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 24 up (since 107m), 24 in (since 108m)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 537 active+clean
|
|
|
|
root@synergy3:~# sudo reboot
|
|
Connection to synergy3 closed by remote host.
|
|
Connection to synergy3 closed.
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ping synergy3
|
|
PING synergy3 (216.106.44.203): 56 data bytes
|
|
64 bytes from 216.106.44.203: icmp_seq=0 ttl=62 time=31.018 ms
|
|
64 bytes from 216.106.44.203: icmp_seq=1 ttl=62 time=1.772 ms
|
|
64 bytes from 216.106.44.203: icmp_seq=2 ttl=62 time=1.962 ms
|
|
Request timeout for icmp_seq 3
|
|
Request timeout for icmp_seq 4
|
|
[... request timeouts continue through icmp_seq 77 while synergy3 reboots ...]
|
|
^C
|
|
--- synergy3 ping statistics ---
|
|
79 packets transmitted, 3 packets received, 96.2% packet loss
|
|
round-trip min/avg/max/stddev = 1.772/11.584/31.018/13.742 ms
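synergy3 answered three pings and then went quiet for the duration of the reboot. A wait loop beats counting timeouts by hand; a sketch:

    # block until synergy3 responds again, checking every 5 seconds
    until ping -c 1 synergy3 >/dev/null 2>&1; do sleep 5; done
    echo synergy3 is back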
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy0
|
|
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)
|
|
|
|
* Documentation: https://help.ubuntu.com
|
|
* Management: https://landscape.canonical.com
|
|
* Support: https://ubuntu.com/advantage
|
|
|
|
75 packages can be updated.
|
|
51 updates are security updates.
|
|
|
|
New release '18.04.2 LTS' available.
|
|
Run 'do-release-upgrade' to upgrade to it.
|
|
|
|
|
|
Last login: Thu Jul 11 09:50:14 2019 from 216.106.0.188
|
|
root@synergy0:~#
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
6 osds down
|
|
1 host (6 osds) down
|
|
Degraded data redundancy: 1060810/3104528 objects degraded (34.170%), 364 pgs degraded, 372 pgs undersized
|
|
too few PGs per OSD (29 < min 30)
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 20h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 18 up (since 93s), 24 in (since 110m)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 1060810/3104528 objects degraded (34.170%)
|
|
364 active+undersized+degraded
|
|
164 active+clean
|
|
8 active+undersized
|
|
1 active+clean+scrubbing+deep
|
|
|
|
io:
|
|
client: 340 B/s rd, 170 B/s wr, 0 op/s rd, 0 op/s wr
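With synergy3 mid-reboot the warning is expected: one host bucket holding 6 OSDs is gone, so roughly a third of the objects temporarily lose a replica. To confirm exactly which daemons are down without reading the full tree, a state filter on the tree (supported on recent releases; a sketch) narrows it:

    ceph osd tree down      # print only the down OSDs and their host bucket
    ceph health detail      # name the down host and the degraded PGs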
|
|
|
|
root@synergy0:~# history
|
|
1 passwd
|
|
2 exit
|
|
3 passwd
|
|
4 exit
|
|
5 cd /etc
|
|
6 ls -lah
|
|
7 vi sudoers
|
|
8 exit
|
|
9 history
|
|
10 exit
|
|
11 ls
|
|
12 cd .
|
|
13 cd /
|
|
14 ls
|
|
15 cd var/run
|
|
16 ls
|
|
17 cd ceph
|
|
18 ls
|
|
19 exit
|
|
20 cd /var/run/ceph
|
|
21 ls -lah
|
|
22 cd ..
|
|
23 cd.
|
|
24 cd ..
|
|
25 cd ~
|
|
26 ls
|
|
27 cd /
|
|
28 exit
|
|
29 cd /etc/ceph
|
|
30 ls
|
|
31 ls -lah
|
|
32 cd ..
|
|
33 ls
|
|
34 cd /var/run
|
|
35 ls
|
|
36 cd ceph
|
|
37 ls
|
|
38 ls -lah
|
|
39 cd ../..
|
|
40 cd ..
|
|
41 exit
|
|
42 yn$3rgy
|
|
43 iptables --list -n -v
|
|
44 netstat -lnp
|
|
45 netstat -lnp | grep 6789
|
|
46 netstat -lnp | grep 6798
|
|
47 dmesg
|
|
48 netstat -anp
|
|
49 netstat -anp | grep 6789
|
|
50 netstat -anp | grep 216.106.44.190
|
|
51 ps auxww
|
|
52 cd /etc/ceph/
|
|
53 ls
|
|
54 ls -la
|
|
55 joe ceph.conf
|
|
56 cat ceph.conf
|
|
57 ls
|
|
58 cat ceph.client.admin.keyring
|
|
59 cat rbdmap
|
|
60 virsh
|
|
61 virsh list
|
|
62 virsh migrate
|
|
63 virsh migrate -h
|
|
64 virsh migrate backup2 qemu+ssh://216.106.44.201/system
|
|
65 whois
|
|
66 who
|
|
67 ps auxww | grep kvm
|
|
68 apt-get install --no-install-recommends joe binutils smartmontools nagios-nrpe-server nagios-plugins-basic collectd libnet-ntp-perl
|
|
69 service collectd stop
|
|
70 cd /etc/collectd/
|
|
71 wget http://vmsupp1.socket.net/files/collectd_global.conf
|
|
72 wget http://vmsupp1.socket.net/files/collectd_local.conf
|
|
73 wget -N http://vmsupp1.socket.net/files/collectd.conf
|
|
74 service collectd start
|
|
75 joe /etc/hosts
|
|
76 hostname -f
|
|
77 hostname -s
|
|
78 hostname
|
|
79 joe /etc/hosts
|
|
80 hostname -f
|
|
81 hostname -s
|
|
82 route -n
|
|
83 scp synergy@216.106.44.201:/etc/libvirt/qemu/backup2.xml ./
|
|
84 history
|
|
85 exit
|
|
86 dd if=/dev/zero | pv >/dev/null
|
|
87 dd if=/dev/zero | pv | dd of=/dev/null
|
|
88 dd if=/dev/zero | pv | dd >/dev/null
|
|
89 dd if=/dev/zero | pv | >/dev/null
|
|
90 dd if=/dev/zero | pv >/dev/null
|
|
91 ls -la /dev/null
|
|
92 dd if=/dev/zero of=/dev/null count=1346219
|
|
93 dd if=/dev/zero of=/dev/null count=1000000
|
|
94 dd if=/dev/zero of=/dev/null count=2500000
|
|
95 dd if=/dev/zero of=/dev/null count=2000000
|
|
96 dd if=/dev/null of=/dev/null count=2000000
|
|
97 dd if=/dev/zero count=2000000 >/dev/null
|
|
98 dd if=/dev/zero count=2000000 | dd of=/dev/null
|
|
99 dd if=/dev/zero count=1000000 bs=1K >/dev/null
|
|
100 dd if=/dev/zero count=250000 bs=4K >/dev/null
|
|
101 dd if=/dev/zero count=250000 bs=4K | pv >/dev/null
|
|
102 dd if=/dev/zero | nc 216.106.44.202 5680
|
|
103 dd if=/dev/zero | nc 216.106.44.202 5678
|
|
104 dd if=/dev/zero | nc 216.106.44.202 5680
|
|
105 dd if=/dev/zero count=1000000 | nc backup2.socket.net
|
|
106 sync; echo 3 > /proc/sys/vm/drop_caches
|
|
107 htop
|
|
108 sync; echo 3 > /proc/sys/vm/drop_caches
|
|
109 exit
|
|
110 sync; echo 1 > /proc/sys/vm/drop_caches
|
|
111 top
|
|
112 exit
|
|
113 echo 0 > /sys/kernel/debug/x86/pti_enabled
|
|
114 exit
|
|
115 echo 0 > /sys/kernel/debug/x86/pti_enabled
|
|
116 echo 0 > /sys/kernel/debug
|
|
117 echo 0 > /sys/kernel/debug/
|
|
118 ls /sys/kernel/debug
|
|
119 ls /sys/kernel/debug/x86
|
|
120 ls-lah /sys/kernel/debug/x86
|
|
121 ls -lah /sys/kernel/debug/x86
|
|
122 exit
|
|
123 cd /
|
|
124 find | grep access_control.py
|
|
125 less ./usr/share/ceph/mgr/dashboard/services/access_control.py
|
|
126 vim ./usr/share/ceph/mgr/dashboard/services/access_control.py
|
|
127 exit
|
|
128 history
|
|
129 sync; echo 3 > /proc/sys/vm/drop_caches
|
|
130 exit
|
|
131 passwd
|
|
132 exit
|
|
133 cd /usr/src
|
|
134 wget http://vmsupp1.socket.net/files/nrpe_scripts.tgz
|
|
135 tar zxvf nrpe_scripts.tgz -C /usr/local/bin/
|
|
136 ssh monitor2
|
|
137 service nagios-nrpe-server restart
|
|
138 cd /etc/nagios3/conf.d
|
|
139 ls
|
|
140 cd /etc/nagios
|
|
141 ls
|
|
142 cd nrpe.d
|
|
143 ls
|
|
144 cd /etc/nagios/nrpe.d
|
|
145 wget http://vmsupp1.socket.net/files/nrpe_global.cfg
|
|
146 wget http://vmsupp1.socket.net/files/nrpe_local.cfg
|
|
147 cd /etc/nagios/nrpe.d
|
|
148 wget http://vmsupp1.socket.net/files/nrpe_global.cfg
|
|
149 wget http://vmsupp1.socket.net/files/nrpe_local.cfg
|
|
150 service nagios-nrpe-server restart
|
|
151 ifconfig -a
|
|
152 top
|
|
153 cd /etc/libvirt/qemu
|
|
154 ls
|
|
155 cd /etc/collectd/
|
|
156 ls
|
|
157 joe collectd_local.conf
|
|
158 ls
|
|
159 cd collectd.conf.d/
|
|
160 ls
|
|
161 joe collectd_ceph.conf
|
|
162 service collectd restart
|
|
163 grep collectd /var/log/syslog
|
|
164 cd /etc/collectd/collectd.d/
|
|
165 ls
|
|
166 cd /etc/collectd/collectd.conf.d/
|
|
167 ls
|
|
168 joe collectd_ceph.conf
|
|
169 service collectd restart
|
|
170 grep collectd /var/log/syslog
|
|
171 ks
|
|
172 ls
|
|
173 cp collectd_ceph.conf collectd_kvm.conf
|
|
174 joe collectd_kvm.conf
|
|
175 service collectd restart
|
|
176 ls
|
|
177 scp collectd_ceph.conf collectd_kvm.conf root@216.106.44.201:/etc/collectd/collectd.conf.d/
|
|
178 scp collectd_ceph.conf collectd_kvm.conf root@216.106.44.202:/etc/collectd/collectd.conf.d/
|
|
179 scp collectd_ceph.conf collectd_kvm.conf root@216.106.44.203:/etc/collectd/collectd.conf.d/
|
|
180 ceph -s
|
|
181 ceph iostat
|
|
182 exit
|
|
183 ssh 216.106.44.208
|
|
184 virsh list
|
|
185 ceph -s
|
|
186 date
|
|
187 ceph -s
|
|
188 apt-get install ifenslave
|
|
189 cat /etc/network/interfaces
|
|
190 ssh synergy1
|
|
191 ssh synergy2
|
|
192 ssh synergy3
|
|
193 exit
|
|
194 cat /etc/network/interfaces
|
|
195 ifconfig -a
|
|
196 ssh synergy3
|
|
197 exit
|
|
198 dmesg
|
|
199 cd /var/log
|
|
200 ls
|
|
201 tail syslog
|
|
202 grep osd syslog
|
|
203 journalctl
|
|
204 journalctl --help
|
|
205 journalctl --no-pager
|
|
206 journalctl --no-pager | tail -n 500000
|
|
207 journalctl --no-pager | tail -n 50000
|
|
208 histroy
|
|
209 history
|
|
210 sudo vim /etc/network/interfaces
|
|
211 sudo cat /etc/network/interfaces
|
|
212 ifconfig -a
|
|
213 ceph -s
|
|
214 ceph pg dump pgs_brief | egrep '\[0,|UP_' | head -5
|
|
215 ceph -s
|
|
216 ceph pg dump pgs_brief | grep scrub
|
|
217 ceph -s
|
|
218 exit
|
|
219 iftop
|
|
220 iostat -xtc 3
|
|
221 exit
|
|
222 apt-get install nload
|
|
223 nload -m enp3s0f0 enp5s0f0 enp3s0f1 enp5s0f1 -u m
|
|
224 iostat -xtc 3
|
|
225 ceph -s
|
|
226 top
|
|
227 iostat -xtc 3
|
|
228 ceph -s
|
|
229 top
|
|
230 iostat -xtc 3
|
|
231 ceph -s
|
|
232 ceph -w
|
|
233 ceph -s
|
|
234 sudo ceph-volume lvm list
|
|
235 ceph -s
|
|
236 ceph -w
|
|
237 ceph -s
|
|
238 [A
|
|
239 ceph -s
|
|
240 ceph -w
|
|
241 ceph -s
|
|
242 ping backup2
|
|
243 ceph -s
|
|
244 exit
|
|
245 ceph -s
|
|
246 ceph -w
|
|
247 ceph -s
|
|
248 ping backup2
|
|
249 ceph -s
|
|
250 ping backup2
|
|
251 ceph -s
|
|
252 ping backup2
|
|
253 ceph -s
|
|
254 history
|
|
255 su synergy
|
|
256 ceph -s
|
|
257 history | grep health
|
|
258 su synergy
|
|
259 ceph osd ls
|
|
260 ceph osd tree
|
|
261 ceph iostat
|
|
262 ceph -s
|
|
263 apt-get upgrade ceph-mgr
|
|
264 ceph -s
|
|
265 ssh synergy3
|
|
266 exit
|
|
267 ceph -s
|
|
268 sudo reboot
|
|
269 ceph pg 6.0 query
|
|
270 ceph pg ls
|
|
271 ceph pg ls | cut -f 1
|
|
272 ceph pg ls | cut -f 1 -d ' '
|
|
273 ceph pg ls
|
|
274 ceph pg 6.0 query
|
|
275 ceph -s
|
|
276 ceph pg 0.5 query
|
|
277 ceph pg 6.0 query
|
|
278 ceph -s
|
|
279 ceph -w
|
|
280 eph -s
|
|
281 ceph -s
|
|
282 ceph -w
|
|
283 ceph -s
|
|
284 ceph-volume lvm list
|
|
285 ceph -s
|
|
286 ceph -w
|
|
287 ceph -s
|
|
288 man ceph
|
|
289 ceph -w
|
|
290 ceph -s
|
|
291 ceph osd down osd.3
|
|
292 sudo systemctl stop ceph-osd@3
|
|
293 ceph osd rm osd.3
|
|
294 ceph osd crush remove osd.3
|
|
295 ceph auth del osd.3
|
|
296 sudo gdisk /dev/sdc
|
|
297 lsblk
|
|
298 dmsetup remove ceph--da809eb4--ab05--46d0--8ec8--2dd969bed20c-osd--block--5ac261bf--2968--473a--bed7--a73d7f0afccb
|
|
299 ceph -s
|
|
300 cd /var/log/ceph
|
|
301 ls
|
|
302 grep boot *.log
|
|
303 ping backup2
|
|
304 grep "Directory not empty" *.log
|
|
305 exit
|
|
306 ceph -s
|
|
307 ssh synergy3
|
|
308 exit
|
|
309 ceph -s
|
|
310 history
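Entries 291-298 above are the removal-and-rebuild cycle for osd.3 that preceded the log digging. Collected in order, with the destructive steps marked (a sketch of what the history shows; on Luminous and later, `ceph osd purge osd.3` collapses the rm / crush remove / auth del trio into one command):

    ceph osd down osd.3                 # mark it down in the OSD map
    systemctl stop ceph-osd@3           # stop the daemon itself
    ceph osd rm osd.3                   # remove it from the OSD map
    ceph osd crush remove osd.3         # drop it from the CRUSH hierarchy
    ceph auth del osd.3                 # delete its cephx key
    # DESTRUCTIVE: wipe the old partition table and device-mapper state
    gdisk /dev/sdc                      # entry 296; device name as seen in lsblk
    dmsetup remove <mapper device from entry 298>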
|
|
root@synergy0:~# cd /var/log/ceph
|
|
root@synergy0:/var/log/ceph# ls
|
|
ceph.audit.log ceph.log.1.gz ceph-mgr.synergy0.log.2.gz ceph-osd.0.log.3.gz ceph-osd.15.log.4.gz ceph-osd.6.log.5.gz ceph-volume.log.4.gz
|
|
ceph.audit.log.1.gz ceph.log.2.gz ceph-mgr.synergy0.log.3.gz ceph-osd.0.log.4.gz ceph-osd.15.log.5.gz ceph-osd.6.log.6.gz ceph-volume.log.5.gz
|
|
ceph.audit.log.2.gz ceph.log.3.gz ceph-mgr.synergy0.log.4.gz ceph-osd.0.log.5.gz ceph-osd.15.log.6.gz ceph-osd.6.log.7.gz ceph-volume.log.6.gz
|
|
ceph.audit.log.3.gz ceph.log.4.gz ceph-mgr.synergy0.log.5.gz ceph-osd.0.log.6.gz ceph-osd.15.log.7.gz ceph-osd.7.log ceph-volume.log.7.gz
|
|
ceph.audit.log.4.gz ceph.log.5.gz ceph-mgr.synergy0.log.6.gz ceph-osd.0.log.7.gz ceph-osd.3.log ceph-osd.7.log.1.gz ceph-volume-systemd.log
|
|
ceph.audit.log.5.gz ceph.log.6.gz ceph-mgr.synergy0.log.7.gz ceph-osd.12.log ceph-osd.3.log.1.gz ceph-osd.9.log ceph-volume-systemd.log.1.gz
|
|
ceph.audit.log.6.gz ceph.log.7.gz ceph-mon.synergy0.log ceph-osd.12.log.1.gz ceph-osd.3.log.2.gz ceph-osd.9.log.1.gz ceph-volume-systemd.log.2.gz
|
|
ceph.audit.log.7.gz ceph-mds.synergy0.log ceph-mon.synergy0.log.1.gz ceph-osd.12.log.2.gz ceph-osd.3.log.3.gz ceph-osd.9.log.2.gz ceph-volume-systemd.log.3.gz
|
|
ceph-client.rgw.synergy0.log ceph-mds.synergy0.log.1.gz ceph-mon.synergy0.log.2.gz ceph-osd.12.log.3.gz ceph-osd.3.log.4.gz ceph-osd.9.log.3.gz ceph-volume-systemd.log.4.gz
|
|
ceph-client.rgw.synergy0.log.1.gz ceph-mds.synergy0.log.2.gz ceph-mon.synergy0.log.3.gz ceph-osd.12.log.4.gz ceph-osd.3.log.5.gz ceph-osd.9.log.4.gz ceph-volume-systemd.log.5.gz
|
|
ceph-client.rgw.synergy0.log.2.gz ceph-mds.synergy0.log.3.gz ceph-mon.synergy0.log.4.gz ceph-osd.12.log.5.gz ceph-osd.3.log.6.gz ceph-osd.9.log.5.gz ceph-volume-systemd.log.6.gz
|
|
ceph-client.rgw.synergy0.log.3.gz ceph-mds.synergy0.log.4.gz ceph-mon.synergy0.log.5.gz ceph-osd.12.log.6.gz ceph-osd.3.log.7.gz ceph-osd.9.log.6.gz ceph-volume-systemd.log.7.gz
|
|
ceph-client.rgw.synergy0.log.4.gz ceph-mds.synergy0.log.5.gz ceph-mon.synergy0.log.6.gz ceph-osd.12.log.7.gz ceph-osd.6.log ceph-osd.9.log.7.gz
|
|
ceph-client.rgw.synergy0.log.5.gz ceph-mds.synergy0.log.6.gz ceph-mon.synergy0.log.7.gz ceph-osd.15.log ceph-osd.6.log.1.gz ceph-volume.log
|
|
ceph-client.rgw.synergy0.log.6.gz ceph-mds.synergy0.log.7.gz ceph-osd.0.log ceph-osd.15.log.1.gz ceph-osd.6.log.2.gz ceph-volume.log.1.gz
|
|
ceph-client.rgw.synergy0.log.7.gz ceph-mgr.synergy0.log ceph-osd.0.log.1.gz ceph-osd.15.log.2.gz ceph-osd.6.log.3.gz ceph-volume.log.2.gz
|
|
ceph.log ceph-mgr.synergy0.log.1.gz ceph-osd.0.log.2.gz ceph-osd.15.log.3.gz ceph-osd.6.log.4.gz ceph-volume.log.3.gz
|
|
root@synergy0:/var/log/ceph# grep boot *.log
|
|
ceph.audit.log:2019-07-11 09:14:36.928992 mon.synergy0 (mon.0) 4033 : audit [INF] from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]: dispatch
|
|
ceph.audit.log:2019-07-11 09:14:36.947349 mon.synergy0 (mon.0) 4034 : audit [INF] from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]': finished
|
|
ceph.audit.log:2019-07-11 09:14:51.715675 mon.synergy0 (mon.0) 4041 : audit [DBG] from='client.? 216.106.44.200:0/3242824165' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
|
|
ceph.log:2019-07-11 08:19:19.490252 mon.synergy0 (mon.0) 3105 : cluster [INF] osd.10 [v2:216.106.44.201:6818/23308,v1:216.106.44.201:6820/23308] boot
|
|
ceph.log:2019-07-11 08:19:20.530818 mon.synergy0 (mon.0) 3112 : cluster [INF] osd.13 [v2:216.106.44.201:6802/23324,v1:216.106.44.201:6803/23324] boot
|
|
ceph.log:2019-07-11 08:19:24.600922 mon.synergy0 (mon.0) 3121 : cluster [INF] osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
ceph.log:2019-07-11 08:19:31.090956 mon.synergy0 (mon.0) 3130 : cluster [INF] osd.10 [v2:216.106.44.201:6802/23690,v1:216.106.44.201:6803/23690] boot
|
|
ceph.log:2019-07-11 08:19:43.060292 mon.synergy0 (mon.0) 3141 : cluster [INF] osd.13 [v2:216.106.44.201:6818/23747,v1:216.106.44.201:6820/23747] boot
|
|
ceph.log:2019-07-11 09:11:39.102500 mon.synergy0 (mon.0) 3824 : cluster [INF] osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
ceph.log:2019-07-11 09:12:10.592171 mon.synergy0 (mon.0) 3968 : cluster [INF] osd.10 [v2:216.106.44.201:6802/24938,v1:216.106.44.201:6803/24938] boot
|
|
ceph.log:2019-07-11 09:12:16.717812 mon.synergy0 (mon.0) 3979 : cluster [INF] osd.13 [v2:216.106.44.201:6818/24941,v1:216.106.44.201:6820/24941] boot
|
|
ceph.log:2019-07-11 09:12:34.277218 mon.synergy0 (mon.0) 3997 : cluster [INF] osd.15 [v2:216.106.44.200:6802/16161,v1:216.106.44.200:6803/16161] boot
|
|
ceph.log:2019-07-11 09:15:04.320287 mon.synergy0 (mon.0) 4052 : cluster [INF] osd.3 [v2:216.106.44.200:6810/16987,v1:216.106.44.200:6811/16987] boot
|
|
ceph.log:2019-07-11 09:15:15.501601 mon.synergy0 (mon.0) 4248 : cluster [INF] osd.10 [v2:216.106.44.201:6802/25418,v1:216.106.44.201:6803/25418] boot
|
|
ceph.log:2019-07-11 09:15:20.561861 mon.synergy0 (mon.0) 4257 : cluster [INF] osd.13 [v2:216.106.44.201:6818/25427,v1:216.106.44.201:6820/25427] boot
|
|
ceph.log:2019-07-11 09:15:23.620936 mon.synergy0 (mon.0) 4263 : cluster [INF] osd.15 [v2:216.106.44.200:6802/17179,v1:216.106.44.200:6803/17179] boot
|
|
ceph.log:2019-07-11 09:15:39.440562 mon.synergy0 (mon.0) 4277 : cluster [INF] osd.9 [v2:216.106.44.200:6814/17315,v1:216.106.44.200:6815/17315] boot
|
|
ceph.log:2019-07-11 11:03:30.716413 mon.synergy0 (mon.0) 5454 : cluster [INF] osd.10 [v2:216.106.44.201:6818/27445,v1:216.106.44.201:6820/27445] boot
|
|
ceph.log:2019-07-11 11:03:31.746593 mon.synergy0 (mon.0) 5460 : cluster [INF] osd.9 [v2:216.106.44.200:6814/18374,v1:216.106.44.200:6815/18374] boot
|
|
ceph.log:2019-07-11 11:03:35.890543 mon.synergy0 (mon.0) 5467 : cluster [INF] osd.13 [v2:216.106.44.201:6802/27448,v1:216.106.44.201:6803/27448] boot
|
|
ceph.log:2019-07-11 11:03:39.123178 mon.synergy0 (mon.0) 5474 : cluster [INF] osd.15 [v2:216.106.44.200:6802/18372,v1:216.106.44.200:6803/18372] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:19.486 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6818/23308,v1:216.106.44.201:6820/23308] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:20.526 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6802/23324,v1:216.106.44.201:6803/23324] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:24.599 7f2266efe700 0 log_channel(cluster) log [INF] : osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:31.083 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6802/23690,v1:216.106.44.201:6803/23690] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:43.055 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6818/23747,v1:216.106.44.201:6820/23747] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:11:39.095 7f2266efe700 0 log_channel(cluster) log [INF] : osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:12:10.589 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6802/24938,v1:216.106.44.201:6803/24938] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:12:16.713 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6818/24941,v1:216.106.44.201:6820/24941] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:12:34.274 7f2266efe700 0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6802/16161,v1:216.106.44.200:6803/16161] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:14:36.924 7f226a705700 0 log_channel(audit) log [INF] : from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]: dispatch
|
|
ceph-mon.synergy0.log:2019-07-11 09:14:36.940 7f2266efe700 0 log_channel(audit) log [INF] : from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]': finished
|
|
ceph-mon.synergy0.log:2019-07-11 09:14:51.712 7f226a705700 0 log_channel(audit) log [DBG] : from='client.? 216.106.44.200:0/3242824165' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:04.317 7f2266efe700 0 log_channel(cluster) log [INF] : osd.3 [v2:216.106.44.200:6810/16987,v1:216.106.44.200:6811/16987] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:15.497 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6802/25418,v1:216.106.44.201:6803/25418] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:20.558 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6818/25427,v1:216.106.44.201:6820/25427] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:23.618 7f2266efe700 0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6802/17179,v1:216.106.44.200:6803/17179] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:39.438 7f2266efe700 0 log_channel(cluster) log [INF] : osd.9 [v2:216.106.44.200:6814/17315,v1:216.106.44.200:6815/17315] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:03:30.712 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6818/27445,v1:216.106.44.201:6820/27445] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:03:31.740 7f2266efe700 0 log_channel(cluster) log [INF] : osd.9 [v2:216.106.44.200:6814/18374,v1:216.106.44.200:6815/18374] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:03:35.885 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6802/27448,v1:216.106.44.201:6803/27448] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:03:39.117 7f2266efe700 0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6802/18372,v1:216.106.44.200:6803/18372] boot
|
|
ceph-osd.15.log:2019-07-11 09:12:33.198 7fbe6881bf00 0 osd.15 43812 done with init, starting boot process
|
|
ceph-osd.15.log:2019-07-11 09:12:33.202 7fbe6881bf00 1 osd.15 43812 start_boot
|
|
ceph-osd.15.log:2019-07-11 09:12:34.306 7fbe51ba1700 1 osd.15 43822 state: booting -> active
|
|
ceph-osd.15.log: -2810> 2019-07-11 09:12:21.553 7fbe6881bf00 2 osd.15 0 boot
|
|
ceph-osd.15.log: -2569> 2019-07-11 09:12:33.198 7fbe6881bf00 0 osd.15 43812 done with init, starting boot process
|
|
ceph-osd.15.log: -2560> 2019-07-11 09:12:33.202 7fbe6881bf00 1 osd.15 43812 start_boot
|
|
ceph-osd.15.log: -2078> 2019-07-11 09:12:34.306 7fbe51ba1700 1 osd.15 43822 state: booting -> active
|
|
ceph-osd.15.log:2019-07-11 09:15:22.482 7f702769ff00 0 osd.15 43834 done with init, starting boot process
|
|
ceph-osd.15.log:2019-07-11 09:15:22.482 7f702769ff00 1 osd.15 43834 start_boot
|
|
ceph-osd.15.log:2019-07-11 09:15:23.654 7f7010a25700 1 osd.15 43843 state: booting -> active
|
|
ceph-osd.15.log: -8657> 2019-07-11 09:15:12.317 7f702769ff00 2 osd.15 0 boot
|
|
ceph-osd.15.log: -8416> 2019-07-11 09:15:22.482 7f702769ff00 0 osd.15 43834 done with init, starting boot process
|
|
ceph-osd.15.log: -8410> 2019-07-11 09:15:22.482 7f702769ff00 1 osd.15 43834 start_boot
|
|
ceph-osd.15.log: -8010> 2019-07-11 09:15:23.654 7f7010a25700 1 osd.15 43843 state: booting -> active
|
|
ceph-osd.15.log:2019-07-11 11:03:38.673 7f3a72eaff00 0 osd.15 43933 done with init, starting boot process
|
|
ceph-osd.15.log:2019-07-11 11:03:38.673 7f3a72eaff00 1 osd.15 43933 start_boot
|
|
ceph-osd.15.log:2019-07-11 11:03:39.149 7f3a5c235700 1 osd.15 43942 state: booting -> active
|
|
ceph-osd.3.log:2019-07-11 08:19:24.023 7f3b59bbdf00 0 osd.3 43702 done with init, starting boot process
|
|
ceph-osd.3.log:2019-07-11 08:19:24.023 7f3b59bbdf00 1 osd.3 43702 start_boot
|
|
ceph-osd.3.log:2019-07-11 08:19:24.627 7f3b42f43700 1 osd.3 43710 state: booting -> active
|
|
ceph-osd.3.log:2019-07-11 09:11:38.215 7f3b42f43700 1 osd.3 43802 start_boot
|
|
ceph-osd.3.log:2019-07-11 09:11:39.107 7f3b42f43700 1 osd.3 43803 state: booting -> active
|
|
ceph-osd.3.log:2019-07-11 09:15:02.425 7f1dff155f00 0 osd.3 0 done with init, starting boot process
|
|
ceph-osd.3.log:2019-07-11 09:15:02.425 7f1dff155f00 1 osd.3 0 start_boot
|
|
ceph-osd.3.log:2019-07-11 09:15:04.337 7f1de84db700 1 osd.3 43831 state: booting -> active
|
|
ceph-osd.9.log:2019-07-11 09:15:38.310 7fc965877f00 0 osd.9 43837 done with init, starting boot process
|
|
ceph-osd.9.log:2019-07-11 09:15:38.310 7fc965877f00 1 osd.9 43837 start_boot
|
|
ceph-osd.9.log:2019-07-11 09:15:39.482 7fc94ebfd700 1 osd.9 43846 state: booting -> active
|
|
ceph-osd.9.log: -8559> 2019-07-11 09:15:33.198 7fc965877f00 2 osd.9 0 boot
|
|
ceph-osd.9.log: -8356> 2019-07-11 09:15:38.310 7fc965877f00 0 osd.9 43837 done with init, starting boot process
|
|
ceph-osd.9.log: -8353> 2019-07-11 09:15:38.310 7fc965877f00 1 osd.9 43837 start_boot
|
|
ceph-osd.9.log: -8020> 2019-07-11 09:15:39.482 7fc94ebfd700 1 osd.9 43846 state: booting -> active
|
|
ceph-osd.9.log:2019-07-11 11:03:31.060 7f0654fdef00 0 osd.9 43933 done with init, starting boot process
|
|
ceph-osd.9.log:2019-07-11 11:03:31.060 7f0654fdef00 1 osd.9 43933 start_boot
|
|
ceph-osd.9.log:2019-07-11 11:03:31.776 7f063e364700 1 osd.9 43938 state: booting -> active
|
|
ceph-volume.log:[2019-07-11 09:14:35,929][ceph_volume.process][INFO ] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 14da6185-b74e-4313-b0a5-4476554eeb92
|
|
ceph-volume.log:[2019-07-11 09:14:50,725][ceph_volume.process][INFO ] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
|
|
root@synergy0:/var/log/ceph# grep start_boot *.log
|
|
ceph-osd.15.log:2019-07-11 09:12:33.202 7fbe6881bf00 1 osd.15 43812 start_boot
|
|
ceph-osd.15.log: -2560> 2019-07-11 09:12:33.202 7fbe6881bf00 1 osd.15 43812 start_boot
|
|
ceph-osd.15.log:2019-07-11 09:15:22.482 7f702769ff00 1 osd.15 43834 start_boot
|
|
ceph-osd.15.log: -8410> 2019-07-11 09:15:22.482 7f702769ff00 1 osd.15 43834 start_boot
|
|
ceph-osd.15.log:2019-07-11 11:03:38.673 7f3a72eaff00 1 osd.15 43933 start_boot
|
|
ceph-osd.3.log:2019-07-11 08:19:24.023 7f3b59bbdf00 1 osd.3 43702 start_boot
|
|
ceph-osd.3.log:2019-07-11 09:11:38.215 7f3b42f43700 1 osd.3 43802 start_boot
|
|
ceph-osd.3.log:2019-07-11 09:15:02.425 7f1dff155f00 1 osd.3 0 start_boot
|
|
ceph-osd.9.log:2019-07-11 09:15:38.310 7fc965877f00 1 osd.9 43837 start_boot
|
|
ceph-osd.9.log: -8353> 2019-07-11 09:15:38.310 7fc965877f00 1 osd.9 43837 start_boot
|
|
ceph-osd.9.log:2019-07-11 11:03:31.060 7f0654fdef00 1 osd.9 43933 start_boot
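Note that the lines prefixed with a negative offset like `-8353>` are not fresh events: they come from the dump of recent in-memory log entries an OSD writes out when it crashes, so each start_boot can show up twice. Counting per log file still makes the flapping obvious; a sketch:

    # start_boot events per OSD log (crash-dump duplicates included)
    grep -c start_boot ceph-osd.*.log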
|
|
root@synergy0:/var/log/ceph#
|
|
root@synergy0:/var/log/ceph# ceph osd tree
|
|
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
|
|
-1 27.28700 root default
|
|
-3 5.45718 host synergy0
|
|
0 hdd 0.90970 osd.0 up 1.00000 1.00000
|
|
3 hdd 0.90919 osd.3 up 1.00000 1.00000
|
|
6 hdd 0.90970 osd.6 up 1.00000 1.00000
|
|
7 hdd 0.90919 osd.7 up 1.00000 1.00000
|
|
9 hdd 0.90970 osd.9 up 1.00000 1.00000
|
|
15 hdd 0.90970 osd.15 up 1.00000 1.00000
|
|
-5 5.45668 host synergy1
|
|
1 hdd 0.90970 osd.1 up 1.00000 1.00000
|
|
4 hdd 0.90919 osd.4 up 1.00000 1.00000
|
|
10 hdd 0.90919 osd.10 up 1.00000 1.00000
|
|
12 hdd 0.90919 osd.12 up 1.00000 1.00000
|
|
13 hdd 0.90970 osd.13 up 1.00000 1.00000
|
|
16 hdd 0.90970 osd.16 up 1.00000 1.00000
|
|
-7 5.45718 host synergy2
|
|
2 hdd 0.90970 osd.2 up 1.00000 1.00000
|
|
8 hdd 0.90970 osd.8 up 1.00000 1.00000
|
|
14 hdd 0.90970 osd.14 up 1.00000 1.00000
|
|
17 hdd 0.90970 osd.17 up 1.00000 1.00000
|
|
18 hdd 0.90919 osd.18 up 1.00000 1.00000
|
|
19 hdd 0.90919 osd.19 up 1.00000 1.00000
|
|
-9 10.91595 host synergy3
|
|
5 hdd 1.81929 osd.5 up 1.00000 1.00000
|
|
11 hdd 1.81940 osd.11 up 1.00000 1.00000
|
|
20 hdd 1.81940 osd.20 up 1.00000 1.00000
|
|
21 hdd 1.81929 osd.21 up 1.00000 1.00000
|
|
22 hdd 1.81929 osd.22 up 1.00000 1.00000
|
|
23 hdd 1.81929 osd.23 up 1.00000 1.00000
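The tree confirms all 24 OSDs are up again, with the flapping ones (3, 9, 15 under synergy0; 10, 13 under synergy1) spread across two hosts. To map a single OSD id back to its host and address without scanning the tree, ceph can do the lookup directly; a sketch:

    ceph osd find 15        # reports host, addresses and crush_location for osd.15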
|
|
root@synergy0:/var/log/ceph# grep start_boot *.log
|
|
ceph-osd.15.log:2019-07-11 09:12:33.202 7fbe6881bf00 1 osd.15 43812 start_boot
|
|
ceph-osd.15.log: -2560> 2019-07-11 09:12:33.202 7fbe6881bf00 1 osd.15 43812 start_boot
|
|
ceph-osd.15.log:2019-07-11 09:15:22.482 7f702769ff00 1 osd.15 43834 start_boot
|
|
ceph-osd.15.log: -8410> 2019-07-11 09:15:22.482 7f702769ff00 1 osd.15 43834 start_boot
|
|
ceph-osd.15.log:2019-07-11 11:03:38.673 7f3a72eaff00 1 osd.15 43933 start_boot
|
|
ceph-osd.15.log: -1743> 2019-07-11 11:03:38.673 7f3a72eaff00 1 osd.15 43933 start_boot
|
|
ceph-osd.15.log:2019-07-11 11:07:58.753 7f4e13266f00 1 osd.15 43950 start_boot
|
|
ceph-osd.15.log: -2477> 2019-07-11 11:07:58.753 7f4e13266f00 1 osd.15 43950 start_boot
|
|
ceph-osd.15.log:2019-07-11 11:12:12.309 7f84a85f6f00 1 osd.15 43964 start_boot
|
|
ceph-osd.3.log:2019-07-11 08:19:24.023 7f3b59bbdf00 1 osd.3 43702 start_boot
|
|
ceph-osd.3.log:2019-07-11 09:11:38.215 7f3b42f43700 1 osd.3 43802 start_boot
|
|
ceph-osd.3.log:2019-07-11 09:15:02.425 7f1dff155f00 1 osd.3 0 start_boot
|
|
ceph-osd.9.log:2019-07-11 09:15:38.310 7fc965877f00 1 osd.9 43837 start_boot
|
|
ceph-osd.9.log: -8353> 2019-07-11 09:15:38.310 7fc965877f00 1 osd.9 43837 start_boot
|
|
ceph-osd.9.log:2019-07-11 11:03:31.060 7f0654fdef00 1 osd.9 43933 start_boot
|
|
ceph-osd.9.log: -1410> 2019-07-11 11:03:31.060 7f0654fdef00 1 osd.9 43933 start_boot
|
|
ceph-osd.9.log:2019-07-11 11:07:57.557 7f019fe1cf00 1 osd.9 43950 start_boot
|
|
ceph-osd.9.log: -1734> 2019-07-11 11:07:57.557 7f019fe1cf00 1 osd.9 43950 start_boot
|
|
ceph-osd.9.log:2019-07-11 11:12:13.553 7f9f49deaf00 1 osd.9 43964 start_boot
|
|
root@synergy0:/var/log/ceph# grep boot *.log
|
|
ceph.audit.log:2019-07-11 09:14:36.928992 mon.synergy0 (mon.0) 4033 : audit [INF] from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]: dispatch
|
|
ceph.audit.log:2019-07-11 09:14:36.947349 mon.synergy0 (mon.0) 4034 : audit [INF] from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]': finished
|
|
ceph.audit.log:2019-07-11 09:14:51.715675 mon.synergy0 (mon.0) 4041 : audit [DBG] from='client.? 216.106.44.200:0/3242824165' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
|
|
ceph.log:2019-07-11 08:19:19.490252 mon.synergy0 (mon.0) 3105 : cluster [INF] osd.10 [v2:216.106.44.201:6818/23308,v1:216.106.44.201:6820/23308] boot
|
|
ceph.log:2019-07-11 08:19:20.530818 mon.synergy0 (mon.0) 3112 : cluster [INF] osd.13 [v2:216.106.44.201:6802/23324,v1:216.106.44.201:6803/23324] boot
|
|
ceph.log:2019-07-11 08:19:24.600922 mon.synergy0 (mon.0) 3121 : cluster [INF] osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
ceph.log:2019-07-11 08:19:31.090956 mon.synergy0 (mon.0) 3130 : cluster [INF] osd.10 [v2:216.106.44.201:6802/23690,v1:216.106.44.201:6803/23690] boot
|
|
ceph.log:2019-07-11 08:19:43.060292 mon.synergy0 (mon.0) 3141 : cluster [INF] osd.13 [v2:216.106.44.201:6818/23747,v1:216.106.44.201:6820/23747] boot
|
|
ceph.log:2019-07-11 09:11:39.102500 mon.synergy0 (mon.0) 3824 : cluster [INF] osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
ceph.log:2019-07-11 09:12:10.592171 mon.synergy0 (mon.0) 3968 : cluster [INF] osd.10 [v2:216.106.44.201:6802/24938,v1:216.106.44.201:6803/24938] boot
|
|
ceph.log:2019-07-11 09:12:16.717812 mon.synergy0 (mon.0) 3979 : cluster [INF] osd.13 [v2:216.106.44.201:6818/24941,v1:216.106.44.201:6820/24941] boot
|
|
ceph.log:2019-07-11 09:12:34.277218 mon.synergy0 (mon.0) 3997 : cluster [INF] osd.15 [v2:216.106.44.200:6802/16161,v1:216.106.44.200:6803/16161] boot
|
|
ceph.log:2019-07-11 09:15:04.320287 mon.synergy0 (mon.0) 4052 : cluster [INF] osd.3 [v2:216.106.44.200:6810/16987,v1:216.106.44.200:6811/16987] boot
|
|
ceph.log:2019-07-11 09:15:15.501601 mon.synergy0 (mon.0) 4248 : cluster [INF] osd.10 [v2:216.106.44.201:6802/25418,v1:216.106.44.201:6803/25418] boot
|
|
ceph.log:2019-07-11 09:15:20.561861 mon.synergy0 (mon.0) 4257 : cluster [INF] osd.13 [v2:216.106.44.201:6818/25427,v1:216.106.44.201:6820/25427] boot
|
|
ceph.log:2019-07-11 09:15:23.620936 mon.synergy0 (mon.0) 4263 : cluster [INF] osd.15 [v2:216.106.44.200:6802/17179,v1:216.106.44.200:6803/17179] boot
|
|
ceph.log:2019-07-11 09:15:39.440562 mon.synergy0 (mon.0) 4277 : cluster [INF] osd.9 [v2:216.106.44.200:6814/17315,v1:216.106.44.200:6815/17315] boot
|
|
ceph.log:2019-07-11 11:03:30.716413 mon.synergy0 (mon.0) 5454 : cluster [INF] osd.10 [v2:216.106.44.201:6818/27445,v1:216.106.44.201:6820/27445] boot
|
|
ceph.log:2019-07-11 11:03:31.746593 mon.synergy0 (mon.0) 5460 : cluster [INF] osd.9 [v2:216.106.44.200:6814/18374,v1:216.106.44.200:6815/18374] boot
|
|
ceph.log:2019-07-11 11:03:35.890543 mon.synergy0 (mon.0) 5467 : cluster [INF] osd.13 [v2:216.106.44.201:6802/27448,v1:216.106.44.201:6803/27448] boot
|
|
ceph.log:2019-07-11 11:03:39.123178 mon.synergy0 (mon.0) 5474 : cluster [INF] osd.15 [v2:216.106.44.200:6802/18372,v1:216.106.44.200:6803/18372] boot
|
|
ceph.log:2019-07-11 11:07:29.811531 mon.synergy0 (mon.0) 5499 : cluster [INF] osd.11 [v2:216.106.44.203:6814/4107,v1:216.106.44.203:6815/4107] boot
|
|
ceph.log:2019-07-11 11:07:30.862970 mon.synergy0 (mon.0) 5501 : cluster [INF] osd.5 [v2:216.106.44.203:6801/4101,v1:216.106.44.203:6805/4101] boot
|
|
ceph.log:2019-07-11 11:07:33.965839 mon.synergy0 (mon.0) 5507 : cluster [INF] osd.21 [v2:216.106.44.203:6806/4116,v1:216.106.44.203:6807/4116] boot
|
|
ceph.log:2019-07-11 11:07:48.622653 mon.synergy0 (mon.0) 5518 : cluster [INF] osd.22 [v2:216.106.44.203:6809/4073,v1:216.106.44.203:6813/4073] boot
|
|
ceph.log:2019-07-11 11:07:51.699800 mon.synergy0 (mon.0) 5650 : cluster [INF] osd.23 [v2:216.106.44.203:6800/4111,v1:216.106.44.203:6802/4111] boot
|
|
ceph.log:2019-07-11 11:07:54.798298 mon.synergy0 (mon.0) 5657 : cluster [INF] osd.20 [v2:216.106.44.203:6811/4084,v1:216.106.44.203:6812/4084] boot
|
|
ceph.log:2019-07-11 11:07:57.843653 mon.synergy0 (mon.0) 5663 : cluster [INF] osd.9 [v2:216.106.44.200:6802/18872,v1:216.106.44.200:6803/18872] boot
|
|
ceph.log:2019-07-11 11:07:58.863949 mon.synergy0 (mon.0) 5667 : cluster [INF] osd.15 [v2:216.106.44.200:6814/18902,v1:216.106.44.200:6815/18902] boot
|
|
ceph.log:2019-07-11 11:08:00.891546 mon.synergy0 (mon.0) 5675 : cluster [INF] osd.10 [v2:216.106.44.201:6818/27940,v1:216.106.44.201:6820/27940] boot
|
|
ceph.log:2019-07-11 11:08:11.929250 mon.synergy0 (mon.0) 5685 : cluster [INF] osd.13 [v2:216.106.44.201:6802/27946,v1:216.106.44.201:6803/27946] boot
|
|
ceph.log:2019-07-11 11:12:10.233077 mon.synergy0 (mon.0) 5795 : cluster [INF] osd.10 [v2:216.106.44.201:6818/28366,v1:216.106.44.201:6820/28366] boot
|
|
ceph.log:2019-07-11 11:12:13.249309 mon.synergy0 (mon.0) 5801 : cluster [INF] osd.15 [v2:216.106.44.200:6814/19285,v1:216.106.44.200:6815/19285] boot
|
|
ceph.log:2019-07-11 11:12:14.285750 mon.synergy0 (mon.0) 5807 : cluster [INF] osd.13 [v2:216.106.44.201:6802/28364,v1:216.106.44.201:6803/28364] boot
|
|
ceph.log:2019-07-11 11:12:14.285892 mon.synergy0 (mon.0) 5808 : cluster [INF] osd.9 [v2:216.106.44.200:6802/19278,v1:216.106.44.200:6803/19278] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:19.486 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6818/23308,v1:216.106.44.201:6820/23308] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:20.526 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6802/23324,v1:216.106.44.201:6803/23324] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:24.599 7f2266efe700 0 log_channel(cluster) log [INF] : osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:31.083 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6802/23690,v1:216.106.44.201:6803/23690] boot
|
|
ceph-mon.synergy0.log:2019-07-11 08:19:43.055 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6818/23747,v1:216.106.44.201:6820/23747] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:11:39.095 7f2266efe700 0 log_channel(cluster) log [INF] : osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:12:10.589 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6802/24938,v1:216.106.44.201:6803/24938] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:12:16.713 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6818/24941,v1:216.106.44.201:6820/24941] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:12:34.274 7f2266efe700 0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6802/16161,v1:216.106.44.200:6803/16161] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:14:36.924 7f226a705700 0 log_channel(audit) log [INF] : from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]: dispatch
|
|
ceph-mon.synergy0.log:2019-07-11 09:14:36.940 7f2266efe700 0 log_channel(audit) log [INF] : from='client.? 216.106.44.200:0/1445075373' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "14da6185-b74e-4313-b0a5-4476554eeb92"}]': finished
|
|
ceph-mon.synergy0.log:2019-07-11 09:14:51.712 7f226a705700 0 log_channel(audit) log [DBG] : from='client.? 216.106.44.200:0/3242824165' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:04.317 7f2266efe700 0 log_channel(cluster) log [INF] : osd.3 [v2:216.106.44.200:6810/16987,v1:216.106.44.200:6811/16987] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:15.497 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6802/25418,v1:216.106.44.201:6803/25418] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:20.558 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6818/25427,v1:216.106.44.201:6820/25427] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:23.618 7f2266efe700 0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6802/17179,v1:216.106.44.200:6803/17179] boot
|
|
ceph-mon.synergy0.log:2019-07-11 09:15:39.438 7f2266efe700 0 log_channel(cluster) log [INF] : osd.9 [v2:216.106.44.200:6814/17315,v1:216.106.44.200:6815/17315] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:03:30.712 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6818/27445,v1:216.106.44.201:6820/27445] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:03:31.740 7f2266efe700 0 log_channel(cluster) log [INF] : osd.9 [v2:216.106.44.200:6814/18374,v1:216.106.44.200:6815/18374] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:03:35.885 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6802/27448,v1:216.106.44.201:6803/27448] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:03:39.117 7f2266efe700 0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6802/18372,v1:216.106.44.200:6803/18372] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:07:29.807 7f2266efe700 0 log_channel(cluster) log [INF] : osd.11 [v2:216.106.44.203:6814/4107,v1:216.106.44.203:6815/4107] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:07:30.856 7f2266efe700 0 log_channel(cluster) log [INF] : osd.5 [v2:216.106.44.203:6801/4101,v1:216.106.44.203:6805/4101] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:07:33.960 7f2266efe700 0 log_channel(cluster) log [INF] : osd.21 [v2:216.106.44.203:6806/4116,v1:216.106.44.203:6807/4116] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:07:48.616 7f2266efe700 0 log_channel(cluster) log [INF] : osd.22 [v2:216.106.44.203:6809/4073,v1:216.106.44.203:6813/4073] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:07:51.696 7f2266efe700 0 log_channel(cluster) log [INF] : osd.23 [v2:216.106.44.203:6800/4111,v1:216.106.44.203:6802/4111] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:07:54.793 7f2266efe700 0 log_channel(cluster) log [INF] : osd.20 [v2:216.106.44.203:6811/4084,v1:216.106.44.203:6812/4084] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:07:57.841 7f2266efe700 0 log_channel(cluster) log [INF] : osd.9 [v2:216.106.44.200:6802/18872,v1:216.106.44.200:6803/18872] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:07:58.861 7f2266efe700 0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6814/18902,v1:216.106.44.200:6815/18902] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:08:00.889 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6818/27940,v1:216.106.44.201:6820/27940] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:08:11.925 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6802/27946,v1:216.106.44.201:6803/27946] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:12:10.228 7f2266efe700 0 log_channel(cluster) log [INF] : osd.10 [v2:216.106.44.201:6818/28366,v1:216.106.44.201:6820/28366] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:12:13.245 7f2266efe700 0 log_channel(cluster) log [INF] : osd.15 [v2:216.106.44.200:6814/19285,v1:216.106.44.200:6815/19285] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:12:14.281 7f2266efe700 0 log_channel(cluster) log [INF] : osd.13 [v2:216.106.44.201:6802/28364,v1:216.106.44.201:6803/28364] boot
|
|
ceph-mon.synergy0.log:2019-07-11 11:12:14.281 7f2266efe700 0 log_channel(cluster) log [INF] : osd.9 [v2:216.106.44.200:6802/19278,v1:216.106.44.200:6803/19278] boot
|
|
ceph-osd.15.log:2019-07-11 09:12:33.198 7fbe6881bf00 0 osd.15 43812 done with init, starting boot process
|
|
ceph-osd.15.log:2019-07-11 09:12:33.202 7fbe6881bf00 1 osd.15 43812 start_boot
|
|
ceph-osd.15.log:2019-07-11 09:12:34.306 7fbe51ba1700 1 osd.15 43822 state: booting -> active
|
|
ceph-osd.15.log: -2810> 2019-07-11 09:12:21.553 7fbe6881bf00 2 osd.15 0 boot
|
|
ceph-osd.15.log: -2569> 2019-07-11 09:12:33.198 7fbe6881bf00 0 osd.15 43812 done with init, starting boot process
|
|
ceph-osd.15.log: -2560> 2019-07-11 09:12:33.202 7fbe6881bf00 1 osd.15 43812 start_boot
|
|
ceph-osd.15.log: -2078> 2019-07-11 09:12:34.306 7fbe51ba1700 1 osd.15 43822 state: booting -> active
|
|
ceph-osd.15.log:2019-07-11 09:15:22.482 7f702769ff00 0 osd.15 43834 done with init, starting boot process
|
|
ceph-osd.15.log:2019-07-11 09:15:22.482 7f702769ff00 1 osd.15 43834 start_boot
|
|
ceph-osd.15.log:2019-07-11 09:15:23.654 7f7010a25700 1 osd.15 43843 state: booting -> active
|
|
ceph-osd.15.log: -8657> 2019-07-11 09:15:12.317 7f702769ff00 2 osd.15 0 boot
|
|
ceph-osd.15.log: -8416> 2019-07-11 09:15:22.482 7f702769ff00 0 osd.15 43834 done with init, starting boot process
|
|
ceph-osd.15.log: -8410> 2019-07-11 09:15:22.482 7f702769ff00 1 osd.15 43834 start_boot
|
|
ceph-osd.15.log: -8010> 2019-07-11 09:15:23.654 7f7010a25700 1 osd.15 43843 state: booting -> active
|
|
ceph-osd.15.log:2019-07-11 11:03:38.673 7f3a72eaff00 0 osd.15 43933 done with init, starting boot process
|
|
ceph-osd.15.log:2019-07-11 11:03:38.673 7f3a72eaff00 1 osd.15 43933 start_boot
|
|
ceph-osd.15.log:2019-07-11 11:03:39.149 7f3a5c235700 1 osd.15 43942 state: booting -> active
|
|
ceph-osd.15.log: -1974> 2019-07-11 11:03:28.296 7f3a72eaff00 2 osd.15 0 boot
|
|
ceph-osd.15.log: -1746> 2019-07-11 11:03:38.673 7f3a72eaff00 0 osd.15 43933 done with init, starting boot process
|
|
ceph-osd.15.log: -1743> 2019-07-11 11:03:38.673 7f3a72eaff00 1 osd.15 43933 start_boot
|
|
ceph-osd.15.log: -1597> 2019-07-11 11:03:39.149 7f3a5c235700 1 osd.15 43942 state: booting -> active
|
|
ceph-osd.15.log:2019-07-11 11:07:58.753 7f4e13266f00 0 osd.15 43950 done with init, starting boot process
|
|
ceph-osd.15.log:2019-07-11 11:07:58.753 7f4e13266f00 1 osd.15 43950 start_boot
|
|
ceph-osd.15.log:2019-07-11 11:07:58.897 7f4dfc5ec700 1 osd.15 43957 state: booting -> active
|
|
ceph-osd.15.log: -2689> 2019-07-11 11:07:52.845 7f4e13266f00 2 osd.15 0 boot
|
|
ceph-osd.15.log: -2480> 2019-07-11 11:07:58.753 7f4e13266f00 0 osd.15 43950 done with init, starting boot process
|
|
ceph-osd.15.log: -2477> 2019-07-11 11:07:58.753 7f4e13266f00 1 osd.15 43950 start_boot
|
|
ceph-osd.15.log: -2005> 2019-07-11 11:07:58.897 7f4dfc5ec700 1 osd.15 43957 state: booting -> active
|
|
ceph-osd.15.log:2019-07-11 11:12:12.309 7f84a85f6f00 0 osd.15 43964 done with init, starting boot process
|
|
ceph-osd.15.log:2019-07-11 11:12:12.309 7f84a85f6f00 1 osd.15 43964 start_boot
|
|
ceph-osd.15.log:2019-07-11 11:12:13.285 7f849197c700 1 osd.15 43969 state: booting -> active
|
|
ceph-osd.3.log:2019-07-11 08:19:24.023 7f3b59bbdf00 0 osd.3 43702 done with init, starting boot process
|
|
ceph-osd.3.log:2019-07-11 08:19:24.023 7f3b59bbdf00 1 osd.3 43702 start_boot
|
|
ceph-osd.3.log:2019-07-11 08:19:24.627 7f3b42f43700 1 osd.3 43710 state: booting -> active
|
|
ceph-osd.3.log:2019-07-11 09:11:38.215 7f3b42f43700 1 osd.3 43802 start_boot
|
|
ceph-osd.3.log:2019-07-11 09:11:39.107 7f3b42f43700 1 osd.3 43803 state: booting -> active
|
|
ceph-osd.3.log:2019-07-11 09:15:02.425 7f1dff155f00 0 osd.3 0 done with init, starting boot process
|
|
ceph-osd.3.log:2019-07-11 09:15:02.425 7f1dff155f00 1 osd.3 0 start_boot
|
|
ceph-osd.3.log:2019-07-11 09:15:04.337 7f1de84db700 1 osd.3 43831 state: booting -> active
|
|
ceph-osd.9.log:2019-07-11 09:15:38.310 7fc965877f00 0 osd.9 43837 done with init, starting boot process
|
|
ceph-osd.9.log:2019-07-11 09:15:38.310 7fc965877f00 1 osd.9 43837 start_boot
|
|
ceph-osd.9.log:2019-07-11 09:15:39.482 7fc94ebfd700 1 osd.9 43846 state: booting -> active
|
|
ceph-osd.9.log: -8559> 2019-07-11 09:15:33.198 7fc965877f00 2 osd.9 0 boot
|
|
ceph-osd.9.log: -8356> 2019-07-11 09:15:38.310 7fc965877f00 0 osd.9 43837 done with init, starting boot process
|
|
ceph-osd.9.log: -8353> 2019-07-11 09:15:38.310 7fc965877f00 1 osd.9 43837 start_boot
|
|
ceph-osd.9.log: -8020> 2019-07-11 09:15:39.482 7fc94ebfd700 1 osd.9 43846 state: booting -> active
|
|
ceph-osd.9.log:2019-07-11 11:03:31.060 7f0654fdef00 0 osd.9 43933 done with init, starting boot process
|
|
ceph-osd.9.log:2019-07-11 11:03:31.060 7f0654fdef00 1 osd.9 43933 start_boot
|
|
ceph-osd.9.log:2019-07-11 11:03:31.776 7f063e364700 1 osd.9 43938 state: booting -> active
|
|
ceph-osd.9.log: -1579> 2019-07-11 11:03:26.528 7f0654fdef00 2 osd.9 0 boot
|
|
ceph-osd.9.log: -1413> 2019-07-11 11:03:31.060 7f0654fdef00 0 osd.9 43933 done with init, starting boot process
|
|
ceph-osd.9.log: -1410> 2019-07-11 11:03:31.060 7f0654fdef00 1 osd.9 43933 start_boot
|
|
ceph-osd.9.log: -1332> 2019-07-11 11:03:31.776 7f063e364700 1 osd.9 43938 state: booting -> active
|
|
ceph-osd.9.log:2019-07-11 11:07:57.557 7f019fe1cf00 0 osd.9 43950 done with init, starting boot process
|
|
ceph-osd.9.log:2019-07-11 11:07:57.557 7f019fe1cf00 1 osd.9 43950 start_boot
|
|
ceph-osd.9.log:2019-07-11 11:07:57.865 7f01891a2700 1 osd.9 43956 state: booting -> active
|
|
ceph-osd.9.log: -1902> 2019-07-11 11:07:52.857 7f019fe1cf00 2 osd.9 0 boot
|
|
ceph-osd.9.log: -1737> 2019-07-11 11:07:57.557 7f019fe1cf00 0 osd.9 43950 done with init, starting boot process
|
|
ceph-osd.9.log: -1734> 2019-07-11 11:07:57.557 7f019fe1cf00 1 osd.9 43950 start_boot
|
|
ceph-osd.9.log: -1470> 2019-07-11 11:07:57.865 7f01891a2700 1 osd.9 43956 state: booting -> active
|
|
ceph-osd.9.log:2019-07-11 11:12:13.553 7f9f49deaf00 0 osd.9 43964 done with init, starting boot process
|
|
ceph-osd.9.log:2019-07-11 11:12:13.553 7f9f49deaf00 1 osd.9 43964 start_boot
|
|
ceph-osd.9.log:2019-07-11 11:12:14.313 7f9f33170700 1 osd.9 43970 state: booting -> active
|
|
ceph-volume.log:[2019-07-11 09:14:35,929][ceph_volume.process][INFO ] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 14da6185-b74e-4313-b0a5-4476554eeb92
|
|
ceph-volume.log:[2019-07-11 09:14:50,725][ceph_volume.process][INFO ] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
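Taken together, the greps above show the pattern: osd.9, osd.10, osd.13 and osd.15 boot, go down, and boot again within minutes (08:19, 09:12, 09:15, 11:03, 11:07, 11:12) -- a crash loop rather than a clean start -- and ceph-volume.log records a replacement OSD being prepared at 09:14 (the "osd new" with uuid 14da6185-...). A quick way to rank the flappers (a sketch; assumes the default /var/log/ceph layout):
|
grep ' boot$' /var/log/ceph/ceph.log | grep -oE 'osd\.[0-9]+' | sort | uniq -c | sort -rn
# one line per OSD, boot count first, so the crash-looping OSDs float to the top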
|
|
root@synergy0:/var/log/ceph# grep boot ceph.log
|
|
2019-07-11 08:19:19.490252 mon.synergy0 (mon.0) 3105 : cluster [INF] osd.10 [v2:216.106.44.201:6818/23308,v1:216.106.44.201:6820/23308] boot
|
|
2019-07-11 08:19:20.530818 mon.synergy0 (mon.0) 3112 : cluster [INF] osd.13 [v2:216.106.44.201:6802/23324,v1:216.106.44.201:6803/23324] boot
|
|
2019-07-11 08:19:24.600922 mon.synergy0 (mon.0) 3121 : cluster [INF] osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
2019-07-11 08:19:31.090956 mon.synergy0 (mon.0) 3130 : cluster [INF] osd.10 [v2:216.106.44.201:6802/23690,v1:216.106.44.201:6803/23690] boot
|
|
2019-07-11 08:19:43.060292 mon.synergy0 (mon.0) 3141 : cluster [INF] osd.13 [v2:216.106.44.201:6818/23747,v1:216.106.44.201:6820/23747] boot
|
|
2019-07-11 09:11:39.102500 mon.synergy0 (mon.0) 3824 : cluster [INF] osd.3 [v2:216.106.44.200:6802/14619,v1:216.106.44.200:6803/14619] boot
|
|
2019-07-11 09:12:10.592171 mon.synergy0 (mon.0) 3968 : cluster [INF] osd.10 [v2:216.106.44.201:6802/24938,v1:216.106.44.201:6803/24938] boot
|
|
2019-07-11 09:12:16.717812 mon.synergy0 (mon.0) 3979 : cluster [INF] osd.13 [v2:216.106.44.201:6818/24941,v1:216.106.44.201:6820/24941] boot
|
|
2019-07-11 09:12:34.277218 mon.synergy0 (mon.0) 3997 : cluster [INF] osd.15 [v2:216.106.44.200:6802/16161,v1:216.106.44.200:6803/16161] boot
|
|
2019-07-11 09:15:04.320287 mon.synergy0 (mon.0) 4052 : cluster [INF] osd.3 [v2:216.106.44.200:6810/16987,v1:216.106.44.200:6811/16987] boot
|
|
2019-07-11 09:15:15.501601 mon.synergy0 (mon.0) 4248 : cluster [INF] osd.10 [v2:216.106.44.201:6802/25418,v1:216.106.44.201:6803/25418] boot
|
|
2019-07-11 09:15:20.561861 mon.synergy0 (mon.0) 4257 : cluster [INF] osd.13 [v2:216.106.44.201:6818/25427,v1:216.106.44.201:6820/25427] boot
|
|
2019-07-11 09:15:23.620936 mon.synergy0 (mon.0) 4263 : cluster [INF] osd.15 [v2:216.106.44.200:6802/17179,v1:216.106.44.200:6803/17179] boot
|
|
2019-07-11 09:15:39.440562 mon.synergy0 (mon.0) 4277 : cluster [INF] osd.9 [v2:216.106.44.200:6814/17315,v1:216.106.44.200:6815/17315] boot
|
|
2019-07-11 11:03:30.716413 mon.synergy0 (mon.0) 5454 : cluster [INF] osd.10 [v2:216.106.44.201:6818/27445,v1:216.106.44.201:6820/27445] boot
|
|
2019-07-11 11:03:31.746593 mon.synergy0 (mon.0) 5460 : cluster [INF] osd.9 [v2:216.106.44.200:6814/18374,v1:216.106.44.200:6815/18374] boot
|
|
2019-07-11 11:03:35.890543 mon.synergy0 (mon.0) 5467 : cluster [INF] osd.13 [v2:216.106.44.201:6802/27448,v1:216.106.44.201:6803/27448] boot
|
|
2019-07-11 11:03:39.123178 mon.synergy0 (mon.0) 5474 : cluster [INF] osd.15 [v2:216.106.44.200:6802/18372,v1:216.106.44.200:6803/18372] boot
|
|
2019-07-11 11:07:29.811531 mon.synergy0 (mon.0) 5499 : cluster [INF] osd.11 [v2:216.106.44.203:6814/4107,v1:216.106.44.203:6815/4107] boot
|
|
2019-07-11 11:07:30.862970 mon.synergy0 (mon.0) 5501 : cluster [INF] osd.5 [v2:216.106.44.203:6801/4101,v1:216.106.44.203:6805/4101] boot
|
|
2019-07-11 11:07:33.965839 mon.synergy0 (mon.0) 5507 : cluster [INF] osd.21 [v2:216.106.44.203:6806/4116,v1:216.106.44.203:6807/4116] boot
|
|
2019-07-11 11:07:48.622653 mon.synergy0 (mon.0) 5518 : cluster [INF] osd.22 [v2:216.106.44.203:6809/4073,v1:216.106.44.203:6813/4073] boot
|
|
2019-07-11 11:07:51.699800 mon.synergy0 (mon.0) 5650 : cluster [INF] osd.23 [v2:216.106.44.203:6800/4111,v1:216.106.44.203:6802/4111] boot
|
|
2019-07-11 11:07:54.798298 mon.synergy0 (mon.0) 5657 : cluster [INF] osd.20 [v2:216.106.44.203:6811/4084,v1:216.106.44.203:6812/4084] boot
|
|
2019-07-11 11:07:57.843653 mon.synergy0 (mon.0) 5663 : cluster [INF] osd.9 [v2:216.106.44.200:6802/18872,v1:216.106.44.200:6803/18872] boot
|
|
2019-07-11 11:07:58.863949 mon.synergy0 (mon.0) 5667 : cluster [INF] osd.15 [v2:216.106.44.200:6814/18902,v1:216.106.44.200:6815/18902] boot
|
|
2019-07-11 11:08:00.891546 mon.synergy0 (mon.0) 5675 : cluster [INF] osd.10 [v2:216.106.44.201:6818/27940,v1:216.106.44.201:6820/27940] boot
|
|
2019-07-11 11:08:11.929250 mon.synergy0 (mon.0) 5685 : cluster [INF] osd.13 [v2:216.106.44.201:6802/27946,v1:216.106.44.201:6803/27946] boot
|
|
2019-07-11 11:12:10.233077 mon.synergy0 (mon.0) 5795 : cluster [INF] osd.10 [v2:216.106.44.201:6818/28366,v1:216.106.44.201:6820/28366] boot
|
|
2019-07-11 11:12:13.249309 mon.synergy0 (mon.0) 5801 : cluster [INF] osd.15 [v2:216.106.44.200:6814/19285,v1:216.106.44.200:6815/19285] boot
|
|
2019-07-11 11:12:14.285750 mon.synergy0 (mon.0) 5807 : cluster [INF] osd.13 [v2:216.106.44.201:6802/28364,v1:216.106.44.201:6803/28364] boot
|
|
2019-07-11 11:12:14.285892 mon.synergy0 (mon.0) 5808 : cluster [INF] osd.9 [v2:216.106.44.200:6802/19278,v1:216.106.44.200:6803/19278] boot
|
|
root@synergy0:/var/log/ceph# ping backup2
|
|
PING backup2.socket.net (216.106.44.36) 56(84) bytes of data.
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=1 ttl=64 time=0.336 ms
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=2 ttl=64 time=0.313 ms
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=3 ttl=64 time=0.283 ms
|
|
^C
|
|
--- backup2.socket.net ping statistics ---
|
|
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
|
|
rtt min/avg/max/mdev = 0.283/0.310/0.336/0.029 ms
|
|
root@synergy0:/var/log/ceph# ^C
|
|
root@synergy0:/var/log/ceph# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 3m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
384 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
2 active+clean+scrubbing+deep
|
|
1 active+undersized
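Reading this status: 4 of the 24 OSDs are down but still "in", so their data has not been rebalanced yet, and the 53 "down" PGs presumably had every copy on the stopped OSDs. The reported percentages are straight ratios and check out (a sketch):
|
awk 'BEGIN { printf "degraded %.3f%%, inactive %.3f%%\n", 266666/3104528*100, 53/537*100 }'
# -> degraded 8.590%, inactive 9.870%, matching the HEALTH_WARN summary above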
|
|
|
|
root@synergy0:/var/log/ceph# history | grep start
|
|
74 service collectd start
|
|
137 service nagios-nrpe-server restart
|
|
150 service nagios-nrpe-server restart
|
|
162 service collectd restart
|
|
169 service collectd restart
|
|
175 service collectd restart
|
|
314 grep start_boot *.log
|
|
316 grep start_boot *.log
|
|
321 history | grep start
|
|
root@synergy0:/var/log/ceph# history | grep osd
|
|
202 grep osd syslog
|
|
259 ceph osd ls
|
|
260 ceph osd tree
|
|
291 ceph osd down osd.3
|
|
292 sudo systemctl stop ceph-osd@3
|
|
293 ceph osd rm osd.3
|
|
294 ceph osd crush remove osd.3
|
|
295 ceph auth del osd.3
|
|
298 dmsetup remove ceph--da809eb4--ab05--46d0--8ec8--2dd969bed20c-osd--block--5ac261bf--2968--473a--bed7--a73d7f0afccb
|
|
315 ceph osd tree
|
|
322 history | grep osd
|
|
root@synergy0:/var/log/ceph# sudo systemctl stop ceph-osd@9
|
|
root@synergy0:/var/log/ceph# sudo systemctl stop ceph-osd@15
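Both crash-looping OSDs on synergy0 (osd.9 and osd.15) are now stopped deliberately so systemd stops respawning them. To confirm which OSDs the cluster currently sees as down (a sketch; the `down` filter assumes a Nautilus-or-later CLI):
|
ceph osd tree down    # print only the OSDs currently marked down, with their hosts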
|
|
root@synergy0:/var/log/ceph# exit
|
|
logout
|
|
Connection to synergy0 closed.
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy1
|
|
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)
|
|
|
|
* Documentation: https://help.ubuntu.com
|
|
* Management: https://landscape.canonical.com
|
|
* Support: https://ubuntu.com/advantage
|
|
|
|
66 packages can be updated.
|
|
51 updates are security updates.
|
|
|
|
New release '18.04.2 LTS' available.
|
|
Run 'do-release-upgrade' to upgrade to it.
|
|
|
|
|
|
Last login: Thu Jul 11 09:25:45 2019 from 216.106.0.188
|
|
root@synergy1:~# sudo systemctl stop ceph-osd@10
|
|
root@synergy1:~# sudo systemctl stop ceph-osd@13
|
|
root@synergy1:~# exit
|
|
logout
|
|
Connection to synergy1 closed.
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy0
|
|
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)
|
|
|
|
* Documentation: https://help.ubuntu.com
|
|
* Management: https://landscape.canonical.com
|
|
* Support: https://ubuntu.com/advantage
|
|
|
|
75 packages can be updated.
|
|
51 updates are security updates.
|
|
|
|
New release '18.04.2 LTS' available.
|
|
Run 'do-release-upgrade' to upgrade to it.
|
|
|
|
|
|
Last login: Thu Jul 11 11:04:55 2019 from 216.106.0.188
|
|
root@synergy0:~# ceph 0s
|
|
no valid command found; 10 closest matches:
|
|
osd pool create <poolname> <int[0-]> {<int[0-]>} {replicated|erasure} {<erasure_code_profile>} {<rule>} {<int>} {<int>} {<int[0-]>} {<int[0-]>} {<float[0.0-1.0]>}
|
|
osd pool rmsnap <poolname> <snap>
|
|
osd pool ls {detail}
|
|
osd blacklist clear
|
|
osd pool mksnap <poolname> <snap>
|
|
mon set-rank <name> <int>
|
|
mon feature set <feature_name> {--yes-i-really-mean-it}
|
|
mon feature ls {--with-value}
|
|
mon rm <name>
|
|
mon add <name> <IPaddr[:port]>
|
|
Error EINVAL: invalid command
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+undersized
|
|
1 active+clean+scrubbing+deep
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+clean+scrubbing+deep
|
|
1 active+undersized
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+clean+scrubbing+deep
|
|
1 active+undersized
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+clean+scrubbing+deep
|
|
1 active+undersized
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+undersized
|
|
1 active+clean+scrubbing+deep
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+undersized
|
|
1 active+clean+scrubbing+deep
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+undersized
|
|
1 active+clean+scrubbing+deep
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+clean+scrubbing+deep
|
|
1 active+undersized
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+clean+scrubbing+deep
|
|
1 active+undersized
|
|
|
|
root@synergy0:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
4 osds down
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266666/3104528 objects degraded (8.590%), 90 pgs degraded, 91 pgs undersized
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 6m), 24 in (since 2h)
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.55M objects, 5.9 TiB
|
|
usage: 12 TiB used, 15 TiB / 27 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266666/3104528 objects degraded (8.590%)
|
|
385 active+clean
|
|
90 active+undersized+degraded
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+clean+scrubbing+deep
|
|
1 active+undersized
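These repeated `ceph -s` polls show an essentially frozen picture: with the four OSDs stopped but still "in", nothing moves except one deep scrub finishing (384 -> 385 active+clean, 2 -> 1 scrubbing+deep). Polling by hand can be replaced with watch (a minor convenience, not a Ceph command):
|
watch -n 5 ceph -s    # re-run the status every 5 seconds in place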
|
|
|
|
root@synergy0:~# sudo systemctl start ceph-osd@15
|
|
Job for ceph-osd@15.service failed. See "systemctl status ceph-osd@15.service" and "journalctl -xe" for details.
|
|
root@synergy0:~# sudo systemctl start ceph-osd@9
|
|
Job for ceph-osd@9.service failed. See "systemctl status ceph-osd@9.service" and "journalctl -xe" for details.
|
|
root@synergy0:~# exit
|
|
logout
|
|
Connection to synergy0 closed.
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy1
|
|
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)
|
|
|
|
* Documentation: https://help.ubuntu.com
|
|
* Management: https://landscape.canonical.com
|
|
* Support: https://ubuntu.com/advantage
|
|
|
|
66 packages can be updated.
|
|
51 updates are security updates.
|
|
|
|
New release '18.04.2 LTS' available.
|
|
Run 'do-release-upgrade' to upgrade to it.
|
|
|
|
|
|
Last login: Thu Jul 11 11:21:47 2019 from 216.106.0.188
|
|
root@synergy1:~# sudo systemctl start ceph-osd@13
|
|
Job for ceph-osd@13.service failed. See "systemctl status ceph-osd@13.service" and "journalctl -xe" for details.
|
|
root@synergy1:~# sudo systemctl start ceph-osd@10
|
|
Job for ceph-osd@10.service failed. See "systemctl status ceph-osd@10.service" and "journalctl -xe" for details.
|
|
root@synergy1:~# systemctl status ceph-osd@13.service
|
|
● ceph-osd@13.service - Ceph object storage daemon osd.13
|
|
Loaded: loaded (/lib/systemd/system/ceph-osd@.service; enabled-runtime; vendor preset: enabled)
|
|
Active: failed (Result: start-limit-hit) since Thu 2019-07-11 11:16:03 CDT; 7min ago
|
|
Process: 28364 ExecStart=/usr/bin/ceph-osd -f --cluster ${CLUSTER} --id %i --setuser ceph --setgroup ceph (code=killed, signal=ABRT)
|
|
Process: 28346 ExecStartPre=/usr/lib/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i (code=exited, status=0/SUCCESS)
|
|
Main PID: 28364 (code=killed, signal=ABRT)
|
|
|
|
Jul 11 11:16:03 synergy1 systemd[1]: ceph-osd@13.service: Service hold-off time over, scheduling restart.
|
|
Jul 11 11:16:03 synergy1 systemd[1]: Stopped Ceph object storage daemon osd.13.
|
|
Jul 11 11:16:03 synergy1 systemd[1]: ceph-osd@13.service: Start request repeated too quickly.
|
|
Jul 11 11:16:03 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.13.
|
|
Jul 11 11:16:03 synergy1 systemd[1]: ceph-osd@13.service: Unit entered failed state.
|
|
Jul 11 11:16:03 synergy1 systemd[1]: ceph-osd@13.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:21:54 synergy1 systemd[1]: Stopped Ceph object storage daemon osd.13.
|
|
Jul 11 11:23:24 synergy1 systemd[1]: ceph-osd@13.service: Start request repeated too quickly.
|
|
Jul 11 11:23:24 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.13.
|
|
Jul 11 11:23:24 synergy1 systemd[1]: ceph-osd@13.service: Failed with result 'start-limit-hit'.
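`start-limit-hit` is systemd's rate limiter, not the underlying fault: the last real failure was the ceph-osd process dying on SIGABRT (`code=killed, signal=ABRT` above), and after several rapid respawns systemd refuses further start requests until the counter is cleared. The usual sequence is to reset the counter and read the daemon's own log before retrying (a sketch):
|
systemctl reset-failed ceph-osd@13.service   # clear the start-limit counter
tail -n 200 /var/log/ceph/ceph-osd.13.log    # look for the assert/abort behind the SIGABRT
systemctl start ceph-osd@13.service          # retry once the cause is understood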
|
|
root@synergy1:~# sudo systemctl start ceph-osd@10
|
|
Job for ceph-osd@10.service failed. See "systemctl status ceph-osd@10.service" and "journalctl -xe" for details.
|
|
root@synergy1:~# sudo systemctl start ceph-osd@13
|
|
Job for ceph-osd@13.service failed. See "systemctl status ceph-osd@13.service" and "journalctl -xe" for details.
|
|
root@synergy1:~# systemctl status ceph-osd@13.service
|
|
● ceph-osd@13.service - Ceph object storage daemon osd.13
|
|
Loaded: loaded (/lib/systemd/system/ceph-osd@.service; enabled-runtime; vendor preset: enabled)
|
|
Active: failed (Result: start-limit-hit) since Thu 2019-07-11 11:16:03 CDT; 10min ago
|
|
Process: 28364 ExecStart=/usr/bin/ceph-osd -f --cluster ${CLUSTER} --id %i --setuser ceph --setgroup ceph (code=killed, signal=ABRT)
|
|
Process: 28346 ExecStartPre=/usr/lib/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i (code=exited, status=0/SUCCESS)
|
|
Main PID: 28364 (code=killed, signal=ABRT)
|
|
|
|
Jul 11 11:16:03 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.13.
|
|
Jul 11 11:16:03 synergy1 systemd[1]: ceph-osd@13.service: Unit entered failed state.
|
|
Jul 11 11:16:03 synergy1 systemd[1]: ceph-osd@13.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:21:54 synergy1 systemd[1]: Stopped Ceph object storage daemon osd.13.
|
|
Jul 11 11:23:24 synergy1 systemd[1]: ceph-osd@13.service: Start request repeated too quickly.
|
|
Jul 11 11:23:24 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.13.
|
|
Jul 11 11:23:24 synergy1 systemd[1]: ceph-osd@13.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:26:06 synergy1 systemd[1]: ceph-osd@13.service: Start request repeated too quickly.
|
|
Jul 11 11:26:06 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.13.
|
|
Jul 11 11:26:06 synergy1 systemd[1]: ceph-osd@13.service: Failed with result 'start-limit-hit'.
|
|
root@synergy1:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 266082/3068000 objects degraded (8.673%), 88 pgs degraded
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 10m), 20 in (since 19s); 89 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.53M objects, 5.8 TiB
|
|
usage: 10 TiB used, 14 TiB / 24 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
266082/3068000 objects degraded (8.673%)
|
|
388 active+clean
|
|
88 active+undersized+degraded+remapped+backfilling
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+undersized+remapped+backfilling
|
|
|
|
io:
|
|
recovery: 381 MiB/s, 95 objects/s
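Between the previous status and this one the monitors marked the four down OSDs "out" (note "20 in (since 19s)"), so CRUSH remapped their PGs and backfill started at roughly 380 MiB/s; the "4 osds down" warning disappears because a down+out OSD no longer counts against placement. The auto-out delay is controlled by mon_osd_down_out_interval, 600 seconds by default (a sketch; `ceph config get` assumes the Nautilus-style central config):
|
ceph config get mon mon_osd_down_out_interval   # how long a down OSD stays "in" before auto-out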
|
|
|
|
root@synergy1:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 265690/3068000 objects degraded (8.660%), 88 pgs degraded
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 10m), 20 in (since 24s); 88 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.53M objects, 5.8 TiB
|
|
usage: 10 TiB used, 14 TiB / 24 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
265690/3068000 objects degraded (8.660%)
|
|
388 active+clean
|
|
88 active+undersized+degraded+remapped+backfilling
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+undersized+remapped+backfilling
|
|
|
|
io:
|
|
recovery: 386 MiB/s, 96 objects/s
|
|
|
|
root@synergy1:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 265441/3068000 objects degraded (8.652%), 88 pgs degraded
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 10m), 20 in (since 26s); 88 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.53M objects, 5.8 TiB
|
|
usage: 10 TiB used, 14 TiB / 24 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
265441/3068000 objects degraded (8.652%)
|
|
388 active+clean
|
|
88 active+undersized+degraded+remapped+backfilling
|
|
53 down
|
|
7 stale+active+clean
|
|
1 active+undersized+remapped+backfilling
|
|
|
|
io:
|
|
recovery: 399 MiB/s, 99 objects/s
|
|
|
|
root@synergy1:~# ceph -s
|
|
cluster:
|
|
id: d5dc1b29-606d-4f50-b0ae-1cf9c08bb14a
|
|
health: HEALTH_WARN
|
|
Reduced data availability: 53 pgs inactive, 53 pgs down, 7 pgs stale
|
|
Degraded data redundancy: 263979/3068000 objects degraded (8.604%), 88 pgs degraded
|
|
|
|
services:
|
|
mon: 3 daemons, quorum synergy0,synergy1,synergy2 (age 21h)
|
|
mgr: synergy2(active, since 21h), standbys: synergy1, synergy0
|
|
mds: cephfs_filesystem:1 {0=synergy1=up:active} 1 up:standby
|
|
osd: 24 osds: 20 up (since 10m), 20 in (since 41s); 88 remapped pgs
|
|
rgw: 1 daemon active (synergy0)
|
|
|
|
data:
|
|
pools: 8 pools, 537 pgs
|
|
objects: 1.53M objects, 5.8 TiB
|
|
usage: 10 TiB used, 14 TiB / 24 TiB avail
|
|
pgs: 9.870% pgs not active
|
|
263979/3068000 objects degraded (8.604%)
|
|
389 active+clean
|
|
88 active+undersized+degraded+remapped+backfilling
|
|
53 down
|
|
7 stale+active+clean
|
|
|
|
io:
|
|
recovery: 398 MiB/s, 99 objects/s
|
|
|
|
root@synergy1:~# exit
|
|
logout
|
|
Connection to synergy1 closed.
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy0
|
|
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)
|
|
|
|
* Documentation: https://help.ubuntu.com
|
|
* Management: https://landscape.canonical.com
|
|
* Support: https://ubuntu.com/advantage
|
|
|
|
75 packages can be updated.
|
|
51 updates are security updates.
|
|
|
|
New release '18.04.2 LTS' available.
|
|
Run 'do-release-upgrade' to upgrade to it.
|
|
|
|
|
|
Last login: Thu Jul 11 11:22:00 2019 from 216.106.0.188
|
|
root@synergy0:~# ping backup2
|
|
PING backup2.socket.net (216.106.44.36) 56(84) bytes of data.
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=1 ttl=64 time=0.396 ms
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=2 ttl=64 time=0.381 ms
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=3 ttl=64 time=1.31 ms
|
|
^C
|
|
--- backup2.socket.net ping statistics ---
|
|
3 packets transmitted, 3 received, 0% packet loss, time 2001ms
|
|
rtt min/avg/max/mdev = 0.381/0.696/1.312/0.435 ms
|
|
root@synergy0:~# ^C
|
|
root@synergy0:~# sudo systemctl start ceph-osd@9
|
|
Job for ceph-osd@9.service failed. See "systemctl status ceph-osd@9.service" and "journalctl -xe" for details.
|
|
root@synergy0:~# sudo systemctl start ceph-osd@15
|
|
Job for ceph-osd@15.service failed. See "systemctl status ceph-osd@15.service" and "journalctl -xe" for details.
|
|
root@synergy0:~# systemctl status ceph-osd@9.service
|
|
● ceph-osd@9.service - Ceph object storage daemon osd.9
|
|
Loaded: loaded (/lib/systemd/system/ceph-osd@.service; enabled-runtime; vendor preset: enabled)
|
|
Active: failed (Result: start-limit-hit) since Thu 2019-07-11 11:16:03 CDT; 11min ago
|
|
Process: 19278 ExecStart=/usr/bin/ceph-osd -f --cluster ${CLUSTER} --id %i --setuser ceph --setgroup ceph (code=killed, signal=ABRT)
|
|
Process: 19261 ExecStartPre=/usr/lib/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i (code=exited, status=0/SUCCESS)
|
|
Main PID: 19278 (code=killed, signal=ABRT)
|
|
|
|
Jul 11 11:16:03 synergy0 systemd[1]: Failed to start Ceph object storage daemon osd.9.
|
|
Jul 11 11:16:03 synergy0 systemd[1]: ceph-osd@9.service: Unit entered failed state.
|
|
Jul 11 11:16:03 synergy0 systemd[1]: ceph-osd@9.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:21:31 synergy0 systemd[1]: Stopped Ceph object storage daemon osd.9.
|
|
Jul 11 11:23:04 synergy0 systemd[1]: ceph-osd@9.service: Start request repeated too quickly.
|
|
Jul 11 11:23:04 synergy0 systemd[1]: Failed to start Ceph object storage daemon osd.9.
|
|
Jul 11 11:23:04 synergy0 systemd[1]: ceph-osd@9.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:27:10 synergy0 systemd[1]: ceph-osd@9.service: Start request repeated too quickly.
|
|
Jul 11 11:27:10 synergy0 systemd[1]: Failed to start Ceph object storage daemon osd.9.
|
|
Jul 11 11:27:10 synergy0 systemd[1]: ceph-osd@9.service: Failed with result 'start-limit-hit'.
|
|
root@synergy0:~# ssh backup2
|
|
^C
|
|
root@synergy0:~# ping backup2
|
|
PING backup2.socket.net (216.106.44.36) 56(84) bytes of data.
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=1 ttl=64 time=0.339 ms
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=2 ttl=64 time=0.543 ms
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=3 ttl=64 time=0.452 ms
|
|
^C
|
|
--- backup2.socket.net ping statistics ---
|
|
3 packets transmitted, 3 received, 0% packet loss, time 1998ms
|
|
rtt min/avg/max/mdev = 0.339/0.444/0.543/0.086 ms
|
|
root@synergy0:~# systemctl status ceph-osd@9.service
|
|
● ceph-osd@9.service - Ceph object storage daemon osd.9
|
|
Loaded: loaded (/lib/systemd/system/ceph-osd@.service; enabled-runtime; vendor preset: enabled)
|
|
Active: failed (Result: start-limit-hit) since Thu 2019-07-11 11:16:03 CDT; 12min ago
|
|
Process: 19278 ExecStart=/usr/bin/ceph-osd -f --cluster ${CLUSTER} --id %i --setuser ceph --setgroup ceph (code=killed, signal=ABRT)
|
|
Process: 19261 ExecStartPre=/usr/lib/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i (code=exited, status=0/SUCCESS)
|
|
Main PID: 19278 (code=killed, signal=ABRT)
|
|
|
|
Jul 11 11:16:03 synergy0 systemd[1]: Failed to start Ceph object storage daemon osd.9.
|
|
Jul 11 11:16:03 synergy0 systemd[1]: ceph-osd@9.service: Unit entered failed state.
|
|
Jul 11 11:16:03 synergy0 systemd[1]: ceph-osd@9.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:21:31 synergy0 systemd[1]: Stopped Ceph object storage daemon osd.9.
|
|
Jul 11 11:23:04 synergy0 systemd[1]: ceph-osd@9.service: Start request repeated too quickly.
|
|
Jul 11 11:23:04 synergy0 systemd[1]: Failed to start Ceph object storage daemon osd.9.
|
|
Jul 11 11:23:04 synergy0 systemd[1]: ceph-osd@9.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:27:10 synergy0 systemd[1]: ceph-osd@9.service: Start request repeated too quickly.
|
|
Jul 11 11:27:10 synergy0 systemd[1]: Failed to start Ceph object storage daemon osd.9.
|
|
Jul 11 11:27:10 synergy0 systemd[1]: ceph-osd@9.service: Failed with result 'start-limit-hit'.
|
|
root@synergy0:~# sudo systemctl start ceph-osd@15
|
|
Job for ceph-osd@15.service failed. See "systemctl status ceph-osd@15.service" and "journalctl -xe" for details.
|
|
root@synergy0:~# sudo systemctl start ceph-osd@9
|
|
Job for ceph-osd@9.service failed. See "systemctl status ceph-osd@9.service" and "journalctl -xe" for details.
|
|
root@synergy0:~# ping backup2
|
|
PING backup2.socket.net (216.106.44.36) 56(84) bytes of data.
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=1 ttl=64 time=0.523 ms
|
|
64 bytes from backup2.socket.net (216.106.44.36): icmp_seq=2 ttl=64 time=0.526 ms
|
|
^C
|
|
--- backup2.socket.net ping statistics ---
|
|
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
|
|
rtt min/avg/max/mdev = 0.523/0.524/0.526/0.022 ms
|
|
root@synergy0:~# ^C
|
|
root@synergy0:~# exit
|
|
logout
|
|
Connection to synergy0 closed.
|
|
Amandas-MacBook-Pro:.ssh ekalk$ ssh root@synergy1
|
|
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-150-generic x86_64)
|
|
|
|
* Documentation: https://help.ubuntu.com
|
|
* Management: https://landscape.canonical.com
|
|
* Support: https://ubuntu.com/advantage
|
|
|
|
66 packages can be updated.
|
|
51 updates are security updates.
|
|
|
|
New release '18.04.2 LTS' available.
|
|
Run 'do-release-upgrade' to upgrade to it.
|
|
|
|
|
|
Last login: Thu Jul 11 11:23:17 2019 from 216.106.0.188
|
|
root@synergy1:~# sudo systemctl start ceph-osd@13
|
|
Job for ceph-osd@13.service failed. See "systemctl status ceph-osd@13.service" and "journalctl -xe" for details.
|
|
root@synergy1:~# sudo systemctl start ceph-osd@10
|
|
Job for ceph-osd@10.service failed. See "systemctl status ceph-osd@10.service" and "journalctl -xe" for details.
|
|
root@synergy1:~# systemctl status ceph-osd@10.service
|
|
● ceph-osd@10.service - Ceph object storage daemon osd.10
|
|
Loaded: loaded (/lib/systemd/system/ceph-osd@.service; enabled-runtime; vendor preset: enabled)
|
|
Active: failed (Result: start-limit-hit) since Thu 2019-07-11 11:16:03 CDT; 14min ago
|
|
Process: 28366 ExecStart=/usr/bin/ceph-osd -f --cluster ${CLUSTER} --id %i --setuser ceph --setgroup ceph (code=killed, signal=ABRT)
|
|
Process: 28347 ExecStartPre=/usr/lib/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i (code=exited, status=0/SUCCESS)
|
|
Main PID: 28366 (code=killed, signal=ABRT)
|
|
|
|
Jul 11 11:21:52 synergy1 systemd[1]: Stopped Ceph object storage daemon osd.10.
|
|
Jul 11 11:23:29 synergy1 systemd[1]: ceph-osd@10.service: Start request repeated too quickly.
|
|
Jul 11 11:23:29 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.10.
|
|
Jul 11 11:23:29 synergy1 systemd[1]: ceph-osd@10.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:25:56 synergy1 systemd[1]: ceph-osd@10.service: Start request repeated too quickly.
|
|
Jul 11 11:25:56 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.10.
|
|
Jul 11 11:25:56 synergy1 systemd[1]: ceph-osd@10.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:30:31 synergy1 systemd[1]: ceph-osd@10.service: Start request repeated too quickly.
|
|
Jul 11 11:30:31 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.10.
|
|
Jul 11 11:30:31 synergy1 systemd[1]: ceph-osd@10.service: Failed with result 'start-limit-hit'.
|
|
root@synergy1:~# journalctl -xe
|
|
Jul 11 11:30:02 synergy1 systemd[1]: Started User Manager for UID 0.
|
|
-- Subject: Unit user@0.service has finished start-up
|
|
-- Defined-By: systemd
|
|
-- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel
|
|
--
|
|
-- Unit user@0.service has finished starting up.
|
|
--
|
|
-- The start-up result is done.
|
|
Jul 11 11:30:13 synergy1 sudo[29960]: root : TTY=pts/8 ; PWD=/root ; USER=root ; COMMAND=/bin/systemctl start ceph-osd@13
|
|
Jul 11 11:30:13 synergy1 sudo[29960]: pam_unix(sudo:session): session opened for user root by root(uid=0)
|
|
Jul 11 11:30:13 synergy1 systemd[1]: ceph-osd@13.service: Start request repeated too quickly.
|
|
Jul 11 11:30:13 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.13.
|
|
-- Subject: Unit ceph-osd@13.service has failed
|
|
-- Defined-By: systemd
|
|
-- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel
|
|
--
|
|
-- Unit ceph-osd@13.service has failed.
|
|
--
|
|
-- The result is failed.
|
|
Jul 11 11:30:13 synergy1 systemd[1]: ceph-osd@13.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:30:13 synergy1 sudo[29960]: pam_unix(sudo:session): session closed for user root
|
|
Jul 11 11:30:31 synergy1 sudo[29963]: root : TTY=pts/8 ; PWD=/root ; USER=root ; COMMAND=/bin/systemctl start ceph-osd@10
|
|
Jul 11 11:30:31 synergy1 sudo[29963]: pam_unix(sudo:session): session opened for user root by root(uid=0)
|
|
Jul 11 11:30:31 synergy1 systemd[1]: ceph-osd@10.service: Start request repeated too quickly.
|
|
Jul 11 11:30:31 synergy1 systemd[1]: Failed to start Ceph object storage daemon osd.10.
|
|
-- Subject: Unit ceph-osd@10.service has failed
|
|
-- Defined-By: systemd
|
|
-- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel
|
|
--
|
|
-- Unit ceph-osd@10.service has failed.
|
|
--
|
|
-- The result is failed.
|
|
Jul 11 11:30:31 synergy1 systemd[1]: ceph-osd@10.service: Failed with result 'start-limit-hit'.
|
|
Jul 11 11:30:31 synergy1 sudo[29963]: pam_unix(sudo:session): session closed for user root
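The journal only holds the start-limit refusals and the sudo sessions around them; ceph-osd logs to /var/log/ceph rather than the journal, so the abort itself has to be read from the OSD log, which is the next step below. A narrower query (a sketch) confirms the journal has nothing more:
|
journalctl -u ceph-osd@10.service -n 50 --no-pager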
|
|
|
|
root@synergy1:~# ^C
|
|
root@synergy1:~# ^C
|
|
root@synergy1:~# cd /var/log/ceph
|
|
root@synergy1:/var/log/ceph# ls
|
|
ceph.audit.log ceph.log.6.gz ceph-mgr.synergy1.log.4.gz ceph-osd.10.log.2.gz ceph-osd.13.log.6.gz ceph-osd.1.log.4.gz ceph-osd.7.log.2.gz ceph-volume-systemd.log
|
|
ceph.audit.log.1.gz ceph.log.7.gz ceph-mgr.synergy1.log.5.gz ceph-osd.10.log.3.gz ceph-osd.13.log.7.gz ceph-osd.1.log.5.gz ceph-osd.7.log.3.gz ceph-volume-systemd.log.1.gz
|
|
ceph.audit.log.2.gz ceph-mds.synergy1.log ceph-mgr.synergy1.log.6.gz ceph-osd.10.log.4.gz ceph-osd.16.log ceph-osd.1.log.6.gz ceph-osd.7.log.4.gz ceph-volume-systemd.log.2.gz
|
|
ceph.audit.log.3.gz ceph-mds.synergy1.log.1.gz ceph-mgr.synergy1.log.7.gz ceph-osd.10.log.5.gz ceph-osd.16.log.1.gz ceph-osd.1.log.7.gz ceph-osd.7.log.5.gz ceph-volume-systemd.log.3.gz
|
|
ceph.audit.log.4.gz ceph-mds.synergy1.log.2.gz ceph-mon.synergy1.log ceph-osd.10.log.6.gz ceph-osd.16.log.2.gz ceph-osd.4.log ceph-osd.7.log.6.gz ceph-volume-systemd.log.4.gz
|
|
ceph.audit.log.5.gz ceph-mds.synergy1.log.3.gz ceph-mon.synergy1.log.1.gz ceph-osd.10.log.7.gz ceph-osd.16.log.3.gz ceph-osd.4.log.1.gz ceph-osd.7.log.7.gz ceph-volume-systemd.log.5.gz
|
|
ceph.audit.log.6.gz ceph-mds.synergy1.log.4.gz ceph-mon.synergy1.log.2.gz ceph-osd.12.log ceph-osd.16.log.4.gz ceph-osd.4.log.2.gz ceph-volume.log ceph-volume-systemd.log.6.gz
|
|
ceph.audit.log.7.gz ceph-mds.synergy1.log.5.gz ceph-mon.synergy1.log.3.gz ceph-osd.12.log.1.gz ceph-osd.16.log.5.gz ceph-osd.4.log.3.gz ceph-volume.log.1.gz ceph-volume-systemd.log.7.gz
|
|
ceph.log ceph-mds.synergy1.log.6.gz ceph-mon.synergy1.log.4.gz ceph-osd.13.log ceph-osd.16.log.6.gz ceph-osd.4.log.4.gz ceph-volume.log.2.gz
|
|
ceph.log.1.gz ceph-mds.synergy1.log.7.gz ceph-mon.synergy1.log.5.gz ceph-osd.13.log.1.gz ceph-osd.16.log.7.gz ceph-osd.4.log.5.gz ceph-volume.log.3.gz
|
|
ceph.log.2.gz ceph-mgr.synergy1.log ceph-mon.synergy1.log.6.gz ceph-osd.13.log.2.gz ceph-osd.1.log ceph-osd.4.log.6.gz ceph-volume.log.4.gz
|
|
ceph.log.3.gz ceph-mgr.synergy1.log.1.gz ceph-mon.synergy1.log.7.gz ceph-osd.13.log.3.gz ceph-osd.1.log.1.gz ceph-osd.4.log.7.gz ceph-volume.log.5.gz
|
|
ceph.log.4.gz ceph-mgr.synergy1.log.2.gz ceph-osd.10.log ceph-osd.13.log.4.gz ceph-osd.1.log.2.gz ceph-osd.7.log ceph-volume.log.6.gz
|
|
ceph.log.5.gz ceph-mgr.synergy1.log.3.gz ceph-osd.10.log.1.gz ceph-osd.13.log.5.gz ceph-osd.1.log.3.gz ceph-osd.7.log.1.gz ceph-volume.log.7.gz
|
|
root@synergy1:/var/log/ceph# less ceph-osd.10.log
|
|
|
|
1/ 5 auth
|
|
1/ 5 crypto
|
|
1/ 1 finisher
|
|
1/ 1 reserver
|
|
1/ 5 heartbeatmap
|
|
1/ 5 perfcounter
|
|
1/ 5 rgw
|
|
1/ 5 rgw_sync
|
|
1/10 civetweb
|
|
1/ 5 javaclient
|
|
1/ 5 asok
|
|
1/ 1 throttle
|
|
0/ 0 refs
|
|
1/ 5 xio
|
|
1/ 5 compressor
|
|
1/ 5 bluestore
|
|
1/ 5 bluefs
|
|
1/ 3 bdev
|
|
1/ 5 kstore
|
|
4/ 5 rocksdb
|
|
4/ 5 leveldb
|
|
4/ 5 memdb
|
|
1/ 5 kinetic
|
|
1/ 5 fuse
|
|
1/ 5 mgr
|
|
1/ 5 mgrc
|
|
1/ 5 dpdk
|
|
1/ 5 eventtrace
|
|
-2/-2 (syslog threshold)
|
|
-1/-1 (stderr threshold)
|
|
max_recent 10000
|
|
max_new 1000
|
|
log_file /var/log/ceph/ceph-osd.10.log
|
|
--- end dump of recent events ---
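This is the tail of a crash dump: when a Ceph daemon aborts it appends its debug-subsystem table and a replay of recent events to its log, ending in "--- end dump of recent events ---". The assertion or signal that actually killed osd.10 sits just above that dump, so it can be pulled out directly instead of paging (a sketch; the patterns assume standard Nautilus crash output):
|
grep -nE 'FAILED ceph_assert|\*\*\* Caught signal' /var/log/ceph/ceph-osd.10.log | tail -n 5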
|