Bug #45152
cephadm: data structure doesn't work for multiple CephFS
Status:
Rejected
Priority:
Normal
Assignee:
-
Category:
cephadm
Target version:
-
% Done:
0%
Source:
Tags:
Backport:
Regression:
No
Severity:
3 - minor
Reviewed:
Affected Versions:
ceph-qa-suite:
Pull request ID:
Crash signature (v1):
Crash signature (v2):
Description
It is possible to move an MDS from one FS to another:
OSD=3 MDS=3 MON=1 MGR=1 ../src/vstart.sh -n
- "a" is standby
- "b" is standby
- "c" active for fs_name "a"
$ ceph fs dump -f json | jq . dumped fsmap epoch 7 { "epoch": 7, "default_fscid": 1, "compat": {...}, "feature_flags": { "enable_multiple": false, "ever_enabled_multiple": false }, "standbys": [ { "gid": 4158, "name": "a", "rank": -1, "incarnation": 0, "state": "up:standby", "state_seq": 1, "addr": "172.17.0.1:6827/3976355531", "addrs": { "addrvec": [ { "type": "v2", "addr": "172.17.0.1:6826", "nonce": 3976355531 }, { "type": "v1", "addr": "172.17.0.1:6827", "nonce": 3976355531 } ] }, "join_fscid": -1, "export_targets": [], "features": 4540138292836696000, "flags": 0, "epoch": 2 }, { "gid": 4162, "name": "b", "rank": -1, "incarnation": 0, "state": "up:standby", "state_seq": 1, "addr": "172.17.0.1:6829/1109725282", "addrs": { "addrvec": [ { "type": "v2", "addr": "172.17.0.1:6828", "nonce": 1109725282 }, { "type": "v1", "addr": "172.17.0.1:6829", "nonce": 1109725282 } ] }, "join_fscid": -1, "export_targets": [], "features": 4540138292836696000, "flags": 0, "epoch": 3 } ], "filesystems": [ { "mdsmap": { "epoch": 6, "flags": 18, "ever_allowed_features": 0, "explicitly_allowed_features": 0, "created": "2020-04-20T18:34:54.748460+0200", "modified": "2020-04-20T18:34:57.420163+0200", "tableserver": 0, "root": 0, "session_timeout": 60, "session_autoclose": 300, "min_compat_client": "0 (unknown)", "max_file_size": 1099511627776, "last_failure": 0, "last_failure_osd_epoch": 0, "compat": { "compat": {}, "ro_compat": {}, "incompat": { "feature_1": "base v0.20", "feature_2": "client writeable ranges", "feature_3": "default file layouts on dirs", "feature_4": "dir inode in separate object", "feature_5": "mds uses versioned encoding", "feature_6": "dirfrag is stored in omap", "feature_8": "no anchor table", "feature_9": "file layout v2", "feature_10": "snaprealm v2" } }, "max_mds": 1, "in": [ 0 ], "up": { "mds_0": 4166 }, "failed": [], "damaged": [], "stopped": [], "info": { "gid_4166": { "gid": 4166, "name": "c", "rank": 0, "incarnation": 5, "state": "up:active", "state_seq": 4, 
"addr": "172.17.0.1:6831/1354869332", "addrs": { "addrvec": [ { "type": "v2", "addr": "172.17.0.1:6830", "nonce": 1354869332 }, { "type": "v1", "addr": "172.17.0.1:6831", "nonce": 1354869332 } ] }, "join_fscid": -1, "export_targets": [], "features": 4540138292836696000, "flags": 0 } }, "data_pools": [ 3 ], "metadata_pool": 2, "enabled": true, "fs_name": "a", "balancer": "", "standby_count_wanted": 1 }, "id": 1 } ] }
Create a new CephFS:
$ ceph fs flag set enable_multiple true $ ceph fs volume create newb Volume created successfully (no MDS daemons created)
Suddenly:
- "a" is standby
- "b" active for fs_name "newb"
- "c" active for fs_name "a"
$ ceph fs dump -f json | jq . { "epoch": 11, "default_fscid": 1, "compat": { "compat": {}, "ro_compat": {}, "incompat": { "feature_1": "base v0.20", "feature_2": "client writeable ranges", "feature_3": "default file layouts on dirs", "feature_4": "dir inode in separate object", "feature_5": "mds uses versioned encoding", "feature_6": "dirfrag is stored in omap", "feature_8": "no anchor table", "feature_9": "file layout v2", "feature_10": "snaprealm v2" } }, "feature_flags": { "enable_multiple": true, "ever_enabled_multiple": true }, "standbys": [ { "gid": 4158, "name": "a", "rank": -1, "incarnation": 0, "state": "up:standby", "state_seq": 1, "addr": "172.17.0.1:6827/3976355531", "addrs": { "addrvec": [ { "type": "v2", "addr": "172.17.0.1:6826", "nonce": 3976355531 }, { "type": "v1", "addr": "172.17.0.1:6827", "nonce": 3976355531 } ] }, "join_fscid": -1, "export_targets": [], "features": 4540138292836696000, "flags": 0, "epoch": 2 } ], "filesystems": [ { "mdsmap": { "epoch": 6, "flags": 18, "ever_allowed_features": 0, "explicitly_allowed_features": 0, "created": "2020-04-20T18:34:54.748460+0200", "modified": "2020-04-20T18:34:57.420163+0200", "tableserver": 0, "root": 0, "session_timeout": 60, "session_autoclose": 300, "min_compat_client": "0 (unknown)", "max_file_size": 1099511627776, "last_failure": 0, "last_failure_osd_epoch": 0, "compat": { "compat": {}, "ro_compat": {}, "incompat": { "feature_1": "base v0.20", "feature_2": "client writeable ranges", "feature_3": "default file layouts on dirs", "feature_4": "dir inode in separate object", "feature_5": "mds uses versioned encoding", "feature_6": "dirfrag is stored in omap", "feature_8": "no anchor table", "feature_9": "file layout v2", "feature_10": "snaprealm v2" } }, "max_mds": 1, "in": [ 0 ], "up": { "mds_0": 4166 }, "failed": [], "damaged": [], "stopped": [], "info": { "gid_4166": { "gid": 4166, "name": "c", "rank": 0, "incarnation": 5, "state": "up:active", "state_seq": 4, "addr": 
"172.17.0.1:6831/1354869332", "addrs": { "addrvec": [ { "type": "v2", "addr": "172.17.0.1:6830", "nonce": 1354869332 }, { "type": "v1", "addr": "172.17.0.1:6831", "nonce": 1354869332 } ] }, "join_fscid": -1, "export_targets": [], "features": 4540138292836696000, "flags": 0 } }, "data_pools": [ 3 ], "metadata_pool": 2, "enabled": true, "fs_name": "a", "balancer": "", "standby_count_wanted": 1 }, "id": 1 }, { "mdsmap": { "epoch": 10, "flags": 18, "ever_allowed_features": 0, "explicitly_allowed_features": 0, "created": "2020-04-20T18:35:57.580253+0200", "modified": "2020-04-20T18:35:59.602740+0200", "tableserver": 0, "root": 0, "session_timeout": 60, "session_autoclose": 300, "min_compat_client": "0 (unknown)", "max_file_size": 1099511627776, "last_failure": 0, "last_failure_osd_epoch": 0, "compat": { "compat": {}, "ro_compat": {}, "incompat": { "feature_1": "base v0.20", "feature_2": "client writeable ranges", "feature_3": "default file layouts on dirs", "feature_4": "dir inode in separate object", "feature_5": "mds uses versioned encoding", "feature_6": "dirfrag is stored in omap", "feature_8": "no anchor table", "feature_9": "file layout v2", "feature_10": "snaprealm v2" } }, "max_mds": 1, "in": [ 0 ], "up": { "mds_0": 4162 }, "failed": [], "damaged": [], "stopped": [], "info": { "gid_4162": { "gid": 4162, "name": "b", "rank": 0, "incarnation": 9, "state": "up:active", "state_seq": 20, "addr": "172.17.0.1:6829/1109725282", "addrs": { "addrvec": [ { "type": "v2", "addr": "172.17.0.1:6828", "nonce": 1109725282 }, { "type": "v1", "addr": "172.17.0.1:6829", "nonce": 1109725282 } ] }, "join_fscid": -1, "export_targets": [], "features": 4540138292836696000, "flags": 0 } }, "data_pools": [ 5 ], "metadata_pool": 4, "enabled": true, "fs_name": "newb", "balancer": "", "standby_count_wanted": 1 }, "id": 2 } ] }
promote "a" to "newb":
Suddenly:
- "a" active for fs_name "newb"
- "b" active for fs_name "newb"
- "c" active for fs_name "a"
➜ build git:(cephadm-total-scheduler) ✗ ceph fs set newb max_mds 2 ➜ build git:(cephadm-total-scheduler) ✗ ceph fs dump -f json | jq . dumped fsmap epoch 14 { "epoch": 14, "default_fscid": 1, "compat": { "compat": {}, "ro_compat": {}, "incompat": { "feature_1": "base v0.20", "feature_2": "client writeable ranges", "feature_3": "default file layouts on dirs", "feature_4": "dir inode in separate object", "feature_5": "mds uses versioned encoding", "feature_6": "dirfrag is stored in omap", "feature_8": "no anchor table", "feature_9": "file layout v2", "feature_10": "snaprealm v2" } }, "feature_flags": { "enable_multiple": true, "ever_enabled_multiple": true }, "standbys": [], "filesystems": [ { "mdsmap": { "epoch": 6, "flags": 18, "ever_allowed_features": 0, "explicitly_allowed_features": 0, "created": "2020-04-20T18:34:54.748460+0200", "modified": "2020-04-20T18:34:57.420163+0200", "tableserver": 0, "root": 0, "session_timeout": 60, "session_autoclose": 300, "min_compat_client": "0 (unknown)", "max_file_size": 1099511627776, "last_failure": 0, "last_failure_osd_epoch": 0, "compat": { "compat": {}, "ro_compat": {}, "incompat": { "feature_1": "base v0.20", "feature_2": "client writeable ranges", "feature_3": "default file layouts on dirs", "feature_4": "dir inode in separate object", "feature_5": "mds uses versioned encoding", "feature_6": "dirfrag is stored in omap", "feature_8": "no anchor table", "feature_9": "file layout v2", "feature_10": "snaprealm v2" } }, "max_mds": 1, "in": [ 0 ], "up": { "mds_0": 4166 }, "failed": [], "damaged": [], "stopped": [], "info": { "gid_4166": { "gid": 4166, "name": "c", "rank": 0, "incarnation": 5, "state": "up:active", "state_seq": 4, "addr": "172.17.0.1:6831/1354869332", "addrs": { "addrvec": [ { "type": "v2", "addr": "172.17.0.1:6830", "nonce": 1354869332 }, { "type": "v1", "addr": "172.17.0.1:6831", "nonce": 1354869332 } ] }, "join_fscid": -1, "export_targets": [], "features": 4540138292836696000, "flags": 0 } }, "data_pools": [ 
3 ], "metadata_pool": 2, "enabled": true, "fs_name": "a", "balancer": "", "standby_count_wanted": 1 }, "id": 1 }, { "mdsmap": { "epoch": 14, "flags": 18, "ever_allowed_features": 0, "explicitly_allowed_features": 0, "created": "2020-04-20T18:35:57.580253+0200", "modified": "2020-04-20T18:42:24.191360+0200", "tableserver": 0, "root": 0, "session_timeout": 60, "session_autoclose": 300, "min_compat_client": "0 (unknown)", "max_file_size": 1099511627776, "last_failure": 0, "last_failure_osd_epoch": 0, "compat": { "compat": {}, "ro_compat": {}, "incompat": { "feature_1": "base v0.20", "feature_2": "client writeable ranges", "feature_3": "default file layouts on dirs", "feature_4": "dir inode in separate object", "feature_5": "mds uses versioned encoding", "feature_6": "dirfrag is stored in omap", "feature_8": "no anchor table", "feature_9": "file layout v2", "feature_10": "snaprealm v2" } }, "max_mds": 2, "in": [ 0, 1 ], "up": { "mds_0": 4162, "mds_1": 4158 }, "failed": [], "damaged": [], "stopped": [], "info": { "gid_4158": { "gid": 4158, "name": "a", "rank": 1, "incarnation": 13, "state": "up:active", "state_seq": 116, "addr": "172.17.0.1:6827/3976355531", "addrs": { "addrvec": [ { "type": "v2", "addr": "172.17.0.1:6826", "nonce": 3976355531 }, { "type": "v1", "addr": "172.17.0.1:6827", "nonce": 3976355531 } ] }, "join_fscid": -1, "export_targets": [], "features": 4540138292836696000, "flags": 0 }, "gid_4162": { "gid": 4162, "name": "b", "rank": 0, "incarnation": 9, "state": "up:active", "state_seq": 20, "addr": "172.17.0.1:6829/1109725282", "addrs": { "addrvec": [ { "type": "v2", "addr": "172.17.0.1:6828", "nonce": 1109725282 }, { "type": "v1", "addr": "172.17.0.1:6829", "nonce": 1109725282 } ] }, "join_fscid": -1, "export_targets": [], "features": 4540138292836696000, "flags": 0 } }, "data_pools": [ 5 ], "metadata_pool": 4, "enabled": true, "fs_name": "newb", "balancer": "", "standby_count_wanted": 1 }, "id": 2 } ] }
History
#1 Updated by Sebastian Wagner almost 4 years ago
- Description updated (diff)
#2 Updated by Ramana Raja almost 4 years ago
I am unable to follow. Which data structure are you referring to, and how does it need to be corrected?
#3 Updated by Sebastian Wagner almost 4 years ago
- Status changed from New to Rejected
Turns out everything is fine — as long as you attach standby MDS daemons to a particular FS, which is the case. Yay!