[root@ceph01 cephx]# cat a.conf
[global]
auth_cluster_required = none
auth_service_required = none
auth_client_required = none
[root@ceph01 cephx]# cephadm --verbose bootstrap --mon-ip 172.31.242.65 --config a.conf --log-to-file --no-minimize-config
--------------------------------------------------------------------------------
cephadm ['--verbose', 'bootstrap', '--mon-ip', '172.31.242.65', '--config', 'a.conf', '--log-to-file', '--no-minimize-config']
/usr/bin/podman: 4.0.2
Verifying podman|docker is present...
/usr/bin/podman: 4.0.2
Verifying lvm2 is present...
Verifying time synchronization is in place...
systemctl: Failed to get unit file state for chrony.service: No such file or directory
systemctl: inactive
systemctl: enabled
systemctl: active
Unit chronyd.service is enabled and running
Repeating the final host check...
/usr/bin/podman: 4.0.2
podman (/usr/bin/podman) version 4.0.2 is present
systemctl is present
lvcreate is present
systemctl: Failed to get unit file state for chrony.service: No such file or directory
systemctl: inactive
systemctl: enabled
systemctl: active
Unit chronyd.service is enabled and running
Host looks OK
Cluster fsid: de25325c-c243-11ed-8232-98944902eb22
Acquiring lock 281473664037664 on /run/cephadm/de25325c-c243-11ed-8232-98944902eb22.lock
Lock 281473664037664 acquired on /run/cephadm/de25325c-c243-11ed-8232-98944902eb22.lock
Verifying IP 172.31.242.65 port 3300 ...
Verifying IP 172.31.242.65 port 6789 ...
Base mon IP(s) is [172.31.242.65:3300, 172.31.242.65:6789], mon addrv is [v2:172.31.242.65:3300,v1:172.31.242.65:6789]
/usr/sbin/ip: default via 172.31.242.1 dev enp125s0f0 proto static metric 100
/usr/sbin/ip: 172.31.242.0/24 dev enp125s0f0 proto kernel scope link src 172.31.242.65 metric 100
/usr/sbin/ip: ::1 dev lo proto kernel metric 256 pref medium
/usr/sbin/ip: fe80::/64 dev enp125s0f0 proto kernel metric 1024 pref medium
/usr/sbin/ip: 1: lo: mtu 65536 state UNKNOWN qlen 1000
/usr/sbin/ip:     inet6 ::1/128 scope host
/usr/sbin/ip:        valid_lft forever preferred_lft forever
/usr/sbin/ip: 2: enp125s0f0: mtu 1500 state UP qlen 1000
/usr/sbin/ip:     inet6 fe80::9a94:49ff:fe02:eb22/64 scope link noprefixroute
/usr/sbin/ip:        valid_lft forever preferred_lft forever
Mon IP `172.31.242.65` is in CIDR network `172.31.242.0/24`
Mon IP `172.31.242.65` is in CIDR network `172.31.242.0/24`
Inferred mon public CIDR from local network configuration ['172.31.242.0/24', '172.31.242.0/24']
Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
Pulling container image quay.io/ceph/ceph:v16...
/usr/bin/podman: Trying to pull quay.io/ceph/ceph:v16...
/usr/bin/podman: Getting image source signatures
/usr/bin/podman: Copying blob sha256:e6afd1b78f971e7a793f64a5bf30f49ca503cc42e6e97330b2b8c6b560093b05
/usr/bin/podman: Copying blob sha256:06f289a63f1283fb3874abf875544b408549430151d56556784138133ea0cbc1
/usr/bin/podman: Copying blob sha256:98f1c0aac0d9aa81c574df1fa8cf09765c960ce90d481c3af1df290b26f64e26
/usr/bin/podman: Copying blob sha256:72b21c60b1d8418a3f543bc6df6a746b08badd2c537e84e82fefd4c3deb70788
/usr/bin/podman: Copying config sha256:2e079a719b0f7afba3d8df75cea2bb5cfb5ca5d8bd6cac808549b1a10f7e4d9b
/usr/bin/podman: Writing manifest to image destination
/usr/bin/podman: Storing signatures
/usr/bin/podman: 2e079a719b0f7afba3d8df75cea2bb5cfb5ca5d8bd6cac808549b1a10f7e4d9b
ceph: ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)
Ceph version: ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)
Extracting ceph user uid/gid from container image...
stat: 167 167
Creating initial keys...
/usr/bin/ceph-authtool: AQACMxBkIl3GGBAAu14WdlS81eMLOKCpZWjiHA==
/usr/bin/ceph-authtool: AQACMxBkHWu1NRAAFScP4YOuLyp9QVBCt/ZZzw==
/usr/bin/ceph-authtool: AQADMxBkNiRIFRAAGkw+RsLqGeAOktKDqalmcQ==
Creating initial monmap...
/usr/bin/monmaptool: /usr/bin/monmaptool: monmap file /tmp/monmap
/usr/bin/monmaptool: /usr/bin/monmaptool: set fsid to de25325c-c243-11ed-8232-98944902eb22
/usr/bin/monmaptool: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
monmaptool for ceph01 [v2:172.31.242.65:3300,v1:172.31.242.65:6789] on
/usr/bin/monmaptool: monmap file /tmp/monmap
/usr/bin/monmaptool: set fsid to de25325c-c243-11ed-8232-98944902eb22
/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
Creating mon...
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.403+0000 ffff84be0040 0 set uid:gid to 167:167 (ceph:ceph)
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.403+0000 ffff84be0040 1 imported monmap:
/usr/bin/ceph-mon: epoch 0
/usr/bin/ceph-mon: fsid de25325c-c243-11ed-8232-98944902eb22
/usr/bin/ceph-mon: last_changed 2023-03-14T08:40:35.862926+0000
/usr/bin/ceph-mon: created 2023-03-14T08:40:35.862926+0000
/usr/bin/ceph-mon: min_mon_release 0 (unknown)
/usr/bin/ceph-mon: election_strategy: 1
/usr/bin/ceph-mon: 0: [v2:172.31.242.65:3300/0,v1:172.31.242.65:6789/0] mon.ceph01
/usr/bin/ceph-mon:
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.403+0000 ffff84be0040 0
/usr/bin/ceph-mon: set fsid to de25325c-c243-11ed-8232-98944902eb22
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: RocksDB version: 6.8.1
/usr/bin/ceph-mon:
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Git sha rocksdb_build_git_sha:@0@
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Compile date Jan 24 2023
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: DB SUMMARY
/usr/bin/ceph-mon:
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: SST files in /var/lib/ceph/mon/ceph-ceph01/store.db dir, Total Num: 0, files:
/usr/bin/ceph-mon:
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-ceph01/store.db:
/usr/bin/ceph-mon:
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.error_if_exists: 0
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.create_if_missing: 1
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb:
Options.paranoid_checks: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.env: 0xaaaad2b403e8 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.fs: Posix File System /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.info_log: 0xaaab061d7280 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_file_opening_threads: 16 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.statistics: (nil) /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.use_fsync: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_log_file_size: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_manifest_file_size: 1073741824 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.log_file_time_to_roll: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.keep_log_file_num: 1000 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.recycle_log_file_num: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.allow_fallocate: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.allow_mmap_reads: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.allow_mmap_writes: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.use_direct_reads: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.create_missing_column_families: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.db_log_dir: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.wal_dir: /var/lib/ceph/mon/ceph-ceph01/store.db /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.table_cache_numshardbits: 6 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_subcompactions: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_background_flushes: -1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.WAL_ttl_seconds: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.WAL_size_limit_MB: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_write_batch_group_size_bytes: 1048576 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.manifest_preallocation_size: 4194304 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.is_fd_close_on_exec: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.advise_random_on_open: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.db_write_buffer_size: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.write_buffer_manager: 0xaaab061df680 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.access_hint_on_compaction_start: 1 /usr/bin/ceph-mon: debug 
2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.new_table_reader_for_compaction_inputs: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.random_access_max_buffer_size: 1048576 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.use_adaptive_mutex: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.rate_limiter: (nil) /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.wal_recovery_mode: 2 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.enable_thread_tracking: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.enable_pipelined_write: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.unordered_write: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.allow_concurrent_memtable_write: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.enable_write_thread_adaptive_yield: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.write_thread_max_yield_usec: 100 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.write_thread_slow_yield_usec: 3 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.row_cache: None /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.wal_filter: None /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.avoid_flush_during_recovery: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.allow_ingest_behind: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.preserve_deletes: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.two_write_queues: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.manual_wal_flush: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.atomic_flush: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.avoid_unnecessary_blocking_io: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.persist_stats_to_disk: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.write_dbid_to_manifest: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.log_readahead_size: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.sst_file_checksum_func: Unknown /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_background_jobs: 2 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_background_compactions: -1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.avoid_flush_during_shutdown: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.writable_file_max_buffer_size: 1048576 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.delayed_write_rate : 16777216 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 
4 rocksdb: Options.max_total_wal_size: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.stats_dump_period_sec: 600 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.stats_persist_period_sec: 600 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.stats_history_buffer_size: 1048576 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_open_files: -1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.bytes_per_sync: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.wal_bytes_per_sync: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.strict_bytes_per_sync: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_readahead_size: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Compression algorithms supported: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: kZSTDNotFinalCompression supported: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: kZSTD supported: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: kXpressCompression supported: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: kLZ4HCCompression supported: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: kLZ4Compression supported: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: kBZip2Compression supported: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: kZlibCompression supported: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: kSnappyCompression supported: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Fast CRC32 supported: Not supported on Arm64 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: [db_impl/db_impl_open.cc:273] Creating manifest 1 /usr/bin/ceph-mon: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: [version_set.cc:4413] Recovering from manifest file: /var/lib/ceph/mon/ceph-ceph01/store.db/MANIFEST-000001 /usr/bin/ceph-mon: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: [column_family.cc:552] --------------- Options for column family [default]: /usr/bin/ceph-mon: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.comparator: leveldb.BytewiseComparator /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.merge_operator: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_filter: None /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_filter_factory: None /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.memtable_factory: SkipListFactory /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.table_factory: BlockBasedTable /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory 
(0xaaab060edc48) /usr/bin/ceph-mon: cache_index_and_filter_blocks: 1 /usr/bin/ceph-mon: cache_index_and_filter_blocks_with_high_priority: 0 /usr/bin/ceph-mon: pin_l0_filter_and_index_blocks_in_cache: 0 /usr/bin/ceph-mon: pin_top_level_index_and_filter: 1 /usr/bin/ceph-mon: index_type: 0 /usr/bin/ceph-mon: data_block_index_type: 0 /usr/bin/ceph-mon: index_shortening: 1 /usr/bin/ceph-mon: data_block_hash_table_util_ratio: 0.750000 /usr/bin/ceph-mon: hash_index_allow_collision: 1 /usr/bin/ceph-mon: checksum: 1 /usr/bin/ceph-mon: no_block_cache: 0 /usr/bin/ceph-mon: block_cache: 0xaaab06124d10 /usr/bin/ceph-mon: block_cache_name: BinnedLRUCache /usr/bin/ceph-mon: block_cache_options: /usr/bin/ceph-mon: capacity : 536870912 /usr/bin/ceph-mon: num_shard_bits : 4 /usr/bin/ceph-mon: strict_capacity_limit : 0 /usr/bin/ceph-mon: high_pri_pool_ratio: 0.000 /usr/bin/ceph-mon: block_cache_compressed: (nil) /usr/bin/ceph-mon: persistent_cache: (nil) /usr/bin/ceph-mon: block_size: 4096 /usr/bin/ceph-mon: block_size_deviation: 10 /usr/bin/ceph-mon: block_restart_interval: 16 /usr/bin/ceph-mon: index_block_restart_interval: 1 /usr/bin/ceph-mon: metadata_block_size: 4096 /usr/bin/ceph-mon: partition_filters: 0 /usr/bin/ceph-mon: use_delta_encoding: 1 /usr/bin/ceph-mon: filter_policy: rocksdb.BuiltinBloomFilter /usr/bin/ceph-mon: whole_key_filtering: 1 /usr/bin/ceph-mon: verify_compression: 0 /usr/bin/ceph-mon: read_amp_bytes_per_bit: 0 /usr/bin/ceph-mon: format_version: 2 /usr/bin/ceph-mon: enable_index_compression: 1 /usr/bin/ceph-mon: block_align: 0 /usr/bin/ceph-mon: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.write_buffer_size: 33554432 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_write_buffer_number: 2 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compression: NoCompression /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.bottommost_compression: Disabled /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.prefix_extractor: nullptr /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.num_levels: 7 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.min_write_buffer_number_to_merge: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_write_buffer_number_to_maintain: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_write_buffer_size_to_maintain: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.bottommost_compression_opts.window_bits: -14 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.bottommost_compression_opts.level: 32767 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.bottommost_compression_opts.strategy: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.bottommost_compression_opts.enabled: false 
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compression_opts.window_bits: -14 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compression_opts.level: 32767 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compression_opts.strategy: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compression_opts.max_dict_bytes: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compression_opts.enabled: false /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.level0_file_num_compaction_trigger: 4 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.level0_slowdown_writes_trigger: 20 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.level0_stop_writes_trigger: 36 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.target_file_size_base: 67108864 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.target_file_size_multiplier: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_bytes_for_level_base: 268435456 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.level_compaction_dynamic_level_bytes: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_sequential_skip_in_iterations: 8 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_compaction_bytes: 1677721600 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.arena_block_size: 4194304 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.rate_limit_delay_max_milliseconds: 100 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.disable_auto_compactions: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: 
Options.compaction_style: kCompactionStyleLevel /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_pri: kMinOverlappingRatio /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_options_universal.size_ratio: 1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_options_universal.min_merge_width: 2 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_options_universal.compression_size_percent: -1 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.compaction_options_fifo.allow_compaction: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.table_properties_collectors: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.inplace_update_support: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.inplace_update_num_locks: 10000 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.memtable_whole_key_filtering: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.memtable_huge_page_size: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.bloom_locality: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.max_successive_merges: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.optimize_filters_for_hits: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.paranoid_file_checks: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.force_consistency_checks: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.report_bg_io_stats: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.ttl: 2592000 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: Options.periodic_compaction_seconds: 0 /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: [version_set.cc:4568] Recovered from manifest file:/var/lib/ceph/mon/ceph-ceph01/store.db/MANIFEST-000001 succeeded,manifest_file_number is 1, next_file_number is 3, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 /usr/bin/ceph-mon: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.407+0000 ffff84be0040 4 rocksdb: [version_set.cc:4577] Column family [default] (ID 0), log number is 0 /usr/bin/ceph-mon: /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.411+0000 ffff84be0040 4 rocksdb: DB pointer 0xaaab061ed800 /usr/bin/ceph-mon: debug 
2023-03-14T08:40:36.411+0000 ffff71f87080 4 rocksdb: [db_impl/db_impl.cc:850] ------- DUMPING STATS ------- /usr/bin/ceph-mon: debug 2023-03-14T08:40:36.411+0000 ffff71f87080 4 rocksdb: [db_impl/db_impl.cc:851] /usr/bin/ceph-mon: ** DB Stats ** /usr/bin/ceph-mon: Uptime(secs): 0.0 total, 0.0 interval /usr/bin/ceph-mon: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s /usr/bin/ceph-mon: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s /usr/bin/ceph-mon: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent /usr/bin/ceph-mon: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s /usr/bin/ceph-mon: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s /usr/bin/ceph-mon: Interval stall: 00:00:0.000 H:M:S, 0.0 percent /usr/bin/ceph-mon: /usr/bin/ceph-mon: ** Compaction Stats [default] ** /usr/bin/ceph-mon: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop /usr/bin/ceph-mon: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- /usr/bin/ceph-mon: Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 /usr/bin/ceph-mon: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 /usr/bin/ceph-mon: /usr/bin/ceph-mon: ** Compaction Stats [default] ** /usr/bin/ceph-mon: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop /usr/bin/ceph-mon: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- /usr/bin/ceph-mon: Uptime(secs): 0.0 total, 0.0 interval /usr/bin/ceph-mon: Flush(GB): cumulative 0.000, interval 0.000 /usr/bin/ceph-mon: AddFile(GB): cumulative 0.000, interval 0.000 /usr/bin/ceph-mon: AddFile(Total Files): cumulative 0, interval 0 /usr/bin/ceph-mon: AddFile(L0 Files): cumulative 0, interval 0 /usr/bin/ceph-mon: AddFile(Keys): cumulative 0, interval 0 /usr/bin/ceph-mon: Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds /usr/bin/ceph-mon: Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds /usr/bin/ceph-mon: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count /usr/bin/ceph-mon: /usr/bin/ceph-mon: ** File Read Latency Histogram By Level [default] ** /usr/bin/ceph-mon: /usr/bin/ceph-mon: ** Compaction Stats [default] ** /usr/bin/ceph-mon: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop /usr/bin/ceph-mon: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- /usr/bin/ceph-mon: Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 /usr/bin/ceph-mon: Int 0/0 0.00 KB 0.0 0.0 0.0 
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0
/usr/bin/ceph-mon:
/usr/bin/ceph-mon: ** Compaction Stats [default] **
/usr/bin/ceph-mon: Priority    Files   Size     Score Read(GB)  Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop
/usr/bin/ceph-mon: -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
/usr/bin/ceph-mon: Uptime(secs): 0.0 total, 0.0 interval
/usr/bin/ceph-mon: Flush(GB): cumulative 0.000, interval 0.000
/usr/bin/ceph-mon: AddFile(GB): cumulative 0.000, interval 0.000
/usr/bin/ceph-mon: AddFile(Total Files): cumulative 0, interval 0
/usr/bin/ceph-mon: AddFile(L0 Files): cumulative 0, interval 0
/usr/bin/ceph-mon: AddFile(Keys): cumulative 0, interval 0
/usr/bin/ceph-mon: Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
/usr/bin/ceph-mon: Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
/usr/bin/ceph-mon: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
/usr/bin/ceph-mon:
/usr/bin/ceph-mon: ** File Read Latency Histogram By Level [default] **
/usr/bin/ceph-mon:
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.411+0000 ffff84be0040 4 rocksdb: [db_impl/db_impl.cc:397] Shutdown: canceling all background work
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.411+0000 ffff84be0040 4 rocksdb: [db_impl/db_impl.cc:573] Shutdown complete
/usr/bin/ceph-mon: debug 2023-03-14T08:40:36.411+0000 ffff84be0040 0
/usr/bin/ceph-mon: created monfs at /var/lib/ceph/mon/ceph-ceph01 for mon.ceph01
create mon.ceph01 on
systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-de25325c-c243-11ed-8232-98944902eb22.target → /etc/systemd/system/ceph-de25325c-c243-11ed-8232-98944902eb22.target.
systemctl: Created symlink /etc/systemd/system/ceph.target.wants/ceph-de25325c-c243-11ed-8232-98944902eb22.target → /etc/systemd/system/ceph-de25325c-c243-11ed-8232-98944902eb22.target.
systemctl: Failed to reset failed state of unit ceph-de25325c-c243-11ed-8232-98944902eb22@mon.ceph01.service: Unit ceph-de25325c-c243-11ed-8232-98944902eb22@mon.ceph01.service not loaded.
systemctl: Created symlink /etc/systemd/system/ceph-de25325c-c243-11ed-8232-98944902eb22.target.wants/ceph-de25325c-c243-11ed-8232-98944902eb22@mon.ceph01.service → /etc/systemd/system/ceph-de25325c-c243-11ed-8232-98944902eb22@.service.
systemctl: disabled
systemctl: inactive
firewalld.service is not enabled
Not possible to enable service . firewalld.service is not available
Waiting for mon to start...
Waiting for mon...
/usr/bin/ceph:   cluster:
/usr/bin/ceph:     id:     de25325c-c243-11ed-8232-98944902eb22
/usr/bin/ceph:     health: HEALTH_OK
/usr/bin/ceph:
/usr/bin/ceph:   services:
/usr/bin/ceph:     mon: 1 daemons, quorum ceph01 (age 0.556392s)
/usr/bin/ceph:     mgr: no daemons active
/usr/bin/ceph:     osd: 0 osds: 0 up, 0 in
/usr/bin/ceph:
/usr/bin/ceph:   data:
/usr/bin/ceph:     pools:   0 pools, 0 pgs
/usr/bin/ceph:     objects: 0 objects, 0 B
/usr/bin/ceph:     usage:   0 B used, 0 B / 0 B avail
/usr/bin/ceph:     pgs:
/usr/bin/ceph:
mon is available
Setting mon public_network to 172.31.242.0/24
Wrote config to /etc/ceph/ceph.conf
Wrote keyring to /etc/ceph/ceph.client.admin.keyring
Creating mgr...
Verifying port 9283 ...
systemctl: Failed to reset failed state of unit ceph-de25325c-c243-11ed-8232-98944902eb22@mgr.ceph01.hztcnu.service: Unit ceph-de25325c-c243-11ed-8232-98944902eb22@mgr.ceph01.hztcnu.service not loaded.
systemctl: Created symlink /etc/systemd/system/ceph-de25325c-c243-11ed-8232-98944902eb22.target.wants/ceph-de25325c-c243-11ed-8232-98944902eb22@mgr.ceph01.hztcnu.service → /etc/systemd/system/ceph-de25325c-c243-11ed-8232-98944902eb22@.service.
systemctl: disabled
systemctl: inactive
firewalld.service is not enabled
Not possible to enable service . firewalld.service is not available
systemctl: disabled
systemctl: inactive
firewalld.service is not enabled
Not possible to open ports <[9283]>. firewalld.service is not available
Waiting for mgr to start...
Waiting for mgr...
/usr/bin/ceph:
/usr/bin/ceph: {
/usr/bin/ceph:     "fsid": "de25325c-c243-11ed-8232-98944902eb22",
/usr/bin/ceph:     "health": {
/usr/bin/ceph:         "status": "HEALTH_OK",
/usr/bin/ceph:         "checks": {},
/usr/bin/ceph:         "mutes": []
/usr/bin/ceph:     },
/usr/bin/ceph:     "election_epoch": 3,
/usr/bin/ceph:     "quorum": [
/usr/bin/ceph:         0
/usr/bin/ceph:     ],
/usr/bin/ceph:     "quorum_names": [
/usr/bin/ceph:         "ceph01"
/usr/bin/ceph:     ],
/usr/bin/ceph:     "quorum_age": 3,
/usr/bin/ceph:     "monmap": {
/usr/bin/ceph:         "epoch": 1,
/usr/bin/ceph:         "min_mon_release_name": "pacific",
/usr/bin/ceph:         "num_mons": 1
/usr/bin/ceph:     },
/usr/bin/ceph:     "osdmap": {
/usr/bin/ceph:         "epoch": 1,
/usr/bin/ceph:         "num_osds": 0,
/usr/bin/ceph:         "num_up_osds": 0,
/usr/bin/ceph:         "osd_up_since": 0,
/usr/bin/ceph:         "num_in_osds": 0,
/usr/bin/ceph:         "osd_in_since": 0,
/usr/bin/ceph:         "num_remapped_pgs": 0
/usr/bin/ceph:     },
/usr/bin/ceph:     "pgmap": {
/usr/bin/ceph:         "pgs_by_state": [],
/usr/bin/ceph:         "num_pgs": 0,
/usr/bin/ceph:         "num_pools": 0,
/usr/bin/ceph:         "num_objects": 0,
/usr/bin/ceph:         "data_bytes": 0,
/usr/bin/ceph:         "bytes_used": 0,
/usr/bin/ceph:         "bytes_avail": 0,
/usr/bin/ceph:         "bytes_total": 0
/usr/bin/ceph:     },
/usr/bin/ceph:     "fsmap": {
/usr/bin/ceph:         "epoch": 1,
/usr/bin/ceph:         "by_rank": [],
/usr/bin/ceph:         "up:standby": 0
/usr/bin/ceph:     },
/usr/bin/ceph:     "mgrmap": {
/usr/bin/ceph:         "available": false,
/usr/bin/ceph:         "num_standbys": 0,
/usr/bin/ceph:         "modules": [
/usr/bin/ceph:             "iostat",
/usr/bin/ceph:             "nfs",
/usr/bin/ceph:             "restful"
/usr/bin/ceph:         ],
/usr/bin/ceph:         "services": {}
/usr/bin/ceph:     },
/usr/bin/ceph:     "servicemap": {
/usr/bin/ceph:         "epoch": 1,
/usr/bin/ceph:         "modified": "2023-03-14T08:40:37.818278+0000",
/usr/bin/ceph:         "services": {}
/usr/bin/ceph:     },
/usr/bin/ceph:     "progress_events": {}
/usr/bin/ceph: }
mgr not available, waiting (1/15)...
......
mgr not available, waiting (2/15)...
......
mgr not available, waiting (3/15)...
......
mgr not available, waiting (4/15)...
/usr/bin/ceph:
/usr/bin/ceph: {
/usr/bin/ceph:     "fsid": "de25325c-c243-11ed-8232-98944902eb22",
/usr/bin/ceph:     "health": {
/usr/bin/ceph:         "status": "HEALTH_OK",
/usr/bin/ceph:         "checks": {},
/usr/bin/ceph:         "mutes": []
/usr/bin/ceph:     },
/usr/bin/ceph:     "election_epoch": 3,
/usr/bin/ceph:     "quorum": [
/usr/bin/ceph:         0
/usr/bin/ceph:     ],
/usr/bin/ceph:     "quorum_names": [
/usr/bin/ceph:         "ceph01"
/usr/bin/ceph:     ],
/usr/bin/ceph:     "quorum_age": 15,
/usr/bin/ceph:     "monmap": {
/usr/bin/ceph:         "epoch": 1,
/usr/bin/ceph:         "min_mon_release_name": "pacific",
/usr/bin/ceph:         "num_mons": 1
/usr/bin/ceph:     },
/usr/bin/ceph:     "osdmap": {
/usr/bin/ceph:         "epoch": 1,
/usr/bin/ceph:         "num_osds": 0,
/usr/bin/ceph:         "num_up_osds": 0,
/usr/bin/ceph:         "osd_up_since": 0,
/usr/bin/ceph:         "num_in_osds": 0,
/usr/bin/ceph:         "osd_in_since": 0,
/usr/bin/ceph:         "num_remapped_pgs": 0
/usr/bin/ceph:     },
/usr/bin/ceph:     "pgmap": {
/usr/bin/ceph:         "pgs_by_state": [],
/usr/bin/ceph:         "num_pgs": 0,
/usr/bin/ceph:         "num_pools": 0,
/usr/bin/ceph:         "num_objects": 0,
/usr/bin/ceph:         "data_bytes": 0,
/usr/bin/ceph:         "bytes_used": 0,
/usr/bin/ceph:         "bytes_avail": 0,
/usr/bin/ceph:         "bytes_total": 0
/usr/bin/ceph:     },
/usr/bin/ceph:     "fsmap": {
/usr/bin/ceph:         "epoch": 1,
/usr/bin/ceph:         "by_rank": [],
/usr/bin/ceph:         "up:standby": 0
/usr/bin/ceph:     },
/usr/bin/ceph:     "mgrmap": {
/usr/bin/ceph:         "available": true,
/usr/bin/ceph:         "num_standbys": 0,
/usr/bin/ceph:         "modules": [
/usr/bin/ceph:             "iostat",
/usr/bin/ceph:             "nfs",
/usr/bin/ceph:             "restful"
/usr/bin/ceph:         ],
/usr/bin/ceph:         "services": {}
/usr/bin/ceph:     },
/usr/bin/ceph:     "servicemap": {
/usr/bin/ceph:         "epoch": 1,
/usr/bin/ceph:         "modified": "2023-03-14T08:40:37.818278+0000",
/usr/bin/ceph:         "services": {}
/usr/bin/ceph:     },
/usr/bin/ceph:     "progress_events": {}
/usr/bin/ceph: }
mgr is available
Enabling cephadm module...
/usr/bin/ceph: {
/usr/bin/ceph:     "epoch": 5,
/usr/bin/ceph:     "available": true,
/usr/bin/ceph:     "active_name": "ceph01.hztcnu",
/usr/bin/ceph:     "num_standby": 0
/usr/bin/ceph: }
Waiting for the mgr to restart...
Waiting for mgr epoch 5...
/usr/bin/ceph: {
/usr/bin/ceph:     "mgrmap_epoch": 7,
/usr/bin/ceph:     "initialized": true
/usr/bin/ceph: }
mgr epoch 5 is available
Setting orchestrator backend to cephadm...
/usr/bin/ceph: value unchanged
Generating ssh key...
Adding key to root@localhost authorized_keys...
Adding host ceph01...
/usr/bin/ceph: Added host 'ceph01' with addr '172.31.242.65'
Deploying mon service with default placement...
/usr/bin/ceph: Scheduled mon update...
Deploying mgr service with default placement...
/usr/bin/ceph: Scheduled mgr update...
Deploying crash service with default placement...
/usr/bin/ceph: Scheduled crash update...
Deploying prometheus service with default placement...
/usr/bin/ceph: Scheduled prometheus update...
Deploying grafana service with default placement...
/usr/bin/ceph: Scheduled grafana update...
Deploying node-exporter service with default placement...
/usr/bin/ceph: Scheduled node-exporter update...
Deploying alertmanager service with default placement...
/usr/bin/ceph: Scheduled alertmanager update...
Enabling the dashboard module...
/usr/bin/ceph: {
/usr/bin/ceph:     "epoch": 9,
/usr/bin/ceph:     "available": true,
/usr/bin/ceph:     "active_name": "ceph01.hztcnu",
/usr/bin/ceph:     "num_standby": 0
/usr/bin/ceph: }
Waiting for the mgr to restart...
Waiting for mgr epoch 9...
/usr/bin/ceph: {
/usr/bin/ceph:     "mgrmap_epoch": 11,
/usr/bin/ceph:     "initialized": true
/usr/bin/ceph: }
mgr epoch 9 is available
......
Fetching dashboard port number...
/usr/bin/ceph: 8443
systemctl: disabled
systemctl: inactive
firewalld.service is not enabled
Not possible to open ports <[8443]>. firewalld.service is not available
Ceph Dashboard is now available at:
Enabling autotune for osd_memory_target
/usr/bin/ceph: set mgr/dashboard/cluster/status
You can access the Ceph CLI as following in case of multi-cluster or non-default config:

        sudo /usr/sbin/cephadm shell --fsid de25325c-c243-11ed-8232-98944902eb22 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring

Or, if you are only running a single cluster on this host:

        sudo /usr/sbin/cephadm shell

Please consider enabling telemetry to help improve Ceph:

        ceph telemetry on

For more information see:

        https://docs.ceph.com/en/pacific/mgr/telemetry/

Bootstrap complete.

[ceph: root@ceph01 /]# ceph orch host add ceph02 172.31.242.66
Added host 'ceph02' with addr '172.31.242.66'

[ceph: root@ceph01 /]# ceph orch ls
NAME           PORTS        RUNNING  REFRESHED  AGE  PLACEMENT
alertmanager   ?:9093,9094      1/1  119s ago   3m   count:1
crash                           1/2  119s ago   3m   *
grafana        ?:3000           1/1  119s ago   3m   count:1
mgr                             1/2  119s ago   3m   count:2
mon                             1/5  119s ago   3m   count:5
node-exporter  ?:9100           1/2  119s ago   3m   *
prometheus     ?:9095           1/1  119s ago   3m   count:1

[ceph: root@ceph01 /]# ceph -s
  cluster:
    id:     de25325c-c243-11ed-8232-98944902eb22
    health: HEALTH_WARN
            Failed to place 1 daemon(s)
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum ceph01 (age 3m)
    mgr: ceph01.hztcnu(active, since 2m)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:

[ceph: root@ceph01 /]# ceph health detail
HEALTH_WARN Failed to place 1 daemon(s); 1 failed cephadm daemon(s); OSD count 0 < osd_pool_default_size 3
[WRN] CEPHADM_DAEMON_PLACE_FAIL: Failed to place 1 daemon(s)
    Failed while placing mon.ceph02 on ceph02: auth get failed: failed to find mon. in keyring retval: -2
[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
    daemon mgr.ceph02.weayqf on ceph02 is in error state
[WRN] TOO_FEW_OSDS: OSD count 0 < osd_pool_default_size 3
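For reference, a few follow-up checks that could narrow this down, run from the same cephadm shell. This is only a sketch based on the error above, not a verified fix: it assumes the admin keyring written at bootstrap (/etc/ceph/ceph.client.admin.keyring) is still usable with cephx disabled and that the default cephadm cluster log channel is enabled. The host name ceph02 comes from the transcript; everything else is stock ceph CLI.

# Were the auth_* options from a.conf actually assimilated into the cluster config?
ceph config get mon auth_cluster_required

# The placement error suggests cephadm ran "auth get" for the mon. key and got ENOENT (-2).
# Check whether a mon. entity exists at all when cephx is disabled:
ceph auth get mon.
ceph auth ls

# cephadm's cluster log channel usually records the exact step that failed on ceph02:
ceph log last cephadm

# State of the daemons cephadm tried to place on the new host:
ceph orch ps ceph02

If mon. really is absent, one avenue worth testing is re-enabling cephx (setting the three auth_*_required options back to cephx and restarting the daemons) before adding more hosts, since it is not clear from this log whether cephadm on Pacific can expand the monitor set with cephx fully disabled.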